diff --git a/.config/nextest.toml b/.config/nextest.toml
new file mode 100644
index 0000000000000..c111b0abeb7a2
--- /dev/null
+++ b/.config/nextest.toml
@@ -0,0 +1,124 @@
+# This is the default config used by nextest. It is embedded in the binary at
+# build time. It may be used as a template for .config/nextest.toml.
+
+[store]
+# The directory under the workspace root at which nextest-related files are
+# written. Profile-specific storage is currently written to dir/<profile-name>.
+dir = "target/nextest"
+
+# This section defines the default nextest profile. Custom profiles are layered
+# on top of the default profile.
+[profile.default]
+# "retries" defines the number of times a test should be retried. If set to a
+# non-zero value, tests that succeed on a subsequent attempt will be marked as
+# non-flaky. Can be overridden through the `--retries` option.
+# Examples
+# * retries = 3
+# * retries = { backoff = "fixed", count = 2, delay = "1s" }
+# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" }
+retries = 0
+
+# The number of threads to run tests with. Supported values are either an integer or
+# the string "num-cpus". Can be overridden through the `--test-threads` option.
+test-threads = "num-cpus"
+
+# The number of threads required for each test. This is generally used in overrides to
+# mark certain tests as heavier than others. However, it can also be set as a global parameter.
+threads-required = 1
+
+# Show these test statuses in the output.
+#
+# The possible values this can take are:
+# * none: no output
+# * fail: show failed (including exec-failed) tests
+# * retry: show flaky and retried tests
+# * slow: show slow tests
+# * pass: show passed tests
+# * skip: show skipped tests (most useful for CI)
+# * all: all of the above
+#
+# Each value includes all the values above it; for example, "slow" includes
+# failed and retried tests.
+#
+# Can be overridden through the `--status-level` flag.
+status-level = "pass"
+
+# Similar to status-level, show these test statuses at the end of the run.
+final-status-level = "flaky"
+
+# "failure-output" defines when standard output and standard error for failing tests are produced.
+# Accepted values are
+# * "immediate": output failures as soon as they happen
+# * "final": output failures at the end of the test run
+# * "immediate-final": output failures as soon as they happen and at the end of
+#   the test run; combination of "immediate" and "final"
+# * "never": don't output failures at all
+#
+# For large test suites and CI it is generally useful to use "immediate-final".
+#
+# Can be overridden through the `--failure-output` option.
+failure-output = "immediate"
+
+# "success-output" controls production of standard output and standard error on success. This should
+# generally be set to "never".
+success-output = "never"
+
+# Cancel the test run on the first failure. For CI runs, consider setting this
+# to false.
+fail-fast = true
+
+# Treat a test that takes longer than the configured 'period' as slow, and print a message.
+# See <https://nexte.st/book/slow-tests> for more information.
+#
+# Optional: specify the parameter 'terminate-after' with a non-zero integer,
+# which will cause slow tests to be terminated after the specified number of
+# periods have passed.
+# Example: slow-timeout = { period = "60s", terminate-after = 2 }
+slow-timeout = { period = "60s" }
+
+# Treat a test as leaky if after the process is shut down, standard output and standard error
+# aren't closed within this duration.
+#
+# This usually happens in case of a test that creates a child process and lets it inherit those
+# handles, but doesn't clean the child process up (especially when it fails).
+#
+# See <https://nexte.st/book/leaky-tests> for more information.
+leak-timeout = "100ms"
+
+[profile.default.junit]
+# Output a JUnit report into the given file inside 'store.dir/<profile-name>'.
+# If unspecified, JUnit is not written out.
+
+# path = "junit.xml"
+
+# The name of the top-level "report" element in JUnit report. If aggregating
+# reports across different test runs, it may be useful to provide separate names
+# for each report.
+report-name = "nextest-run"
+
+# Whether standard output and standard error for passing tests should be stored in the JUnit report.
+# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
+store-success-output = false
+
+# Whether standard output and standard error for failing tests should be stored in the JUnit report.
+# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
+#
+# Note that if a description can be extracted from the output, it is always stored in the
+# <description> element.
+store-failure-output = true
+
+# This profile is activated if MIRI_SYSROOT is set.
+[profile.default-miri]
+# Miri tests take up a lot of memory, so only run 1 test at a time by default.
+test-threads = 1
+
+# Mutual exclusion of tests with `cargo build` invocation as a lock to avoid multiple
+# simultaneous invocations clobbering each other.
+[test-groups]
+serial-integration = { max-threads = 1 }
+
+# Running UI tests sequentially
+# More info can be found here: https://github.com/paritytech/ci_cd/issues/754
+[[profile.default.overrides]]
+filter = 'test(/(^ui$|_ui|ui_)/)'
+test-group = 'serial-integration'
diff --git a/.gitattributes b/.gitattributes
index 1762f1a04d802..a77c52fccdb77 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,4 @@
 Cargo.lock linguist-generated=true
 /.gitlab-ci.yml filter=ci-prettier
 /scripts/ci/gitlab/pipeline/*.yml filter=ci-prettier
+frame/**/src/weights.rs linguist-generated=true
diff --git a/.github/pr-custom-review.yml b/.github/pr-custom-review.yml
index 769ac1e110d33..3620f7e770674 100644
--- a/.github/pr-custom-review.yml
+++ b/.github/pr-custom-review.yml
@@ -9,7 +9,7 @@ rules:
     condition:
       include: .*
       # excluding files from 'CI team' rules
-      exclude: ^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*
+      exclude: ^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*|^\.config/nextest.toml
     min_approvals: 2
     teams:
       - core-devs
@@ -17,7 +17,7 @@ rules:
   - name: CI team
     check_type: changed_files
     condition:
-      include: ^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*
+      include: ^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*|^\.config/nextest.toml
     min_approvals: 2
     teams:
       - ci
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 44f98db3bf185..59d42936e1769 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -49,15 +49,17 @@ variables:
   DOCKER_OS: "debian:stretch"
   ARCH: "x86_64"
   CI_IMAGE: "paritytech/ci-linux:production"
-  BUILDAH_IMAGE: "quay.io/buildah/stable:v1.27"
+  BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29"
+  BUILDAH_COMMAND: "buildah --storage-driver overlay2"
+  RELENG_SCRIPTS_BRANCH: "master"
   RUSTY_CACHIER_SINGLE_BRANCH: master
   RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true"
   RUSTY_CACHIER_COMPRESSION_METHOD: zstd
   NEXTEST_FAILURE_OUTPUT: immediate-final
   NEXTEST_SUCCESS_OUTPUT: final
-  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.37"
+  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.43"

-.shared-default: &shared-default
+default:
   retry:
     max: 2
     when:
       - runner_system_failure
       - unknown_failure
       -
api_failure cache: {} - -.default-pipeline-definitions: - default: - <<: *shared-default - interruptible: true - -.crate-publishing-pipeline-definitions: - default: - <<: *shared-default - # The crate-publishing pipeline defaults to `interruptible: false` so that we'll be able to - # reach and run the publishing jobs despite the "Auto-cancel redundant pipelines" CI setting. - # The setting is relevant because the crate-publishing pipeline runs on `master`, thus future - # pipelines on `master` (e.g. created for new commits or other schedules) might unintendedly - # cancel the publishing jobs or its dependencies before we get to actually publish the crates. - interruptible: false + interruptible: true .collect-artifacts: artifacts: @@ -105,6 +93,8 @@ variables: # $WASM_BUILD_WORKSPACE_HINT enables wasm-builder to find the Cargo.lock from within generated # packages - export WASM_BUILD_WORKSPACE_HINT="$PWD" + # ensure that RUSTFLAGS are set correctly + - echo $RUSTFLAGS .job-switcher: before_script: @@ -176,7 +166,7 @@ variables: # exclude cargo-check-benches from such runs .test-refs-check-benches: rules: - - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_IMAGE =~ /staging$/ + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "pipeline" && $CI_IMAGE =~ /staging$/ when: never - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" @@ -226,11 +216,6 @@ variables: .zombienet-refs: extends: .build-refs -.nightly-pipeline: - rules: - # this job runs only on nightly pipeline with the mentioned variable, against `master` branch - - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" - .crates-publishing-variables: variables: CRATESIO_CRATES_OWNER: parity-crate-owner @@ -256,23 +241,12 @@ variables: - artifacts/ variables: SPUB_TMP: artifacts + # disable timestamping for the crate publishing jobs, they leave stray child processes behind + # which don't interact well with the timestamping script + CI_DISABLE_TIMESTAMP: 1 #### stage: .pre -skip-if-draft: - extends: .kubernetes-env - variables: - CI_IMAGE: "paritytech/tools:latest" - stage: .pre - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - echo "Commit message is ${CI_COMMIT_MESSAGE}" - - echo "Ref is ${CI_COMMIT_REF_NAME}" - - echo "pipeline source is ${CI_PIPELINE_SOURCE}" - - ./scripts/ci/gitlab/skip_if_draft.sh - allow_failure: true - check-crates-publishing-pipeline: stage: .pre extends: @@ -285,6 +259,23 @@ check-crates-publishing-pipeline: https://github.com/paritytech/releng-scripts.git - ONLY_CHECK_PIPELINE=true ./releng-scripts/publish-crates +# By default our pipelines are interruptible, but some special pipelines shouldn't be interrupted: +# * multi-project pipelines such as the ones triggered by the scripts repo +# * the scheduled automatic-crate-publishing pipeline +# +# In those cases, we add an uninterruptible .pre job; once that one has started, +# the entire pipeline becomes uninterruptible +uninterruptible-pipeline: + extends: .kubernetes-env + variables: + CI_IMAGE: "paritytech/tools:latest" + stage: .pre + interruptible: false + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + - if: $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "automatic-crate-publishing" + script: "true" + include: # check jobs - scripts/ci/gitlab/pipeline/check.yml @@ -305,14 +296,8 @@ include: # pipeline is made uninterruptible to ensure that test jobs also get a chance to run to # completion, because the 
publishing jobs depends on them AS INTENDED: crates should not be # published before their source code is checked. - - local: scripts/ci/gitlab/crate-publishing-pipeline.yml - rules: - - if: $PIPELINE == "automatic-crate-publishing" - # For normal pipelines: run it with defaults + `interruptible: true` - - local: scripts/ci/gitlab/default-pipeline.yml - rules: - - if: $PIPELINE != "automatic-crate-publishing" - project: parity/infrastructure/ci_cd/shared + ref: v0.2 file: /common/timestamp.yml #### stage: notify diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index ba9ac0798844d..38bb4de26362f 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -15,9 +15,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for {{pallet}}. pub trait WeightInfo { diff --git a/Cargo.lock b/Cargo.lock index ac98c08228283..cbff1e45657f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -115,9 +115,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" dependencies = [ "aead 0.5.2", "aes 0.8.2", @@ -188,6 +188,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -398,6 +404,21 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" +[[package]] +name = "assert_cmd" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d6b683edf8d1119fe420a94f8a7e389239666aa72e65495d91c00462510151" +dependencies = [ + "anstyle", + "bstr", + "doc-comment", + "predicates 3.0.3", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + [[package]] name = "assert_matches" version = "1.5.0" @@ -444,6 +465,17 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-recursion" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.18", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -463,7 +495,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -474,7 +506,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -541,10 +573,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" [[package]] -name = "base58" +name = "base16ct" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] name = "base64" @@ -554,9 +586,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -647,7 +679,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -742,9 +774,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "bounded-collections" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3888522b497857eb606bf51695988dba7096941822c1bcf676e3a929a9ae7a0" +checksum = "07fbd1d11282a1eb134d3c3b7cf8ce213b5161c6e5f73fb1b98618482c606b64" dependencies = [ "log", "parity-scale-codec", @@ -760,11 +792,13 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", + "once_cell", + "regex-automata", "serde", ] @@ -779,9 +813,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -939,13 +973,13 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "fdbc37d37da9e5bce8173f3a41b71d9bf3c674deebbaceacd0ebdabde76efb03" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "time 0.1.45", "wasm-bindgen", @@ -1054,9 +1088,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.2.7" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d21f9bf1b425d2968943631ec91202fe5e837264063503708b83013f8fc938" +checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" dependencies = [ "clap_builder", "clap_derive", @@ -1065,27 +1099,27 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.2.7" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914c8c79fb560f238ef6429439a30023c862f7a28e688c58f7203f12b29970bd" +checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" dependencies = [ "anstream", "anstyle", "bitflags", - "clap_lex 0.4.1", + "clap_lex 0.5.0", "strsim", ] [[package]] name = "clap_derive" -version = "4.2.0" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" +checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -1099,9 +1133,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "codespan-reporting" @@ -1121,9 +1155,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "6.1.4" +version = "6.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7b787b0dc42e8111badfdbe4c3059158ccb2db8780352fa1b01e8ccf45cc4d" +checksum = "7e959d788268e3bf9d35ace83e81b124190378e4c91c9067524675e33394b8ba" dependencies = [ "strum", "strum_macros", @@ -1420,6 +1454,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "crypto-bigint" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1521,9 +1567,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" +checksum = "109308c20e8445959c2792e81871054c6a17e6976489a93d2769641a2ba5839c" dependencies = [ "cc", "cxxbridge-flags", @@ -1533,9 +1579,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" +checksum = "daf4c6755cdf10798b97510e0e2b3edb9573032bd9379de8fffa59d68165494f" dependencies = [ "cc", "codespan-reporting", @@ -1543,24 +1589,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "cxxbridge-flags" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" +checksum = "882074421238e84fe3b4c65d0081de34e5b323bf64555d3e61991f76eb64a7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" +checksum = "4a076022ece33e7686fb76513518e219cca4fce5750a8ae6d1ce6c0f48fd1af9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -1600,15 +1646,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" 
+checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1616,9 +1662,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1635,6 +1681,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der-parser" version = "7.0.0" @@ -1761,11 +1817,12 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] @@ -1819,7 +1876,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -1828,6 +1885,12 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "downcast" version = "0.11.0" @@ -1879,10 +1942,24 @@ version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", - "signature", + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + +[[package]] +name = "ecdsa" +version = "0.16.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428" +dependencies = [ + "der 0.7.6", + "digest 0.10.7", + "elliptic-curve 0.13.5", + "rfc6979 0.4.0", + "signature 2.1.0", + "spki 0.7.2", ] [[package]] @@ -1891,7 +1968,7 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ - "signature", + "signature 1.6.4", ] [[package]] @@ -1934,18 +2011,37 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct", - "crypto-bigint", - "der", - "digest 0.10.6", - "ff", + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", "generic-array 0.14.7", - "group", + "group 0.12.1", "hkdf", "pem-rfc7468", - "pkcs8", + "pkcs8 0.9.0", "rand_core 0.6.4", - "sec1", + "sec1 0.3.0", + "subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.2", + "digest 0.10.7", + "ff 0.13.0", + "generic-array 0.14.7", + "group 0.13.0", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sec1 0.7.2", "subtle", "zeroize", ] @@ -1979,7 +2075,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2118,6 +2214,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -2223,6 +2329,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fraction" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3027ae1df8d41b4bed2241c8fdad4acc1e7af60c8e17743534b545e77182d678" +dependencies = [ + "lazy_static", + "num", +] + [[package]] name = "fragile" version = "2.0.0" @@ -2264,7 +2380,7 @@ dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 4.2.7", + "clap 4.3.0", "comfy-table", "frame-benchmarking", "frame-support", @@ -2335,7 +2451,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 1.0.109", + "syn 2.0.18", "trybuild", ] @@ -2361,7 +2477,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.2.7", + "clap 4.3.0", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", @@ -2419,8 +2535,10 @@ dependencies = [ name = "frame-remote-externalities" version = "0.10.0-dev" dependencies = [ + "async-recursion", "frame-support", "futures", + "jsonrpsee", "log", "pallet-elections-phragmen", "parity-scale-codec", @@ -2466,7 +2584,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", - "sp-weights", + "sp-weights 4.0.0", "tt-call", ] @@ -2479,9 +2597,10 @@ dependencies = [ "derive-syn-parse", "frame-support-procedural-tools", "itertools 0.10.5", + "proc-macro-warning", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -2492,7 +2611,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -2501,7 +2620,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -2517,6 +2636,7 @@ dependencies = [ "rustversion", "scale-info", "serde", + "sp-api", "sp-arithmetic", "sp-core", "sp-io", @@ -2568,7 +2688,7 @@ dependencies = [ "sp-std", "sp-ver", "sp-version", - "sp-weights", + "sp-weights 4.0.0", "substrate-test-runtime-client", ] @@ -2710,7 +2830,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2799,6 +2919,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -2908,16 +3029,27 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "ff", + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.13.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff 0.13.0", "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -2940,9 +3072,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -3068,7 +3200,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -3188,6 +3320,7 @@ dependencies = [ "rustls-native-certs", "tokio", "tokio-rustls", + "webpki-roots", ] [[package]] @@ -3206,12 +3339,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -3364,9 +3496,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi 0.3.1", "libc", @@ -3444,9 +3576,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] @@ -3458,6 +3590,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d291e3a5818a2384645fd9756362e6d89cf0541b0b916fa7702ea4a9833608e" dependencies = [ "jsonrpsee-core", + "jsonrpsee-http-client", "jsonrpsee-proc-macros", "jsonrpsee-server", "jsonrpsee-types", @@ -3514,6 +3647,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-http-client" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" +dependencies = [ + "async-trait", + "hyper", + "hyper-rustls", + "jsonrpsee-core", + "jsonrpsee-types", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "jsonrpsee-proc-macros" version = "0.16.2" @@ -3577,13 +3729,14 @@ dependencies = [ [[package]] name = "k256" -version = "0.11.6" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" +checksum = 
"cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.7", + "elliptic-curve 0.13.5", + "once_cell", "sha2 0.10.6", ] @@ -3623,6 +3776,7 @@ dependencies = [ "log", "node-primitives", "pallet-alliance", + "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-authority-discovery", @@ -3730,9 +3884,9 @@ dependencies = [ [[package]] name = "kvdb-rocksdb" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2182b8219fee6bd83aacaab7344e840179ae079d5216aa4e249b4d704646a844" +checksum = "fe7a749456510c45f795e8b04a6a3e0976d0139213ecbf465843830ad55e2217" dependencies = [ "kvdb", "num_cpus", @@ -3796,9 +3950,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libp2p" @@ -3859,7 +4013,7 @@ dependencies = [ "prost-build", "rand 0.8.5", "rw-stream-sink", - "sec1", + "sec1 0.3.0", "sha2 0.10.6", "smallvec", "thiserror", @@ -4255,9 +4409,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.8.3+7.4.4" +version = "0.10.0+7.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "557b255ff04123fcc176162f56ed0c9cd42d8f357cf55b3fabeb60f7413741b3" +checksum = "0fe4d5874f5ff2bc616e55e8c6086d478fcda13faf9495768a4aa1c22042d30b" dependencies = [ "bindgen", "bzip2-sys", @@ -4369,9 +4523,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lite-json" @@ -4403,12 +4557,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "lru" @@ -4483,7 +4634,7 @@ dependencies = [ [[package]] name = "mangata-types" version = "0.1.0" -source = "git+https://github.com/mangata-finance/substrate?branch=mangata-dev#b711c0b17cc95cae27fd469f88cfd19343417b98" +source = "git+https://github.com/mangata-finance/substrate?branch=mangata-dev#afd05d5ed525572ed1b6d6a19aeac2465f552e1f" dependencies = [ "parity-scale-codec", "scale-info", @@ -4538,7 +4689,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4574,6 +4725,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + [[package]] name = "memoffset" version = "0.8.0" @@ -4636,14 +4796,13 @@ dependencies = [ [[package]] 
name = "mio" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "eebffdb73fe72e917997fad08bdbf31ac50b0fa91cec93e69a0662e4264d454c" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4697,7 +4856,7 @@ dependencies = [ "fragile", "lazy_static", "mockall_derive", - "predicates", + "predicates 2.1.5", "predicates-tree", ] @@ -4771,7 +4930,7 @@ dependencies = [ "blake2s_simd", "blake3", "core2", - "digest 0.10.6", + "digest 0.10.7", "multihash-derive", "sha2 0.10.6", "sha3", @@ -4937,6 +5096,20 @@ dependencies = [ "memoffset 0.6.5", ] +[[package]] +name = "nix" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +dependencies = [ + "bitflags", + "cfg-if", + "libc", + "memoffset 0.7.1", + "pin-utils", + "static_assertions", +] + [[package]] name = "node-primitives" version = "2.0.0" @@ -4953,7 +5126,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.2.7", + "clap 4.3.0", "generate-bags", "kitchensink-runtime", ] @@ -4962,7 +5135,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 4.2.7", + "clap 4.3.0", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -5070,6 +5243,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -5110,6 +5297,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.1" @@ -5129,7 +5327,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm 0.2.6", + "libm 0.2.7", ] [[package]] @@ -5183,9 +5381,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" [[package]] name = "oorandom" @@ -5214,7 +5412,7 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "orml-tokens" version = "0.4.1-dev" -source = "git+https://github.com/mangata-finance//open-runtime-module-library?branch=mangata-dev#1d6fbd1374eabcd016ada4253d81c316b1084228" +source = "git+https://github.com/mangata-finance//open-runtime-module-library?branch=mangata-dev#82fa856f8bf2815b998a758e7ce70958bec0c540" dependencies = [ "frame-benchmarking", "frame-support", @@ -5231,7 +5429,7 @@ dependencies = [ [[package]] name = "orml-traits" version = "0.4.1-dev" -source = 
"git+https://github.com/mangata-finance//open-runtime-module-library?branch=mangata-dev#1d6fbd1374eabcd016ada4253d81c316b1084228" +source = "git+https://github.com/mangata-finance//open-runtime-module-library?branch=mangata-dev#82fa856f8bf2815b998a758e7ce70958bec0c540" dependencies = [ "frame-support", "impl-trait-for-tuples", @@ -5240,6 +5438,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", + "sp-core", "sp-io", "sp-runtime", "sp-std", @@ -5249,7 +5448,7 @@ dependencies = [ [[package]] name = "orml-utilities" version = "0.4.1-dev" -source = "git+https://github.com/mangata-finance//open-runtime-module-library?branch=mangata-dev#1d6fbd1374eabcd016ada4253d81c316b1084228" +source = "git+https://github.com/mangata-finance//open-runtime-module-library?branch=mangata-dev#82fa856f8bf2815b998a758e7ce70958bec0c540" dependencies = [ "frame-support", "parity-scale-codec", @@ -5287,8 +5486,8 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] @@ -5298,8 +5497,8 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] @@ -5334,6 +5533,22 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-asset-rate" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-asset-tx-payment" version = "4.0.0-dev" @@ -5478,7 +5693,6 @@ dependencies = [ "scale-info", "sp-application-crypto", "sp-consensus-babe", - "sp-consensus-vrf", "sp-core", "sp-io", "sp-runtime", @@ -5704,7 +5918,7 @@ dependencies = [ "sp-runtime", "sp-std", "wasm-instrument 0.4.0", - "wasmi 0.20.0", + "wasmi 0.28.0", "wasmparser-nostd", "wat", ] @@ -5718,7 +5932,7 @@ dependencies = [ "scale-info", "sp-runtime", "sp-std", - "sp-weights", + "sp-weights 4.0.0", ] [[package]] @@ -5727,7 +5941,7 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -5757,8 +5971,6 @@ dependencies = [ "frame-support", "frame-system", "log", - "pallet-ranked-collective", - "pallet-salary", "parity-scale-codec", "scale-info", "sp-arithmetic", @@ -5788,6 +6000,22 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-dev-mode" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" @@ -6072,7 +6300,7 @@ dependencies = [ "sp-runtime", "sp-std", "sp-tracing", - "sp-weights", + "sp-weights 4.0.0", ] [[package]] @@ -6495,7 +6723,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "sp-weights", + "sp-weights 4.0.0", "substrate-test-utils", ] @@ -6612,7 +6840,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -6653,7 +6881,7 @@ dependencies = [ "substrate-state-trie-migration-rpc", "thousands", "tokio", - "zstd", + "zstd 0.12.3+zstd.1.5.2", ] [[package]] @@ -6783,7 +7011,7 @@ 
dependencies = [ "sp-core", "sp-rpc", "sp-runtime", - "sp-weights", + "sp-weights 4.0.0", ] [[package]] @@ -6794,7 +7022,7 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-runtime", - "sp-weights", + "sp-weights 4.0.0", ] [[package]] @@ -6809,7 +7037,7 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", - "sp-weights", + "sp-weights 4.0.0", ] [[package]] @@ -6820,7 +7048,7 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-runtime", - "sp-weights", + "sp-weights 4.0.0", ] [[package]] @@ -6970,9 +7198,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd4572a52711e2ccff02b4973ec7e4a5b5c23387ebbfbd6cd42b34755714cefc" +checksum = "4890dcb9556136a4ec2b0c51fa4a08c8b733b829506af8fff2e853f3a065985b" dependencies = [ "blake2", "crc32fast", @@ -7127,7 +7355,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7190,7 +7418,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -7216,22 +7444,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -7258,8 +7486,18 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der 0.7.6", + "spki 0.7.2", ] [[package]] @@ -7356,7 +7594,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.0", + "universal-hash 0.5.1", ] [[package]] @@ -7379,6 +7617,18 @@ dependencies = [ "regex", ] +[[package]] +name = "predicates" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09963355b9f467184c04017ced4a2ba2d75cbcb4e7462690d388233253d4b1a9" +dependencies = [ + "anstyle", + "difflib", + "itertools 0.10.5", + "predicates-core", +] + [[package]] name = "predicates-core" version = "1.0.6" @@ -7437,7 +7687,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ "thiserror", - "toml", + "toml 0.5.11", ] [[package]] @@ -7464,11 +7714,22 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-warning" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0e99670bafb56b9a106419397343bdbc8b8742c3cc449fec6345f86173f47cd4" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.18", +] + [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b" dependencies = [ "unicode-ident", ] @@ -7641,9 +7902,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -7843,7 +8104,7 @@ checksum = "8d2275aab483050ab2a7364c1a46604865ee7d6906684e08db0f090acf74f9e7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -7860,13 +8121,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.1" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-syntax 0.7.1", + "regex-syntax 0.7.2", ] [[package]] @@ -7886,9 +8147,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "region" @@ -7918,11 +8179,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -7940,9 +8211,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.19.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" +checksum = "015439787fce1e75d55f279078d33ff14b4af5d93d995e8838ee4631301c8a99" dependencies = [ "libc", "librocksdb-sys", @@ -7980,7 +8251,7 @@ dependencies = [ "log", "netlink-packet-route", "netlink-proto", - "nix", + "nix 0.24.3", "thiserror", "tokio", ] @@ -8056,9 +8327,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.13" +version = "0.36.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a38f9520be93aba504e8ca974197f46158de5dcaa9fa04b57c57cd6a679d658" +checksum = "14e4d67015953998ad0eb82887a0eb0129e18a7e2f3b7b0f6c422fddcd503d62" dependencies = [ "bitflags", "errno", @@ -8078,7 +8349,7 @@ dependencies = [ "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.3.7", + "linux-raw-sys 0.3.8", "windows-sys 0.48.0", ] @@ -8125,7 +8396,7 @@ version = "1.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] @@ -8346,7 +8617,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -8355,7 +8626,7 @@ version = "0.10.0-dev" dependencies = [ "array-bytes", "chrono", - "clap 4.2.7", + "clap 4.3.0", "fdlimit", "futures", "futures-timer", @@ -8523,7 +8794,6 @@ dependencies = [ "fork-tree", "futures", "log", - "merlin", "num-bigint", "num-rational", "num-traits", @@ -8540,7 +8810,6 @@ dependencies = [ "sc-network-test", "sc-telemetry", "scale-info", - "schnorrkel", "sp-api", "sp-application-crypto", "sp-block-builder", @@ -8548,16 +8817,13 @@ dependencies = [ "sp-consensus", "sp-consensus-babe", "sp-consensus-slots", - "sp-consensus-vrf", "sp-core", "sp-inherents", - "sp-io", "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", "sp-tracing", - "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", @@ -8587,7 +8853,6 @@ dependencies = [ "sp-keystore", "sp-runtime", "substrate-test-runtime-client", - "tempfile", "thiserror", "tokio", ] @@ -8900,7 +9165,7 @@ dependencies = [ "once_cell", "parity-scale-codec", "paste", - "rustix 0.36.13", + "rustix 0.36.14", "sc-allocator", "sc-executor-common", "sc-runtime-test", @@ -8953,6 +9218,7 @@ dependencies = [ "asynchronous-codec", "bytes", "either", + "enum-as-inner", "fnv", "futures", "futures-timer", @@ -8978,6 +9244,7 @@ dependencies = [ "serde", "serde_json", "smallvec", + "snow", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -9151,6 +9418,7 @@ dependencies = [ "sc-network-light", "sc-network-sync", "sc-service", + "sc-utils", "sp-blockchain", "sp-consensus", "sp-consensus-babe", @@ -9294,7 +9562,6 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", - "sp-tracing", "sp-version", "thiserror", ] @@ -9331,6 +9598,7 @@ dependencies = [ "sc-chain-spec", "sc-client-api", "sc-transaction-pool-api", + "sc-utils", "serde", "serde_json", "sp-api", @@ -9415,7 +9683,6 @@ dependencies = [ "sp-session", "sp-state-machine", "sp-storage", - "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", @@ -9483,7 +9750,7 @@ dependencies = [ name = "sc-storage-monitor" version = "0.1.0" dependencies = [ - "clap 4.2.7", + "clap 4.3.0", "fs4", "futures", "log", @@ -9587,7 +9854,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -9655,9 +9922,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfdef77228a4c05dc94211441595746732131ad7f6530c6c18f045da7b7ab937" +checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" dependencies = [ "bitvec", "cfg-if", @@ -9767,10 +10034,24 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct", - "der", + "base16ct 0.1.1", + "der 0.6.1", + "generic-array 0.14.7", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + +[[package]] +name = "sec1" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e" +dependencies = [ + "base16ct 0.2.0", + "der 0.7.6", "generic-array 0.14.7", - 
"pkcs8", + "pkcs8 0.10.2", "subtle", "zeroize", ] @@ -9804,9 +10085,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -9817,9 +10098,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -9860,22 +10141,22 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.162" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2f6e1ab5c2b98c05f0f35b236b22e8df7ead6ffbf51d7808da7f8817e7ab6" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.162" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a0814352fd64b58489904a44ea8d90cb1a91dcb6b4f5ebabc32c8318e93cb6" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -9889,6 +10170,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +dependencies = [ + "serde", +] + [[package]] name = "sha-1" version = "0.9.8" @@ -9910,7 +10200,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -9946,7 +10236,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -9955,7 +10245,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -9989,7 +10279,17 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +dependencies = [ + "digest 0.10.7", "rand_core 0.6.4", ] @@ -10090,8 +10390,10 @@ dependencies = [ "hash-db 0.16.0", "log", "parity-scale-codec", + "scale-info", "sp-api-proc-macro", "sp-core", + "sp-metadata-ir", "sp-runtime", "sp-state-machine", "sp-std", @@ -10106,12 +10408,13 @@ name = "sp-api-proc-macro" version = "4.0.0-dev" dependencies = [ "Inflector", + "assert_matches", "blake2", "expander", "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + 
"syn 2.0.18", ] [[package]] @@ -10131,6 +10434,7 @@ dependencies = [ "sp-state-machine", "sp-tracing", "sp-version", + "static_assertions", "substrate-test-runtime-client", "trybuild", ] @@ -10180,8 +10484,11 @@ dependencies = [ name = "sp-arithmetic-fuzzer" version = "2.0.0" dependencies = [ + "arbitrary", + "fraction", "honggfuzz", "num-bigint", + "num-traits", "primitive-types", "sp-arithmetic", ] @@ -10263,7 +10570,6 @@ name = "sp-consensus-babe" version = "0.10.0-dev" dependencies = [ "async-trait", - "merlin", "parity-scale-codec", "scale-info", "serde", @@ -10271,7 +10577,6 @@ dependencies = [ "sp-application-crypto", "sp-consensus", "sp-consensus-slots", - "sp-consensus-vrf", "sp-core", "sp-inherents", "sp-keystore", @@ -10335,33 +10640,19 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-arithmetic", - "sp-runtime", "sp-std", "sp-timestamp", ] -[[package]] -name = "sp-consensus-vrf" -version = "0.10.0-dev" -dependencies = [ - "parity-scale-codec", - "scale-info", - "schnorrkel", - "sp-core", - "sp-runtime", - "sp-std", -] - [[package]] name = "sp-core" version = "7.0.0" dependencies = [ "array-bytes", - "base58", "bitflags", "blake2", "bounded-collections", + "bs58", "criterion", "dyn-clonable", "ed25519-zebra", @@ -10376,6 +10667,7 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parking_lot 0.12.1", + "paste", "primitive-types", "rand 0.8.5", "regex", @@ -10406,7 +10698,7 @@ version = "5.0.0" dependencies = [ "blake2b_simd", "byteorder", - "digest 0.10.6", + "digest 0.10.7", "sha2 0.10.6", "sha3", "sp-std", @@ -10420,7 +10712,7 @@ dependencies = [ "proc-macro2", "quote", "sp-core-hashing", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -10437,7 +10729,7 @@ version = "5.0.0" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -10476,6 +10768,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", + "rustversion", "secp256k1", "sp-core", "sp-externalities", @@ -10503,14 +10796,11 @@ dependencies = [ name = "sp-keystore" version = "0.13.0" dependencies = [ - "async-trait", "futures", - "merlin", "parity-scale-codec", "parking_lot 0.12.1", "rand 0.7.3", "rand_chacha 0.2.2", - "schnorrkel", "serde", "sp-core", "sp-externalities", @@ -10522,7 +10812,17 @@ name = "sp-maybe-compressed-blob" version = "4.1.0-dev" dependencies = [ "thiserror", - "zstd", + "zstd 0.12.3+zstd.1.5.2", +] + +[[package]] +name = "sp-metadata-ir" +version = "0.1.0" +dependencies = [ + "frame-metadata", + "parity-scale-codec", + "scale-info", + "sp-std", ] [[package]] @@ -10562,7 +10862,7 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.2.7", + "clap 4.3.0", "honggfuzz", "parity-scale-codec", "rand 0.8.5", @@ -10622,9 +10922,9 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", - "sp-weights", + "sp-weights 4.0.0", "substrate-test-runtime-client", - "zstd", + "zstd 0.12.3+zstd.1.5.2", ] [[package]] @@ -10658,7 +10958,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -10727,6 +11027,7 @@ version = "4.0.0-dev" dependencies = [ "parity-scale-codec", "scale-info", + "serde", "sp-core", "sp-runtime", "sp-std", @@ -10777,6 +11078,7 @@ name = "sp-test-primitives" version = "2.0.0" dependencies = [ "parity-scale-codec", + "scale-info", "serde", "sp-application-crypto", "sp-core", @@ -10839,7 +11141,7 @@ dependencies = [ "array-bytes", "criterion", "hash-db 0.16.0", - "hashbrown 0.12.3", + "hashbrown 
0.13.2", "lazy_static", "memory-db", "nohash-hasher", @@ -10899,7 +11201,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -10929,6 +11231,21 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-weights" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.40#98f2e3451c9143278ec53c6718940aeabcd3b68a" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "smallvec", + "sp-arithmetic", + "sp-core", + "sp-debug-derive", + "sp-std", +] + [[package]] name = "spin" version = "0.5.2" @@ -10948,7 +11265,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", +] + +[[package]] +name = "spki" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +dependencies = [ + "base64ct", + "der 0.7.6", ] [[package]] @@ -11057,7 +11384,7 @@ dependencies = [ name = "subkey" version = "3.0.0" dependencies = [ - "clap 4.2.7", + "clap 4.3.0", "sc-cli", ] @@ -11081,11 +11408,25 @@ dependencies = [ "platforms 2.0.0", ] +[[package]] +name = "substrate-cli-test-utils" +version = "0.1.0" +dependencies = [ + "assert_cmd", + "futures", + "nix 0.26.2", + "node-primitives", + "regex", + "substrate-rpc-client", + "tempfile", + "tokio", +] + [[package]] name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 4.2.7", + "clap 4.3.0", "frame-support", "frame-system", "sc-cli", @@ -11303,7 +11644,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -11326,7 +11667,7 @@ dependencies = [ "sp-maybe-compressed-blob", "strum", "tempfile", - "toml", + "toml 0.7.4", "walkdir", "wasm-opt", ] @@ -11359,9 +11700,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ "proc-macro2", "quote", @@ -11382,9 +11723,9 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags", "core-foundation", @@ -11464,7 +11805,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -11595,9 +11936,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", @@ -11620,7 +11961,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -11683,6 +12024,40 @@ dependencies = [ "serde", ] 
+[[package]] +name = "toml" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -11745,14 +12120,14 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -11934,13 +12309,15 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" name = "try-runtime-cli" version = "0.10.0-dev" dependencies = [ + "assert_cmd", "async-trait", - "clap 4.2.7", + "clap 4.3.0", "frame-remote-externalities", "frame-try-runtime", "hex", "log", "parity-scale-codec", + "regex", "sc-cli", "sc-executor", "sc-service", @@ -11961,10 +12338,11 @@ dependencies = [ "sp-timestamp", "sp-transaction-storage-proof", "sp-version", - "sp-weights", + "sp-weights 4.0.0", + "substrate-cli-test-utils", "substrate-rpc-client", "tokio", - "zstd", + "zstd 0.12.3+zstd.1.5.2", ] [[package]] @@ -12015,7 +12393,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "digest 0.10.6", + "digest 0.10.7", "rand 0.8.5", "static_assertions", ] @@ -12052,9 +12430,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -12089,9 +12467,9 @@ dependencies = [ [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -12134,9 +12512,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ "getrandom 0.2.9", ] @@ -12182,6 +12560,15 @@ version 
= "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "waitgroup" version = "0.1.2" @@ -12237,9 +12624,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -12247,24 +12634,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if", "js-sys", @@ -12274,9 +12661,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12284,28 +12671,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "wasm-encoder" -version = "0.26.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05d0b6fcd0aeb98adf16e7975331b3c17222aa815148f5b976370ce589d80ef" +checksum = "18c41dbd92eaebf3612a39be316540b8377c871cb9bde6b064af962984912881" dependencies = [ "leb128", ] @@ -12397,13 +12784,13 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.20.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01bf50edb2ea9d922aa75a7bf3c15e26a6c9e2d18c56e862b49737a582901729" +checksum = "8e61a7006b0fdf24f6bbe8dcfdad5ca1b350de80061fb2827f31c82fbbb9565a" 
dependencies = [ "spin 0.9.8", "wasmi_arena", - "wasmi_core 0.5.0", + "wasmi_core 0.12.0", "wasmparser-nostd", ] @@ -12418,9 +12805,9 @@ dependencies = [ [[package]] name = "wasmi_arena" -version = "0.1.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ea379cbb0b41f3a9f0bf7b47036d036aae7f43383d8cc487d4deccf40dee0a" +checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" [[package]] name = "wasmi_core" @@ -12429,7 +12816,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" dependencies = [ "downcast-rs", - "libm 0.2.6", + "libm 0.2.7", "memory_units", "num-rational", "num-traits", @@ -12438,13 +12825,14 @@ dependencies = [ [[package]] name = "wasmi_core" -version = "0.5.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5bf998ab792be85e20e771fe14182b4295571ad1d4f89d3da521c1bef5f597a" +checksum = "624e6333e861ef49095d2d678b76ebf30b06bf37effca845be7e5b87c90071b7" dependencies = [ "downcast-rs", - "libm 0.2.6", + "libm 0.2.7", "num-traits", + "paste", ] [[package]] @@ -12459,9 +12847,9 @@ dependencies = [ [[package]] name = "wasmparser-nostd" -version = "0.91.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c37f310b5a62bfd5ae7c0f1d8e6f98af16a5d6d84ba764e9c36439ec14e318b" +checksum = "9157cab83003221bfd385833ab587a039f5d6fa7304854042ba358a3b09e0724" dependencies = [ "indexmap-nostd", ] @@ -12515,12 +12903,12 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix 0.36.13", + "rustix 0.36.14", "serde", "sha2 0.10.6", - "toml", + "toml 0.5.11", "windows-sys 0.42.0", - "zstd", + "zstd 0.11.2+zstd.1.5.2", ] [[package]] @@ -12595,7 +12983,7 @@ checksum = "eed41cbcbf74ce3ff6f1d07d1b707888166dc408d1a880f651268f4f7c9194b2" dependencies = [ "object 0.29.0", "once_cell", - "rustix 0.36.13", + "rustix 0.36.14", ] [[package]] @@ -12626,7 +13014,7 @@ dependencies = [ "memoffset 0.6.5", "paste", "rand 0.8.5", - "rustix 0.36.13", + "rustix 0.36.14", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", @@ -12647,9 +13035,9 @@ dependencies = [ [[package]] name = "wast" -version = "57.0.0" +version = "60.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eb0f5ed17ac4421193c7477da05892c2edafd67f9639e3c11a82086416662dc" +checksum = "bd06cc744b536e30387e72a48fdd492105b9c938bb4f415c39c616a7a0a697ad" dependencies = [ "leb128", "memchr", @@ -12659,18 +13047,18 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9ab0d87337c3be2bb6fc5cd331c4ba9fd6bcb4ee85048a0dd59ed9ecf92e53" +checksum = "5abe520f0ab205366e9ac7d3e6b2fc71de44e32a2b58f2ec871b6b575bdcea3b" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", @@ -12768,7 +13156,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.1", + "aes-gcm 0.10.2", "async-trait", "bincode", 
"block-modes", @@ -12776,7 +13164,7 @@ dependencies = [ "ccm", "curve25519-dalek 3.2.0", "der-parser 8.2.0", - "elliptic-curve", + "elliptic-curve 0.12.3", "hkdf", "hmac 0.12.1", "log", @@ -12788,11 +13176,11 @@ dependencies = [ "rcgen 0.9.3", "ring", "rustls 0.19.1", - "sec1", + "sec1 0.3.0", "serde", "sha1", "sha2 0.10.6", - "signature", + "signature 1.6.4", "subtle", "thiserror", "tokio", @@ -12907,7 +13295,7 @@ dependencies = [ "lazy_static", "libc", "log", - "nix", + "nix 0.24.3", "rand 0.8.5", "thiserror", "tokio", @@ -12927,9 +13315,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b689b6c49d6549434bf944e6b0f39238cf63693cb7a147e9d887507fffa3b223" +checksum = "5cd0496a71f3cc6bc4bf0ed91346426a5099e93d89807e663162dc5a1069ff65" dependencies = [ "bytemuck", "safe_arch", @@ -13171,6 +13559,15 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "winnow" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.10.1" @@ -13250,22 +13647,24 @@ dependencies = [ [[package]] name = "xcm" -version = "0.9.36" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" +version = "0.9.40" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.40#95fe4c8862810bffd68343231a517e62689c05c0" dependencies = [ + "bounded-collections", "derivative", "impl-trait-for-tuples", "log", "parity-scale-codec", "scale-info", - "sp-runtime", + "serde", + "sp-weights 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.40)", "xcm-procedural", ] [[package]] name = "xcm-procedural" -version = "0.9.36" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" +version = "0.9.40" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.40#95fe4c8862810bffd68343231a517e62689c05c0" dependencies = [ "Inflector", "proc-macro2", @@ -13319,7 +13718,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -13328,7 +13727,16 @@ version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ - "zstd-safe", + "zstd-safe 5.0.2+zstd.1.5.2", +] + +[[package]] +name = "zstd" +version = "0.12.3+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" +dependencies = [ + "zstd-safe 6.0.5+zstd.1.5.4", ] [[package]] @@ -13341,6 +13749,16 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "zstd-safe" +version = "6.0.5+zstd.1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" +dependencies = [ + "libc", + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" @@ -13351,11 +13769,3 @@ dependencies = [ "libc", "pkg-config", ] - -[[patch.unused]] -name = "sp-authorship" -version = "4.0.0-dev" 
- -[[patch.unused]] -name = "sp-authorship" -version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 60f3cfbee6283..10dd4ce96946e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -109,6 +109,7 @@ members = [ "frame/election-provider-support/solution-type/fuzzer", "frame/examples/basic", "frame/examples/offchain-worker", + "frame/examples/dev-mode", "frame/executive", "frame/nis", "frame/grandpa", @@ -175,6 +176,7 @@ members = [ "frame/transaction-payment-mangata/rpc/runtime-api", "frame/transaction-storage", "frame/treasury", + "frame/asset-rate", "frame/tips", "frame/uniques", "frame/utility", @@ -201,7 +203,6 @@ members = [ "primitives/consensus/grandpa", "primitives/consensus/pow", "primitives/consensus/slots", - "primitives/consensus/vrf", "primitives/core", "primitives/core/hashing", "primitives/core/hashing/proc-macro", @@ -215,6 +216,7 @@ members = [ "primitives/keystore", "primitives/maybe-compressed-blob", "primitives/merkle-mountain-range", + "primitives/metadata-ir", "primitives/npos-elections", "primitives/npos-elections/fuzzer", "primitives/offchain", @@ -336,7 +338,6 @@ panic = "unwind" [patch."https://github.com/mangata-finance/substrate"] sp-debug-derive = { path = "../substrate/primitives/debug-derive" } extrinsic-shuffler = { path = "../substrate/primitives/shuffler" } -sp-authorship = { path = "../substrate/primitives/authorship" } sp-storage = { path = "../substrate/primitives/storage" } sp-timestamp = { path = "../substrate/primitives/timestamp" } sp-trie = { path = "../substrate/primitives/trie" } @@ -372,7 +373,6 @@ sp-runtime = { path = "../substrate/primitives/runtime" } sp-consensus-slots = { path = "../substrate/primitives/consensus/slots" } sp-consensus-babe = { path = "../substrate/primitives/consensus/babe" } sp-consensus-aura = { path = "../substrate/primitives/consensus/aura" } -sp-consensus-vrf = { path = "../substrate/primitives/consensus/vrf" } sp-consensus = { path = "../substrate/primitives/consensus/common" } sp-ver = { path = "../substrate/primitives/ver" } sp-rpc = { path = "../substrate/primitives/rpc" } @@ -490,7 +490,6 @@ pallet-society = { path = "../substrate/frame/society" } [patch."https://github.com/paritytech/substrate"] sp-debug-derive = { path = "../substrate/primitives/debug-derive" } extrinsic-shuffler = { path = "../substrate/primitives/shuffler" } -sp-authorship = { path = "../substrate/primitives/authorship" } sp-storage = { path = "../substrate/primitives/storage" } sp-timestamp = { path = "../substrate/primitives/timestamp" } sp-trie = { path = "../substrate/primitives/trie" } @@ -526,7 +525,6 @@ sp-runtime = { path = "../substrate/primitives/runtime" } sp-consensus-slots = { path = "../substrate/primitives/consensus/slots" } sp-consensus-babe = { path = "../substrate/primitives/consensus/babe" } sp-consensus-aura = { path = "../substrate/primitives/consensus/aura" } -sp-consensus-vrf = { path = "../substrate/primitives/consensus/vrf" } sp-consensus = { path = "../substrate/primitives/consensus/common" } sp-ver = { path = "../substrate/primitives/ver" } sp-rpc = { path = "../substrate/primitives/rpc" } diff --git a/bin/node-template/LICENSE b/bin/node-template/LICENSE index cf1ab25da0349..ffa0b3f2df035 100644 --- a/bin/node-template/LICENSE +++ b/bin/node-template/LICENSE @@ -1,24 +1,16 @@ -This is free and unencumbered software released into the public domain. 
+MIT No Attribution -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. +Copyright Parity Technologies (UK) Ltd. -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 979c00120ddb7..ac64b8881caef 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -5,7 +5,7 @@ description = "A fresh FRAME-based Substrate node, ready for hacking." 
authors = ["Substrate DevHub "] homepage = "https://substrate.io/" edition = "2021" -license = "Unlicense" +license = "MIT-0" publish = false repository = "https://github.com/substrate-developer-hub/substrate-node-template/" build = "build.rs" diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 34e4e566d92fc..723d1db3eea37 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -5,7 +5,6 @@ use sc_client_api::BlockBackend; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_consensus_grandpa::SharedVoterState; pub use sc_executor::NativeElseWasmExecutor; -use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; @@ -58,10 +57,6 @@ pub fn new_partial( >, ServiceError, > { - if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".into())) - } - let telemetry = config .telemetry_endpoints .clone() @@ -73,12 +68,7 @@ pub fn new_partial( }) .transpose()?; - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); + let executor = sc_service::new_native_or_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( @@ -147,13 +137,6 @@ pub fn new_partial( }) } -fn remote_keystore(_url: &String) -> Result, &'static str> { - // FIXME: here would the concrete keystore be built, - // must return a concrete type (NOT `LocalKeystore`) that - // implements `CryptoStore` and `SyncCryptoStore` - Err("Remote Keystore not supported.") -} - /// Builds a new service for a full client. 
pub fn new_full(mut config: Configuration) -> Result { let sc_service::PartialComponents { @@ -161,22 +144,12 @@ pub fn new_full(mut config: Configuration) -> Result backend, mut task_manager, import_queue, - mut keystore_container, + keystore_container, select_chain, transaction_pool, other: (block_import, grandpa_link, mut telemetry), } = new_partial(&config)?; - if let Some(url) = &config.keystore_remote { - match remote_keystore(url) { - Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => - return Err(ServiceError::Other(format!( - "Error hooking up remote keystore for {}: {}", - url, e - ))), - }; - } let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name( &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), &config.chain_spec, @@ -233,7 +206,7 @@ pub fn new_full(mut config: Configuration) -> Result let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { network: network.clone(), client: client.clone(), - keystore: keystore_container.sync_keystore(), + keystore: keystore_container.keystore(), task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), rpc_builder: rpc_extensions_builder, @@ -276,7 +249,7 @@ pub fn new_full(mut config: Configuration) -> Result }, force_authoring, backoff_authoring_blocks, - keystore: keystore_container.sync_keystore(), + keystore: keystore_container.keystore(), sync_oracle: sync_service.clone(), justification_sync_link: sync_service.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), @@ -296,8 +269,7 @@ pub fn new_full(mut config: Configuration) -> Result if enable_grandpa { // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = - if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + let keystore = if role.is_authority() { Some(keystore_container.keystore()) } else { None }; let grandpa_config = sc_consensus_grandpa::Config { // FIXME #1578 make this available through chainspec diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index f6607b25c4d62..bb6d8c511af7e 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -5,7 +5,7 @@ description = "FRAME pallet template for defining custom runtime logic." 
authors = ["Substrate DevHub "] homepage = "https://substrate.io" edition = "2021" -license = "Unlicense" +license = "MIT-0" publish = false repository = "https://github.com/substrate-developer-hub/substrate-node-template/" @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../../../../frame/benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../../frame/support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../../frame/system" } diff --git a/bin/node-template/pallets/template/README.md b/bin/node-template/pallets/template/README.md index 8d751a42207dd..d0d59537c12d7 100644 --- a/bin/node-template/pallets/template/README.md +++ b/bin/node-template/pallets/template/README.md @@ -1 +1 @@ -License: Unlicense \ No newline at end of file +License: MIT-0 \ No newline at end of file diff --git a/bin/node-template/pallets/template/src/benchmarking.rs b/bin/node-template/pallets/template/src/benchmarking.rs index 1790849970440..5a262417629c5 100644 --- a/bin/node-template/pallets/template/src/benchmarking.rs +++ b/bin/node-template/pallets/template/src/benchmarking.rs @@ -1,19 +1,34 @@ //! Benchmarking setup for pallet-template - +#![cfg(feature = "runtime-benchmarks")] use super::*; #[allow(unused)] use crate::Pallet as Template; -use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; -benchmarks! { - do_something { - let s in 0 .. 100; +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn do_something() { + let value = 100u32.into(); let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), s) - verify { - assert_eq!(Something::::get(), Some(s)); + #[extrinsic_call] + do_something(RawOrigin::Signed(caller), value); + + assert_eq!(Something::::get(), Some(value)); + } + + #[benchmark] + fn cause_error() { + Something::::put(100u32); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + cause_error(RawOrigin::Signed(caller)); + + assert_eq!(Something::::get(), Some(101u32)); } impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 9f17623db328f..9550d3d546cca 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -13,9 +13,12 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod weights; +pub use weights::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -27,6 +30,8 @@ pub mod pallet { pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// Type representing the weight of this pallet + type WeightInfo: WeightInfo; } // The pallet's runtime storage items. 
@@ -64,7 +69,7 @@ pub mod pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. #[pallet::call_index(0)] - #[pallet::weight(10_000 + T::DbWeight::get().writes(1).ref_time())] + #[pallet::weight(T::WeightInfo::do_something())] pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. @@ -82,7 +87,7 @@ pub mod pallet { /// An example dispatchable that may throw a custom error. #[pallet::call_index(1)] - #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1).ref_time())] + #[pallet::weight(T::WeightInfo::cause_error())] pub fn cause_error(origin: OriginFor) -> DispatchResult { let _who = ensure_signed(origin)?; diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 91a1bf6ed5fc8..b4d6905378a5d 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -50,6 +50,7 @@ impl frame_system::Config for Test { impl pallet_template::Config for Test { type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); } // Build genesis storage according to the mock runtime. diff --git a/bin/node-template/pallets/template/src/weights.rs b/bin/node-template/pallets/template/src/weights.rs new file mode 100644 index 0000000000000..e8fbc09bad8e9 --- /dev/null +++ b/bin/node-template/pallets/template/src/weights.rs @@ -0,0 +1,91 @@ + +//! Autogenerated weights for pallet_template +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ../../target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_template +// --extrinsic +// * +// --steps=50 +// --repeat=20 +// --execution=wasm +// --wasm-execution=compiled +// --output +// pallets/template/src/weights.rs +// --template +// ../../.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_template. +pub trait WeightInfo { + fn do_something() -> Weight; + fn cause_error() -> Weight; +} + +/// Weights for pallet_template using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: TemplateModule Something (r:0 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. 
+ Weight::from_parts(9_000_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: TemplateModule Something (r:1 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: TemplateModule Something (r:0 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: TemplateModule Something (r:1 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 90fa6269ebe8c..930c5b5c49ade 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -5,7 +5,7 @@ description = "A fresh FRAME-based Substrate node, ready for hacking." authors = ["Substrate DevHub "] homepage = "https://substrate.io/" edition = "2021" -license = "Unlicense" +license = "MIT-0" publish = false repository = "https://github.com/substrate-developer-hub/substrate-node-template/" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } pallet-aura = { version = "4.0.0-dev", default-features = false, path = "../../../frame/aura" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index ac01aa95f4f12..3dcf1b5564dec 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -243,6 +243,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ConstU128; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { @@ -266,6 +270,7 @@ impl pallet_sudo::Config for Runtime { /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_template::weights::SubstrateWeight; } // Create the runtime by composing the FRAME pallets that were previously configured. @@ -354,6 +359,14 @@ impl_runtime_apis! 
{ fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } } impl sp_block_builder::BlockBuilder for Runtime { diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index be5e94ee4df8b..30266ab0d24e7 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -25,7 +25,7 @@ serde = "1.0.136" serde_json = "1.0.85" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.13.0" -kvdb-rocksdb = "0.17.0" +kvdb-rocksdb = "0.18.0" sp-trie = { version = "7.0.0", path = "../../../primitives/trie" } sp-core = { version = "7.0.0", path = "../../../primitives/core" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } @@ -38,7 +38,7 @@ tempfile = "3.1.0" fs_extra = "1" rand = { version = "0.8.5", features = ["small_rng"] } lazy_static = "1.4.0" -parity-db = "0.4.4" +parity-db = "0.4.6" sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } futures = { version = "0.3.21", features = ["thread-pool"] } diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index 29cda70990aa0..1f22d3db69261 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -72,24 +72,13 @@ pub struct NsFormatter(pub u64); impl fmt::Display for NsFormatter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let v = self.0; - - if v < 100 { - return write!(f, "{} ns", v) - } - - if self.0 < 100_000 { - return write!(f, "{:.1} µs", v as f64 / 1000.0) - } - - if self.0 < 1_000_000 { - return write!(f, "{:.4} ms", v as f64 / 1_000_000.0) - } - - if self.0 < 100_000_000 { - return write!(f, "{:.1} ms", v as f64 / 1_000_000.0) + match v { + v if v < 100 => write!(f, "{} ns", v), + v if v < 100_000 => write!(f, "{:.1} µs", v as f64 / 1000.0), + v if v < 1_000_000 => write!(f, "{:.4} ms", v as f64 / 1_000_000.0), + v if v < 100_000_000 => write!(f, "{:.1} ms", v as f64 / 1_000_000.0), + _ => write!(f, "{:.4} s", v as f64 / 1_000_000_000.0), } - - write!(f, "{:.4} s", v as f64 / 1_000_000_000.0) } } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4451935c36035..77e49ff07d6b5 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -127,6 +127,7 @@ tokio-util = { version = "0.7.4", features = ["compat"] } wait-timeout = "0.2" substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } +substrate-cli-test-utils = { path = "../../../test-utils/cli" } [build-dependencies] clap = { version = "4.0.9", optional = true } diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 501d69fc287b7..c7f0cd20efd5b 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -69,7 +69,6 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { transaction_pool: Default::default(), network: network_config, keystore: KeystoreConfig::InMemory, - keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), @@ -162,7 
+161,7 @@ fn prepare_benchmark(client: &FullClient) -> (usize, Vec) { let extrinsic: OpaqueExtrinsic = create_extrinsic( client, src.clone(), - BalancesCall::transfer { dest: dst.clone(), value: 1 * DOLLARS }, + BalancesCall::transfer_allow_death { dest: dst.clone(), value: 1 * DOLLARS }, Some(nonce), ) .into(); diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index 165c67f602777..abee9c846245f 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -27,7 +27,7 @@ use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, - PruningMode, TransactionPoolOptions, WasmExecutionMethod, + PruningMode, TransactionPoolOptions, }, BasePath, Configuration, Role, }; @@ -64,13 +64,12 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { }, network: network_config, keystore: KeystoreConfig::InMemory, - keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, - wasm_method: WasmExecutionMethod::Interpreted, + wasm_method: Default::default(), // NOTE: we enforce the use of the native runtime to make the errors more debuggable execution_strategies: ExecutionStrategies { syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible, @@ -140,10 +139,9 @@ fn create_account_extrinsics( Sr25519Keyring::Alice.pair(), SudoCall::sudo { call: Box::new( - BalancesCall::set_balance { + BalancesCall::force_set_balance { who: AccountId::from(a.public()).into(), new_free: 0, - new_reserved: 0, } .into(), ), @@ -156,10 +154,9 @@ fn create_account_extrinsics( Sr25519Keyring::Alice.pair(), SudoCall::sudo { call: Box::new( - BalancesCall::set_balance { + BalancesCall::force_set_balance { who: AccountId::from(a.public()).into(), new_free: 1_000_000 * DOLLARS, - new_reserved: 0, } .into(), ), @@ -184,7 +181,7 @@ fn create_benchmark_extrinsics( create_extrinsic( client, account.clone(), - BalancesCall::transfer { + BalancesCall::transfer_allow_death { dest: Sr25519Keyring::Bob.to_account_id().into(), value: 1 * DOLLARS, }, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 3c9716c08d1c0..de640180574ce 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -163,12 +163,7 @@ pub fn new_partial( }) .transpose()?; - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); + let executor = sc_service::new_native_or_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( @@ -208,7 +203,7 @@ pub fn new_partial( )?; let slot_duration = babe_link.config().slot_duration(); - let import_queue = sc_consensus_babe::import_queue( + let (import_queue, babe_worker_handle) = sc_consensus_babe::import_queue( babe_link.clone(), block_import.clone(), Some(Box::new(justification_import)), @@ -233,7 +228,7 @@ pub fn new_partial( let import_setup = (block_import, grandpa_link, babe_link); let (rpc_extensions_builder, rpc_setup) = { - let (_, grandpa_link, babe_link) = &import_setup; + let (_, grandpa_link, _) = &import_setup; let justification_stream = grandpa_link.justification_stream(); let 
shared_authority_set = grandpa_link.shared_authority_set().clone(); @@ -245,13 +240,10 @@ pub fn new_partial( Some(shared_authority_set.clone()), ); - let babe_config = babe_link.config().clone(); - let shared_epoch_changes = babe_link.epoch_changes().clone(); - let client = client.clone(); let pool = transaction_pool.clone(); let select_chain = select_chain.clone(); - let keystore = keystore_container.sync_keystore(); + let keystore = keystore_container.keystore(); let chain_spec = config.chain_spec.cloned_box(); let rpc_backend = backend.clone(); @@ -263,9 +255,8 @@ pub fn new_partial( chain_spec: chain_spec.cloned_box(), deny_unsafe, babe: node_rpc::BabeDeps { - babe_config: babe_config.clone(), - shared_epoch_changes: shared_epoch_changes.clone(), keystore: keystore.clone(), + babe_worker_handle: babe_worker_handle.clone(), }, grandpa: node_rpc::GrandpaDeps { shared_voter_state: shared_voter_state.clone(), @@ -386,7 +377,7 @@ pub fn new_full_base( config, backend, client: client.clone(), - keystore: keystore_container.sync_keystore(), + keystore: keystore_container.keystore(), network: network.clone(), rpc_builder: Box::new(rpc_builder), transaction_pool: transaction_pool.clone(), @@ -431,7 +422,7 @@ pub fn new_full_base( let client_clone = client.clone(); let slot_duration = babe_link.config().slot_duration(); let babe_config = sc_consensus_babe::BabeParams { - keystore: keystore_container.sync_keystore(), + keystore: keystore_container.keystore(), client: client.clone(), select_chain, env: proposer, @@ -507,8 +498,7 @@ pub fn new_full_base( // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = - if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + let keystore = if role.is_authority() { Some(keystore_container.keystore()) } else { None }; let config = grandpa::Config { // FIXME #1578 make this available through chainspec @@ -593,10 +583,10 @@ mod tests { use sc_service_test::TestNetNode; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; use sp_consensus::{BlockOrigin, Environment, Proposer}; - use sp_core::{crypto::Pair as CryptoPair, Public}; + use sp_core::crypto::Pair; use sp_inherents::InherentDataProvider; use sp_keyring::AccountKeyring; - use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + use sp_keystore::KeystorePtr; use sp_runtime::{ generic::{Digest, Era, SignedPayload}, key_types::BABE, @@ -616,12 +606,13 @@ mod tests { sp_tracing::try_init_simple(); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = - Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); - let alice: sp_consensus_babe::AuthorityId = - SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) - .expect("Creates authority pair") - .into(); + let keystore: KeystorePtr = LocalKeystore::open(keystore_path.path(), None) + .expect("Creates keystore") + .into(); + let alice: sp_consensus_babe::AuthorityId = keystore + .sr25519_generate_new(BABE, Some("//Alice")) + .expect("Creates authority pair") + .into(); let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); @@ -736,17 +727,11 @@ mod tests { // sign the pre-sealed hash of the block and then // add it to a digest item. 
let to_sign = pre_hash.encode(); - let signature = SyncCryptoStore::sign_with( - &*keystore, - sp_consensus_babe::AuthorityId::ID, - &alice.to_public_crypto_pair(), - &to_sign, - ) - .unwrap() - .unwrap() - .try_into() - .unwrap(); - let item = ::babe_seal(signature); + let signature = keystore + .sr25519_sign(sp_consensus_babe::AuthorityId::ID, alice.as_ref(), &to_sign) + .unwrap() + .unwrap(); + let item = ::babe_seal(signature.into()); slot += 1; let mut params = BlockImportParams::new(BlockOrigin::File, new_header); @@ -773,7 +758,7 @@ mod tests { }; let signer = charlie.clone(); - let function = RuntimeCall::Balances(BalancesCall::transfer { + let function = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: to.into(), value: amount, }); diff --git a/bin/node/cli/tests/benchmark_block_works.rs b/bin/node/cli/tests/benchmark_block_works.rs index 369e1b3e52d87..50103a66a4d40 100644 --- a/bin/node/cli/tests/benchmark_block_works.rs +++ b/bin/node/cli/tests/benchmark_block_works.rs @@ -23,7 +23,7 @@ use assert_cmd::cargo::cargo_bin; use std::process::Command; use tempfile::tempdir; -pub mod common; +use substrate_cli_test_utils as common; /// `benchmark block` works for the dev runtime using the wasm executor. #[tokio::test] diff --git a/bin/node/cli/tests/benchmark_pallet_works.rs b/bin/node/cli/tests/benchmark_pallet_works.rs index 1a278f985da4f..2d9946543eed2 100644 --- a/bin/node/cli/tests/benchmark_pallet_works.rs +++ b/bin/node/cli/tests/benchmark_pallet_works.rs @@ -16,11 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![cfg(feature = "runtime-benchmarks")] + use assert_cmd::cargo::cargo_bin; use std::process::Command; -pub mod common; - /// `benchmark pallet` works for the different combinations of `steps` and `repeat`. #[test] fn benchmark_pallet_works() { diff --git a/bin/node/cli/tests/benchmark_storage_works.rs b/bin/node/cli/tests/benchmark_storage_works.rs index 1f1181218db03..953c07ca7f0db 100644 --- a/bin/node/cli/tests/benchmark_storage_works.rs +++ b/bin/node/cli/tests/benchmark_storage_works.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+#![cfg(feature = "runtime-benchmarks")] + use assert_cmd::cargo::cargo_bin; use std::{ path::Path, diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index 019d97aa8228e..67bc5e6031ea0 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -22,7 +22,7 @@ use assert_cmd::cargo::cargo_bin; use std::process::Command; use tempfile::tempdir; -pub mod common; +use substrate_cli_test_utils as common; #[tokio::test] async fn check_block_works() { diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index afee9ef70c15a..b5785f99ea81f 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -23,7 +23,7 @@ use regex::Regex; use std::{fs, path::PathBuf, process::Command}; use tempfile::{tempdir, TempDir}; -pub mod common; +use substrate_cli_test_utils as common; fn contains_error(logged_output: &str) -> bool { logged_output.contains("Error") diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index 56569248befc0..3695c318a8df2 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -22,7 +22,7 @@ use assert_cmd::cargo::cargo_bin; use std::process::Command; use tempfile::tempdir; -pub mod common; +use substrate_cli_test_utils as common; #[tokio::test] async fn inspect_works() { diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 470c0a7e48fba..77421f865a0d9 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -20,7 +20,7 @@ use assert_cmd::cargo::cargo_bin; use std::process::Command; use tempfile::tempdir; -pub mod common; +use substrate_cli_test_utils as common; #[tokio::test] #[cfg(unix)] diff --git a/bin/node/cli/tests/remember_state_pruning_works.rs b/bin/node/cli/tests/remember_state_pruning_works.rs index b8c5ddf4a4864..e28b2ef55ef6c 100644 --- a/bin/node/cli/tests/remember_state_pruning_works.rs +++ b/bin/node/cli/tests/remember_state_pruning_works.rs @@ -18,7 +18,7 @@ use tempfile::tempdir; -pub mod common; +use substrate_cli_test_utils as common; #[tokio::test] #[cfg(unix)] diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index fc0bf69a099ba..1308067da0256 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -18,94 +18,72 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; -use nix::{ - sys::signal::{ - kill, - Signal::{self, SIGINT, SIGTERM}, - }, - unistd::Pid, +use nix::sys::signal::Signal::{self, SIGINT, SIGTERM}; +use std::{ + process::{self, Command}, + time::Duration, }; -use std::process::{self, Child, Command}; use tempfile::tempdir; -pub mod common; +use substrate_cli_test_utils as common; #[tokio::test] async fn running_the_node_works_and_can_be_interrupted() { - async fn run_command_and_kill(signal: Signal) { - let base_path = tempdir().expect("could not create a temp dir"); - let mut cmd = common::KillChildOnDrop( - Command::new(cargo_bin("substrate")) - .stdout(process::Stdio::piped()) - .stderr(process::Stdio::piped()) - .args(&["--dev", "-d"]) - .arg(base_path.path()) - .arg("--db=paritydb") - .arg("--no-hardware-benchmarks") - .spawn() - .unwrap(), - ); - - let stderr = cmd.stderr.take().unwrap(); - - let (ws_url, _) = common::find_ws_url_from_output(stderr); - - 
common::wait_n_finalized_blocks(3, 30, &ws_url) - .await - .expect("Blocks are produced in time"); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); - kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); - assert_eq!( - common::wait_for(&mut cmd, 30).map(|x| x.success()), - Ok(true), - "the process must exit gracefully after signal {}", - signal, - ); - // Check if the database was closed gracefully. If it was not, - // there may exist a ref cycle that prevents the Client from being dropped properly. - // - // parity-db only writes the stats file on clean shutdown. - let stats_file = base_path.path().join("chains/dev/paritydb/full/stats.txt"); - assert!(std::path::Path::exists(&stats_file)); - } - - run_command_and_kill(SIGINT).await; - run_command_and_kill(SIGTERM).await; + common::run_with_timeout(Duration::from_secs(60 * 10), async move { + async fn run_command_and_kill(signal: Signal) { + let base_path = tempdir().expect("could not create a temp dir"); + let mut cmd = common::KillChildOnDrop( + Command::new(cargo_bin("substrate")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(&["--dev", "-d"]) + .arg(base_path.path()) + .arg("--db=paritydb") + .arg("--no-hardware-benchmarks") + .spawn() + .unwrap(), + ); + + let stderr = cmd.stderr.take().unwrap(); + + let ws_url = common::extract_info_from_output(stderr).0.ws_url; + + common::wait_n_finalized_blocks(3, &ws_url).await; + + cmd.assert_still_running(); + + cmd.stop_with_signal(signal); + + // Check if the database was closed gracefully. If it was not, + // there may exist a ref cycle that prevents the Client from being dropped properly. + // + // parity-db only writes the stats file on clean shutdown. + let stats_file = base_path.path().join("chains/dev/paritydb/full/stats.txt"); + assert!(std::path::Path::exists(&stats_file)); + } + + run_command_and_kill(SIGINT).await; + run_command_and_kill(SIGTERM).await; + }) + .await; } #[tokio::test] async fn running_two_nodes_with_the_same_ws_port_should_work() { - fn start_node() -> Child { - Command::new(cargo_bin("substrate")) - .stdout(process::Stdio::piped()) - .stderr(process::Stdio::piped()) - .args(&["--dev", "--tmp", "--ws-port=45789", "--no-hardware-benchmarks"]) - .spawn() - .unwrap() - } - - let mut first_node = common::KillChildOnDrop(start_node()); - let mut second_node = common::KillChildOnDrop(start_node()); - - let stderr = first_node.stderr.take().unwrap(); - let (ws_url, _) = common::find_ws_url_from_output(stderr); - - common::wait_n_finalized_blocks(3, 30, &ws_url).await.unwrap(); - - assert!(first_node.try_wait().unwrap().is_none(), "The first node should still be running"); - assert!(second_node.try_wait().unwrap().is_none(), "The second node should still be running"); - - kill(Pid::from_raw(first_node.id().try_into().unwrap()), SIGINT).unwrap(); - kill(Pid::from_raw(second_node.id().try_into().unwrap()), SIGINT).unwrap(); - - assert_eq!( - common::wait_for(&mut first_node, 30).map(|x| x.success()), - Ok(true), - "The first node must exit gracefully", - ); - assert_eq!( - common::wait_for(&mut second_node, 30).map(|x| x.success()), - Ok(true), - "The second node must exit gracefully", - ); + common::run_with_timeout(Duration::from_secs(60 * 10), async move { + let mut first_node = common::KillChildOnDrop(common::start_node()); + let mut second_node = common::KillChildOnDrop(common::start_node()); + + let stderr = first_node.stderr.take().unwrap(); + let ws_url = 
common::extract_info_from_output(stderr).0.ws_url; + + common::wait_n_finalized_blocks(3, &ws_url).await; + + first_node.assert_still_running(); + second_node.assert_still_running(); + + first_node.stop(); + second_node.stop(); + }) + .await; } diff --git a/bin/node/cli/tests/telemetry.rs b/bin/node/cli/tests/telemetry.rs index 633cc996ca615..176d2e81ad06b 100644 --- a/bin/node/cli/tests/telemetry.rs +++ b/bin/node/cli/tests/telemetry.rs @@ -17,78 +17,76 @@ // along with this program. If not, see . use assert_cmd::cargo::cargo_bin; -use nix::{ - sys::signal::{kill, Signal::SIGINT}, - unistd::Pid, -}; -use std::process; +use std::{process, time::Duration}; -pub mod common; +use crate::common::KillChildOnDrop; + +use substrate_cli_test_utils as common; pub mod websocket_server; #[tokio::test] async fn telemetry_works() { - let config = websocket_server::Config { - capacity: 1, - max_frame_size: 1048 * 1024, - send_buffer_len: 32, - bind_address: "127.0.0.1:0".parse().unwrap(), - }; - let mut server = websocket_server::WsServer::new(config).await.unwrap(); - - let addr = server.local_addr().unwrap(); - - let server_task = tokio::spawn(async move { - loop { - use websocket_server::Event; - match server.next_event().await { - // New connection on the listener. - Event::ConnectionOpen { address } => { - println!("New connection from {:?}", address); - server.accept(); - }, - - // Received a message from a connection. - Event::BinaryFrame { message, .. } => { - let json: serde_json::Value = serde_json::from_slice(&message).unwrap(); - let object = - json.as_object().unwrap().get("payload").unwrap().as_object().unwrap(); - if matches!(object.get("best"), Some(serde_json::Value::String(_))) { - break - } - }, - - Event::TextFrame { .. } => panic!("Got a TextFrame over the socket, this is a bug"), - - // Connection has been closed. - Event::ConnectionError { .. } => {}, + common::run_with_timeout(Duration::from_secs(60 * 10), async move { + let config = websocket_server::Config { + capacity: 1, + max_frame_size: 1048 * 1024, + send_buffer_len: 32, + bind_address: "127.0.0.1:0".parse().unwrap(), + }; + let mut server = websocket_server::WsServer::new(config).await.unwrap(); + + let addr = server.local_addr().unwrap(); + + let server_task = tokio::spawn(async move { + loop { + use websocket_server::Event; + match server.next_event().await { + // New connection on the listener. + Event::ConnectionOpen { address } => { + println!("New connection from {:?}", address); + server.accept(); + }, + + // Received a message from a connection. + Event::BinaryFrame { message, .. } => { + let json: serde_json::Value = serde_json::from_slice(&message).unwrap(); + let object = + json.as_object().unwrap().get("payload").unwrap().as_object().unwrap(); + if matches!(object.get("best"), Some(serde_json::Value::String(_))) { + break + } + }, + + Event::TextFrame { .. } => { + panic!("Got a TextFrame over the socket, this is a bug") + }, + + // Connection has been closed. + Event::ConnectionError { .. 
} => {}, + } } - } - }); - - let mut substrate = process::Command::new(cargo_bin("substrate")); - - let mut substrate = substrate - .args(&["--dev", "--tmp", "--telemetry-url"]) - .arg(format!("ws://{} 10", addr)) - .arg("--no-hardware-benchmarks") - .stdout(process::Stdio::piped()) - .stderr(process::Stdio::piped()) - .stdin(process::Stdio::null()) - .spawn() - .unwrap(); - - server_task.await.expect("server task panicked"); - - assert!(substrate.try_wait().unwrap().is_none(), "the process should still be running"); - - // Stop the process - kill(Pid::from_raw(substrate.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut substrate, 40).map(|x| x.success()).unwrap_or_default()); - - let output = substrate.wait_with_output().unwrap(); - - println!("{}", String::from_utf8(output.stdout).unwrap()); - eprintln!("{}", String::from_utf8(output.stderr).unwrap()); - assert!(output.status.success()); + }); + + let mut substrate = process::Command::new(cargo_bin("substrate")); + + let mut substrate = KillChildOnDrop( + substrate + .args(&["--dev", "--tmp", "--telemetry-url"]) + .arg(format!("ws://{} 10", addr)) + .arg("--no-hardware-benchmarks") + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .stdin(process::Stdio::null()) + .spawn() + .unwrap(), + ); + + server_task.await.expect("server task panicked"); + + substrate.assert_still_running(); + + // Stop the process + substrate.stop(); + }) + .await; } diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index 4e743f2d3abd4..fdcd9e23dde5a 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -19,45 +19,44 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; -use nix::{ - sys::signal::{kill, Signal::SIGINT}, - unistd::Pid, -}; -use regex::Regex; use std::{ - io::Read, - path::PathBuf, process::{Command, Stdio}, + time::Duration, }; -pub mod common; +use substrate_cli_test_utils as common; -#[tokio::test] +#[allow(dead_code)] +// Apparently `#[ignore]` doesn't actually work to disable this one. +//#[tokio::test] async fn temp_base_path_works() { - let mut cmd = Command::new(cargo_bin("substrate")); - let mut child = common::KillChildOnDrop( - cmd.args(&["--dev", "--tmp", "--no-hardware-benchmarks"]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn() - .unwrap(), - ); - - let mut stderr = child.stderr.take().unwrap(); - let (ws_url, mut data) = common::find_ws_url_from_output(&mut stderr); - - // Let it produce some blocks. 
- common::wait_n_finalized_blocks(3, 30, &ws_url).await.unwrap(); - assert!(child.try_wait().unwrap().is_none(), "the process should still be running"); - - // Stop the process - kill(Pid::from_raw(child.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut child, 40).map(|x| x.success()).unwrap_or_default()); - - // Ensure the database has been deleted - stderr.read_to_string(&mut data).unwrap(); - let re = Regex::new(r"Database: .+ at (\S+)").unwrap(); - let db_path = PathBuf::from(re.captures(data.as_str()).unwrap().get(1).unwrap().as_str()); - - assert!(!db_path.exists()); + common::run_with_timeout(Duration::from_secs(60 * 10), async move { + let mut cmd = Command::new(cargo_bin("substrate")); + let mut child = common::KillChildOnDrop( + cmd.args(&["--dev", "--tmp", "--no-hardware-benchmarks"]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .unwrap(), + ); + + let mut stderr = child.stderr.take().unwrap(); + let node_info = common::extract_info_from_output(&mut stderr).0; + + // Let it produce some blocks. + common::wait_n_finalized_blocks(3, &node_info.ws_url).await; + + // Ensure the db path exists while the node is running + assert!(node_info.db_path.exists()); + + child.assert_still_running(); + + // Stop the process + child.stop(); + + if node_info.db_path.exists() { + panic!("Database path `{}` wasn't deleted!", node_info.db_path.display()); + } + }) + .await; } diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index cb661f536f7b7..c4711adcb0e82 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2" } -scale-info = { version = "2.1.1", features = ["derive"] } +scale-info = { version = "2.5.0", features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" } node-primitives = { version = "2.0.0", path = "../primitives" } kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 19e7b158a8c81..aa7d9eb0f31ff 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -26,7 +26,7 @@ use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; use sc_executor::{ - Externalities, NativeElseWasmExecutor, RuntimeVersionOf, WasmExecutionMethod, + Externalities, NativeElseWasmExecutor, RuntimeVersionOf, WasmExecutionMethod, WasmExecutor, WasmtimeInstantiationStrategy, }; use sp_core::{ @@ -167,7 +167,7 @@ fn test_blocks( }]; block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { signed: Some((alice(), signed_extra(i, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 1 * DOLLARS, }), @@ -191,12 +191,13 @@ fn bench_execute_block(c: &mut Criterion) { for strategy in execution_methods { group.bench_function(format!("{:?}", strategy), |b| { let genesis_config = node_testing::genesis::config(Some(compact_code_unwrap())); - let (use_native, wasm_method) = match strategy { - ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted), - ExecutionMethod::Wasm(wasm_method) => (false, wasm_method), + let use_native = match strategy { + ExecutionMethod::Native => true, + ExecutionMethod::Wasm(..) 
=> false, }; - let executor = NativeElseWasmExecutor::new(wasm_method, None, 8, 2); + let executor = + NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()), hash: vec![1, 2, 3], diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index ecfe02aa41596..d301aa06f90b0 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -89,7 +89,7 @@ fn changes_trie_block() -> (Vec, Hash) { }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, }), @@ -116,7 +116,7 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, }), @@ -136,14 +136,14 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { }, CheckedExtrinsic { signed: Some((bob(), signed_extra(0, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: alice().into(), value: 5 * DOLLARS, }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(1, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 15 * DOLLARS, }), @@ -183,7 +183,12 @@ fn panic_execution_with_foreign_code_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap()); t.insert( >::hashed_key_for(alice()), - (69u128, 0u32, 0u128, 0u128, 0u128).encode(), + AccountInfo::<::Index, _> { + providers: 1, + data: (69u128, 0u128, 0u128, 1u128 << 127), + ..Default::default() + } + .encode(), ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -204,9 +209,14 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { let mut t = new_test_ext(compact_code_unwrap()); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode(), + AccountInfo::<::Index, _> { + providers: 1, + data: (69u128, 0u128, 0u128, 1u128 << 127), + ..Default::default() + } + .encode(), ); - t.insert(>::hashed_key().to_vec(), 69_u128.encode()); + t.insert(>::hashed_key().to_vec(), 69u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); let r = @@ -226,17 +236,18 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { t.insert( >::hashed_key_for(alice()), AccountInfo::<::Index, _> { - data: (111 * DOLLARS, 0u128, 0u128, 0u128), + providers: 1, + data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127), ..Default::default() } .encode(), ); t.insert( >::hashed_key_for(bob()), - AccountInfo::<::Index, _> { - data: (0 * DOLLARS, 0u128, 0u128, 0u128), - ..Default::default() - } + AccountInfo::< + ::Index, + ::AccountData, + >::default() .encode(), ); t.insert( @@ -267,17 +278,18 @@ fn successful_execution_with_foreign_code_gives_ok() { t.insert( >::hashed_key_for(alice()), AccountInfo::<::Index, _> { - data: (111 * DOLLARS, 0u128, 0u128, 0u128), + providers: 1, + data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127), ..Default::default() } .encode(), ); t.insert( 
>::hashed_key_for(bob()), - AccountInfo::<::Index, _> { - data: (0 * DOLLARS, 0u128, 0u128, 0u128), - ..Default::default() - } + AccountInfo::< + ::Index, + ::AccountData, + >::default() .encode(), ); t.insert( @@ -710,11 +722,7 @@ fn deploying_wasm_contract_should_work() { t.execute_with(|| { // Verify that the contract does exist by querying some of its storage items // It does not matter that the storage item itself does not exist. - assert!(&pallet_contracts::Pallet::::get_storage( - addr, - pallet_contracts::StorageKey::::default().to_vec() - ) - .is_ok()); + assert!(&pallet_contracts::Pallet::::get_storage(addr, vec![]).is_ok()); }); } @@ -788,17 +796,18 @@ fn successful_execution_gives_ok() { t.insert( >::hashed_key_for(alice()), AccountInfo::<::Index, _> { - data: (111 * DOLLARS, 0u128, 0u128, 0u128), + providers: 1, + data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127), ..Default::default() } .encode(), ); t.insert( >::hashed_key_for(bob()), - AccountInfo::<::Index, _> { - data: (0 * DOLLARS, 0u128, 0u128, 0u128), - ..Default::default() - } + AccountInfo::< + ::Index, + ::AccountData, + >::default() .encode(), ); t.insert( diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 036528f8dae1f..6ce9ea3a01090 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -18,7 +18,7 @@ use codec::{Decode, Encode}; use frame_support::Hashable; use frame_system::offchain::AppCrypto; -use sc_executor::{error::Result, NativeElseWasmExecutor, WasmExecutionMethod}; +use sc_executor::{error::Result, NativeElseWasmExecutor, WasmExecutor}; use sp_consensus_babe::{ digests::{PreDigest, SecondaryPlainPreDigest}, Slot, BABE_ENGINE_ID, @@ -87,7 +87,10 @@ pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { } pub fn default_transfer_call() -> pallet_balances::Call { - pallet_balances::Call::::transfer { dest: bob().into(), value: 69 * DOLLARS } + pallet_balances::Call::::transfer_allow_death { + dest: bob().into(), + value: 69 * DOLLARS, + } } pub fn from_block_number(n: u32) -> Header { @@ -95,7 +98,7 @@ pub fn from_block_number(n: u32) -> Header { } pub fn executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8, 2) + NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) } pub fn executor_call( diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index cbf0fdce9476b..970d790a87d3b 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -120,9 +120,9 @@ fn new_account_info(free_dollars: u128) -> Vec { frame_system::AccountInfo { nonce: 0u32, consumers: 0, - providers: 0, + providers: 1, sufficients: 0, - data: (free_dollars * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS), + data: (free_dollars * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 1u128 << 127), } .encode() } @@ -214,7 +214,7 @@ fn block_weight_capacity_report() { let mut xts = (0..num_transfers) .map(|i| CheckedExtrinsic { signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 0, }), diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index ab5df62a2e714..b260f90a87466 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -18,11 +18,10 @@ use 
codec::Decode; use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction}; use kitchensink_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic}; -use sp_application_crypto::AppKey; +use sp_application_crypto::AppCrypto; use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; use sp_keyring::sr25519::Keyring::Alice; -use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStore}; -use std::sync::Arc; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; pub mod common; use self::common::*; @@ -62,31 +61,22 @@ fn should_submit_signed_transaction() { let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); - let keystore = KeyStore::new(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)), - ) - .unwrap(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)), - ) - .unwrap(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter3", PHRASE)), - ) - .unwrap(); - t.register_extension(KeystoreExt(Arc::new(keystore))); + let keystore = MemoryKeystore::new(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter2", PHRASE))) + .unwrap(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter3", PHRASE))) + .unwrap(); + t.register_extension(KeystoreExt::new(keystore)); t.execute_with(|| { let results = Signer::::all_accounts().send_signed_transaction(|_| { - pallet_balances::Call::transfer { + pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default(), } @@ -105,25 +95,19 @@ fn should_submit_signed_twice_from_the_same_account() { let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); - let keystore = KeyStore::new(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)), - ) - .unwrap(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)), - ) - .unwrap(); - t.register_extension(KeystoreExt(Arc::new(keystore))); + let keystore = MemoryKeystore::new(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter2", PHRASE))) + .unwrap(); + t.register_extension(KeystoreExt::new(keystore)); t.execute_with(|| { let result = Signer::::any_account().send_signed_transaction(|_| { - pallet_balances::Call::transfer { + pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default(), } @@ -135,7 +119,7 @@ fn should_submit_signed_twice_from_the_same_account() { // submit another one from the same account. The nonce should be incremented. 
let result = Signer::::any_account().send_signed_transaction(|_| { - pallet_balances::Call::transfer { + pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default(), } @@ -162,19 +146,19 @@ fn should_submit_signed_twice_from_all_accounts() { let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); - let keystore = KeyStore::new(); + let keystore = MemoryKeystore::new(); keystore .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE))) .unwrap(); keystore .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter2", PHRASE))) .unwrap(); - t.register_extension(KeystoreExt(Arc::new(keystore))); + t.register_extension(KeystoreExt::new(keystore)); t.execute_with(|| { let results = Signer::::all_accounts() .send_signed_transaction(|_| { - pallet_balances::Call::transfer { dest: Alice.to_account_id().into(), value: Default::default() } + pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default() } }); let len = results.len(); @@ -185,7 +169,7 @@ fn should_submit_signed_twice_from_all_accounts() { // submit another one from the same account. The nonce should be incremented. let results = Signer::::all_accounts() .send_signed_transaction(|_| { - pallet_balances::Call::transfer { dest: Alice.to_account_id().into(), value: Default::default() } + pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default() } }); let len = results.len(); @@ -226,19 +210,16 @@ fn submitted_transaction_should_be_valid() { let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); - let keystore = KeyStore::new(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)), - ) - .unwrap(); - t.register_extension(KeystoreExt(Arc::new(keystore))); + let keystore = MemoryKeystore::new(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + t.register_extension(KeystoreExt::new(keystore)); t.execute_with(|| { let results = Signer::::all_accounts().send_signed_transaction(|_| { - pallet_balances::Call::transfer { + pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default(), } diff --git a/bin/node/inspect/src/command.rs b/bin/node/inspect/src/command.rs index 3dca979eb9e4a..9702576833ccf 100644 --- a/bin/node/inspect/src/command.rs +++ b/bin/node/inspect/src/command.rs @@ -23,41 +23,34 @@ use crate::{ Inspector, }; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; -use sc_executor::NativeElseWasmExecutor; -use sc_service::{new_full_client, Configuration, NativeExecutionDispatch}; +use sc_service::{Configuration, NativeExecutionDispatch}; use sp_runtime::traits::Block; use std::str::FromStr; impl InspectCmd { /// Run the inspect command, passing the inspector. 
- pub fn run(&self, config: Configuration) -> Result<()> + pub fn run(&self, config: Configuration) -> Result<()> where B: Block, B::Hash: FromStr, RA: Send + Sync + 'static, - EX: NativeExecutionDispatch + 'static, + D: NativeExecutionDispatch + 'static, { - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); - - let client = new_full_client::(&config, None, executor)?; + let executor = sc_service::new_native_or_wasm_executor::(&config); + let client = sc_service::new_full_client::(&config, None, executor)?; let inspect = Inspector::::new(client); match &self.command { InspectSubCmd::Block { input } => { let input = input.parse()?; - let res = inspect.block(input).map_err(|e| format!("{}", e))?; - println!("{}", res); + let res = inspect.block(input).map_err(|e| e.to_string())?; + println!("{res}"); Ok(()) }, InspectSubCmd::Extrinsic { input } => { let input = input.parse()?; - let res = inspect.extrinsic(input).map_err(|e| format!("{}", e))?; - println!("{}", res); + let res = inspect.extrinsic(input).map_err(|e| e.to_string())?; + println!("{res}"); Ok(()) }, } diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 1f515f3e3a5b5..78cbf67ce7710 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../../primitives/application-crypto" } sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 0ea6f49bd6094..724efbe9a5721 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -21,7 +21,6 @@ sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-babe-rpc = { version = "0.10.0-dev", path = "../../../client/consensus/babe/rpc" } -sc-consensus-epochs = { version = "0.10.0-dev", path = "../../../client/consensus/epochs" } sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } sc-consensus-grandpa-rpc = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa/rpc" } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 7c2f7c15e05c8..5f61fdcd55d97 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -36,8 +36,7 @@ use std::sync::Arc; use jsonrpsee::RpcModule; use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; use sc_client_api::AuxStore; -use sc_consensus_babe::{BabeConfiguration, Epoch}; -use sc_consensus_epochs::SharedEpochChanges; +use sc_consensus_babe::BabeWorkerHandle; use sc_consensus_grandpa::{ FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState, }; @@ -49,16 +48,14 @@ use 
sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; -use sp_keystore::SyncCryptoStorePtr; +use sp_keystore::KeystorePtr; /// Extra dependencies for BABE. pub struct BabeDeps { - /// BABE protocol config. - pub babe_config: BabeConfiguration, - /// BABE pending epoch changes. - pub shared_epoch_changes: SharedEpochChanges, + /// A handle to the BABE worker for issuing requests. + pub babe_worker_handle: BabeWorkerHandle, /// The keystore that manages the keys of the node. - pub keystore: SyncCryptoStorePtr, + pub keystore: KeystorePtr, } /// Extra dependencies for GRANDPA @@ -130,7 +127,7 @@ where let mut io = RpcModule::new(()); let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa } = deps; - let BabeDeps { keystore, babe_config, shared_epoch_changes } = babe; + let BabeDeps { keystore, babe_worker_handle } = babe; let GrandpaDeps { shared_voter_state, shared_authority_set, @@ -151,15 +148,8 @@ where io.merge(Mmr::new(client.clone()).into_rpc())?; io.merge(TransactionPayment::new(client.clone()).into_rpc())?; io.merge( - Babe::new( - client.clone(), - shared_epoch_changes.clone(), - keystore, - babe_config, - select_chain, - deny_unsafe, - ) - .into_rpc(), + Babe::new(client.clone(), babe_worker_handle.clone(), keystore, select_chain, deny_unsafe) + .into_rpc(), )?; io.merge( Grandpa::new( @@ -173,7 +163,7 @@ where )?; io.merge( - SyncState::new(chain_spec, client.clone(), shared_authority_set, shared_epoch_changes)? + SyncState::new(chain_spec, client.clone(), shared_authority_set, babe_worker_handle)? .into_rpc(), )?; diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 16f1d0a4cb532..0fa037c657cee 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } static_assertions = "1.1.0" log = { version = "0.4.17", default-features = false } @@ -46,13 +46,14 @@ sp-io = { version = "7.0.0", default-features = false, path = "../../../primitiv frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking" } frame-benchmarking-pallet-pov = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking/pov" } -frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support", features = ["tuples-96"] } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-support" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", 
optional = true } pallet-alliance = { version = "4.0.0-dev", default-features = false, path = "../../../frame/alliance" } +pallet-asset-rate = { version = "4.0.0-dev", default-features = false, path = "../../../frame/asset-rate" } pallet-assets = { version = "4.0.0-dev", default-features = false, path = "../../../frame/assets" } pallet-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/authority-discovery" } pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../../../frame/authorship" } @@ -201,6 +202,7 @@ std = [ "pallet-transaction-payment/std", "pallet-transaction-storage/std", "pallet-treasury/std", + "pallet-asset-rate/std", "sp-transaction-pool/std", "pallet-utility/std", "sp-version/std", @@ -272,6 +274,7 @@ runtime-benchmarks = [ "pallet-tips/runtime-benchmarks", "pallet-transaction-storage/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", + "pallet-asset-rate/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", @@ -333,6 +336,7 @@ try-runtime = [ "pallet-timestamp/try-runtime", "pallet-tips/try-runtime", "pallet-treasury/try-runtime", + "pallet-asset-rate/try-runtime", "pallet-utility/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-asset-tx-payment/try-runtime", diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 09f7b6a29e0b6..05531f47c6e05 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -24,7 +24,7 @@ use crate::{ use frame_support::{ pallet_prelude::*, traits::{ - fungibles::{Balanced, CreditOf}, + fungibles::{Balanced, Credit}, Currency, OnUnbalanced, }, }; @@ -45,7 +45,7 @@ impl OnUnbalanced for Author { /// Will drop and burn the assets in case the transfer fails. pub struct CreditToBlockAuthor; impl HandleCredit for CreditToBlockAuthor { - fn handle_credit(credit: CreditOf) { + fn handle_credit(credit: Credit) { if let Some(author) = pallet_authorship::Pallet::::author() { // Drop the result which will trigger the `OnDrop` of the imbalance in case of error. 
let _ = Assets::resolve(&author, credit); @@ -61,15 +61,13 @@ impl IdentityVerifier for AllianceIdentityVerifier { fn has_good_judgement(who: &AccountId) -> bool { use pallet_identity::Judgement; - if let Some(judgements) = - crate::Identity::identity(who).map(|registration| registration.judgements) - { - judgements - .iter() - .any(|(_, j)| matches!(j, Judgement::KnownGood | Judgement::Reasonable)) - } else { - false - } + crate::Identity::identity(who) + .map(|registration| registration.judgements) + .map_or(false, |judgements| { + judgements + .iter() + .any(|(_, j)| matches!(j, Judgement::KnownGood | Judgement::Reasonable)) + }) } fn super_account_id(who: &AccountId) -> Option { diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d8426d3b35e15..766bc9515b8ab 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -33,7 +33,7 @@ use frame_support::{ parameter_types, traits::{ fungible::ItemOf, - tokens::{nonfungibles_v2::Inspect, GetSalary}, + tokens::{nonfungibles_v2::Inspect, GetSalary, PayFromAccount}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, @@ -56,9 +56,10 @@ use pallet_election_provider_multi_phase::SolutionAccuracyOf; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; -use pallet_session::historical::{self as pallet_session_historical}; +use pallet_session::historical as pallet_session_historical; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; +use scale_info::TypeInfo; use sp_api::impl_runtime_apis; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_grandpa::AuthorityId as GrandpaId; @@ -209,6 +210,7 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); + pub MaxCollectivesProposalWeight: Weight = Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block; } const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); @@ -363,7 +365,10 @@ impl pallet_scheduler::Config for Runtime { type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; + #[cfg(feature = "runtime-benchmarks")] type MaxScheduledPerBlock = ConstU32<512>; + #[cfg(not(feature = "runtime-benchmarks"))] + type MaxScheduledPerBlock = ConstU32<50>; type WeightInfo = pallet_scheduler::weights::SubstrateWeight; type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; @@ -432,6 +437,15 @@ parameter_types! { pub const MaxReserves: u32 = 50; } +/// A reason for placing a hold on funds. +#[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, Debug, TypeInfo, +)] +pub enum HoldReason { + /// The NIS Pallet has reserved it for a non-fungible receipt. 
+ Nis, +} + impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type MaxReserves = MaxReserves; @@ -442,6 +456,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type WeightInfo = pallet_balances::weights::SubstrateWeight; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = HoldReason; + type MaxHolds = ConstU32<1>; } parameter_types! { @@ -995,6 +1013,7 @@ impl pallet_collective::Config for Runtime { type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = pallet_collective::weights::SubstrateWeight; type SetMembersOrigin = EnsureRoot; + type MaxProposalWeight = MaxCollectivesProposalWeight; } parameter_types! { @@ -1055,6 +1074,7 @@ impl pallet_collective::Config for Runtime { type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = pallet_collective::weights::SubstrateWeight; type SetMembersOrigin = EnsureRoot; + type MaxProposalWeight = MaxCollectivesProposalWeight; } type EnsureRootOrHalfCouncil = EitherOfDiverse< @@ -1114,6 +1134,17 @@ impl pallet_treasury::Config for Runtime { type SpendOrigin = EnsureWithSuccess, AccountId, MaxBalance>; } +impl pallet_asset_rate::Config for Runtime { + type CreateOrigin = EnsureRoot; + type RemoveOrigin = EnsureRoot; + type UpdateOrigin = EnsureRoot; + type Balance = Balance; + type Currency = Balances; + type AssetId = u32; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_asset_rate::weights::SubstrateWeight; +} + parameter_types! { pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); pub const BountyValueMinimum: Balance = 5 * DOLLARS; @@ -1184,13 +1215,7 @@ impl pallet_tips::Config for Runtime { parameter_types! { pub const DepositPerItem: Balance = deposit(1, 0); pub const DepositPerByte: Balance = deposit(0, 1); - pub const DeletionQueueDepth: u32 = 128; - // The lazy deletion runs inside on_initialize. - pub DeletionWeightLimit: Weight = RuntimeBlockWeights::get() - .per_class - .get(DispatchClass::Normal) - .max_total - .unwrap_or(RuntimeBlockWeights::get().max_block); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); pub Schedule: pallet_contracts::Schedule = Default::default(); } @@ -1209,12 +1234,11 @@ impl pallet_contracts::Config for Runtime { type CallFilter = Nothing; type DepositPerItem = DepositPerItem; type DepositPerByte = DepositPerByte; + type DefaultDepositLimit = DefaultDepositLimit; type CallStack = [pallet_contracts::Frame; 5]; type WeightPrice = pallet_transaction_payment::Pallet; type WeightInfo = pallet_contracts::weights::SubstrateWeight; type ChainExtension = (); - type DeletionQueueDepth = DeletionQueueDepth; - type DeletionWeightLimit = DeletionWeightLimit; type Schedule = Schedule; type AddressGenerator = pallet_contracts::DefaultAddressGenerator; type MaxCodeLen = ConstU32<{ 123 * 1024 }>; @@ -1481,7 +1505,6 @@ impl pallet_assets::Config for Runtime { } parameter_types! { - pub IgnoredIssuance: Balance = Treasury::pot(); pub const QueueCount: u32 = 300; pub const MaxQueueLen: u32 = 1000; pub const FifoQueueLen: u32 = 500; @@ -1493,7 +1516,7 @@ parameter_types! 
{ pub const ThawThrottle: (Perquintill, BlockNumber) = (Perquintill::from_percent(25), 5); pub Target: Perquintill = Perquintill::zero(); pub const NisPalletId: PalletId = PalletId(*b"py/nis "); - pub const NisReserveId: [u8; 8] = *b"py/nis "; + pub const NisHoldReason: HoldReason = HoldReason::Nis; } impl pallet_nis::Config for Runtime { @@ -1505,7 +1528,7 @@ impl pallet_nis::Config for Runtime { type Counterpart = ItemOf, AccountId>; type CounterpartAmount = WithMaximumOf>; type Deficit = (); - type IgnoredIssuance = IgnoredIssuance; + type IgnoredIssuance = (); type Target = Target; type PalletId = NisPalletId; type QueueCount = QueueCount; @@ -1517,7 +1540,7 @@ impl pallet_nis::Config for Runtime { type IntakePeriod = IntakePeriod; type MaxIntakeWeight = MaxIntakeWeight; type ThawThrottle = ThawThrottle; - type ReserveId = NisReserveId; + type HoldReason = NisHoldReason; } parameter_types! { @@ -1567,7 +1590,7 @@ impl GetSalary for SalaryForRank { impl pallet_salary::Config for Runtime { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; - type Paymaster = pallet_salary::PayFromAccount; + type Paymaster = PayFromAccount; type Members = RankedCollective; type Salary = SalaryForRank; type RegistrationPeriod = ConstU32<200>; @@ -1682,6 +1705,7 @@ impl pallet_collective::Config for Runtime { type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = pallet_collective::weights::SubstrateWeight; type SetMembersOrigin = EnsureRoot; + type MaxProposalWeight = MaxCollectivesProposalWeight; } parameter_types! { @@ -1758,6 +1782,7 @@ construct_runtime!( TechnicalMembership: pallet_membership::, Grandpa: pallet_grandpa, Treasury: pallet_treasury, + AssetRate: pallet_asset_rate, Contracts: pallet_contracts, Sudo: pallet_sudo, ImOnline: pallet_im_online, @@ -1915,6 +1940,7 @@ mod benches { [pallet_tips, Tips] [pallet_transaction_storage, TransactionStorage] [pallet_treasury, Treasury] + [pallet_asset_rate, AssetRate] [pallet_uniques, Uniques] [pallet_nfts, Nfts] [pallet_utility, Utility] @@ -1942,6 +1968,14 @@ impl_runtime_apis! { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } } impl sp_block_builder::BlockBuilder for Runtime { @@ -2126,7 +2160,7 @@ impl_runtime_apis! { storage_deposit_limit, input_data, true, - pallet_contracts::Determinism::Deterministic, + pallet_contracts::Determinism::Enforced, ) } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 1a9af13028483..9dbb26a906e6f 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -308,7 +308,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { value: kitchensink_runtime::ExistentialDeposit::get() + 1, }), BlockType::RandomTransfersReaping => { - RuntimeCall::Balances(BalancesCall::transfer { + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. 
@@ -392,14 +392,14 @@ impl BenchDb { let task_executor = TaskExecutor::new(); let backend = sc_service::new_db_backend(db_config).expect("Should not fail"); - let executor = NativeElseWasmExecutor::new( - WasmExecutionMethod::Compiled { - instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, - }, - None, - 8, - 2, + let executor = NativeElseWasmExecutor::new_with_wasm_executor( + sc_executor::WasmExecutor::builder() + .with_execution_method(WasmExecutionMethod::Compiled { + instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, + }) + .build(), ); + let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( &keyring.generate_genesis(), @@ -411,11 +411,16 @@ impl BenchDb { let client = sc_service::new_client( backend.clone(), - executor, + executor.clone(), genesis_block_builder, None, None, - ExecutionExtensions::new(profile.into_execution_strategies(), None, None), + ExecutionExtensions::new( + profile.into_execution_strategies(), + None, + None, + Arc::new(executor), + ), Box::new(task_executor.clone()), None, None, diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index b3f28b269fddd..f94fca1c1638d 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -19,7 +19,6 @@ use std::{ fs, path::{Path, PathBuf}, - sync::Arc, }; use ansi_term::Style; @@ -32,7 +31,7 @@ use sp_core::{ crypto::{ByteArray, Ss58Codec}, sr25519, }; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; /// A utility to easily create a testnet chain spec definition with a given set /// of authorities and endowed accounts and/or generate random accounts. @@ -164,16 +163,17 @@ fn generate_chain_spec( fn generate_authority_keys_and_store(seeds: &[String], keystore_path: &Path) -> Result<(), String> { for (n, seed) in seeds.iter().enumerate() { - let keystore: SyncCryptoStorePtr = Arc::new( + let keystore: KeystorePtr = LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None) - .map_err(|err| err.to_string())?, - ); + .map_err(|err| err.to_string())? + .into(); let (_, _, grandpa, babe, im_online, authority_discovery) = chain_spec::authority_keys_from_seed(seed); let insert_key = |key_type, public| { - SyncCryptoStore::insert_unknown(&*keystore, key_type, &format!("//{}", seed), public) + keystore + .insert(key_type, &format!("//{}", seed), public) .map_err(|_| format!("Failed to insert key: {}", grandpa)) }; diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs index c3cb827afec08..144c0764540db 100644 --- a/client/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -421,11 +421,11 @@ impl FreeingBumpHeapAllocator { let header_ptr: u32 = match self.free_lists[order] { Link::Ptr(header_ptr) => { - assert!( - u64::from(header_ptr + order.size() + HEADER_SIZE) <= mem.size(), - "Pointer is looked up in list of free entries, into which - only valid values are inserted; qed" - ); + if (u64::from(header_ptr) + u64::from(order.size()) + u64::from(HEADER_SIZE)) > + mem.size() + { + return Err(error("Invalid header pointer detected")) + } // Remove this header from the free list. let next_free = Header::read_from(mem, header_ptr)? 
@@ -1106,4 +1106,25 @@ mod tests { assert_eq!(3, mem.pages()); } + + #[test] + fn modifying_the_header_leads_to_an_error() { + let mut mem = MemoryInstance::with_pages(1); + let mut heap = FreeingBumpHeapAllocator::new(0); + + let ptr = heap.allocate(&mut mem, 5).unwrap(); + + heap.deallocate(&mut mem, ptr).unwrap(); + + Header::Free(Link::Ptr(u32::MAX - 1)) + .write_into(&mut mem, u32::from(ptr) - HEADER_SIZE) + .unwrap(); + + heap.allocate(&mut mem, 5).unwrap(); + assert!(heap + .allocate(&mut mem, 5) + .unwrap_err() + .to_string() + .contains("Invalid header pointer")); + } } diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index b491d7672e8f0..9344afbd3e6dd 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -27,10 +27,11 @@ use parking_lot::RwLock; use sc_transaction_pool_api::OffchainSubmitTransaction; use sp_core::{ offchain::{self, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, + traits::{ReadRuntimeVersion, ReadRuntimeVersionExt}, ExecutionContext, }; use sp_externalities::{Extension, Extensions}; -use sp_keystore::{KeystoreExt, SyncCryptoStorePtr}; +use sp_keystore::{KeystoreExt, KeystorePtr}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor}, @@ -163,7 +164,7 @@ impl DbExternaliti /// for each call, based on required `Capabilities`. pub struct ExecutionExtensions { strategies: ExecutionStrategies, - keystore: Option, + keystore: Option, offchain_db: Option>, // FIXME: these two are only RwLock because of https://github.com/paritytech/substrate/issues/4587 // remove when fixed. @@ -173,26 +174,16 @@ pub struct ExecutionExtensions { // during initialization. transaction_pool: RwLock>>>, extensions_factory: RwLock>>, -} - -impl Default for ExecutionExtensions { - fn default() -> Self { - Self { - strategies: Default::default(), - keystore: None, - offchain_db: None, - transaction_pool: RwLock::new(None), - extensions_factory: RwLock::new(Box::new(())), - } - } + read_runtime_version: Arc, } impl ExecutionExtensions { /// Create new `ExecutionExtensions` given a `keystore` and `ExecutionStrategies`. pub fn new( strategies: ExecutionStrategies, - keystore: Option, + keystore: Option, offchain_db: Option>, + read_runtime_version: Arc, ) -> Self { let transaction_pool = RwLock::new(None); let extensions_factory = Box::new(()); @@ -202,6 +193,7 @@ impl ExecutionExtensions { offchain_db, extensions_factory: RwLock::new(extensions_factory), transaction_pool, + read_runtime_version, } } @@ -271,6 +263,8 @@ impl ExecutionExtensions { ))); } + extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone())); + extensions } diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index 89c05b71b9ea6..13148bb338c2f 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -18,13 +18,12 @@ //! Authority discovery errors. -use sp_core::crypto::CryptoTypePublicPair; - /// AuthorityDiscovery Result. pub type Result = std::result::Result; /// Error type for the authority discovery module. 
#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { #[error("Received dht value found event with records with different keys.")] ReceivingDhtValueFoundEventWithDifferentKeys, @@ -59,11 +58,8 @@ pub enum Error { #[error("Failed to parse a libp2p key.")] ParsingLibp2pIdentity(#[from] libp2p::identity::error::DecodingError), - #[error("Failed to sign using a specific public key.")] - MissingSignature(CryptoTypePublicPair), - - #[error("Failed to sign using all public keys.")] - Signing, + #[error("Failed to sign: {0}.")] + CannotSign(String), #[error("Failed to register Prometheus metric.")] Prometheus(#[from] prometheus_endpoint::PrometheusError), @@ -76,4 +72,7 @@ pub enum Error { #[error("Received authority record without a valid signature for the remote peer id.")] MissingPeerIdSignature, + + #[error("Unable to fetch best block.")] + BestBlockFetchingError, } diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index a3c6699091297..6bb12804cada3 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -28,6 +28,7 @@ //! See [`Worker`] and [`Service`] for more documentation. pub use crate::{ + error::Error, service::Service, worker::{AuthorityDiscovery, NetworkProvider, Role, Worker}, }; @@ -148,7 +149,7 @@ pub fn new_worker_and_service_with_config + HeaderBackend + 'static, + Client: AuthorityDiscovery + 'static, DhtEventStream: Stream + Unpin, { let (to_worker, from_service) = mpsc::channel(0); diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 982c3fc04c590..d354f6a963e73 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -33,7 +33,7 @@ use std::{collections::HashSet, sync::Arc}; use sp_authority_discovery::AuthorityId; use sp_core::crypto::key_types; -use sp_keystore::{testing::KeyStore, CryptoStore}; +use sp_keystore::{testing::MemoryKeystore, Keystore}; #[test] fn get_addresses_and_authority_id() { @@ -42,12 +42,11 @@ fn get_addresses_and_authority_id() { let mut pool = LocalPool::new(); - let key_store = KeyStore::new(); + let key_store = MemoryKeystore::new(); let remote_authority_id: AuthorityId = pool.run_until(async { key_store .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) - .await .unwrap() .into() }); diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 034d72902e65d..590d1dd19dc12 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -32,7 +32,7 @@ use std::{ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}; use addr_cache::AddrCache; -use codec::Decode; +use codec::{Decode, Encode}; use ip_network::IpNetwork; use libp2p::{ core::multiaddr, @@ -43,6 +43,7 @@ use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; + use sc_network::{ event::DhtEvent, KademliaKey, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, Signature, }; @@ -51,9 +52,8 @@ use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, }; use sp_blockchain::HeaderBackend; - -use sp_core::crypto::{key_types, CryptoTypePublicPair, Pair}; -use sp_keystore::CryptoStore; +use sp_core::crypto::{key_types, ByteArray, Pair}; +use sp_keystore::{Keystore, KeystorePtr}; use sp_runtime::traits::Block as BlockT; mod 
addr_cache; @@ -78,7 +78,7 @@ const MAX_IN_FLIGHT_LOOKUPS: usize = 8; /// Role an authority discovery [`Worker`] can run as. pub enum Role { /// Publish own addresses and discover addresses of others. - PublishAndDiscover(Arc), + PublishAndDiscover(KeystorePtr), /// Discover addresses of others. Discover, } @@ -127,7 +127,7 @@ pub struct Worker { publish_if_changed_interval: ExpIncInterval, /// List of keys onto which addresses have been published at the latest publication. /// Used to check whether they have changed. - latest_published_keys: HashSet, + latest_published_keys: HashSet, /// Same value as in the configuration. publish_non_global_ips: bool, /// Same value as in the configuration. @@ -157,12 +157,15 @@ pub trait AuthorityDiscovery { /// Retrieve authority identifiers of the current and next authority set. async fn authorities(&self, at: Block::Hash) -> std::result::Result, ApiError>; + + /// Retrieve best block hash + async fn best_hash(&self) -> std::result::Result; } #[async_trait::async_trait] impl AuthorityDiscovery for T where - T: ProvideRuntimeApi + Send + Sync, + T: ProvideRuntimeApi + HeaderBackend + Send + Sync, T::Api: AuthorityDiscoveryApi, Block: BlockT, { @@ -172,13 +175,17 @@ where ) -> std::result::Result, ApiError> { self.runtime_api().authorities(at) } + + async fn best_hash(&self) -> std::result::Result { + Ok(self.info().best_hash) + } } impl Worker where Block: BlockT + Unpin + 'static, Network: NetworkProvider, - Client: AuthorityDiscovery + HeaderBackend + 'static, + Client: AuthorityDiscovery + 'static, DhtEventStream: Stream + Unpin, { /// Construct a [`Worker`]. @@ -339,7 +346,7 @@ where let keys = Worker::::get_own_public_keys_within_authority_set( key_store.clone(), self.client.as_ref(), - ).await?.into_iter().map(Into::into).collect::>(); + ).await?.into_iter().collect::>(); if only_if_changed && keys == self.latest_published_keys { return Ok(()) @@ -364,8 +371,7 @@ where Some(peer_signature), key_store.as_ref(), keys_vec, - ) - .await?; + )?; for (key, value) in kv_pairs.into_iter() { self.network.put_value(key, value); @@ -377,12 +383,11 @@ where } async fn refill_pending_lookups_queue(&mut self) -> Result<()> { - let best_hash = self.client.info().best_hash; + let best_hash = self.client.best_hash().await?; let local_keys = match &self.role { Role::PublishAndDiscover(key_store) => key_store .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) - .await .into_iter() .collect::>(), Role::Discover => HashSet::new(), @@ -588,16 +593,15 @@ where // next authority set with two keys. The function does not return all of the local authority // discovery public keys, but only the ones intersecting with the current or next authority set. 
async fn get_own_public_keys_within_authority_set( - key_store: Arc, + key_store: KeystorePtr, client: &Client, ) -> Result> { let local_pub_keys = key_store .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) - .await .into_iter() .collect::>(); - let best_hash = client.info().best_hash; + let best_hash = client.best_hash().await?; let authorities = client .authorities(best_hash) .await @@ -657,39 +661,39 @@ fn sign_record_with_peer_id( ) -> Result { let signature = network .sign_with_local_identity(serialized_record) - .map_err(|_| Error::Signing)?; + .map_err(|e| Error::CannotSign(format!("{} (network packet)", e)))?; let public_key = signature.public_key.to_protobuf_encoding(); let signature = signature.bytes; Ok(schema::PeerSignature { signature, public_key }) } -async fn sign_record_with_authority_ids( +fn sign_record_with_authority_ids( serialized_record: Vec, peer_signature: Option, - key_store: &dyn CryptoStore, - keys: Vec, + key_store: &dyn Keystore, + keys: Vec, ) -> Result)>> { - let signatures = key_store - .sign_with_all(key_types::AUTHORITY_DISCOVERY, keys.clone(), &serialized_record) - .await - .map_err(|_| Error::Signing)?; - - let mut result = vec![]; - for (sign_result, key) in signatures.into_iter().zip(keys.iter()) { - let mut signed_record = vec![]; - - // Verify that all signatures exist for all provided keys. - let auth_signature = - sign_result.ok().flatten().ok_or_else(|| Error::MissingSignature(key.clone()))?; - schema::SignedAuthorityRecord { + let mut result = Vec::with_capacity(keys.len()); + + for key in keys.iter() { + let auth_signature = key_store + .sr25519_sign(key_types::AUTHORITY_DISCOVERY, key.as_ref(), &serialized_record) + .map_err(|e| Error::CannotSign(format!("{}. Key: {:?}", e, key)))? + .ok_or_else(|| { + Error::CannotSign(format!("Could not find key in keystore. 
Key: {:?}", key)) + })?; + + // Scale encode + let auth_signature = auth_signature.encode(); + + let signed_record = schema::SignedAuthorityRecord { record: serialized_record.clone(), auth_signature, peer_signature: peer_signature.clone(), } - .encode(&mut signed_record) - .map_err(Error::EncodingProto)?; + .encode_to_vec(); - result.push((hash_authority_id(key.1.as_ref()), signed_record)); + result.push((hash_authority_id(key.as_slice()), signed_record)); } Ok(result) diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 4ab5bfcdc3773..8eb9563ff206e 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -40,7 +40,7 @@ use prometheus_endpoint::prometheus::default_registry; use sc_client_api::HeaderBackend; use sc_network::Signature; use sp_api::{ApiRef, ProvideRuntimeApi}; -use sp_keystore::{testing::KeyStore, CryptoStore}; +use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use substrate_test_runtime_client::runtime::Block; @@ -208,10 +208,10 @@ impl<'a> NetworkSigner for TestSigner<'a> { } } -async fn build_dht_event( +fn build_dht_event( addresses: Vec, public_key: AuthorityId, - key_store: &dyn CryptoStore, + key_store: &MemoryKeystore, network: Option<&Signer>, ) -> Vec<(KademliaKey, Vec)> { let serialized_record = @@ -224,7 +224,6 @@ async fn build_dht_event( key_store, vec![public_key.into()], ) - .await .unwrap(); // There is always a single item in it, because we signed it with a single key kv_pairs @@ -234,7 +233,7 @@ async fn build_dht_event( fn new_registers_metrics() { let (_dht_event_tx, dht_event_rx) = mpsc::channel(1000); let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); + let key_store = MemoryKeystore::new(); let test_api = Arc::new(TestApi { authorities: vec![] }); let registry = prometheus_endpoint::Registry::new(); @@ -266,7 +265,7 @@ fn triggers_dht_get_query() { let test_api = Arc::new(TestApi { authorities: authorities.clone() }); let network = Arc::new(TestNetwork::default()); - let key_store = KeyStore::new(); + let key_store = MemoryKeystore::new(); let (_to_worker, from_service) = mpsc::channel(0); let mut worker = Worker::new( @@ -298,14 +297,12 @@ fn publish_discover_cycle() { let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); + let key_store = MemoryKeystore::new(); let _ = pool.spawner().spawn_local_obj( async move { - let node_a_public = key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) - .await - .unwrap(); + let node_a_public = + key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None).unwrap(); let test_api = Arc::new(TestApi { authorities: vec![node_a_public.into()] }); let (_to_worker, from_service) = mpsc::channel(0); @@ -337,7 +334,7 @@ fn publish_discover_cycle() { authorities: vec![node_a_public.into()], }); let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); + let key_store = MemoryKeystore::new(); let (_to_worker, from_service) = mpsc::channel(0); let mut worker = Worker::new( @@ -371,7 +368,7 @@ fn publish_discover_cycle() { fn terminate_when_event_stream_terminates() { let (dht_event_tx, dht_event_rx) = channel(1000); let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); + let key_store = MemoryKeystore::new(); let test_api = Arc::new(TestApi { authorities: vec![] }); let (to_worker, from_service) = 
mpsc::channel(0); @@ -420,11 +417,11 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { address.with(multiaddr::Protocol::P2p(peer_id.into())) }; - let remote_key_store = KeyStore::new(); - let remote_public_key: AuthorityId = - block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap() - .into(); + let remote_key_store = MemoryKeystore::new(); + let remote_public_key: AuthorityId = remote_key_store + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap() + .into(); let (mut dht_event_tx, dht_event_rx) = channel(1); let (network, mut network_events) = { @@ -433,7 +430,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { (Arc::new(n), r) }; - let key_store = KeyStore::new(); + let key_store = MemoryKeystore::new(); let test_api = Arc::new(TestApi { authorities: vec![remote_public_key.clone()] }); let mut pool = LocalPool::new(); @@ -480,8 +477,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { remote_public_key.clone(), &remote_key_store, None, - ) - .await; + ); DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -498,7 +494,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { } struct DhtValueFoundTester { - pub remote_key_store: KeyStore, + pub remote_key_store: MemoryKeystore, pub remote_authority_public: sp_core::sr25519::Public, pub remote_node_key: Keypair, pub local_worker: Option< @@ -516,10 +512,10 @@ struct DhtValueFoundTester { impl DhtValueFoundTester { fn new() -> Self { - let remote_key_store = KeyStore::new(); - let remote_authority_public = - block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap(); + let remote_key_store = MemoryKeystore::new(); + let remote_authority_public = remote_key_store + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); let remote_node_key = Keypair::generate_ed25519(); Self { remote_key_store, remote_authority_public, remote_node_key, local_worker: None } @@ -542,7 +538,7 @@ impl DhtValueFoundTester { let local_test_api = Arc::new(TestApi { authorities: vec![self.remote_authority_public.into()] }); let local_network: Arc = Arc::new(Default::default()); - let local_key_store = KeyStore::new(); + let local_key_store = MemoryKeystore::new(); let (_to_worker, from_service) = mpsc::channel(0); let mut local_worker = Worker::new( @@ -576,12 +572,12 @@ fn limit_number_of_addresses_added_to_cache_per_authority() { let mut tester = DhtValueFoundTester::new(); assert!(MAX_ADDRESSES_PER_AUTHORITY < 100); let addresses = (1..100).map(|i| tester.multiaddr_with_peer_id(i)).collect(); - let kv_pairs = block_on(build_dht_event::( + let kv_pairs = build_dht_event::( addresses, tester.remote_authority_public.into(), &tester.remote_key_store, None, - )); + ); let cached_remote_addresses = tester.process_value_found(false, kv_pairs); assert_eq!(MAX_ADDRESSES_PER_AUTHORITY, cached_remote_addresses.unwrap().len()); @@ -591,12 +587,12 @@ fn limit_number_of_addresses_added_to_cache_per_authority() { fn strict_accept_address_with_peer_signature() { let mut tester = DhtValueFoundTester::new(); let addr = tester.multiaddr_with_peer_id(1); - let kv_pairs = block_on(build_dht_event( + let kv_pairs = build_dht_event( vec![addr.clone()], tester.remote_authority_public.into(), &tester.remote_key_store, Some(&TestSigner { keypair: &tester.remote_node_key }), - )); + ); let cached_remote_addresses = tester.process_value_found(true, kv_pairs); @@ -611,12 
+607,12 @@ fn strict_accept_address_with_peer_signature() { fn reject_address_with_rogue_peer_signature() { let mut tester = DhtValueFoundTester::new(); let rogue_remote_node_key = Keypair::generate_ed25519(); - let kv_pairs = block_on(build_dht_event( + let kv_pairs = build_dht_event( vec![tester.multiaddr_with_peer_id(1)], tester.remote_authority_public.into(), &tester.remote_key_store, Some(&TestSigner { keypair: &rogue_remote_node_key }), - )); + ); let cached_remote_addresses = tester.process_value_found(false, kv_pairs); @@ -629,12 +625,12 @@ fn reject_address_with_rogue_peer_signature() { #[test] fn reject_address_with_invalid_peer_signature() { let mut tester = DhtValueFoundTester::new(); - let mut kv_pairs = block_on(build_dht_event( + let mut kv_pairs = build_dht_event( vec![tester.multiaddr_with_peer_id(1)], tester.remote_authority_public.into(), &tester.remote_key_store, Some(&TestSigner { keypair: &tester.remote_node_key }), - )); + ); // tamper with the signature let mut record = schema::SignedAuthorityRecord::decode(kv_pairs[0].1.as_slice()).unwrap(); record.peer_signature.as_mut().map(|p| p.signature[1] = !p.signature[1]); @@ -651,12 +647,12 @@ fn reject_address_with_invalid_peer_signature() { #[test] fn reject_address_without_peer_signature() { let mut tester = DhtValueFoundTester::new(); - let kv_pairs = block_on(build_dht_event::( + let kv_pairs = build_dht_event::( vec![tester.multiaddr_with_peer_id(1)], tester.remote_authority_public.into(), &tester.remote_key_store, None, - )); + ); let cached_remote_addresses = tester.process_value_found(true, kv_pairs); @@ -669,12 +665,12 @@ fn do_not_cache_addresses_without_peer_id() { let multiaddr_with_peer_id = tester.multiaddr_with_peer_id(1); let multiaddr_without_peer_id: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); - let kv_pairs = block_on(build_dht_event::( + let kv_pairs = build_dht_event::( vec![multiaddr_with_peer_id.clone(), multiaddr_without_peer_id], tester.remote_authority_public.into(), &tester.remote_key_store, None, - )); + ); let cached_remote_addresses = tester.process_value_found(false, kv_pairs); @@ -701,7 +697,7 @@ fn addresses_to_publish_adds_p2p() { Arc::new(TestApi { authorities: vec![] }), network.clone(), Box::pin(dht_event_rx), - Role::PublishAndDiscover(Arc::new(KeyStore::new())), + Role::PublishAndDiscover(MemoryKeystore::new().into()), Some(prometheus_endpoint::Registry::new()), Default::default(), ); @@ -735,7 +731,7 @@ fn addresses_to_publish_respects_existing_p2p_protocol() { Arc::new(TestApi { authorities: vec![] }), network.clone(), Box::pin(dht_event_rx), - Role::PublishAndDiscover(Arc::new(KeyStore::new())), + Role::PublishAndDiscover(MemoryKeystore::new().into()), Some(prometheus_endpoint::Registry::new()), Default::default(), ); @@ -755,10 +751,11 @@ fn lookup_throttling() { address.with(multiaddr::Protocol::P2p(peer_id.into())) }; - let remote_key_store = KeyStore::new(); + let remote_key_store = MemoryKeystore::new(); let remote_public_keys: Vec = (0..20) .map(|_| { - block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + remote_key_store + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) .unwrap() .into() }) @@ -818,8 +815,7 @@ fn lookup_throttling() { remote_key, &remote_key_store, None, - ) - .await; + ); DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs 
index 376278fa1b6db..795288d0a1da8 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -474,14 +474,6 @@ where break EndProposingReason::HitBlockWeightLimit } }, - Err(e) if skipped > 0 => { - pending_iterator.report_invalid(&pending_tx); - trace!( - "[{:?}] Ignoring invalid transaction when skipping: {}", - pending_tx_hash, - e - ); - }, Err(e) => { pending_iterator.report_invalid(&pending_tx); debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); @@ -740,8 +732,11 @@ mod tests { ); } + // This test ensures that if one transaction of a user was rejected, because for example + // the weight limit was hit, we don't mark the other transactions of the user as invalid because + // the nonce is not matching. #[test] - fn should_not_remove_invalid_transactions_when_skipping() { + fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() { // given let mut client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index d97afadd40156..21f8e6fdd3999 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -312,7 +312,9 @@ mod tests { use sp_blockchain::HeaderBackend; use sp_core::Blake2Hasher; use sp_state_machine::Backend; - use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; + use substrate_test_runtime_client::{ + runtime::Extrinsic, DefaultTestClientBuilderExt, TestClientBuilderExt, + }; #[test] fn block_building_storage_proof_does_not_include_runtime_by_default() { @@ -345,4 +347,62 @@ mod tests { .unwrap_err() .contains("Database missing expected key"),); } + + #[test] + fn failing_extrinsic_rolls_back_changes_in_storage_proof() { + let builder = substrate_test_runtime_client::TestClientBuilder::new(); + let backend = builder.backend(); + let client = builder.build(); + + let mut block_builder = BlockBuilder::new( + &client, + client.info().best_hash, + client.info().best_number, + RecordProof::Yes, + Default::default(), + &*backend, + ) + .unwrap(); + + block_builder.push(Extrinsic::ReadAndPanic(8)).unwrap_err(); + + let block = block_builder.build().unwrap(); + + let proof_with_panic = block.proof.expect("Proof is build on request").encoded_size(); + + let mut block_builder = BlockBuilder::new( + &client, + client.info().best_hash, + client.info().best_number, + RecordProof::Yes, + Default::default(), + &*backend, + ) + .unwrap(); + + block_builder.push(Extrinsic::Read(8)).unwrap(); + + let block = block_builder.build().unwrap(); + + let proof_without_panic = block.proof.expect("Proof is build on request").encoded_size(); + + let block = BlockBuilder::new( + &client, + client.info().best_hash, + client.info().best_number, + RecordProof::Yes, + Default::default(), + &*backend, + ) + .unwrap() + .build() + .unwrap(); + + let proof_empty_block = block.proof.expect("Proof is build on request").encoded_size(); + + // Ensure that we rolled back the changes of the panicked transaction. 
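// Editorial note (not part of the patch): the three proof sizes compared below encode the
// rollback guarantee this test is after. `ReadAndPanic(8)` presumably reads storage and then
// panics; because the whole extrinsic is rolled back, not even its reads end up in the
// recorded proof, so the "panicked" block proves exactly as much as an empty block. The
// successful `Read(8)` extrinsic keeps its reads, so its proof is strictly larger than both.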
+ assert!(proof_without_panic > proof_with_panic); + assert!(proof_without_panic > proof_empty_block); + assert_eq!(proof_empty_block, proof_with_panic); + } } diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index d733bda9b1692..c16cd45a99433 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -16,6 +16,6 @@ proc-macro = true [dependencies] proc-macro-crate = "1.1.3" -proc-macro2 = "1.0.37" -quote = "1.0.10" -syn = "1.0.98" +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = "2.0.14" diff --git a/client/chain-spec/derive/src/impls.rs b/client/chain-spec/derive/src/impls.rs index e54c1895e4a4b..c0624897c133e 100644 --- a/client/chain-spec/derive/src/impls.rs +++ b/client/chain-spec/derive/src/impls.rs @@ -35,7 +35,7 @@ pub fn extension_derive(ast: &DeriveInput) -> proc_macro::TokenStream { .named .iter() .find_map(|f| { - if f.attrs.iter().any(|attr| attr.path.is_ident(ATTRIBUTE_NAME)) { + if f.attrs.iter().any(|attr| attr.path().is_ident(ATTRIBUTE_NAME)) { let typ = &f.ty; Some(quote! { #typ }) } else { diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index c3399a89680d1..7e2498cec4b7f 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -74,12 +74,12 @@ impl std::fmt::Display for WasmExecutionMethod { /// into an execution method which can be used internally. pub fn execution_method_from_cli( execution_method: WasmExecutionMethod, - _instantiation_strategy: WasmtimeInstantiationStrategy, + instantiation_strategy: WasmtimeInstantiationStrategy, ) -> sc_service::config::WasmExecutionMethod { match execution_method { WasmExecutionMethod::Interpreted => sc_service::config::WasmExecutionMethod::Interpreted, WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled { - instantiation_strategy: match _instantiation_strategy { + instantiation_strategy: match instantiation_strategy { WasmtimeInstantiationStrategy::PoolingCopyOnWrite => sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite, WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index a2cbfedb764fc..aa5314f9cf5a4 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -38,7 +38,6 @@ pub struct BuildSpecCmd { pub raw: bool, /// Disable adding the default bootnode to the specification. - /// /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the /// specification when no bootnode exists. #[arg(long)] diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index 897b61c8e0386..ba20376d998a9 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -29,12 +29,11 @@ use std::{fmt::Debug, str::FromStr, sync::Arc}; /// The `check-block` command used to validate blocks. #[derive(Debug, Clone, Parser)] pub struct CheckBlockCmd { - /// Block hash or number + /// Block hash or number. #[arg(value_name = "HASH or NUMBER")] pub input: BlockNumberOrHash, /// The default number of 64KB pages to ever allocate for Wasm execution. - /// /// Don't alter this unless you know what you're doing. 
#[arg(long, value_name = "COUNT")] pub default_heap_pages: Option, diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 7e8a295f99618..120d7889878e5 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -36,13 +36,11 @@ pub struct ExportBlocksCmd { pub output: Option, /// Specify starting block number. - /// /// Default is 1. #[arg(long, value_name = "BLOCK")] pub from: Option, /// Specify last block number. - /// /// Default is best block. #[arg(long, value_name = "BLOCK")] pub to: Option, diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index 2288cd4037773..a16ba499082c5 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -35,13 +35,11 @@ use std::{ )] pub struct GenerateNodeKeyCmd { /// Name of file to save secret key to. - /// /// If not given, the secret key is printed to stdout. #[arg(long)] file: Option, /// The output is in raw binary format. - /// /// If not given, the output is written as an hex encoded string. #[arg(long)] bin: bool, diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index f76c72924ddb6..815c6ab18aa6c 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -41,7 +41,6 @@ pub struct ImportBlocksCmd { pub input: Option, /// The default number of 64KB pages to ever allocate for Wasm execution. - /// /// Don't alter this unless you know what you're doing. #[arg(long, value_name = "COUNT")] pub default_heap_pages: Option, diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs index 77d0f57780a94..fa9d125d33108 100644 --- a/client/cli/src/commands/insert_key.rs +++ b/client/cli/src/commands/insert_key.rs @@ -24,8 +24,7 @@ use clap::Parser; use sc_keystore::LocalKeystore; use sc_service::config::{BasePath, KeystoreConfig}; use sp_core::crypto::{KeyTypeId, SecretString}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; -use std::sync::Arc; +use sp_keystore::KeystorePtr; /// The `insert` command #[derive(Debug, Clone, Parser)] @@ -37,7 +36,7 @@ pub struct InsertKeyCmd { #[arg(long)] suri: Option, - /// Key type, examples: "gran", or "imon" + /// Key type, examples: "gran", or "imon". #[arg(long)] key_type: String, @@ -67,9 +66,9 @@ impl InsertKeyCmd { let config_dir = base_path.config_dir(chain_spec.id()); let (keystore, public) = match self.keystore_params.keystore_config(&config_dir)? 
{ - (_, KeystoreConfig::Path { path, password }) => { + KeystoreConfig::Path { path, password } => { let public = with_crypto_scheme!(self.scheme, to_vec(&suri, password.clone()))?; - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(path, password)?); + let keystore: KeystorePtr = LocalKeystore::open(path, password)?.into(); (keystore, public) }, _ => unreachable!("keystore_config always returns path and password; qed"), @@ -78,8 +77,9 @@ impl InsertKeyCmd { let key_type = KeyTypeId::try_from(self.key_type.as_str()).map_err(|_| Error::KeyTypeInvalid)?; - SyncCryptoStore::insert_unknown(&*keystore, key_type, &suri, &public[..]) - .map_err(|_| Error::KeyStoreOperation)?; + keystore + .insert(key_type, &suri, &public[..]) + .map_err(|_| Error::KeystoreOperation)?; Ok(()) } @@ -95,6 +95,7 @@ mod tests { use super::*; use sc_service::{ChainSpec, ChainType, GenericChainSpec, NoExtension}; use sp_core::{sr25519::Pair, ByteArray, Pair as _}; + use sp_keystore::Keystore; use tempfile::TempDir; struct Cli; diff --git a/client/cli/src/commands/inspect_key.rs b/client/cli/src/commands/inspect_key.rs index de82fe71e2445..5aa8b0bdcaa60 100644 --- a/client/cli/src/commands/inspect_key.rs +++ b/client/cli/src/commands/inspect_key.rs @@ -34,12 +34,9 @@ use std::str::FromStr; pub struct InspectKeyCmd { /// A Key URI to be inspected. May be a secret seed, secret URI /// (with derivation paths and password), SS58, public URI or a hex encoded public key. - /// /// If it is a hex encoded public key, `--public` needs to be given as argument. - /// /// If the given value is a file, the file content will be used /// as URI. - /// /// If omitted, you will be prompted for the URI. uri: Option, @@ -64,12 +61,10 @@ pub struct InspectKeyCmd { pub crypto_scheme: CryptoSchemeFlag, /// Expect that `--uri` has the given public key/account-id. - /// /// If `--uri` has any derivations, the public key is checked against the base `uri`, i.e. the /// `uri` without any derivation applied. However, if `uri` has a password or there is one /// given by `--password`, it will be used to decrypt `uri` before comparing the public /// key/account-id. - /// /// If there is no derivation in `--uri`, the public key will be checked against the public key /// of `--uri` directly. #[arg(long, conflicts_with = "public")] diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs index 2370f4a0989ba..733a1343a4333 100644 --- a/client/cli/src/commands/inspect_node_key.rs +++ b/client/cli/src/commands/inspect_node_key.rs @@ -34,13 +34,11 @@ use std::{ )] pub struct InspectNodeKeyCmd { /// Name of file to read the secret key from. - /// /// If not given, the secret key is read from stdin (up to EOF). #[arg(long)] file: Option, /// The input is in raw binary format. - /// /// If not given, the input is read as an hex encoded string. #[arg(long)] bin: bool, diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 9441acecc4dfc..7cc1e6c730cc9 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -23,7 +23,7 @@ use crate::{ ImportParams, KeystoreParams, NetworkParams, OffchainWorkerParams, SharedParams, TransactionPoolParams, }, - CliConfiguration, + CliConfiguration, PrometheusParams, RuntimeParams, TelemetryParams, }; use clap::Parser; use regex::Regex; @@ -38,7 +38,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; #[derive(Debug, Clone, Parser)] pub struct RunCmd { /// Enable validator mode. 
- /// /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). @@ -51,7 +50,6 @@ pub struct RunCmd { pub no_grandpa: bool, /// Listen to all RPC interfaces. - /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: /// . @@ -60,13 +58,11 @@ pub struct RunCmd { pub rpc_external: bool, /// Listen to all RPC interfaces. - /// /// Same as `--rpc-external`. #[arg(long)] pub unsafe_rpc_external: bool, /// RPC methods to expose. - /// /// - `unsafe`: Exposes every RPC method. /// - `safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. /// - `auto`: Acts as `safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is @@ -82,7 +78,6 @@ pub struct RunCmd { pub rpc_methods: RpcMethods, /// Listen to all Websocket interfaces. - /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: /// . @@ -91,7 +86,6 @@ pub struct RunCmd { pub ws_external: bool, /// Listen to all Websocket interfaces. - /// /// Same as `--ws-external` but doesn't warn you about it. #[arg(long)] pub unsafe_ws_external: bool, @@ -116,12 +110,6 @@ pub struct RunCmd { #[arg(long)] pub rpc_max_subscriptions_per_connection: Option, - /// Expose Prometheus exporter on all interfaces. - /// - /// Default is local. - #[arg(long)] - pub prometheus_external: bool, - /// DEPRECATED, IPC support has been removed. #[arg(long, value_name = "PATH")] pub ipc_path: Option, @@ -143,7 +131,6 @@ pub struct RunCmd { pub ws_max_out_buffer_capacity: Option, /// Specify browser Origins allowed to access the HTTP & WS RPC servers. - /// /// A comma-separated list of origins (protocol://domain or special `null` /// value). Value of `all` will disable origin validation. Default is to /// allow localhost and origins. When running in @@ -151,36 +138,22 @@ pub struct RunCmd { #[arg(long, value_name = "ORIGINS", value_parser = parse_cors)] pub rpc_cors: Option, - /// Specify Prometheus exporter TCP Port. - #[arg(long, value_name = "PORT")] - pub prometheus_port: Option, - - /// Do not expose a Prometheus exporter endpoint. - /// - /// Prometheus metric endpoint is enabled by default. - #[arg(long)] - pub no_prometheus: bool, - /// The human-readable name for this node. - /// - /// The node name will be reported to the telemetry server, if enabled. + /// It's used as network node name. #[arg(long, value_name = "NAME")] pub name: Option, - /// Disable connecting to the Substrate telemetry server. - /// - /// Telemetry is on by default on global chains. - #[arg(long)] - pub no_telemetry: bool, + #[allow(missing_docs)] + #[clap(flatten)] + pub telemetry_params: TelemetryParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub prometheus_params: PrometheusParams, - /// The URL of the telemetry server to connect to. - /// - /// This flag can be passed multiple times as a means to specify multiple - /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting - /// the least verbosity. - /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. 
- #[arg(long = "telemetry-url", value_name = "URL VERBOSITY", value_parser = parse_telemetry_endpoints)] - pub telemetry_endpoints: Vec<(String, u8)>, + #[allow(missing_docs)] + #[clap(flatten)] + pub runtime_params: RuntimeParams, #[allow(missing_docs)] #[clap(flatten)] @@ -202,6 +175,10 @@ pub struct RunCmd { #[clap(flatten)] pub pool_config: TransactionPoolParams, + #[allow(missing_docs)] + #[clap(flatten)] + pub keystore_params: KeystoreParams, + /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. #[arg(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub alice: bool, @@ -239,28 +216,11 @@ pub struct RunCmd { #[arg(long)] pub force_authoring: bool, - #[allow(missing_docs)] - #[clap(flatten)] - pub keystore_params: KeystoreParams, - - /// The size of the instances cache for each runtime. - /// - /// The default value is 8 and the values higher than 256 are ignored. - #[arg(long)] - pub max_runtime_instances: Option, - - /// Maximum number of different runtimes that can be cached. - #[arg(long, default_value_t = 2)] - pub runtime_cache_size: u8, - /// Run a temporary node. - /// /// A temporary directory will be created to store the configuration and will be deleted /// at the end of the process. - /// /// Note: the directory is random per process execution. This directory is used as base path /// which includes: database, node key and keystore. - /// /// When `--dev` is given and no explicit `--base-path`, this option is implied. #[arg(long, conflicts_with = "base_path")] pub tmp: bool, @@ -345,11 +305,12 @@ impl CliConfiguration for RunCmd { &self, chain_spec: &Box, ) -> Result> { - Ok(if self.no_telemetry { + let params = &self.telemetry_params; + Ok(if params.no_telemetry { None - } else if !self.telemetry_endpoints.is_empty() { + } else if !params.telemetry_endpoints.is_empty() { Some( - TelemetryEndpoints::new(self.telemetry_endpoints.clone()) + TelemetryEndpoints::new(params.telemetry_endpoints.clone()) .map_err(|e| e.to_string())?, ) } else { @@ -361,7 +322,7 @@ impl CliConfiguration for RunCmd { let keyring = self.get_keyring(); let is_authority = self.validator || is_dev || keyring.is_some(); - Ok(if is_authority { sc_service::Role::Authority } else { sc_service::Role::Full }) + Ok(if is_authority { Role::Authority } else { Role::Full }) } fn force_authoring(&self) -> Result { @@ -374,20 +335,9 @@ impl CliConfiguration for RunCmd { default_listen_port: u16, chain_spec: &Box, ) -> Result> { - Ok(if self.no_prometheus { - None - } else { - let interface = - if self.prometheus_external { Ipv4Addr::UNSPECIFIED } else { Ipv4Addr::LOCALHOST }; - - Some(PrometheusConfig::new_with_default_registry( - SocketAddr::new( - interface.into(), - self.prometheus_port.unwrap_or(default_listen_port), - ), - chain_spec.id().into(), - )) - }) + Ok(self + .prometheus_params + .prometheus_config(default_listen_port, chain_spec.id().to_string())) } fn disable_grandpa(&self) -> Result { @@ -474,11 +424,11 @@ impl CliConfiguration for RunCmd { } fn max_runtime_instances(&self) -> Result> { - Ok(self.max_runtime_instances.map(|x| x.min(256))) + Ok(Some(self.runtime_params.max_runtime_instances)) } fn runtime_cache_size(&self) -> Result { - Ok(self.runtime_cache_size) + Ok(self.runtime_params.runtime_cache_size) } fn base_path(&self) -> Result> { @@ -546,36 +496,6 @@ fn rpc_interface( } } -#[derive(Debug)] -enum TelemetryParsingError { - MissingVerbosity, - VerbosityParsingError(std::num::ParseIntError), -} - -impl 
std::error::Error for TelemetryParsingError {} - -impl std::fmt::Display for TelemetryParsingError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - TelemetryParsingError::MissingVerbosity => write!(f, "Verbosity level missing"), - TelemetryParsingError::VerbosityParsingError(e) => write!(f, "{}", e), - } - } -} - -fn parse_telemetry_endpoints(s: &str) -> std::result::Result<(String, u8), TelemetryParsingError> { - let pos = s.find(' '); - match pos { - None => Err(TelemetryParsingError::MissingVerbosity), - Some(pos_) => { - let url = s[..pos_].to_string(); - let verbosity = - s[pos_ + 1..].parse().map_err(TelemetryParsingError::VerbosityParsingError)?; - Ok((url, verbosity)) - }, - } -} - /// CORS setting /// /// The type is introduced to overcome `Option>` handling of `clap`. diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 5fedcf99a12ef..063b2c39839f4 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -185,10 +185,10 @@ pub trait CliConfiguration: Sized { /// /// By default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses /// `KeystoreConfig::InMemory`. - fn keystore_config(&self, config_dir: &PathBuf) -> Result<(Option, KeystoreConfig)> { + fn keystore_config(&self, config_dir: &PathBuf) -> Result { self.keystore_params() .map(|x| x.keystore_config(config_dir)) - .unwrap_or_else(|| Ok((None, KeystoreConfig::InMemory))) + .unwrap_or_else(|| Ok(KeystoreConfig::InMemory)) } /// Get the database cache size. @@ -505,7 +505,7 @@ pub trait CliConfiguration: Sized { let role = self.role(is_dev)?; let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); let is_validator = role.is_authority(); - let (keystore_remote, keystore) = self.keystore_config(&config_dir)?; + let keystore = self.keystore_config(&config_dir)?; let telemetry_endpoints = self.telemetry_endpoints(&chain_spec)?; let runtime_cache_size = self.runtime_cache_size()?; @@ -524,7 +524,6 @@ pub trait CliConfiguration: Sized { node_key, DCV::p2p_listen_port(), )?, - keystore_remote, keystore, database: self.database_config(&config_dir, database_cache_size, database)?, trie_cache_maximum_size: self.trie_cache_maximum_size()?, diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index 3b30ccb3ed6b0..6c0cfca4932ef 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -64,7 +64,7 @@ pub enum Error { SignatureInvalid, #[error("Key store operation failed")] - KeyStoreOperation, + KeystoreOperation, #[error("Key storage issue encountered")] KeyStorage(#[from] sc_keystore::Error), diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index adbb299317ac9..5d451bbed6562 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -31,6 +31,7 @@ mod config; mod error; mod params; mod runner; +mod signals; pub use arg_enums::*; pub use clap; @@ -41,6 +42,7 @@ pub use params::*; pub use runner::*; pub use sc_service::{ChainSpec, Role}; pub use sc_tracing::logging::LoggerBuilder; +pub use signals::Signals; pub use sp_version::RuntimeVersion; /// Substrate client CLI @@ -197,10 +199,15 @@ pub trait SubstrateCli: Sized { command: &T, ) -> error::Result> { let tokio_runtime = build_runtime()?; + + // `capture` needs to be called in a tokio context. + // Also capture them as early as possible. 
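// Editorial note (not part of the patch): `Signals::capture()` registers the SIGINT/SIGTERM
// (or Ctrl+C) listeners via `tokio::signal`, which must be called from within a Tokio runtime,
// hence the `block_on` below even though `capture()` itself is not `async`. Capturing before
// the configuration is built means a signal arriving while the node is still initialising is
// already being listened for.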
+ let signals = tokio_runtime.block_on(async { Signals::capture() })?; + let config = command.create_configuration(self, tokio_runtime.handle().clone())?; command.init(&Self::support_url(), &Self::impl_version(), |_, _| {}, &config)?; - Runner::new(config, tokio_runtime) + Runner::new(config, tokio_runtime, signals) } /// Create a runner for the command provided in argument. The `logger_hook` can be used to setup @@ -231,10 +238,15 @@ pub trait SubstrateCli: Sized { F: FnOnce(&mut LoggerBuilder, &Configuration), { let tokio_runtime = build_runtime()?; + + // `capture` needs to be called in a tokio context. + // Also capture them as early as possible. + let signals = tokio_runtime.block_on(async { Signals::capture() })?; + let config = command.create_configuration(self, tokio_runtime.handle().clone())?; command.init(&Self::support_url(), &Self::impl_version(), logger_hook, &config)?; - Runner::new(config, tokio_runtime) + Runner::new(config, tokio_runtime, signals) } /// Native runtime version. fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion; diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index e36fbb5ffd91c..9e57a017e51ca 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -52,15 +52,11 @@ pub struct ImportParams { pub wasm_method: WasmExecutionMethod, /// The WASM instantiation method to use. - /// /// Only has an effect when `wasm-execution` is set to `compiled`. - /// /// The copy-on-write strategies are only supported on Linux. /// If the copy-on-write variant of a strategy is unsupported /// the executor will fall back to the non-CoW equivalent. - /// /// The fastest (and the default) strategy available is `pooling-copy-on-write`. - /// /// The `legacy-instance-reuse` strategy is deprecated and will /// be removed in the future. It should only be used in case of /// issues with the default instantiation strategy. @@ -73,7 +69,6 @@ pub struct ImportParams { pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Specify the path where local WASM runtimes are stored. - /// /// These runtimes will override on-chain runtimes when the version matches. #[arg(long, value_name = "PATH")] pub wasm_runtime_overrides: Option, @@ -83,13 +78,11 @@ pub struct ImportParams { pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. - /// /// Providing `0` will disable the cache. #[arg(long, value_name = "Bytes", default_value_t = 67108864)] pub trie_cache_size: usize, /// DEPRECATED - /// /// Switch to `--trie-cache-size`. #[arg(long)] state_cache_size: Option, diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index e6e6fd9eb47ed..a2fdd6b2218c4 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -61,16 +61,14 @@ pub struct KeystoreParams { pub password_filename: Option, } -/// Parse a sercret string, returning a displayable error. +/// Parse a secret string, returning a displayable error. 
pub fn secret_string_from_str(s: &str) -> std::result::Result { std::str::FromStr::from_str(s).map_err(|_| "Could not get SecretString".to_string()) } impl KeystoreParams { /// Get the keystore configuration for the parameters - /// - /// Returns a vector of remote-urls and the local Keystore configuration - pub fn keystore_config(&self, config_dir: &Path) -> Result<(Option, KeystoreConfig)> { + pub fn keystore_config(&self, config_dir: &Path) -> Result { let password = if self.password_interactive { Some(SecretString::new(input_keystore_password()?)) } else if let Some(ref file) = self.password_filename { @@ -85,7 +83,7 @@ impl KeystoreParams { .clone() .unwrap_or_else(|| config_dir.join(DEFAULT_KEYSTORE_CONFIG_PATH)); - Ok((self.keystore_uri.clone(), KeystoreConfig::Path { path, password })) + Ok(KeystoreConfig::Path { path, password }) } /// helper method to fetch password from `KeyParams` or read from stdin diff --git a/client/cli/src/params/message_params.rs b/client/cli/src/params/message_params.rs index a935bbb25bddd..3fcb6f2c6e8cc 100644 --- a/client/cli/src/params/message_params.rs +++ b/client/cli/src/params/message_params.rs @@ -20,14 +20,13 @@ use crate::error::Error; use array_bytes::{hex2bytes, hex_bytes2hex_str}; -use clap::Parser; +use clap::Args; use std::io::BufRead; /// Params to configure how a message should be passed into a command. -#[derive(Parser, Debug, Clone)] +#[derive(Debug, Clone, Args)] pub struct MessageParams { /// Message to process. Will be read from STDIN otherwise. - /// /// The message is assumed to be raw bytes per default. Use `--hex` for hex input. Can /// optionally be prefixed with `0x` in the hex case. #[arg(long)] diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index f9c840d2d4865..247ffc0e04ba5 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -22,8 +22,11 @@ mod message_params; mod network_params; mod node_key_params; mod offchain_worker_params; +mod prometheus_params; mod pruning_params; +mod runtime_params; mod shared_params; +mod telemetry_params; mod transaction_pool_params; use crate::arg_enums::{CryptoScheme, OutputType}; @@ -37,8 +40,8 @@ use std::{fmt::Debug, str::FromStr}; pub use crate::params::{ database_params::*, import_params::*, keystore_params::*, message_params::*, network_params::*, - node_key_params::*, offchain_worker_params::*, pruning_params::*, shared_params::*, - transaction_pool_params::*, + node_key_params::*, offchain_worker_params::*, prometheus_params::*, pruning_params::*, + runtime_params::*, shared_params::*, telemetry_params::*, transaction_pool_params::*, }; /// Parse Ss58AddressFormat diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 106fba75aa727..13e4338257691 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -42,9 +42,7 @@ pub struct NetworkParams { pub reserved_nodes: Vec, /// Whether to only synchronize the chain with reserved nodes. - /// /// Also disables automatic peer discovery. - /// /// TCP connections might still be established with non-reserved nodes. /// In particular, if you are a validator your node might still connect to other /// validator nodes and collator nodes regardless of whether they are defined as @@ -95,14 +93,12 @@ pub struct NetworkParams { pub in_peers_light: u32, /// Disable mDNS discovery. - /// /// By default, the network will use mDNS to discover other nodes on the /// local network. This disables it. 
Automatically implied when using --dev. #[arg(long)] pub no_mdns: bool, /// Maximum number of peers from which to ask for the same blocks in parallel. - /// /// This allows downloading announced blocks from multiple peers. Decrease to save /// traffic and risk increased latency. #[arg(long, value_name = "COUNT", default_value_t = 5)] @@ -113,7 +109,6 @@ pub struct NetworkParams { pub node_key_params: NodeKeyParams, /// Enable peer discovery on local networks. - /// /// By default this option is `true` for `--dev` or when the chain type is /// `Local`/`Development` and false otherwise. #[arg(long)] @@ -121,7 +116,6 @@ pub struct NetworkParams { /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in /// the presence of potentially adversarial nodes. - /// /// See the S/Kademlia paper for more information on the high level design as well as its /// security improvements. #[arg(long)] @@ -132,11 +126,6 @@ pub struct NetworkParams { pub ipfs_server: bool, /// Blockchain syncing mode. - /// - /// - `full`: Download and validate full blockchain history. - /// - `fast`: Download blocks and the latest state only. - /// - `fast-unsafe`: Same as `fast`, but skip downloading state proofs. - /// - `warp`: Download the latest state and proof. #[arg( long, value_enum, @@ -146,6 +135,13 @@ pub struct NetworkParams { verbatim_doc_comment )] pub sync: SyncMode, + + /// Maximum number of blocks per request. + /// + /// Try reducing this number from the default value if you have a slow network connection + /// and observe block requests timing out. + #[arg(long, value_name = "COUNT", default_value_t = 64)] + pub max_blocks_per_request: u32, } impl NetworkParams { @@ -235,6 +231,7 @@ impl NetworkParams { allow_private_ip, }, max_parallel_downloads: self.max_parallel_downloads, + max_blocks_per_request: self.max_blocks_per_request, enable_dht_random_walk: !self.reserved_only, allow_non_globals_in_dht, kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths, diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 074b95bea0f3a..e31963d2f8b9d 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -33,16 +33,12 @@ const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; #[derive(Debug, Clone, Args)] pub struct NodeKeyParams { /// The secret key to use for libp2p networking. - /// /// The value is a string that is parsed according to the choice of /// `--node-key-type` as follows: - /// /// `ed25519`: /// The value is parsed as a hex-encoded Ed25519 32 byte secret key, /// i.e. 64 hex characters. - /// /// The value of this option takes precedence over `--node-key-file`. - /// /// WARNING: Secrets provided as command-line arguments are easily exposed. /// Use of this option should be limited to development and testing. To use /// an externally managed secret key, use `--node-key-file` instead. @@ -50,33 +46,25 @@ pub struct NodeKeyParams { pub node_key: Option, /// The type of secret key to use for libp2p networking. - /// /// The secret key of the node is obtained as follows: - /// /// * If the `--node-key` option is given, the value is parsed as a secret key according to /// the type. See the documentation for `--node-key`. - /// /// * If the `--node-key-file` option is given, the secret key is read from the specified /// file. See the documentation for `--node-key-file`. 
- /// /// * Otherwise, the secret key is read from a file with a predetermined, type-specific name /// from the chain-specific network config directory inside the base directory specified by /// `--base-dir`. If this file does not exist, it is created with a newly generated secret /// key of the chosen type. - /// /// The node's secret key determines the corresponding public key and hence the /// node's peer ID in the context of libp2p. #[arg(long, value_name = "TYPE", value_enum, ignore_case = true, default_value_t = NodeKeyType::Ed25519)] pub node_key_type: NodeKeyType, /// The file from which to read the node's secret key to use for libp2p networking. - /// /// The contents of the file are parsed according to the choice of `--node-key-type` /// as follows: - /// /// `ed25519`: /// The file must contain an unencoded 32 byte or hex encoded Ed25519 secret key. - /// /// If the file does not exist, it is created with a newly generated secret key of /// the chosen type. #[arg(long, value_name = "FILE")] diff --git a/client/cli/src/params/offchain_worker_params.rs b/client/cli/src/params/offchain_worker_params.rs index 33fd8d609b605..d1fedab4cb2eb 100644 --- a/client/cli/src/params/offchain_worker_params.rs +++ b/client/cli/src/params/offchain_worker_params.rs @@ -33,7 +33,6 @@ use crate::{error, OffchainWorkerEnabled}; #[derive(Debug, Clone, Args)] pub struct OffchainWorkerParams { /// Should execute offchain workers on every block. - /// /// By default it's only enabled for nodes that are authoring new blocks. #[arg( long = "offchain-worker", @@ -45,9 +44,7 @@ pub struct OffchainWorkerParams { pub enabled: OffchainWorkerEnabled, /// Enable Offchain Indexing API, which allows block import to write to Offchain DB. - /// - /// Enables a runtime to write directly to a offchain workers - /// DB during block import. + /// Enables a runtime to write directly to a offchain workers DB during block import. #[arg(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING", default_value_t = false, action = ArgAction::Set)] pub indexing_enabled: bool, } diff --git a/client/cli/src/params/prometheus_params.rs b/client/cli/src/params/prometheus_params.rs new file mode 100644 index 0000000000000..4d234ea33c20d --- /dev/null +++ b/client/cli/src/params/prometheus_params.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clap::Args; +use sc_service::config::PrometheusConfig; +use std::net::{Ipv4Addr, SocketAddr}; + +/// Parameters used to config prometheus. +#[derive(Debug, Clone, Args)] +pub struct PrometheusParams { + /// Specify Prometheus exporter TCP Port. + #[arg(long, value_name = "PORT")] + pub prometheus_port: Option, + /// Expose Prometheus exporter on all interfaces. + /// Default is local. 
+ #[arg(long)] + pub prometheus_external: bool, + /// Do not expose a Prometheus exporter endpoint. + /// Prometheus metric endpoint is enabled by default. + #[arg(long)] + pub no_prometheus: bool, +} + +impl PrometheusParams { + /// Creates [`PrometheusConfig`]. + pub fn prometheus_config( + &self, + default_listen_port: u16, + chain_id: String, + ) -> Option { + if self.no_prometheus { + None + } else { + let interface = + if self.prometheus_external { Ipv4Addr::UNSPECIFIED } else { Ipv4Addr::LOCALHOST }; + + Some(PrometheusConfig::new_with_default_registry( + SocketAddr::new( + interface.into(), + self.prometheus_port.unwrap_or(default_listen_port), + ), + chain_id, + )) + } + } +} diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 59eeb13cd0379..1b5bf247d942e 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -21,51 +21,29 @@ use clap::Args; use sc_service::{BlocksPruning, PruningMode}; /// Parameters to define the pruning mode -#[derive(Debug, Clone, PartialEq, Args)] +#[derive(Debug, Clone, Args)] pub struct PruningParams { /// Specify the state pruning mode. - /// /// This mode specifies when the block's state (ie, storage) /// should be pruned (ie, removed) from the database. - /// /// This setting can only be set on the first creation of the database. Every subsequent run /// will load the pruning mode from the database and will error if the stored mode doesn't /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. - /// /// Possible values: - /// - /// - archive: - /// - /// Keep the state of all blocks. - /// - /// - 'archive-canonical' - /// - /// Keep only the state of finalized blocks. - /// - /// - number - /// - /// Keep the state of the last number of finalized blocks. - /// + /// - archive: Keep the state of all blocks. + /// - 'archive-canonical' Keep only the state of finalized blocks. + /// - number Keep the state of the last number of finalized blocks. /// [default: 256] #[arg(alias = "pruning", long, value_name = "PRUNING_MODE")] pub state_pruning: Option, /// Specify the blocks pruning mode. - /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. - /// /// Possible values: - /// - 'archive' - /// - /// Keep all blocks. - /// - /// - 'archive-canonical' - /// - /// Keep only finalized blocks. - /// + /// - 'archive' Keep all blocks. + /// - 'archive-canonical' Keep only finalized blocks. /// - number - /// /// Keep the last `number` of finalized blocks. #[arg( alias = "keep-blocks", diff --git a/client/cli/src/params/runtime_params.rs b/client/cli/src/params/runtime_params.rs new file mode 100644 index 0000000000000..79035fc7d4c5d --- /dev/null +++ b/client/cli/src/params/runtime_params.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clap::Args; +use std::str::FromStr; + +/// Parameters used to config runtime. +#[derive(Debug, Clone, Args)] +pub struct RuntimeParams { + /// The size of the instances cache for each runtime. The values higher than 256 are illegal. + #[arg(long, default_value_t = 8, value_parser = parse_max_runtime_instances)] + pub max_runtime_instances: usize, + + /// Maximum number of different runtimes that can be cached. + #[arg(long, default_value_t = 2)] + pub runtime_cache_size: u8, +} + +fn parse_max_runtime_instances(s: &str) -> Result { + let max_runtime_instances = usize::from_str(s) + .map_err(|_err| format!("Illegal `--max-runtime-instances` value: {s}"))?; + + if max_runtime_instances > 256 { + Err(format!("Illegal `--max-runtime-instances` value: {max_runtime_instances} is more than the allowed maximum of `256` ")) + } else { + Ok(max_runtime_instances) + } +} diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 46f5390039d46..3d20ca504a691 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -22,17 +22,15 @@ use sc_service::config::BasePath; use std::path::PathBuf; /// Shared parameters used by all `CoreParams`. -#[derive(Debug, Clone, PartialEq, Args)] +#[derive(Debug, Clone, Args)] pub struct SharedParams { /// Specify the chain specification. - /// /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file /// with the chainspec (such as one exported by the `build-spec` subcommand). #[arg(long, value_name = "CHAIN_SPEC")] pub chain: Option, /// Specify the development chain. - /// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. #[arg(long, conflicts_with_all = &["chain"])] @@ -43,16 +41,13 @@ pub struct SharedParams { pub base_path: Option, /// Sets a custom logging filter. Syntax is `=`, e.g. -lsync=debug. - /// /// Log levels (least to most verbose) are error, warn, info, debug, and trace. /// By default, all targets log `info`. The global log level can be set with `-l`. #[arg(short = 'l', long, value_name = "LOG_PATTERN", num_args = 1..)] pub log: Vec, /// Enable detailed log output. - /// /// This includes displaying the log target, log level and thread name. - /// /// This is automatically enabled when something is logged with any higher level than `info`. #[arg(long)] pub detailed_log_output: bool, @@ -62,10 +57,8 @@ pub struct SharedParams { pub disable_log_color: bool, /// Enable feature to dynamically update and reload the log filter. - /// /// Be aware that enabling this feature can lead to a performance decrease up to factor six or /// more. Depending on the global logging level the performance decrease changes. - /// /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this /// option not being set. #[arg(long)] diff --git a/client/cli/src/params/telemetry_params.rs b/client/cli/src/params/telemetry_params.rs new file mode 100644 index 0000000000000..67f441071410c --- /dev/null +++ b/client/cli/src/params/telemetry_params.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clap::Args; + +/// Parameters used to config telemetry. +#[derive(Debug, Clone, Args)] +pub struct TelemetryParams { + /// Disable connecting to the Substrate telemetry server. + /// Telemetry is on by default on global chains. + #[arg(long)] + pub no_telemetry: bool, + + /// The URL of the telemetry server to connect to. + /// This flag can be passed multiple times as a means to specify multiple + /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting + /// the least verbosity. + /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. + #[arg(long = "telemetry-url", value_name = "URL VERBOSITY", value_parser = parse_telemetry_endpoints)] + pub telemetry_endpoints: Vec<(String, u8)>, +} + +#[derive(Debug)] +enum TelemetryParsingError { + MissingVerbosity, + VerbosityParsingError(std::num::ParseIntError), +} + +impl std::error::Error for TelemetryParsingError {} + +impl std::fmt::Display for TelemetryParsingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + TelemetryParsingError::MissingVerbosity => write!(f, "Verbosity level missing"), + TelemetryParsingError::VerbosityParsingError(e) => write!(f, "{}", e), + } + } +} + +fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), TelemetryParsingError> { + let pos = s.find(' '); + match pos { + None => Err(TelemetryParsingError::MissingVerbosity), + Some(pos_) => { + let url = s[..pos_].to_string(); + let verbosity = + s[pos_ + 1..].parse().map_err(TelemetryParsingError::VerbosityParsingError)?; + Ok((url, verbosity)) + }, + } +} diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 8917a37c499c0..a8b75f2665aea 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -16,62 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
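// Editorial sketch (not part of the patch): the `parse_telemetry_endpoints` helper moved into
// `telemetry_params.rs` above splits a `--telemetry-url` value at the first space. Assuming
// that function, using the example from its own doc comment:
//
//     assert_eq!(
//         parse_telemetry_endpoints("wss://foo/bar 0").unwrap(),
//         ("wss://foo/bar".to_string(), 0)
//     );
//     // A value without a trailing verbosity level is rejected with
//     // `TelemetryParsingError::MissingVerbosity`.
//     assert!(parse_telemetry_endpoints("wss://foo/bar").is_err());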
-use crate::{error::Error as CliError, Result, SubstrateCli}; +use crate::{error::Error as CliError, Result, Signals, SubstrateCli}; use chrono::prelude::*; -use futures::{future, future::FutureExt, pin_mut, select, Future}; +use futures::{future::FutureExt, Future}; use log::info; use sc_service::{Configuration, Error as ServiceError, TaskManager}; use sc_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; use std::{marker::PhantomData, time::Duration}; -#[cfg(target_family = "unix")] -async fn main(func: F) -> std::result::Result<(), E> -where - F: Future> + future::FusedFuture, - E: std::error::Error + Send + Sync + 'static + From, -{ - use tokio::signal::unix::{signal, SignalKind}; - - let mut stream_int = signal(SignalKind::interrupt()).map_err(ServiceError::Io)?; - let mut stream_term = signal(SignalKind::terminate()).map_err(ServiceError::Io)?; - - let t1 = stream_int.recv().fuse(); - let t2 = stream_term.recv().fuse(); - let t3 = func; - - pin_mut!(t1, t2, t3); - - select! { - _ = t1 => {}, - _ = t2 => {}, - res = t3 => res?, - } - - Ok(()) -} - -#[cfg(not(unix))] -async fn main(func: F) -> std::result::Result<(), E> -where - F: Future> + future::FusedFuture, - E: std::error::Error + Send + Sync + 'static + From, -{ - use tokio::signal::ctrl_c; - - let t1 = ctrl_c().fuse(); - let t2 = func; - - pin_mut!(t1, t2); - - select! { - _ = t1 => {}, - res = t2 => res?, - } - - Ok(()) -} - -/// Build a tokio runtime with all features +/// Build a tokio runtime with all features. pub fn build_runtime() -> std::result::Result { tokio::runtime::Builder::new_multi_thread() .on_thread_start(|| { @@ -85,35 +38,22 @@ pub fn build_runtime() -> std::result::Result( - tokio_runtime: tokio::runtime::Runtime, - future: F, - task_manager: TaskManager, -) -> std::result::Result<(), E> -where - F: Future> + future::Future, - E: std::error::Error + Send + Sync + 'static + From, -{ - let f = future.fuse(); - pin_mut!(f); - - tokio_runtime.block_on(main(f))?; - drop(task_manager); - - Ok(()) -} - /// A Substrate CLI runtime that can be used to run a node or a command pub struct Runner { config: Configuration, tokio_runtime: tokio::runtime::Runtime, + signals: Signals, phantom: PhantomData, } impl Runner { /// Create a new runtime with the command provided in argument - pub fn new(config: Configuration, tokio_runtime: tokio::runtime::Runtime) -> Result> { - Ok(Runner { config, tokio_runtime, phantom: PhantomData }) + pub fn new( + config: Configuration, + tokio_runtime: tokio::runtime::Runtime, + signals: Signals, + ) -> Result> { + Ok(Runner { config, tokio_runtime, signals, phantom: PhantomData }) } /// Log information about the node itself. @@ -147,7 +87,10 @@ impl Runner { self.print_node_infos(); let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?; - let res = self.tokio_runtime.block_on(main(task_manager.future().fuse())); + + let res = self + .tokio_runtime + .block_on(self.signals.run_until_signal(task_manager.future().fuse())); // We need to drop the task manager here to inform all tasks that they should shut down. // // This is important to be done before we instruct the tokio runtime to shutdown. 
Otherwise @@ -210,7 +153,11 @@ impl Runner { E: std::error::Error + Send + Sync + 'static + From + From, { let (future, task_manager) = runner(self.config)?; - run_until_exit::<_, E>(self.tokio_runtime, future, task_manager) + self.tokio_runtime.block_on(self.signals.run_until_signal(future.fuse()))?; + // Drop the task manager before dropping the rest, to ensure that all futures were informed + // about the shut down. + drop(task_manager); + Ok(()) } /// Get an immutable reference to the node Configuration @@ -321,7 +268,6 @@ mod tests { transaction_pool: Default::default(), network: NetworkConfiguration::new_memory(), keystore: sc_service::config::KeystoreConfig::InMemory, - keystore_remote: None, database: sc_client_db::DatabaseSource::ParityDb { path: PathBuf::from("db") }, trie_cache_maximum_size: None, state_pruning: None, @@ -369,6 +315,7 @@ mod tests { runtime_cache_size: 2, }, runtime, + Signals::dummy(), ) .unwrap(); diff --git a/client/cli/src/signals.rs b/client/cli/src/signals.rs new file mode 100644 index 0000000000000..4b6a6f957a766 --- /dev/null +++ b/client/cli/src/signals.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use futures::{ + future::{self, BoxFuture, FutureExt}, + pin_mut, select, Future, +}; + +use sc_service::Error as ServiceError; + +/// Abstraction over OS signals to handle the shutdown of the node smoothly. +/// +/// On `unix` this represents `SigInt` and `SigTerm`. +pub struct Signals(BoxFuture<'static, ()>); + +impl Signals { + /// Return the signals future. + pub fn future(self) -> BoxFuture<'static, ()> { + self.0 + } + + /// Capture the relevant signals to handle shutdown of the node smoothly. + /// + /// Needs to be called in a Tokio context to have access to the tokio reactor. + #[cfg(target_family = "unix")] + pub fn capture() -> std::result::Result { + use tokio::signal::unix::{signal, SignalKind}; + + let mut stream_int = signal(SignalKind::interrupt()).map_err(ServiceError::Io)?; + let mut stream_term = signal(SignalKind::terminate()).map_err(ServiceError::Io)?; + + Ok(Signals( + async move { + future::select(stream_int.recv().boxed(), stream_term.recv().boxed()).await; + } + .boxed(), + )) + } + + /// Capture the relevant signals to handle shutdown of the node smoothly. + /// + /// Needs to be called in a Tokio context to have access to the tokio reactor. + #[cfg(not(unix))] + pub fn capture() -> Result { + use tokio::signal::ctrl_c; + + Ok(Signals( + async move { + let _ = ctrl_c().await; + } + .boxed(), + )) + } + + /// A dummy signal that never returns. + pub fn dummy() -> Self { + Self(future::pending().boxed()) + } + + /// Run a future task until receive a signal. 
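// Editorial note (not part of the patch): a typical call site is the one in `Runner` above,
// roughly `tokio_runtime.block_on(signals.run_until_signal(task_manager.future().fuse()))`,
// i.e. the node's main future is polled until it completes or until SIGINT/SIGTERM arrives,
// whichever happens first.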
+ pub async fn run_until_signal(self, func: F) -> Result<(), E> + where + F: Future> + future::FusedFuture, + E: std::error::Error + Send + Sync + 'static, + { + let signals = self.future().fuse(); + + pin_mut!(func, signals); + + select! { + _ = signals => {}, + res = func => res?, + } + + Ok(()) + } +} diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 46e0ccb4e302a..ef7a2a1cc865b 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -19,7 +19,7 @@ //! Module implementing the logic for verifying and importing AuRa blocks. use crate::{ - aura_err, authorities, find_pre_digest, slot_author, AuthorityId, CompatibilityMode, Error, + authorities, standalone::SealVerificationError, AuthorityId, CompatibilityMode, Error, LOG_TARGET, }; use codec::{Codec, Decode, Encode}; @@ -36,7 +36,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; use sp_consensus::Error as ConsensusError; -use sp_consensus_aura::{digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi}; +use sp_consensus_aura::{inherents::AuraInherentData, AuraApi}; use sp_consensus_slots::Slot; use sp_core::{crypto::Pair, ExecutionContext}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; @@ -54,7 +54,7 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; fn check_header( client: &C, slot_now: Slot, - mut header: B::Header, + header: B::Header, hash: B::Hash, authorities: &[AuthorityId
<P>
], check_for_equivocation: CheckForEquivocation, @@ -64,27 +64,16 @@ where C: sc_client_api::backend::AuxStore, P::Public: Encode + Decode + PartialEq + Clone, { - let seal = header.digest_mut().pop().ok_or(Error::HeaderUnsealed(hash))?; - - let sig = seal.as_aura_seal().ok_or_else(|| aura_err(Error::HeaderBadSeal(hash)))?; - - let slot = find_pre_digest::(&header)?; - - if slot > slot_now { - header.digest_mut().push(seal); - Ok(CheckedHeader::Deferred(header, slot)) - } else { - // check the signature is valid under the expected authority and - // chain state. - let expected_author = - slot_author::
<P>
(slot, authorities).ok_or(Error::SlotAuthorNotFound)?; - - let pre_hash = header.hash(); - - if P::verify(&sig, pre_hash.as_ref(), expected_author) { - if check_for_equivocation.check_for_equivocation() { + let check_result = + crate::standalone::check_header_slot_and_seal::(slot_now, header, authorities); + + match check_result { + Ok((header, slot, seal)) => { + let expected_author = crate::standalone::slot_author::
<P>
(slot, &authorities); + let should_equiv_check = check_for_equivocation.check_for_equivocation(); + if let (true, Some(expected)) = (should_equiv_check, expected_author) { if let Some(equivocation_proof) = - check_equivocation(client, slot_now, slot, &header, expected_author) + check_equivocation(client, slot_now, slot, &header, expected) .map_err(Error::Client)? { info!( @@ -98,9 +87,14 @@ where } Ok(CheckedHeader::Checked(header, (slot, seal))) - } else { - Err(Error::BadSignature(hash)) - } + }, + Err(SealVerificationError::Deferred(header, slot)) => + Ok(CheckedHeader::Deferred(header, slot)), + Err(SealVerificationError::Unsealed) => Err(Error::HeaderUnsealed(hash)), + Err(SealVerificationError::BadSeal) => Err(Error::HeaderBadSeal(hash)), + Err(SealVerificationError::BadSignature) => Err(Error::BadSignature(hash)), + Err(SealVerificationError::SlotAuthorNotFound) => Err(Error::SlotAuthorNotFound), + Err(SealVerificationError::InvalidPreDigest(e)) => Err(Error::from(e)), } } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index ccc2db6daf150..caaea3087765b 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -33,11 +33,10 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc}; use futures::prelude::*; -use log::{debug, trace}; use codec::{Codec, Decode, Encode}; -use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_client_api::{backend::AuxStore, BlockOf}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_slots::{ BackoffAuthoringBlocksStrategy, InherentDataProviderExt, SimpleSlotWorkerToSlotWorker, @@ -45,20 +44,19 @@ use sc_consensus_slots::{ }; use sc_telemetry::TelemetryHandle; use sp_api::{Core, ProvideRuntimeApi}; -use sp_application_crypto::{AppKey, AppPublic}; -use sp_blockchain::{HeaderBackend, Result as CResult}; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; use sp_consensus_slots::Slot; -use sp_core::crypto::{ByteArray, Pair, Public}; +use sp_core::crypto::{Pair, Public}; use sp_inherents::CreateInherentDataProviders; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; -use sp_runtime::{ - traits::{Block as BlockT, Header, Member, NumberFor, Zero}, - DigestItem, -}; +use sp_keystore::{KeystorePtr}; +use sp_runtime::traits::{Block as BlockT, Header, Member, NumberFor}; mod import_queue; +pub mod standalone; +pub use crate::standalone::{find_pre_digest, slot_duration}; pub use import_queue::{ build_verifier, import_queue, AuraVerifier, BuildVerifierParams, CheckForEquivocation, ImportQueueParams, @@ -70,6 +68,7 @@ pub use sp_consensus_aura::{ inherents::{InherentDataProvider, InherentType as AuraInherent, INHERENT_IDENTIFIER}, AuraApi, ConsensusLog, SlotDuration, AURA_ENGINE_ID, }; +use sp_core::ByteArray; const LOG_TARGET: &str = "aura"; @@ -112,39 +111,6 @@ impl Default for CompatibilityMode { } } -/// Get the slot duration for Aura. -pub fn slot_duration(client: &C) -> CResult -where - A: Codec, - B: BlockT, - C: AuxStore + ProvideRuntimeApi + UsageProvider, - C::Api: AuraApi, -{ - client - .runtime_api() - .slot_duration(client.usage_info().chain.best_hash) - .map_err(|err| err.into()) -} - -/// Get slot author for given block along with authorities. -fn slot_author(slot: Slot, authorities: &[AuthorityId
<P>
]) -> Option<&AuthorityId
<P>
> { - if authorities.is_empty() { - return None - } - - let idx = *slot % (authorities.len() as u64); - assert!( - idx <= usize::MAX as u64, - "It is impossible to have a vector with length beyond the address space; qed", - ); - - let current_author = authorities.get(idx as usize).expect( - "authorities not empty; index constrained to list length;this is a valid index; qed", - ); - - Some(current_author) -} - /// Parameters of [`start_aura`]. pub struct StartAuraParams { /// The duration of a slot. @@ -168,7 +134,7 @@ pub struct StartAuraParams { /// The backoff strategy when we miss slots. pub backoff_authoring_blocks: Option, /// The keystore used by the node. - pub keystore: SyncCryptoStorePtr, + pub keystore: KeystorePtr, /// The proportion of the slot dedicated to proposing. /// /// The block proposing will be limited to this proportion of the slot from the starting of the @@ -205,7 +171,7 @@ pub fn start_aura( telemetry, compatibility_mode, }: StartAuraParams>, -) -> Result, sp_consensus::Error> +) -> Result, ConsensusError> where P: Pair + Send + Sync, P::Public: AppPublic + Hash + Member + Encode + Decode, @@ -222,7 +188,7 @@ where CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, - Error: std::error::Error + Send + From + 'static, + Error: std::error::Error + Send + From + 'static, { let worker = build_aura_worker::(BuildAuraWorkerParams { client, @@ -265,7 +231,7 @@ pub struct BuildAuraWorkerParams { /// The backoff strategy when we miss slots. pub backoff_authoring_blocks: Option, /// The keystore used by the node. - pub keystore: SyncCryptoStorePtr, + pub keystore: KeystorePtr, /// The proportion of the slot dedicated to proposing. 
/// /// The block proposing will be limited to this proportion of the slot from the starting of the @@ -320,7 +286,7 @@ where P::Public: AppPublic + Hash + Member + Encode + Decode, P::Signature: TryFrom> + Hash + Member + Encode + Decode, I: BlockImport> + Send + Sync + 'static, - Error: std::error::Error + Send + From + 'static, + Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, @@ -346,7 +312,7 @@ struct AuraWorker { client: Arc, block_import: I, env: E, - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, sync_oracle: SO, justification_sync_link: L, force_authoring: bool, @@ -374,13 +340,13 @@ where SO: SyncOracle + Send + Clone + Sync, L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, - Error: std::error::Error + Send + From + 'static, + Error: std::error::Error + Send + From + 'static, { type BlockImport = I; type SyncOracle = SO; type JustificationSyncLink = L; type CreateProposer = - Pin> + Send + 'static>>; + Pin> + Send + 'static>>; type Proposer = E::Proposer; type Claim = P::Public; type AuxData = Vec>; @@ -393,11 +359,7 @@ where &mut self.block_import } - fn aux_data( - &self, - header: &B::Header, - _slot: Slot, - ) -> Result { + fn aux_data(&self, header: &B::Header, _slot: Slot) -> Result { authorities( self.client.as_ref(), header.hash(), @@ -406,31 +368,21 @@ where ) } - fn authorities_len(&self, epoch_data: &Self::AuxData) -> Option { - Some(epoch_data.len()) + fn authorities_len(&self, authorities: &Self::AuxData) -> Option { + Some(authorities.len()) } async fn claim_slot( &self, _header: &B::Header, slot: Slot, - epoch_data: &Self::AuxData, + authorities: &Self::AuxData, ) -> Option { - let expected_author = slot_author::
<P>
(slot, epoch_data); - expected_author.and_then(|p| { - if SyncCryptoStore::has_keys( - &*self.keystore, - &[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)], - ) { - Some(p.clone()) - } else { - None - } - }) + crate::standalone::claim_slot::
<P>
(slot, authorities, &self.keystore).await } fn pre_digest_data(&self, slot: Slot, _claim: &Self::Claim) -> Vec { - vec![>::aura_pre_digest(slot)] + vec![crate::standalone::pre_digest::
<P>
(slot)] } async fn block_import_params( @@ -440,35 +392,13 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - _epoch: Self::AuxData, + _authorities: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, - sp_consensus::Error, + ConsensusError, > { - // sign the pre-sealed hash of the block and then - // add it to a digest item. - let public_type_pair = public.to_public_crypto_pair(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*self.keystore, - as AppKey>::ID, - &public_type_pair, - header_hash.as_ref(), - ) - .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? - .ok_or_else(|| { - sp_consensus::Error::CannotSign( - public.clone(), - "Could not find key in keystore.".into(), - ) - })?; - let signature = signature - .clone() - .try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; - let signature_digest_item = - >::aura_seal(signature); + crate::standalone::seal::<_, P>(header_hash, &public, &self.keystore)?; let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(signature_digest_item); @@ -510,7 +440,7 @@ where fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { self.env .init(block) - .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e))) .boxed() } @@ -530,8 +460,7 @@ where self.logging_target(), ) } - - fn keystore(&self) -> SyncCryptoStorePtr { + fn keystore(&self) -> KeystorePtr { self.keystore.clone() } @@ -540,11 +469,6 @@ where } } -fn aura_err(error: Error) -> Error { - debug!(target: LOG_TARGET, "{}", error); - error -} - /// Aura Errors #[derive(Debug, thiserror::Error)] pub enum Error { @@ -583,22 +507,13 @@ impl From> for String { } } -/// Get pre-digests from the header -pub fn find_pre_digest(header: &B::Header) -> Result> { - if header.number().is_zero() { - return Ok(0.into()) - } - - let mut pre_digest: Option = None; - for log in header.digest().logs() { - trace!(target: LOG_TARGET, "Checking log {:?}", log); - match (CompatibleDigestItem::::as_aura_pre_digest(log), pre_digest.is_some()) { - (Some(_), true) => return Err(aura_err(Error::MultipleHeaders)), - (None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), - (s, false) => pre_digest = s, +impl From for Error { + fn from(e: crate::standalone::PreDigestLookupError) -> Self { + match e { + crate::standalone::PreDigestLookupError::MultipleHeaders => Error::MultipleHeaders, + crate::standalone::PreDigestLookupError::NoDigestFound => Error::NoDigestFound, } } - pre_digest.ok_or_else(|| aura_err(Error::NoDigestFound)) } fn authorities( @@ -631,14 +546,14 @@ where Default::default(), ), ) - .map_err(|_| sp_consensus::Error::InvalidAuthoritiesSet)?; + .map_err(|_| ConsensusError::InvalidAuthoritiesSet)?; }, } runtime_api .authorities(parent_hash) .ok() - .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + .ok_or(ConsensusError::InvalidAuthoritiesSet) } #[cfg(test)] @@ -651,11 +566,12 @@ mod tests { use sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker}; use sc_keystore::LocalKeystore; use sc_network_test::{Block as TestBlock, *}; - use sp_application_crypto::key_types::AURA; + use sp_application_crypto::{key_types::AURA, AppCrypto}; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_aura::sr25519::AuthorityPair; use 
sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; + use sp_keystore::Keystore; use sp_runtime::{ traits::{Block as BlockT, Header as _}, Digest, @@ -669,7 +585,6 @@ mod tests { runtime::{Header, H256}, TestClient, }; - use tokio::runtime::{Handle, Runtime}; const SLOT_DURATION_MS: u64 = 1000; @@ -807,7 +722,8 @@ mod tests { LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."), ); - SyncCryptoStore::sr25519_generate_new(&*keystore, AURA, Some(&key.to_seed())) + keystore + .sr25519_generate_new(AURA, Some(&key.to_seed())) .expect("Creates authority key"); keystore_paths.push(keystore_path); @@ -864,22 +780,6 @@ mod tests { .await; } - #[test] - fn authorities_call_works() { - let client = substrate_test_runtime_client::new(); - - assert_eq!(client.chain_info().best_number, 0); - assert_eq!( - authorities(&client, client.chain_info().best_hash, 1, &CompatibilityMode::None) - .unwrap(), - vec![ - Keyring::Alice.public().into(), - Keyring::Bob.public().into(), - Keyring::Charlie.public().into() - ] - ); - } - #[tokio::test] async fn current_node_authority_should_claim_slot() { let net = AuraTestNet::new(4); @@ -892,7 +792,8 @@ mod tests { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."); - let public = SyncCryptoStore::sr25519_generate_new(&keystore, AuthorityPair::ID, None) + let public = keystore + .sr25519_generate_new(AuthorityPair::ID, None) .expect("Key should be created"); authorities.push(public.into()); @@ -942,12 +843,9 @@ mod tests { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."); - SyncCryptoStore::sr25519_generate_new( - &keystore, - AuthorityPair::ID, - Some(&Keyring::Alice.to_seed()), - ) - .expect("Key should be created"); + keystore + .sr25519_generate_new(AuthorityPair::ID, Some(&Keyring::Alice.to_seed())) + .expect("Key should be created"); let net = Arc::new(Mutex::new(net)); diff --git a/client/consensus/aura/src/standalone.rs b/client/consensus/aura/src/standalone.rs new file mode 100644 index 0000000000000..0f9b8668d4478 --- /dev/null +++ b/client/consensus/aura/src/standalone.rs @@ -0,0 +1,357 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Standalone functions used within the implementation of Aura. 
+ +use std::fmt::Debug; + +use log::trace; + +use codec::Codec; + +use sc_client_api::{backend::AuxStore, UsageProvider}; +use sp_api::{Core, ProvideRuntimeApi}; +use sp_application_crypto::{AppCrypto, AppPublic}; +use sp_blockchain::Result as CResult; +use sp_consensus::Error as ConsensusError; +use sp_consensus_slots::Slot; +use sp_core::crypto::{ByteArray, Pair}; +use sp_keystore::KeystorePtr; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor, Zero}, + DigestItem, +}; + +pub use sc_consensus_slots::check_equivocation; + +use super::{ + AuraApi, AuthorityId, CompatibilityMode, CompatibleDigestItem, SlotDuration, LOG_TARGET, +}; + +/// Get the slot duration for Aura by reading from a runtime API at the best block's state. +pub fn slot_duration(client: &C) -> CResult +where + A: Codec, + B: BlockT, + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: AuraApi, +{ + slot_duration_at(client, client.usage_info().chain.best_hash) +} + +/// Get the slot duration for Aura by reading from a runtime API at a given block's state. +pub fn slot_duration_at(client: &C, block_hash: B::Hash) -> CResult +where + A: Codec, + B: BlockT, + C: AuxStore + ProvideRuntimeApi, + C::Api: AuraApi, +{ + client.runtime_api().slot_duration(block_hash).map_err(|err| err.into()) +} + +/// Get the slot author for given block along with authorities. +pub fn slot_author(slot: Slot, authorities: &[AuthorityId
<P>
]) -> Option<&AuthorityId
<P>
> { + if authorities.is_empty() { + return None + } + + let idx = *slot % (authorities.len() as u64); + assert!( + idx <= usize::MAX as u64, + "It is impossible to have a vector with length beyond the address space; qed", + ); + + let current_author = authorities.get(idx as usize).expect( + "authorities not empty; index constrained to list length;this is a valid index; qed", + ); + + Some(current_author) +} + +/// Attempt to claim a slot using a keystore. +/// +/// This returns `None` if the slot author is not locally controlled, and `Some` if it is, +/// with the public key of the slot author. +pub async fn claim_slot( + slot: Slot, + authorities: &[AuthorityId
<P>
], + keystore: &KeystorePtr, +) -> Option { + let expected_author = slot_author::
<P>
(slot, authorities); + expected_author.and_then(|p| { + if keystore.has_keys(&[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)]) { + Some(p.clone()) + } else { + None + } + }) +} + +/// Produce the pre-runtime digest containing the slot info. +/// +/// This is intended to be put into the block header prior to runtime execution, +/// so the runtime can read the slot in this way. +pub fn pre_digest(slot: Slot) -> sp_runtime::DigestItem +where + P::Signature: Codec, +{ + >::aura_pre_digest(slot) +} + +/// Produce the seal digest item by signing the hash of a block. +/// +/// Note that after this is added to a block header, the hash of the block will change. +pub fn seal( + header_hash: &Hash, + public: &P::Public, + keystore: &KeystorePtr, +) -> Result +where + Hash: AsRef<[u8]>, + P: Pair, + P::Signature: Codec + TryFrom>, + P::Public: AppPublic, +{ + let signature = keystore + .sign_with( + as AppCrypto>::ID, + as AppCrypto>::CRYPTO_ID, + public.as_slice(), + header_hash.as_ref(), + ) + .map_err(|e| ConsensusError::CannotSign(format!("{}. Key: {:?}", e, public)))? + .ok_or_else(|| { + ConsensusError::CannotSign(format!("Could not find key in keystore. Key: {:?}", public)) + })?; + + let signature = signature + .clone() + .try_into() + .map_err(|_| ConsensusError::InvalidSignature(signature, public.to_raw_vec()))?; + + let signature_digest_item = + >::aura_seal(signature); + + Ok(signature_digest_item) +} + +/// Errors in pre-digest lookup. +#[derive(Debug, thiserror::Error)] +pub enum PreDigestLookupError { + /// Multiple Aura pre-runtime headers + #[error("Multiple Aura pre-runtime headers")] + MultipleHeaders, + /// No Aura pre-runtime digest found + #[error("No Aura pre-runtime digest found")] + NoDigestFound, +} + +/// Extract a pre-digest from a block header. +/// +/// This fails if there is no pre-digest or there are multiple. +/// +/// Returns the `slot` stored in the pre-digest or an error if no pre-digest was found. +pub fn find_pre_digest( + header: &B::Header, +) -> Result { + if header.number().is_zero() { + return Ok(0.into()) + } + + let mut pre_digest: Option = None; + for log in header.digest().logs() { + trace!(target: LOG_TARGET, "Checking log {:?}", log); + match (CompatibleDigestItem::::as_aura_pre_digest(log), pre_digest.is_some()) { + (Some(_), true) => return Err(PreDigestLookupError::MultipleHeaders), + (None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| PreDigestLookupError::NoDigestFound) +} + +/// Fetch the current set of authorities from the runtime at a specific block. +/// +/// The compatibility mode and context block number informs this function whether +/// to initialize the hypothetical block created by the runtime API as backwards compatibility +/// for older chains. +pub fn fetch_authorities_with_compatibility_mode( + client: &C, + parent_hash: B::Hash, + context_block_number: NumberFor, + compatibility_mode: &CompatibilityMode>, +) -> Result, ConsensusError> +where + A: Codec + Debug, + B: BlockT, + C: ProvideRuntimeApi, + C::Api: AuraApi, +{ + let runtime_api = client.runtime_api(); + + match compatibility_mode { + CompatibilityMode::None => {}, + // Use `initialize_block` until we hit the block that should disable the mode. 
+ CompatibilityMode::UseInitializeBlock { until } => + if *until > context_block_number { + runtime_api + .initialize_block( + parent_hash, + &B::Header::new( + context_block_number, + Default::default(), + Default::default(), + parent_hash, + Default::default(), + ), + ) + .map_err(|_| ConsensusError::InvalidAuthoritiesSet)?; + }, + } + + runtime_api + .authorities(parent_hash) + .ok() + .ok_or(ConsensusError::InvalidAuthoritiesSet) +} + +/// Load the current set of authorities from a runtime at a specific block. +pub fn fetch_authorities( + client: &C, + parent_hash: B::Hash, +) -> Result, ConsensusError> +where + A: Codec + Debug, + B: BlockT, + C: ProvideRuntimeApi, + C::Api: AuraApi, +{ + client + .runtime_api() + .authorities(parent_hash) + .ok() + .ok_or(ConsensusError::InvalidAuthoritiesSet) +} + +/// Errors in slot and seal verification. +#[derive(Debug, thiserror::Error)] +pub enum SealVerificationError
<Header>
{ + /// Header is deferred to the future. + #[error("Header slot is in the future")] + Deferred(Header, Slot), + + /// The header has no seal digest. + #[error("Header is unsealed.")] + Unsealed, + + /// The header has a malformed seal. + #[error("Header has a malformed seal")] + BadSeal, + + /// The header has a bad signature. + #[error("Header has a bad signature")] + BadSignature, + + /// No slot author found. + #[error("No slot author for provided slot")] + SlotAuthorNotFound, + + /// Header has no valid slot pre-digest. + #[error("Header has no valid slot pre-digest")] + InvalidPreDigest(PreDigestLookupError), +} + +/// Check a header has been signed by the right key. If the slot is too far in the future, an error +/// will be returned. If it's successful, returns the pre-header (i.e. without the seal), +/// the slot, and the digest item containing the seal. +/// +/// Note that this does not check for equivocations, and [`check_equivocation`] is recommended +/// for that purpose. +/// +/// This digest item will always return `Some` when used with `as_aura_seal`. +pub fn check_header_slot_and_seal( + slot_now: Slot, + mut header: B::Header, + authorities: &[AuthorityId
<P>
], +) -> Result<(B::Header, Slot, DigestItem), SealVerificationError> +where + P::Signature: Codec, + P::Public: Codec + PartialEq + Clone, +{ + let seal = header.digest_mut().pop().ok_or(SealVerificationError::Unsealed)?; + + let sig = seal.as_aura_seal().ok_or(SealVerificationError::BadSeal)?; + + let slot = find_pre_digest::(&header) + .map_err(SealVerificationError::InvalidPreDigest)?; + + if slot > slot_now { + header.digest_mut().push(seal); + return Err(SealVerificationError::Deferred(header, slot)) + } else { + // check the signature is valid under the expected authority and + // chain state. + let expected_author = + slot_author::
<P>
(slot, authorities).ok_or(SealVerificationError::SlotAuthorNotFound)?; + + let pre_hash = header.hash(); + + if P::verify(&sig, pre_hash.as_ref(), expected_author) { + Ok((header, slot, seal)) + } else { + Err(SealVerificationError::BadSignature) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_keyring::sr25519::Keyring; + + #[test] + fn authorities_call_works() { + let client = substrate_test_runtime_client::new(); + + assert_eq!(client.chain_info().best_number, 0); + assert_eq!( + fetch_authorities_with_compatibility_mode( + &client, + client.chain_info().best_hash, + 1, + &CompatibilityMode::None + ) + .unwrap(), + vec![ + Keyring::Alice.public().into(), + Keyring::Bob.public().into(), + Keyring::Charlie.public().into() + ] + ); + + assert_eq!( + fetch_authorities(&client, client.chain_info().best_hash).unwrap(), + vec![ + Keyring::Alice.public().into(), + Keyring::Bob.public().into(), + Keyring::Charlie.public().into() + ] + ); + } +} diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 713392b012f9d..2382d064d0219 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -15,16 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" -scale-info = { version = "2.1.1", features = ["derive"] } +scale-info = { version = "2.5.0", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } futures = "0.3.21" log = "0.4.17" -merlin = "2.0" num-bigint = "0.4.3" num-rational = "0.4.1" num-traits = "0.2.8" parking_lot = "0.12.1" -schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } @@ -41,13 +39,10 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consensus/vrf" } sp-core = { version = "7.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-version = { version = "5.0.0", path = "../../../primitives/version" } [dev-dependencies] rand_chacha = "0.2.2" diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 4f5aaf85494b9..f81a14a97c758 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -31,7 +31,6 @@ sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = "1.0.85" -tempfile = "3.1.0" tokio = "1.22.0" sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" } sc-keystore = { version = "4.0.0-dev", path = "../../../keystore" } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 677a4ad92c9e1..1ae15cc5453d7 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ 
b/client/consensus/babe/rpc/src/lib.rs @@ -18,28 +18,29 @@ //! RPC api for babe. +use std::{collections::HashMap, sync::Arc}; + use futures::TryFutureExt; use jsonrpsee::{ core::{async_trait, Error as JsonRpseeError, RpcResult}, proc_macros::rpc, types::{error::CallError, ErrorObject}, }; +use serde::{Deserialize, Serialize}; -use sc_consensus_babe::{authorship, Epoch}; -use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; +use sc_consensus_babe::{authorship, BabeWorkerHandle}; +use sc_consensus_epochs::Epoch as EpochT; use sc_rpc_api::DenyUnsafe; -use serde::{Deserialize, Serialize}; use sp_api::ProvideRuntimeApi; -use sp_application_crypto::AppKey; +use sp_application_crypto::AppCrypto; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::{Error as ConsensusError, SelectChain}; -use sp_consensus_babe::{ - digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi, BabeConfiguration, -}; +use sp_consensus_babe::{digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi}; use sp_core::crypto::ByteArray; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{collections::HashMap, sync::Arc}; + +const BABE_ERROR: i32 = 9000; /// Provides rpc methods for interacting with Babe. #[rpc(client, server)] @@ -54,12 +55,10 @@ pub trait BabeApi { pub struct Babe { /// shared reference to the client. client: Arc, - /// shared reference to EpochChanges - shared_epoch_changes: SharedEpochChanges, + /// A handle to the BABE worker for issuing requests. + babe_worker_handle: BabeWorkerHandle, /// shared reference to the Keystore - keystore: SyncCryptoStorePtr, - /// config (actually holds the slot duration) - babe_config: BabeConfiguration, + keystore: KeystorePtr, /// The SelectChain strategy select_chain: SC, /// Whether to deny unsafe calls @@ -70,13 +69,12 @@ impl Babe { /// Creates a new instance of the Babe Rpc handler. 
pub fn new( client: Arc, - shared_epoch_changes: SharedEpochChanges, - keystore: SyncCryptoStorePtr, - babe_config: BabeConfiguration, + babe_worker_handle: BabeWorkerHandle, + keystore: KeystorePtr, select_chain: SC, deny_unsafe: DenyUnsafe, ) -> Self { - Self { client, shared_epoch_changes, keystore, babe_config, select_chain, deny_unsafe } + Self { client, babe_worker_handle, keystore, select_chain, deny_unsafe } } } @@ -93,21 +91,21 @@ where { async fn epoch_authorship(&self) -> RpcResult> { self.deny_unsafe.check_if_safe()?; - let header = self.select_chain.best_chain().map_err(Error::Consensus).await?; + + let best_header = self.select_chain.best_chain().map_err(Error::SelectChain).await?; + let epoch_start = self .client .runtime_api() - .current_epoch_start(header.hash()) - .map_err(|err| Error::StringError(format!("{:?}", err)))?; - - let epoch = epoch_data( - &self.shared_epoch_changes, - &self.client, - &self.babe_config, - *epoch_start, - &self.select_chain, - ) - .await?; + .current_epoch_start(best_header.hash()) + .map_err(|_| Error::FetchEpoch)?; + + let epoch = self + .babe_worker_handle + .epoch_data_for_child_of(best_header.hash(), *best_header.number(), epoch_start) + .await + .map_err(|_| Error::FetchEpoch)?; + let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); let mut claims: HashMap = HashMap::new(); @@ -117,10 +115,7 @@ where .iter() .enumerate() .filter_map(|(i, a)| { - if SyncCryptoStore::has_keys( - &*self.keystore, - &[(a.0.to_raw_vec(), AuthorityId::ID)], - ) { + if self.keystore.has_keys(&[(a.0.to_raw_vec(), AuthorityId::ID)]) { Some((a.0.clone(), i)) } else { None @@ -162,81 +157,50 @@ pub struct EpochAuthorship { secondary_vrf: Vec, } -/// Errors encountered by the RPC +/// Top-level error type for the RPC handler. #[derive(Debug, thiserror::Error)] pub enum Error { - /// Consensus error - #[error(transparent)] - Consensus(#[from] ConsensusError), - /// Errors that can be formatted as a String - #[error("{0}")] - StringError(String), + /// Failed to fetch the current best header. + #[error("Failed to fetch the current best header: {0}")] + SelectChain(ConsensusError), + /// Failed to fetch epoch data. + #[error("Failed to fetch epoch data")] + FetchEpoch, } impl From for JsonRpseeError { fn from(error: Error) -> Self { + let error_code = match error { + Error::SelectChain(_) => 1, + Error::FetchEpoch => 2, + }; + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( - 1234, + BABE_ERROR + error_code, error.to_string(), - None::<()>, + Some(format!("{:?}", error)), ))) } } -/// Fetches the epoch data for a given slot. -async fn epoch_data( - epoch_changes: &SharedEpochChanges, - client: &Arc, - babe_config: &BabeConfiguration, - slot: u64, - select_chain: &SC, -) -> Result -where - B: BlockT, - C: HeaderBackend + HeaderMetadata + 'static, - SC: SelectChain, -{ - let parent = select_chain.best_chain().await?; - epoch_changes - .shared_data() - .epoch_data_for_child_of( - descendent_query(&**client), - &parent.hash(), - *parent.number(), - slot.into(), - |slot| Epoch::genesis(babe_config, slot), - ) - .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(e.to_string())))? 
- .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) -} - #[cfg(test)] mod tests { use super::*; - use sc_keystore::LocalKeystore; - use sp_application_crypto::AppPair; - use sp_core::crypto::key_types::BABE; + use sp_consensus_babe::inherents::InherentDataProvider; + use sp_core::{crypto::key_types::BABE, testing::TaskExecutor}; use sp_keyring::Sr25519Keyring; - use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + use sp_keystore::{testing::MemoryKeystore, Keystore}; use substrate_test_runtime_client::{ runtime::Block, Backend, DefaultTestClientBuilderExt, TestClient, TestClientBuilder, TestClientBuilderExt, }; - use sc_consensus_babe::{block_import, AuthorityPair}; - use std::sync::Arc; - - /// creates keystore backed by a temp file - fn create_temp_keystore( - authority: Sr25519Keyring, - ) -> (SyncCryptoStorePtr, tempfile::TempDir) { - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = - Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); - SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(&authority.to_seed())) + fn create_keystore(authority: Sr25519Keyring) -> KeystorePtr { + let keystore = MemoryKeystore::new(); + keystore + .sr25519_generate_new(BABE, Some(&authority.to_seed())) .expect("Creates authority key"); - - (keystore, keystore_path) + keystore.into() } fn test_babe_rpc_module( @@ -245,14 +209,35 @@ mod tests { let builder = TestClientBuilder::new(); let (client, longest_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let config = sc_consensus_babe::configuration(&*client).expect("config available"); - let (_, link) = block_import(config.clone(), client.clone(), client.clone()) - .expect("can initialize block-import"); + let task_executor = TaskExecutor::new(); + let keystore = create_keystore(Sr25519Keyring::Alice); - let epoch_changes = link.epoch_changes().clone(); - let keystore = create_temp_keystore::(Sr25519Keyring::Alice).0; + let config = sc_consensus_babe::configuration(&*client).expect("config available"); + let slot_duration = config.slot_duration(); + + let (block_import, link) = + sc_consensus_babe::block_import(config.clone(), client.clone(), client.clone()) + .expect("can initialize block-import"); + + let (_, babe_worker_handle) = sc_consensus_babe::import_queue( + link.clone(), + block_import.clone(), + None, + client.clone(), + longest_chain.clone(), + move |_, _| async move { + Ok((InherentDataProvider::from_timestamp_and_slot_duration( + 0.into(), + slot_duration, + ),)) + }, + &task_executor, + None, + None, + ) + .unwrap(); - Babe::new(client.clone(), epoch_changes, keystore, config, longest_chain, deny_unsafe) + Babe::new(client.clone(), babe_worker_handle, keystore, longest_chain, deny_unsafe) } #[tokio::test] diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 195a19b3d0f61..dc7cd10867745 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -18,18 +18,20 @@ //! BABE authority selection and slot claiming. 
-use super::Epoch; +use super::{Epoch, AUTHORING_SCORE_LENGTH, AUTHORING_SCORE_VRF_CONTEXT}; use codec::Encode; use sc_consensus_epochs::Epoch as EpochT; -use schnorrkel::{keys::PublicKey, vrf::VRFInOut}; -use sp_application_crypto::AppKey; +use sp_application_crypto::AppCrypto; use sp_consensus_babe::{ digests::{PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest}, - make_transcript, make_transcript_data, AuthorityId, BabeAuthorityWeight, Slot, BABE_VRF_PREFIX, + make_transcript, AuthorityId, BabeAuthorityWeight, Randomness, Slot, }; -use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; -use sp_core::{blake2_256, crypto::ByteArray, U256}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_core::{ + blake2_256, + crypto::{ByteArray, Wraps}, + U256, +}; +use sp_keystore::KeystorePtr; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). @@ -95,19 +97,13 @@ pub(super) fn calculate_primary_threshold( ) } -/// Returns true if the given VRF output is lower than the given threshold, -/// false otherwise. -pub(super) fn check_primary_threshold(inout: &VRFInOut, threshold: u128) -> bool { - u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(BABE_VRF_PREFIX)) < threshold -} - /// Get the expected secondary author for the given slot and with given /// authorities. This should always assign the slot to some authority unless the /// authorities list is empty. pub(super) fn secondary_slot_author( slot: Slot, authorities: &[(AuthorityId, BabeAuthorityWeight)], - randomness: [u8; 32], + randomness: Randomness, ) -> Option<&AuthorityId> { if authorities.is_empty() { return None @@ -133,7 +129,7 @@ fn claim_secondary_slot( slot: Slot, epoch: &Epoch, keys: &[(AuthorityId, usize)], - keystore: &SyncCryptoStorePtr, + keystore: &KeystorePtr, author_secondary_vrf: bool, ) -> Option<(PreDigest, AuthorityId)> { let Epoch { authorities, randomness, mut epoch_index, .. 
} = epoch; @@ -152,27 +148,19 @@ fn claim_secondary_slot( for (authority_id, authority_index) in keys { if authority_id == expected_author { let pre_digest = if author_secondary_vrf { - let transcript_data = make_transcript_data(randomness, slot, epoch_index); - let result = SyncCryptoStore::sr25519_vrf_sign( - &**keystore, - AuthorityId::ID, - authority_id.as_ref(), - transcript_data, - ); - if let Ok(Some(signature)) = result { + let transcript = make_transcript(randomness, slot, epoch_index); + let result = + keystore.sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &transcript); + if let Ok(Some(vrf_signature)) = result { Some(PreDigest::SecondaryVRF(SecondaryVRFPreDigest { slot, - vrf_output: VRFOutput(signature.output), - vrf_proof: VRFProof(signature.proof), authority_index: *authority_index as u32, + vrf_signature, })) } else { None } - } else if SyncCryptoStore::has_keys( - &**keystore, - &[(authority_id.to_raw_vec(), AuthorityId::ID)], - ) { + } else if keystore.has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { Some(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: *authority_index as u32, @@ -197,7 +185,7 @@ fn claim_secondary_slot( pub fn claim_slot( slot: Slot, epoch: &Epoch, - keystore: &SyncCryptoStorePtr, + keystore: &KeystorePtr, ) -> Option<(PreDigest, AuthorityId)> { let authorities = epoch .authorities @@ -213,7 +201,7 @@ pub fn claim_slot( pub fn claim_slot_using_keys( slot: Slot, epoch: &Epoch, - keystore: &SyncCryptoStorePtr, + keystore: &KeystorePtr, keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { claim_primary_slot(slot, epoch, epoch.config.c, keystore, keys).or_else(|| { @@ -241,7 +229,7 @@ fn claim_primary_slot( slot: Slot, epoch: &Epoch, c: (u64, u64), - keystore: &SyncCryptoStorePtr, + keystore: &KeystorePtr, keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { let Epoch { authorities, randomness, mut epoch_index, .. 
} = epoch; @@ -251,29 +239,28 @@ fn claim_primary_slot( epoch_index = epoch.clone_for_slot(slot).epoch_index; } - for (authority_id, authority_index) in keys { - let transcript = make_transcript(randomness, slot, epoch_index); - let transcript_data = make_transcript_data(randomness, slot, epoch_index); - let result = SyncCryptoStore::sr25519_vrf_sign( - &**keystore, - AuthorityId::ID, - authority_id.as_ref(), - transcript_data, - ); - if let Ok(Some(signature)) = result { - let public = PublicKey::from_bytes(&authority_id.to_raw_vec()).ok()?; - let inout = match signature.output.attach_input_hash(&public, transcript) { - Ok(inout) => inout, - Err(_) => continue, - }; + let transcript = make_transcript(randomness, slot, epoch_index); + for (authority_id, authority_index) in keys { + let result = keystore.sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &transcript); + if let Ok(Some(vrf_signature)) = result { let threshold = calculate_primary_threshold(c, authorities, *authority_index); - if check_primary_threshold(&inout, threshold) { + + let can_claim = authority_id + .as_inner_ref() + .make_bytes::<[u8; AUTHORING_SCORE_LENGTH]>( + AUTHORING_SCORE_VRF_CONTEXT, + &transcript, + &vrf_signature.output, + ) + .map(|bytes| u128::from_le_bytes(bytes) < threshold) + .unwrap_or_default(); + + if can_claim { let pre_digest = PreDigest::Primary(PrimaryPreDigest { slot, - vrf_output: VRFOutput(signature.output), - vrf_proof: VRFProof(signature.proof), authority_index: *authority_index as u32, + vrf_signature, }); return Some((pre_digest, authority_id.clone())) @@ -287,20 +274,16 @@ fn claim_primary_slot( #[cfg(test)] mod tests { use super::*; - use sc_keystore::LocalKeystore; use sp_consensus_babe::{AllowedSlots, AuthorityId, BabeEpochConfiguration}; use sp_core::{crypto::Pair as _, sr25519::Pair}; - use std::sync::Arc; + use sp_keystore::testing::MemoryKeystore; #[test] fn claim_secondary_plain_slot_works() { - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory()); - let valid_public_key = SyncCryptoStore::sr25519_generate_new( - &*keystore, - AuthorityId::ID, - Some(sp_core::crypto::DEV_PHRASE), - ) - .unwrap(); + let keystore: KeystorePtr = MemoryKeystore::new().into(); + let valid_public_key = keystore + .sr25519_generate_new(AuthorityId::ID, Some(sp_core::crypto::DEV_PHRASE)) + .unwrap(); let authorities = vec![ (AuthorityId::from(Pair::generate().0.public()), 5), diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index d65aae33f6275..d7dbfc14c4ea6 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -86,11 +86,10 @@ use futures::{ use log::{debug, info, log, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::Registry; -use schnorrkel::SignatureError; use sc_client_api::{ - backend::AuxStore, AuxDataOperations, Backend as BackendT, BlockchainEvents, - FinalityNotification, PreCommitActions, ProvideUncles, UsageProvider, + backend::AuxStore, AuxDataOperations, Backend as BackendT, FinalityNotification, + PreCommitActions, UsageProvider, }; use sc_consensus::{ block_import::{ @@ -108,7 +107,7 @@ use sc_consensus_slots::{ }; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_application_crypto::AppKey; +use sp_application_crypto::AppCrypto; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{ Backend as _, BlockStatus, Error as ClientError, ForkBackend, HeaderBackend, HeaderMetadata, @@ -117,9 
+116,9 @@ use sp_blockchain::{ use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; use sp_consensus_babe::inherents::BabeInherentData; use sp_consensus_slots::Slot; -use sp_core::{crypto::ByteArray, ExecutionContext}; +use sp_core::ExecutionContext; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{KeystorePtr}; use sp_runtime::{ generic::OpaqueDigestItemId, traits::{Block as BlockT, Header, NumberFor, SaturatedConversion, Zero}, @@ -134,7 +133,7 @@ pub use sp_consensus_babe::{ PrimaryPreDigest, SecondaryPlainPreDigest, }, AuthorityId, AuthorityPair, AuthoritySignature, BabeApi, BabeAuthorityWeight, BabeBlockWeight, - BabeConfiguration, BabeEpochConfiguration, ConsensusLog, BABE_ENGINE_ID, VRF_OUTPUT_LENGTH, + BabeConfiguration, BabeEpochConfiguration, ConsensusLog, Randomness, BABE_ENGINE_ID, }; pub use aux_schema::load_block_weight as block_weight; @@ -149,6 +148,12 @@ mod tests; const LOG_TARGET: &str = "babe"; +/// VRF context used for slots claiming lottery. +const AUTHORING_SCORE_VRF_CONTEXT: &[u8] = b"substrate-babe-vrf"; + +/// VRF output length for slots claiming lottery. +const AUTHORING_SCORE_LENGTH: usize = 16; + /// BABE epoch information #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, scale_info::TypeInfo)] pub struct Epoch { @@ -161,7 +166,7 @@ pub struct Epoch { /// The authorities and their weights. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], + pub randomness: Randomness, /// Configuration of the epoch. pub config: BabeEpochConfiguration, } @@ -274,7 +279,7 @@ pub enum Error { MultipleConfigChangeDigests, /// Could not extract timestamp and slot #[error("Could not extract timestamp and slot: {0}")] - Extraction(sp_consensus::Error), + Extraction(ConsensusError), /// Could not fetch epoch #[error("Could not fetch epoch at {0:?}")] FetchEpoch(B::Hash), @@ -308,12 +313,12 @@ pub enum Error { /// No secondary author expected. #[error("No secondary author expected.")] NoSecondaryAuthorExpected, - /// VRF verification of block by author failed - #[error("VRF verification of block by author {0:?} failed: threshold {1} exceeded")] - VRFVerificationOfBlockFailed(AuthorityId, u128), /// VRF verification failed - #[error("VRF verification failed: {0:?}")] - VRFVerificationFailed(SignatureError), + #[error("VRF verification failed")] + VrfVerificationFailed, + /// Primary slot threshold too low + #[error("VRF output rejected, threshold {0} exceeded")] + VrfThresholdExceeded(u128), /// Could not fetch parent header #[error("Could not fetch parent header: {0}")] FetchParentHeader(sp_blockchain::Error), @@ -338,6 +343,9 @@ pub enum Error { /// Create inherents error. #[error("Creating inherents failed: {0}")] CreateInherents(sp_inherents::Error), + /// Background worker is not running and therefore requests cannot be answered. + #[error("Background worker is not running")] + BackgroundWorkerTerminated, /// Client error #[error(transparent)] Client(sp_blockchain::Error), @@ -404,7 +412,7 @@ where /// Parameters for BABE. pub struct BabeParams { /// The keystore that manages the keys of the node. 
- pub keystore: SyncCryptoStorePtr, + pub keystore: KeystorePtr, /// The client to use pub client: Arc, @@ -471,13 +479,10 @@ pub fn start_babe( max_block_proposal_slot_portion, telemetry, }: BabeParams, -) -> Result, sp_consensus::Error> +) -> Result, ConsensusError> where B: BlockT, C: ProvideRuntimeApi - + ProvideUncles - + BlockchainEvents - + PreCommitActions + HeaderBackend + HeaderMetadata + Send @@ -498,8 +503,6 @@ where BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, Error: std::error::Error + Send + From + From + 'static, { - const HANDLE_BUFFER_SIZE: usize = 1024; - let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); let worker = BabeSlotWorker { @@ -529,17 +532,7 @@ where create_inherent_data_providers, ); - let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); - - let answer_requests = - answer_requests(worker_rx, babe_link.config, client, babe_link.epoch_changes); - - let inner = future::select(Box::pin(slot_worker), Box::pin(answer_requests)); - Ok(BabeWorker { - inner: Box::pin(inner.map(|_| ())), - slot_notification_sinks, - handle: BabeWorkerHandle(worker_tx), - }) + Ok(BabeWorker { inner: Box::pin(slot_worker), slot_notification_sinks }) } // Remove obsolete block's weight data by leveraging finality notifications. @@ -556,8 +549,9 @@ fn aux_storage_cleanup + HeaderBackend, Block: B Ok(meta) => { hashes.insert(meta.parent); }, - Err(err) => - warn!(target: LOG_TARGET, "Failed to lookup metadata for block `{:?}`: {}", first, err,), + Err(err) => { + warn!(target: LOG_TARGET, "Failed to lookup metadata for block `{:?}`: {}", first, err,) + }, } // Cleans data for finalized block's ancestors @@ -592,42 +586,26 @@ async fn answer_requests( client: Arc, epoch_changes: SharedEpochChanges, ) where - C: ProvideRuntimeApi - + ProvideUncles - + BlockchainEvents - + HeaderBackend - + HeaderMetadata - + Send - + Sync - + 'static, + C: HeaderBackend + HeaderMetadata, { while let Some(request) = request_rx.next().await { match request { - BabeRequest::EpochForChild(parent_hash, parent_number, slot_number, response) => { + BabeRequest::EpochData(response) => { + let _ = response.send(epoch_changes.shared_data().clone()); + }, + BabeRequest::EpochDataForChildOf(parent_hash, parent_number, slot, response) => { let lookup = || { let epoch_changes = epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes - .epoch_descriptor_for_child_of( + epoch_changes + .epoch_data_for_child_of( descendent_query(&*client), &parent_hash, parent_number, - slot_number, + slot, + |slot| Epoch::genesis(&config, slot), ) .map_err(|e| Error::::ForkTree(Box::new(e)))? - .ok_or(Error::::FetchEpoch(parent_hash))?; - - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&config, slot)) - .ok_or(Error::::FetchEpoch(parent_hash))?; - - Ok(sp_consensus_babe::Epoch { - epoch_index: viable_epoch.as_ref().epoch_index, - start_slot: viable_epoch.as_ref().start_slot, - duration: viable_epoch.as_ref().duration, - authorities: viable_epoch.as_ref().authorities.clone(), - randomness: viable_epoch.as_ref().randomness, - config: viable_epoch.as_ref().config.clone(), - }) + .ok_or(Error::::FetchEpoch(parent_hash)) }; let _ = response.send(lookup()); @@ -637,17 +615,13 @@ async fn answer_requests( } /// Requests to the BABE service. -#[non_exhaustive] -pub enum BabeRequest { +enum BabeRequest { + /// Request all available epoch data. 
+ EpochData(oneshot::Sender>), /// Request the epoch that a child of the given block, with the given slot number would have. /// /// The parent block is identified by its hash and number. - EpochForChild( - B::Hash, - NumberFor, - Slot, - oneshot::Sender>>, - ), + EpochDataForChildOf(B::Hash, NumberFor, Slot, oneshot::Sender>>), } /// A handle to the BABE worker for issuing requests. @@ -655,11 +629,41 @@ pub enum BabeRequest { pub struct BabeWorkerHandle(Sender>); impl BabeWorkerHandle { - /// Send a request to the BABE service. - pub async fn send(&mut self, request: BabeRequest) { - // Failure to send means that the service is down. - // This will manifest as the receiver of the request being dropped. - let _ = self.0.send(request).await; + async fn send_request(&self, request: BabeRequest) -> Result<(), Error> { + match self.0.clone().send(request).await { + Err(err) if err.is_disconnected() => return Err(Error::BackgroundWorkerTerminated), + Err(err) => warn!( + target: LOG_TARGET, + "Unhandled error when sending request to worker: {:?}", err + ), + _ => {}, + } + + Ok(()) + } + + /// Fetch all available epoch data. + pub async fn epoch_data(&self) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + self.send_request(BabeRequest::EpochData(tx)).await?; + + rx.await.or(Err(Error::BackgroundWorkerTerminated)) + } + + /// Fetch the epoch that a child of the given block, with the given slot number would have. + /// + /// The parent block is identified by its hash and number. + pub async fn epoch_data_for_child_of( + &self, + parent_hash: B::Hash, + parent_number: NumberFor, + slot: Slot, + ) -> Result> { + let (tx, rx) = oneshot::channel(); + self.send_request(BabeRequest::EpochDataForChildOf(parent_hash, parent_number, slot, tx)) + .await?; + + rx.await.or(Err(Error::BackgroundWorkerTerminated))? } } @@ -668,7 +672,6 @@ impl BabeWorkerHandle { pub struct BabeWorker { inner: Pin + Send + 'static>>, slot_notification_sinks: SlotNotificationSinks, - handle: BabeWorkerHandle, } impl BabeWorker { @@ -683,11 +686,6 @@ impl BabeWorker { self.slot_notification_sinks.lock().push(sink); stream } - - /// Get a handle to the worker. - pub fn handle(&self) -> BabeWorkerHandle { - self.handle.clone() - } } impl Future for BabeWorker { @@ -711,7 +709,7 @@ struct BabeSlotWorker { justification_sync_link: L, force_authoring: bool, backoff_authoring_blocks: Option, - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, config: BabeConfiguration, @@ -739,7 +737,7 @@ where type SyncOracle = SO; type JustificationSyncLink = L; type CreateProposer = - Pin> + Send + 'static>>; + Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; type AuxData = ViableEpochDescriptor, Epoch>; @@ -762,7 +760,7 @@ where slot, ) .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + .ok_or(ConsensusError::InvalidAuthoritiesSet) } fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { @@ -827,31 +825,21 @@ where (_, public): Self::Claim, epoch_descriptor: Self::AuxData, ) -> Result< - sc_consensus::BlockImportParams>::Transaction>, - sp_consensus::Error, + BlockImportParams>::Transaction>, + ConsensusError, > { - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let public_type_pair = public.clone().into(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*self.keystore, - ::ID, - &public_type_pair, - header_hash.as_ref(), - ) - .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? - .ok_or_else(|| { - sp_consensus::Error::CannotSign( - public.clone(), - "Could not find key in keystore.".into(), - ) - })?; - let signature: AuthoritySignature = signature - .clone() - .try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; - let digest_item = ::babe_seal(signature); + let signature = self + .keystore + .sr25519_sign(::ID, public.as_ref(), header_hash.as_ref()) + .map_err(|e| ConsensusError::CannotSign(format!("{}. Key: {:?}", e, public)))? + .ok_or_else(|| { + ConsensusError::CannotSign(format!( + "Could not find key in keystore. Key: {:?}", + public + )) + })?; + + let digest_item = ::babe_seal(signature.into()); let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(digest_item); @@ -894,11 +882,7 @@ where } fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin( - self.env - .init(block) - .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), - ) + Box::pin(self.env.init(block).map_err(|e| ConsensusError::ClientImport(e.to_string()))) } fn telemetry(&self) -> Option { @@ -917,7 +901,8 @@ where self.logging_target(), ) } - fn keystore(&self) -> SyncCryptoStorePtr { + + fn keystore(&self) -> KeystorePtr { unimplemented!() // self.keystore.clone() } @@ -1193,7 +1178,7 @@ where .create_inherent_data_providers .create_inherent_data_providers(parent_hash, ()) .await - .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; + .map_err(|e| Error::::Client(ConsensusError::from(e).into()))?; let slot_now = create_inherent_data_providers.slot(); @@ -1811,7 +1796,7 @@ pub fn import_queue( spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, telemetry: Option, -) -> ClientResult> +) -> ClientResult<(DefaultImportQueue, BabeWorkerHandle)> where Inner: BlockImport< Block, @@ -1832,16 +1817,28 @@ where CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { + const HANDLE_BUFFER_SIZE: usize = 1024; + let verifier = BabeVerifier { select_chain, create_inherent_data_providers, - config: babe_link.config, - epoch_changes: babe_link.epoch_changes, + config: babe_link.config.clone(), + epoch_changes: babe_link.epoch_changes.clone(), telemetry, - client, + client: client.clone(), }; - Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) + let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); + + let answer_requests = + answer_requests(worker_rx, babe_link.config, client, babe_link.epoch_changes); + + spawner.spawn_essential("babe-worker", Some("babe"), answer_requests.boxed()); + + Ok(( + BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry), + BabeWorkerHandle(worker_tx), + )) } /// Reverts protocol aux data to at most the last finalized block. 
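With this change, sc_consensus_babe::import_queue returns the BabeWorkerHandle together with the import queue and spawns the request-answering task on the provided essential-task spawner, while sc_consensus_babe_rpc::Babe::new consumes that handle instead of the shared epoch changes plus the BABE configuration. The following is a minimal wiring sketch for a node service, modelled on the RPC test above; names such as babe_link, babe_block_import, client, select_chain, slot_duration, task_executor, keystore and deny_unsafe are assumed to be provided by the surrounding service code and are not part of this patch.

    // Sketch only: build the BABE import queue and obtain the worker handle for the RPC layer.
    let (import_queue, babe_worker_handle) = sc_consensus_babe::import_queue(
        babe_link.clone(),
        babe_block_import.clone(),
        None, // no justification import
        client.clone(),
        select_chain.clone(),
        move |_, _| async move {
            // Provide the timestamp and BABE slot inherent data, as a typical service would.
            let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
            let slot =
                sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                    *timestamp,
                    slot_duration,
                );
            Ok((slot, timestamp))
        },
        &task_executor,
        None, // no Prometheus registry
        None, // no telemetry handle
    )?;

    // The RPC handler is now constructed from the worker handle rather than from
    // SharedEpochChanges and the BABE configuration.
    let babe_rpc = sc_consensus_babe_rpc::Babe::new(
        client.clone(),
        babe_worker_handle,
        keystore.clone(),
        select_chain.clone(),
        deny_unsafe,
    );
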
diff --git a/client/consensus/babe/src/migration.rs b/client/consensus/babe/src/migration.rs index ec864b8e5510f..2b1396c41c268 100644 --- a/client/consensus/babe/src/migration.rs +++ b/client/consensus/babe/src/migration.rs @@ -18,7 +18,7 @@ use crate::{ AuthorityId, BabeAuthorityWeight, BabeConfiguration, BabeEpochConfiguration, Epoch, - NextEpochDescriptor, VRF_OUTPUT_LENGTH, + NextEpochDescriptor, Randomness, }; use codec::{Decode, Encode}; use sc_consensus_epochs::Epoch as EpochT; @@ -36,7 +36,7 @@ pub struct EpochV0 { /// The authorities and their weights. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], + pub randomness: Randomness, } impl EpochT for EpochV0 { diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 60f53d9886d35..740ce63676374 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -20,10 +20,6 @@ use super::*; use authorship::claim_slot; -use rand_chacha::{ - rand_core::{RngCore, SeedableRng}, - ChaChaRng, -}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; @@ -33,24 +29,19 @@ use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_babe::{ - inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, - AuthorityId, AuthorityPair, Slot, + inherents::InherentDataProvider, make_transcript, AllowedSlots, AuthorityId, AuthorityPair, + Slot, }; use sp_consensus_slots::SlotDuration; -use sp_consensus_vrf::schnorrkel::VRFOutput; use sp_core::crypto::Pair; use sp_keyring::Sr25519Keyring; -use sp_keystore::{ - testing::KeyStore as TestKeyStore, vrf::make_transcript as transcript_from_data, - SyncCryptoStore, -}; +use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, }; use sp_timestamp::Timestamp; use std::{cell::RefCell, task::Poll, time::Duration}; -use tokio::runtime::{Handle, Runtime}; type Item = DigestItem; @@ -370,11 +361,12 @@ async fn rejects_empty_block() { }) } -fn create_keystore(authority: Sr25519Keyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(TestKeyStore::new()); - SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(&authority.to_seed())) - .expect("Generates authority key"); +fn create_keystore(authority: Sr25519Keyring) -> KeystorePtr { + let keystore = MemoryKeystore::new(); keystore + .sr25519_generate_new(BABE, Some(&authority.to_seed())) + .expect("Generates authority key"); + keystore.into() } async fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static) { @@ -485,9 +477,7 @@ async fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + ' .await; } -/// NOTE: BABE is not used in mangata-node #[tokio::test] -#[ignore] async fn authoring_blocks() { run_one_test(|_, _| ()).await; } @@ -640,22 +630,24 @@ fn claim_vrf_check() { PreDigest::Primary(d) => d, v => panic!("Unexpected pre-digest variant {:?}", v), }; - let transcript = make_transcript_data(&epoch.randomness.clone(), 0.into(), epoch.epoch_index); - let sign = SyncCryptoStore::sr25519_vrf_sign(&*keystore, AuthorityId::ID, &public, transcript) + let transcript = make_transcript(&epoch.randomness.clone(), 0.into(), 
epoch.epoch_index); + let sign = keystore + .sr25519_vrf_sign(AuthorityId::ID, &public, &transcript) .unwrap() .unwrap(); - assert_eq!(pre_digest.vrf_output, VRFOutput(sign.output)); + assert_eq!(pre_digest.vrf_signature.output, sign.output); // We expect a SecondaryVRF claim for slot 1 let pre_digest = match claim_slot(1.into(), &epoch, &keystore).unwrap().0 { PreDigest::SecondaryVRF(d) => d, v => panic!("Unexpected pre-digest variant {:?}", v), }; - let transcript = make_transcript_data(&epoch.randomness.clone(), 1.into(), epoch.epoch_index); - let sign = SyncCryptoStore::sr25519_vrf_sign(&*keystore, AuthorityId::ID, &public, transcript) + let transcript = make_transcript(&epoch.randomness.clone(), 1.into(), epoch.epoch_index); + let sign = keystore + .sr25519_vrf_sign(AuthorityId::ID, &public, &transcript) .unwrap() .unwrap(); - assert_eq!(pre_digest.vrf_output, VRFOutput(sign.output)); + assert_eq!(pre_digest.vrf_signature.output, sign.output); // Check that correct epoch index has been used if epochs are skipped (primary VRF) let slot = Slot::from(103); @@ -664,12 +656,13 @@ fn claim_vrf_check() { v => panic!("Unexpected claim variant {:?}", v), }; let fixed_epoch = epoch.clone_for_slot(slot); - let transcript = make_transcript_data(&epoch.randomness.clone(), slot, fixed_epoch.epoch_index); - let sign = SyncCryptoStore::sr25519_vrf_sign(&*keystore, AuthorityId::ID, &public, transcript) + let transcript = make_transcript(&epoch.randomness.clone(), slot, fixed_epoch.epoch_index); + let sign = keystore + .sr25519_vrf_sign(AuthorityId::ID, &public, &transcript) .unwrap() .unwrap(); assert_eq!(fixed_epoch.epoch_index, 11); - assert_eq!(claim.vrf_output, VRFOutput(sign.output)); + assert_eq!(claim.vrf_signature.output, sign.output); // Check that correct epoch index has been used if epochs are skipped (secondary VRF) let slot = Slot::from(100); @@ -678,12 +671,13 @@ fn claim_vrf_check() { v => panic!("Unexpected claim variant {:?}", v), }; let fixed_epoch = epoch.clone_for_slot(slot); - let transcript = make_transcript_data(&epoch.randomness.clone(), slot, fixed_epoch.epoch_index); - let sign = SyncCryptoStore::sr25519_vrf_sign(&*keystore, AuthorityId::ID, &public, transcript) + let transcript = make_transcript(&epoch.randomness.clone(), slot, fixed_epoch.epoch_index); + let sign = keystore + .sr25519_vrf_sign(AuthorityId::ID, &public, &transcript) .unwrap() .unwrap(); assert_eq!(fixed_epoch.epoch_index, 11); - assert_eq!(pre_digest.vrf_output, VRFOutput(sign.output)); + assert_eq!(pre_digest.vrf_signature.output, sign.output); } // Propose and import a new BABE block on top of the given parent. 
@@ -1083,36 +1077,6 @@ async fn verify_slots_are_strictly_increasing() { propose_and_import_block(&b1, Some(999.into()), &mut proposer_factory, &mut block_import).await; } -#[test] -fn babe_transcript_generation_match() { - sp_tracing::try_init_simple(); - - let authority = Sr25519Keyring::Alice; - let _keystore = create_keystore(authority); - - let epoch = Epoch { - start_slot: 0.into(), - authorities: vec![(authority.public().into(), 1)], - randomness: [0; 32], - epoch_index: 1, - duration: 100, - config: BabeEpochConfiguration { - c: (3, 10), - allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, - }, - }; - - let orig_transcript = make_transcript(&epoch.randomness.clone(), 1.into(), epoch.epoch_index); - let new_transcript = make_transcript_data(&epoch.randomness, 1.into(), epoch.epoch_index); - - let test = |t: merlin::Transcript| -> [u8; 16] { - let mut b = [0u8; 16]; - t.build_rng().finalize(&mut ChaChaRng::from_seed([0u8; 32])).fill_bytes(&mut b); - b - }; - debug_assert!(test(orig_transcript) == test(transcript_from_data(new_transcript))); -} - #[tokio::test] async fn obsolete_blocks_aux_data_cleanup() { let mut net = BabeTestNet::new(1); diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 96d3961d44571..cadceb6a57510 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -18,8 +18,9 @@ //! Verification for BABE headers. use crate::{ - authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}, - babe_err, find_pre_digest, BlockT, Epoch, Error, LOG_TARGET, + authorship::{calculate_primary_threshold, secondary_slot_author}, + babe_err, find_pre_digest, BlockT, Epoch, Error, AUTHORING_SCORE_LENGTH, + AUTHORING_SCORE_VRF_CONTEXT, LOG_TARGET, }; use log::{debug, trace}; use sc_consensus_epochs::Epoch as EpochT; @@ -32,7 +33,10 @@ use sp_consensus_babe::{ make_transcript, AuthorityId, AuthorityPair, AuthoritySignature, }; use sp_consensus_slots::Slot; -use sp_core::{ByteArray, Pair}; +use sp_core::{ + crypto::{VrfVerifier, Wraps}, + Pair, +}; use sp_runtime::{traits::Header, DigestItem}; /// BABE verification parameters @@ -155,7 +159,7 @@ fn check_primary_header( epoch: &Epoch, c: (u64, u64), ) -> Result<(), Error> { - let author = &epoch.authorities[pre_digest.authority_index as usize].0; + let authority_id = &epoch.authorities[pre_digest.authority_index as usize].0; let mut epoch_index = epoch.epoch_index; if epoch.end_slot() <= pre_digest.slot { @@ -163,28 +167,34 @@ fn check_primary_header( epoch_index = epoch.clone_for_slot(pre_digest.slot).epoch_index; } - if AuthorityPair::verify(&signature, pre_hash, author) { - let (inout, _) = { - let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch_index); - - schnorrkel::PublicKey::from_bytes(author.as_slice()) - .and_then(|p| { - p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) - }) - .map_err(|s| babe_err(Error::VRFVerificationFailed(s)))? 
- }; + if !AuthorityPair::verify(&signature, pre_hash, authority_id) { + return Err(babe_err(Error::BadSignature(pre_hash))) + } - let threshold = - calculate_primary_threshold(c, &epoch.authorities, pre_digest.authority_index as usize); + let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch_index); - if !check_primary_threshold(&inout, threshold) { - return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))) - } + if !authority_id.as_inner_ref().vrf_verify(&transcript, &pre_digest.vrf_signature) { + return Err(babe_err(Error::VrfVerificationFailed)) + } - Ok(()) - } else { - Err(babe_err(Error::BadSignature(pre_hash))) + let threshold = + calculate_primary_threshold(c, &epoch.authorities, pre_digest.authority_index as usize); + + let score = authority_id + .as_inner_ref() + .make_bytes::<[u8; AUTHORING_SCORE_LENGTH]>( + AUTHORING_SCORE_VRF_CONTEXT, + &transcript, + &pre_digest.vrf_signature.output, + ) + .map(u128::from_le_bytes) + .map_err(|_| babe_err(Error::VrfVerificationFailed))?; + + if score >= threshold { + return Err(babe_err(Error::VrfThresholdExceeded(threshold))) } + + Ok(()) } /// Check a secondary slot proposal header. We validate that the given header is @@ -197,8 +207,7 @@ fn check_secondary_plain_header( signature: AuthoritySignature, epoch: &Epoch, ) -> Result<(), Error> { - // check the signature is valid under the expected authority and - // chain state. + // check the signature is valid under the expected authority and chain state. let expected_author = secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) .ok_or(Error::NoSecondaryAuthorExpected)?; @@ -209,11 +218,11 @@ fn check_secondary_plain_header( return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) } - if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { - Ok(()) - } else { - Err(Error::BadSignature(pre_hash)) + if !AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { + return Err(Error::BadSignature(pre_hash)) } + + Ok(()) } /// Check a secondary VRF slot proposal header. @@ -223,8 +232,7 @@ fn check_secondary_vrf_header( signature: AuthoritySignature, epoch: &Epoch, ) -> Result<(), Error> { - // check the signature is valid under the expected authority and - // chain state. + // check the signature is valid under the expected authority and chain state. 
let expected_author = secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) .ok_or(Error::NoSecondaryAuthorExpected)?; @@ -241,15 +249,15 @@ fn check_secondary_vrf_header( epoch_index = epoch.clone_for_slot(pre_digest.slot).epoch_index; } - if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { - let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch_index); + if !AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { + return Err(Error::BadSignature(pre_hash)) + } - schnorrkel::PublicKey::from_bytes(author.as_slice()) - .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) - .map_err(|s| babe_err(Error::VRFVerificationFailed(s)))?; + let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch_index); - Ok(()) - } else { - Err(Error::BadSignature(pre_hash)) + if !author.as_inner_ref().vrf_verify(&transcript, &pre_digest.vrf_signature) { + return Err(Error::VrfVerificationFailed) } + + Ok(()) } diff --git a/client/consensus/beefy/src/aux_schema.rs b/client/consensus/beefy/src/aux_schema.rs index 2e99b4cc40638..84186140b6925 100644 --- a/client/consensus/beefy/src/aux_schema.rs +++ b/client/consensus/beefy/src/aux_schema.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::Block as BlockT; const VERSION_KEY: &[u8] = b"beefy_auxschema_version"; const WORKER_STATE_KEY: &[u8] = b"beefy_voter_state"; -const CURRENT_VERSION: u32 = 2; +const CURRENT_VERSION: u32 = 3; pub(crate) fn write_current_version(backend: &BE) -> ClientResult<()> { info!(target: LOG_TARGET, "🥩 write aux schema version {:?}", CURRENT_VERSION); @@ -63,8 +63,8 @@ where match version { None => (), - Some(1) => (), // version 1 is totally obsolete and should be simply ignored - Some(2) => return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY), + Some(1) | Some(2) => (), // versions 1 & 2 are obsolete and should be simply ignored + Some(3) => return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY), other => return Err(ClientError::Backend(format!("Unsupported BEEFY DB version: {:?}", other))), } diff --git a/client/consensus/beefy/src/communication/gossip.rs b/client/consensus/beefy/src/communication/gossip.rs index 219203ee4e173..9be648f8796c3 100644 --- a/client/consensus/beefy/src/communication/gossip.rs +++ b/client/consensus/beefy/src/communication/gossip.rs @@ -18,7 +18,7 @@ use std::{collections::BTreeMap, sync::Arc, time::Duration}; -use sc_network::PeerId; +use sc_network::{PeerId, ReputationChange}; use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; use sp_core::hashing::twox_64; use sp_runtime::traits::{Block, Hash, Header, NumberFor}; @@ -26,64 +26,195 @@ use sp_runtime::traits::{Block, Hash, Header, NumberFor}; use codec::{Decode, Encode}; use log::{debug, trace}; use parking_lot::{Mutex, RwLock}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use wasm_timer::Instant; -use crate::{communication::peers::KnownPeers, keystore::BeefyKeystore, LOG_TARGET}; +use crate::{ + communication::{ + benefit, cost, + peers::{KnownPeers, PeerReport}, + }, + justification::{ + proof_block_num_and_set_id, verify_with_validator_set, BeefyVersionedFinalityProof, + }, + keystore::BeefyKeystore, + LOG_TARGET, +}; use sp_consensus_beefy::{ - crypto::{Public, Signature}, - VoteMessage, + crypto::{AuthorityId, Signature}, + ValidatorSet, ValidatorSetId, VoteMessage, }; // Timeout for rebroadcasting messages. 
-const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); +#[cfg(not(test))] +const REBROADCAST_AFTER: Duration = Duration::from_secs(60); +#[cfg(test)] +const REBROADCAST_AFTER: Duration = Duration::from_secs(5); + +#[derive(Debug, PartialEq)] +pub(super) enum Action { + // repropagate under given topic, to the given peers, applying cost/benefit to originator. + Keep(H, ReputationChange), + // discard, applying cost/benefit to originator. + Discard(ReputationChange), +} + +/// An outcome of examining a message. +#[derive(Debug, PartialEq, Clone, Copy)] +enum Consider { + /// Accept the message. + Accept, + /// Message is too early. Reject. + RejectPast, + /// Message is from the future. Reject. + RejectFuture, + /// Message cannot be evaluated. Reject. + RejectOutOfScope, +} -/// Gossip engine messages topic -pub(crate) fn topic() -> B::Hash +/// BEEFY gossip message type that gets encoded and sent on the network. +#[derive(Debug, Encode, Decode)] +pub(crate) enum GossipMessage { + /// BEEFY message with commitment and single signature. + Vote(VoteMessage, AuthorityId, Signature>), + /// BEEFY justification with commitment and signatures. + FinalityProof(BeefyVersionedFinalityProof), +} + +impl GossipMessage { + /// Return inner vote if this message is a Vote. + pub fn unwrap_vote(self) -> Option, AuthorityId, Signature>> { + match self { + GossipMessage::Vote(vote) => Some(vote), + GossipMessage::FinalityProof(_) => None, + } + } + + /// Return inner finality proof if this message is a FinalityProof. + pub fn unwrap_finality_proof(self) -> Option> { + match self { + GossipMessage::Vote(_) => None, + GossipMessage::FinalityProof(proof) => Some(proof), + } + } +} + +/// Gossip engine votes messages topic +pub(crate) fn votes_topic() -> B::Hash where B: Block, { - <::Hashing as Hash>::hash(b"beefy") + <::Hashing as Hash>::hash(b"beefy-votes") +} + +/// Gossip engine justifications messages topic +pub(crate) fn proofs_topic() -> B::Hash +where + B: Block, +{ + <::Hashing as Hash>::hash(b"beefy-justifications") } /// A type that represents hash of the message. pub type MessageHash = [u8; 8]; -struct KnownVotes { - last_done: Option>, - live: BTreeMap, fnv::FnvHashSet>, +#[derive(Clone, Debug)] +pub(crate) struct GossipFilterCfg<'a, B: Block> { + pub start: NumberFor, + pub end: NumberFor, + pub validator_set: &'a ValidatorSet, +} + +#[derive(Clone, Debug)] +struct FilterInner { + pub start: NumberFor, + pub end: NumberFor, + pub validator_set: ValidatorSet, } -impl KnownVotes { +struct Filter { + inner: Option>, + live_votes: BTreeMap, fnv::FnvHashSet>, +} + +impl Filter { pub fn new() -> Self { - Self { last_done: None, live: BTreeMap::new() } + Self { inner: None, live_votes: BTreeMap::new() } } - /// Create new round votes set if not already present. - fn insert(&mut self, round: NumberFor) { - self.live.entry(round).or_default(); + /// Update filter to new `start` and `set_id`. + fn update(&mut self, cfg: GossipFilterCfg) { + self.live_votes.retain(|&round, _| round >= cfg.start && round <= cfg.end); + // only clone+overwrite big validator_set if set_id changed + match self.inner.as_mut() { + Some(f) if f.validator_set.id() == cfg.validator_set.id() => { + f.start = cfg.start; + f.end = cfg.end; + }, + _ => + self.inner = Some(FilterInner { + start: cfg.start, + end: cfg.end, + validator_set: cfg.validator_set.clone(), + }), + } } - /// Remove `round` and older from live set, update `last_done` accordingly. 
- fn conclude(&mut self, round: NumberFor) { - self.live.retain(|&number, _| number > round); - self.last_done = self.last_done.max(Some(round)); + /// Accept if `max(session_start, best_beefy) <= round <= best_grandpa`, + /// and vote `set_id` matches session set id. + /// + /// Latest concluded round is still considered alive to allow proper gossiping for it. + fn consider_vote(&self, round: NumberFor, set_id: ValidatorSetId) -> Consider { + self.inner + .as_ref() + .map(|f| + // only from current set and only [filter.start, filter.end] + if set_id < f.validator_set.id() { + Consider::RejectPast + } else if set_id > f.validator_set.id() { + Consider::RejectFuture + } else if round < f.start { + Consider::RejectPast + } else if round > f.end { + Consider::RejectFuture + } else { + Consider::Accept + }) + .unwrap_or(Consider::RejectOutOfScope) } - /// Return true if `round` is newer than previously concluded rounds. + /// Return true if `round` is >= than `max(session_start, best_beefy)`, + /// and proof `set_id` matches session set id. /// /// Latest concluded round is still considered alive to allow proper gossiping for it. - fn is_live(&self, round: &NumberFor) -> bool { - Some(*round) >= self.last_done + fn consider_finality_proof(&self, round: NumberFor, set_id: ValidatorSetId) -> Consider { + self.inner + .as_ref() + .map(|f| + // only from current set and only >= filter.start + if round < f.start || set_id < f.validator_set.id() { + Consider::RejectPast + } else if set_id > f.validator_set.id() { + Consider::RejectFuture + } else { + Consider::Accept + } + ) + .unwrap_or(Consider::RejectOutOfScope) } /// Add new _known_ `hash` to the round's known votes. - fn add_known(&mut self, round: &NumberFor, hash: MessageHash) { - self.live.get_mut(round).map(|known| known.insert(hash)); + fn add_known_vote(&mut self, round: NumberFor, hash: MessageHash) { + self.live_votes.entry(round).or_default().insert(hash); } /// Check if `hash` is already part of round's known votes. - fn is_known(&self, round: &NumberFor, hash: &MessageHash) -> bool { - self.live.get(round).map(|known| known.contains(hash)).unwrap_or(false) + fn is_known_vote(&self, round: NumberFor, hash: &MessageHash) -> bool { + self.live_votes.get(&round).map(|known| known.contains(hash)).unwrap_or(false) + } + + fn validator_set(&self) -> Option<&ValidatorSet> { + self.inner.as_ref().map(|f| &f.validator_set) } } @@ -99,39 +230,132 @@ pub(crate) struct GossipValidator where B: Block, { - topic: B::Hash, - known_votes: RwLock>, + votes_topic: B::Hash, + justifs_topic: B::Hash, + gossip_filter: RwLock>, next_rebroadcast: Mutex, known_peers: Arc>>, + report_sender: TracingUnboundedSender, } impl GossipValidator where B: Block, { - pub fn new(known_peers: Arc>>) -> GossipValidator { - GossipValidator { - topic: topic::(), - known_votes: RwLock::new(KnownVotes::new()), + pub(crate) fn new( + known_peers: Arc>>, + ) -> (GossipValidator, TracingUnboundedReceiver) { + let (tx, rx) = tracing_unbounded("mpsc_beefy_gossip_validator", 10_000); + let val = GossipValidator { + votes_topic: votes_topic::(), + justifs_topic: proofs_topic::(), + gossip_filter: RwLock::new(Filter::new()), next_rebroadcast: Mutex::new(Instant::now() + REBROADCAST_AFTER), known_peers, - } + report_sender: tx, + }; + (val, rx) } - /// Note a voting round. + /// Update gossip validator filter. /// - /// Noting round will track gossiped votes for `round`. 
- pub(crate) fn note_round(&self, round: NumberFor) { - debug!(target: LOG_TARGET, "🥩 About to note gossip round #{}", round); - self.known_votes.write().insert(round); + /// Only votes for `set_id` and rounds `start <= round <= end` will be accepted. + pub(crate) fn update_filter(&self, filter: GossipFilterCfg) { + debug!(target: LOG_TARGET, "🥩 New gossip filter {:?}", filter); + self.gossip_filter.write().update(filter); } - /// Conclude a voting round. - /// - /// This can be called once round is complete so we stop gossiping for it. - pub(crate) fn conclude_round(&self, round: NumberFor) { - debug!(target: LOG_TARGET, "🥩 About to drop gossip round #{}", round); - self.known_votes.write().conclude(round); + fn report(&self, who: PeerId, cost_benefit: ReputationChange) { + let _ = self.report_sender.unbounded_send(PeerReport { who, cost_benefit }); + } + + fn validate_vote( + &self, + vote: VoteMessage, AuthorityId, Signature>, + sender: &PeerId, + data: &[u8], + ) -> Action { + let msg_hash = twox_64(data); + let round = vote.commitment.block_number; + let set_id = vote.commitment.validator_set_id; + self.known_peers.lock().note_vote_for(*sender, round); + + // Verify general usefulness of the message. + // We are going to discard old votes right away (without verification) + // Also we keep track of already received votes to avoid verifying duplicates. + { + let filter = self.gossip_filter.read(); + + match filter.consider_vote(round, set_id) { + Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE), + Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE), + Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE), + Consider::Accept => {}, + } + + if filter.is_known_vote(round, &msg_hash) { + return Action::Keep(self.votes_topic, benefit::KNOWN_VOTE_MESSAGE) + } + + // ensure authority is part of the set. + if !filter + .validator_set() + .map(|set| set.validators().contains(&vote.id)) + .unwrap_or(false) + { + debug!(target: LOG_TARGET, "Message from voter not in validator set: {}", vote.id); + return Action::Discard(cost::UNKNOWN_VOTER) + } + } + + if BeefyKeystore::verify(&vote.id, &vote.signature, &vote.commitment.encode()) { + self.gossip_filter.write().add_known_vote(round, msg_hash); + Action::Keep(self.votes_topic, benefit::VOTE_MESSAGE) + } else { + debug!( + target: LOG_TARGET, + "🥩 Bad signature on message: {:?}, from: {:?}", vote, sender + ); + Action::Discard(cost::BAD_SIGNATURE) + } + } + + fn validate_finality_proof( + &self, + proof: BeefyVersionedFinalityProof, + sender: &PeerId, + ) -> Action { + let (round, set_id) = proof_block_num_and_set_id::(&proof); + self.known_peers.lock().note_vote_for(*sender, round); + + let guard = self.gossip_filter.read(); + // Verify general usefulness of the justification. + match guard.consider_finality_proof(round, set_id) { + Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE), + Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE), + Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE), + Consider::Accept => {}, + } + // Verify justification signatures. 
+ guard + .validator_set() + .map(|validator_set| { + if let Err((_, signatures_checked)) = + verify_with_validator_set::(round, validator_set, &proof) + { + debug!( + target: LOG_TARGET, + "🥩 Bad signatures on message: {:?}, from: {:?}", proof, sender + ); + let mut cost = cost::INVALID_PROOF; + cost.value += + cost::PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked as i32); + Action::Discard(cost) + } else { + Action::Keep(self.justifs_topic, benefit::VALIDATED_PROOF) + } + }) + .unwrap_or(Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)) } } @@ -145,59 +369,59 @@ where fn validate( &self, - _context: &mut dyn ValidatorContext, + context: &mut dyn ValidatorContext, sender: &PeerId, mut data: &[u8], ) -> ValidationResult { - if let Ok(msg) = VoteMessage::, Public, Signature>::decode(&mut data) { - let msg_hash = twox_64(data); - let round = msg.commitment.block_number; - - // Verify general usefulness of the message. - // We are going to discard old votes right away (without verification) - // Also we keep track of already received votes to avoid verifying duplicates. - { - let known_votes = self.known_votes.read(); - - if !known_votes.is_live(&round) { - return ValidationResult::Discard - } - - if known_votes.is_known(&round, &msg_hash) { - return ValidationResult::ProcessAndKeep(self.topic) - } - } - - if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { - self.known_votes.write().add_known(&round, msg_hash); - self.known_peers.lock().note_vote_for(*sender, round); - return ValidationResult::ProcessAndKeep(self.topic) - } else { - // TODO: report peer - debug!( - target: LOG_TARGET, - "🥩 Bad signature on message: {:?}, from: {:?}", msg, sender + let raw = data; + let action = match GossipMessage::::decode(&mut data) { + Ok(GossipMessage::Vote(msg)) => self.validate_vote(msg, sender, raw), + Ok(GossipMessage::FinalityProof(proof)) => self.validate_finality_proof(proof, sender), + Err(e) => { + debug!(target: LOG_TARGET, "Error decoding message: {}", e); + let bytes = raw.len().min(i32::MAX as usize) as i32; + let cost = ReputationChange::new( + bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), + "BEEFY: Bad packet", ); - } + Action::Discard(cost) + }, + }; + match action { + Action::Keep(topic, cb) => { + self.report(*sender, cb); + context.broadcast_message(topic, data.to_vec(), false); + ValidationResult::ProcessAndKeep(topic) + }, + Action::Discard(cb) => { + self.report(*sender, cb); + ValidationResult::Discard + }, } - - ValidationResult::Discard } fn message_expired<'a>(&'a self) -> Box bool + 'a> { - let known_votes = self.known_votes.read(); - Box::new(move |_topic, mut data| { - let msg = match VoteMessage::, Public, Signature>::decode(&mut data) { - Ok(vote) => vote, - Err(_) => return true, - }; - - let round = msg.commitment.block_number; - let expired = !known_votes.is_live(&round); - - trace!(target: LOG_TARGET, "🥩 Message for round #{} expired: {}", round, expired); - - expired + let filter = self.gossip_filter.read(); + Box::new(move |_topic, mut data| match GossipMessage::::decode(&mut data) { + Ok(GossipMessage::Vote(msg)) => { + let round = msg.commitment.block_number; + let set_id = msg.commitment.validator_set_id; + let expired = filter.consider_vote(round, set_id) != Consider::Accept; + trace!(target: LOG_TARGET, "🥩 Vote for round #{} expired: {}", round, expired); + expired + }, + Ok(GossipMessage::FinalityProof(proof)) => { + let (round, set_id) = proof_block_num_and_set_id::(&proof); + let expired = filter.consider_finality_proof(round, 
set_id) != Consider::Accept; + trace!( + target: LOG_TARGET, + "🥩 Finality proof for round #{} expired: {}", + round, + expired + ); + expired + }, + Err(_) => true, }) } @@ -208,6 +432,7 @@ where let now = Instant::now(); let mut next_rebroadcast = self.next_rebroadcast.lock(); if now >= *next_rebroadcast { + trace!(target: LOG_TARGET, "🥩 Gossip rebroadcast"); *next_rebroadcast = now + REBROADCAST_AFTER; true } else { @@ -215,116 +440,81 @@ where } }; - let known_votes = self.known_votes.read(); + let filter = self.gossip_filter.read(); Box::new(move |_who, intent, _topic, mut data| { if let MessageIntent::PeriodicRebroadcast = intent { return do_rebroadcast } - let msg = match VoteMessage::, Public, Signature>::decode(&mut data) { - Ok(vote) => vote, - Err(_) => return false, - }; - - let round = msg.commitment.block_number; - let allowed = known_votes.is_live(&round); - - trace!(target: LOG_TARGET, "🥩 Message for round #{} allowed: {}", round, allowed); - - allowed + match GossipMessage::::decode(&mut data) { + Ok(GossipMessage::Vote(msg)) => { + let round = msg.commitment.block_number; + let set_id = msg.commitment.validator_set_id; + let allowed = filter.consider_vote(round, set_id) == Consider::Accept; + trace!(target: LOG_TARGET, "🥩 Vote for round #{} allowed: {}", round, allowed); + allowed + }, + Ok(GossipMessage::FinalityProof(proof)) => { + let (round, set_id) = proof_block_num_and_set_id::(&proof); + let allowed = filter.consider_finality_proof(round, set_id) == Consider::Accept; + trace!( + target: LOG_TARGET, + "🥩 Finality proof for round #{} allowed: {}", + round, + allowed + ); + allowed + }, + Err(_) => false, + } }) } } #[cfg(test)] -mod tests { - use sc_keystore::LocalKeystore; - use sc_network_test::Block; - use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; - +pub(crate) mod tests { + use super::*; use crate::keystore::BeefyKeystore; + use sc_network_test::Block; use sp_consensus_beefy::{ - crypto::Signature, known_payloads, Commitment, Keyring, MmrRootHash, Payload, VoteMessage, - KEY_TYPE, + crypto::Signature, known_payloads, Commitment, Keyring, MmrRootHash, Payload, + SignedCommitment, VoteMessage, KEY_TYPE, }; - - use super::*; + use sp_keystore::{testing::MemoryKeystore, Keystore}; #[test] fn known_votes_insert_remove() { - let mut kv = KnownVotes::::new(); - - kv.insert(1); - kv.insert(1); - kv.insert(2); - assert_eq!(kv.live.len(), 2); - - let mut kv = KnownVotes::::new(); - kv.insert(1); - kv.insert(2); - kv.insert(3); - - assert!(kv.last_done.is_none()); - kv.conclude(2); - assert_eq!(kv.live.len(), 1); - assert!(!kv.live.contains_key(&2)); - assert_eq!(kv.last_done, Some(2)); - - kv.conclude(1); - assert_eq!(kv.last_done, Some(2)); - - kv.conclude(3); - assert_eq!(kv.last_done, Some(3)); - assert!(kv.live.is_empty()); - } - - #[test] - fn note_and_drop_round_works() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); - - gv.note_round(1u64); - - assert!(gv.known_votes.read().is_live(&1u64)); - - gv.note_round(3u64); - gv.note_round(7u64); - gv.note_round(10u64); - - assert_eq!(gv.known_votes.read().live.len(), 4); - - gv.conclude_round(7u64); - - let votes = gv.known_votes.read(); - - // rounds 1 and 3 are outdated, don't gossip anymore - assert!(!votes.is_live(&1u64)); - assert!(!votes.is_live(&3u64)); - // latest concluded round is still gossiped - assert!(votes.is_live(&7u64)); - // round 10 is alive and in-progress - assert!(votes.is_live(&10u64)); - } - - #[test] - fn note_same_round_twice() { - let gv = 
GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); - - gv.note_round(3u64); - gv.note_round(7u64); - gv.note_round(10u64); - - assert_eq!(gv.known_votes.read().live.len(), 3); - - // note round #7 again -> should not change anything - gv.note_round(7u64); - - let votes = gv.known_votes.read(); - - assert_eq!(votes.live.len(), 3); - - assert!(votes.is_live(&3u64)); - assert!(votes.is_live(&7u64)); - assert!(votes.is_live(&10u64)); + let mut filter = Filter::::new(); + let msg_hash = twox_64(b"data"); + let keys = vec![Keyring::Alice.public()]; + let validator_set = ValidatorSet::::new(keys.clone(), 1).unwrap(); + + filter.add_known_vote(1, msg_hash); + filter.add_known_vote(1, msg_hash); + filter.add_known_vote(2, msg_hash); + assert_eq!(filter.live_votes.len(), 2); + + filter.add_known_vote(3, msg_hash); + assert!(filter.is_known_vote(3, &msg_hash)); + assert!(!filter.is_known_vote(3, &twox_64(b"other"))); + assert!(!filter.is_known_vote(4, &msg_hash)); + assert_eq!(filter.live_votes.len(), 3); + + assert!(filter.inner.is_none()); + assert_eq!(filter.consider_vote(1, 1), Consider::RejectOutOfScope); + + filter.update(GossipFilterCfg { start: 3, end: 10, validator_set: &validator_set }); + assert_eq!(filter.live_votes.len(), 1); + assert!(filter.live_votes.contains_key(&3)); + assert_eq!(filter.consider_vote(2, 1), Consider::RejectPast); + assert_eq!(filter.consider_vote(3, 1), Consider::Accept); + assert_eq!(filter.consider_vote(4, 1), Consider::Accept); + assert_eq!(filter.consider_vote(20, 1), Consider::RejectFuture); + assert_eq!(filter.consider_vote(4, 2), Consider::RejectFuture); + + let validator_set = ValidatorSet::::new(keys, 2).unwrap(); + filter.update(GossipFilterCfg { start: 5, end: 10, validator_set: &validator_set }); + assert!(filter.live_votes.is_empty()); } struct TestContext; @@ -333,9 +523,7 @@ mod tests { todo!() } - fn broadcast_message(&mut self, _topic: B::Hash, _message: Vec, _force: bool) { - todo!() - } + fn broadcast_message(&mut self, _topic: B::Hash, _message: Vec, _force: bool) {} fn send_message(&mut self, _who: &sc_network::PeerId, _message: Vec) { todo!() @@ -346,15 +534,14 @@ mod tests { } } - fn sign_commitment(who: &Keyring, commitment: &Commitment) -> Signature { - let store: SyncCryptoStorePtr = std::sync::Arc::new(LocalKeystore::in_memory()); - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&who.to_seed())).unwrap(); - let beefy_keystore: BeefyKeystore = Some(store).into(); - + pub fn sign_commitment(who: &Keyring, commitment: &Commitment) -> Signature { + let store = MemoryKeystore::new(); + store.ecdsa_generate_new(KEY_TYPE, Some(&who.to_seed())).unwrap(); + let beefy_keystore: BeefyKeystore = Some(store.into()).into(); beefy_keystore.sign(&who.public(), &commitment.encode()).unwrap() } - fn dummy_vote(block_number: u64) -> VoteMessage { + fn dummy_vote(block_number: u64) -> VoteMessage { let payload = Payload::from_single_entry( known_payloads::MMR_ROOT_ID, MmrRootHash::default().encode(), @@ -365,53 +552,166 @@ mod tests { VoteMessage { commitment, id: Keyring::Alice.public(), signature } } + pub fn dummy_proof( + block_number: u64, + validator_set: &ValidatorSet, + ) -> BeefyVersionedFinalityProof { + let payload = Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + MmrRootHash::default().encode(), + ); + let commitment = Commitment { payload, block_number, validator_set_id: validator_set.id() }; + let signatures = validator_set + .validators() + .iter() + .map(|validator: &AuthorityId| { + 
Some(sign_commitment(&Keyring::from_public(validator).unwrap(), &commitment)) + }) + .collect(); + + BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment, signatures }) + } + #[test] - fn should_avoid_verifying_signatures_twice() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); - let sender = sc_network::PeerId::random(); + fn should_validate_messages() { + let keys = vec![Keyring::Alice.public()]; + let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); + let (gv, mut report_stream) = + GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let sender = PeerId::random(); let mut context = TestContext; - let vote = dummy_vote(3); + // reject message, decoding error + let bad_encoding = b"0000000000".as_slice(); + let expected_cost = ReputationChange::new( + (bad_encoding.len() as i32).saturating_mul(cost::PER_UNDECODABLE_BYTE), + "BEEFY: Bad packet", + ); + let mut expected_report = PeerReport { who: sender, cost_benefit: expected_cost }; + let res = gv.validate(&mut context, &sender, bad_encoding); + assert!(matches!(res, ValidationResult::Discard)); + assert_eq!(report_stream.try_recv().unwrap(), expected_report); - gv.note_round(3u64); - gv.note_round(7u64); - gv.note_round(10u64); + // verify votes validation + + let vote = dummy_vote(3); + let encoded = GossipMessage::::Vote(vote.clone()).encode(); - // first time the cache should be populated - let res = gv.validate(&mut context, &sender, &vote.encode()); + // filter not initialized + let res = gv.validate(&mut context, &sender, &encoded); + assert!(matches!(res, ValidationResult::Discard)); + expected_report.cost_benefit = cost::OUT_OF_SCOPE_MESSAGE; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); + gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set }); + // nothing in cache first time + let res = gv.validate(&mut context, &sender, &encoded); assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + expected_report.cost_benefit = benefit::VOTE_MESSAGE; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); assert_eq!( - gv.known_votes.read().live.get(&vote.commitment.block_number).map(|x| x.len()), + gv.gossip_filter + .read() + .live_votes + .get(&vote.commitment.block_number) + .map(|x| x.len()), Some(1) ); // second time we should hit the cache - let res = gv.validate(&mut context, &sender, &vote.encode()); - + let res = gv.validate(&mut context, &sender, &encoded); assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + expected_report.cost_benefit = benefit::KNOWN_VOTE_MESSAGE; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); + + // reject vote, voter not in validator set + let mut bad_vote = vote.clone(); + bad_vote.id = Keyring::Bob.public(); + let bad_vote = GossipMessage::::Vote(bad_vote).encode(); + let res = gv.validate(&mut context, &sender, &bad_vote); + assert!(matches!(res, ValidationResult::Discard)); + expected_report.cost_benefit = cost::UNKNOWN_VOTER; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); + + // reject if the round is not GRANDPA finalized + gv.update_filter(GossipFilterCfg { start: 1, end: 2, validator_set: &validator_set }); + let number = vote.commitment.block_number; + let set_id = vote.commitment.validator_set_id; + assert_eq!(gv.gossip_filter.read().consider_vote(number, set_id), Consider::RejectFuture); + let res = gv.validate(&mut context, &sender, &encoded); + assert!(matches!(res, ValidationResult::Discard)); + 
expected_report.cost_benefit = cost::FUTURE_MESSAGE; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); + + // reject if the round is not live anymore + gv.update_filter(GossipFilterCfg { start: 7, end: 10, validator_set: &validator_set }); + let number = vote.commitment.block_number; + let set_id = vote.commitment.validator_set_id; + assert_eq!(gv.gossip_filter.read().consider_vote(number, set_id), Consider::RejectPast); + let res = gv.validate(&mut context, &sender, &encoded); + assert!(matches!(res, ValidationResult::Discard)); + expected_report.cost_benefit = cost::OUTDATED_MESSAGE; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); - // next we should quickly reject if the round is not live - gv.conclude_round(7_u64); + // now verify proofs validation - assert!(!gv.known_votes.read().is_live(&vote.commitment.block_number)); + // reject old proof + let proof = dummy_proof(5, &validator_set); + let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let res = gv.validate(&mut context, &sender, &encoded_proof); + assert!(matches!(res, ValidationResult::Discard)); + expected_report.cost_benefit = cost::OUTDATED_MESSAGE; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); - let res = gv.validate(&mut context, &sender, &vote.encode()); + // accept next proof with good set_id + let proof = dummy_proof(7, &validator_set); + let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let res = gv.validate(&mut context, &sender, &encoded_proof); + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + expected_report.cost_benefit = benefit::VALIDATED_PROOF; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); + // accept future proof with good set_id + let proof = dummy_proof(20, &validator_set); + let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let res = gv.validate(&mut context, &sender, &encoded_proof); + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + expected_report.cost_benefit = benefit::VALIDATED_PROOF; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); + + // reject proof, future set_id + let bad_validator_set = ValidatorSet::::new(keys, 1).unwrap(); + let proof = dummy_proof(20, &bad_validator_set); + let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); + expected_report.cost_benefit = cost::FUTURE_MESSAGE; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); + + // reject proof, bad signatures (Bob instead of Alice) + let bad_validator_set = + ValidatorSet::::new(vec![Keyring::Bob.public()], 0).unwrap(); + let proof = dummy_proof(20, &bad_validator_set); + let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let res = gv.validate(&mut context, &sender, &encoded_proof); + assert!(matches!(res, ValidationResult::Discard)); + expected_report.cost_benefit = cost::INVALID_PROOF; + expected_report.cost_benefit.value += cost::PER_SIGNATURE_CHECKED; + assert_eq!(report_stream.try_recv().unwrap(), expected_report); } #[test] fn messages_allowed_and_expired() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let keys = vec![Keyring::Alice.public()]; + let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); + let (gv, _) = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: 
&validator_set }); let sender = sc_network::PeerId::random(); let topic = Default::default(); let intent = MessageIntent::Broadcast; - // note round 2 and 3, then conclude 2 - gv.note_round(2u64); - gv.note_round(3u64); - gv.conclude_round(2u64); + // conclude 2 + gv.update_filter(GossipFilterCfg { start: 2, end: 10, validator_set: &validator_set }); let mut allowed = gv.message_allowed(); let mut expired = gv.message_expired(); @@ -421,32 +721,68 @@ mod tests { // inactive round 1 -> expired let vote = dummy_vote(1); - let mut encoded_vote = vote.encode(); + let mut encoded_vote = GossipMessage::::Vote(vote).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(expired(topic, &mut encoded_vote)); + let proof = dummy_proof(1, &validator_set); + let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + assert!(!allowed(&sender, intent, &topic, &mut encoded_proof)); + assert!(expired(topic, &mut encoded_proof)); // active round 2 -> !expired - concluded but still gossiped let vote = dummy_vote(2); - let mut encoded_vote = vote.encode(); + let mut encoded_vote = GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); + let proof = dummy_proof(2, &validator_set); + let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); + assert!(!expired(topic, &mut encoded_proof)); + // using wrong set_id -> !allowed, expired + let bad_validator_set = ValidatorSet::::new(keys.clone(), 1).unwrap(); + let proof = dummy_proof(2, &bad_validator_set); + let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + assert!(!allowed(&sender, intent, &topic, &mut encoded_proof)); + assert!(expired(topic, &mut encoded_proof)); // in progress round 3 -> !expired let vote = dummy_vote(3); - let mut encoded_vote = vote.encode(); + let mut encoded_vote = GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); + let proof = dummy_proof(3, &validator_set); + let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); + assert!(!expired(topic, &mut encoded_proof)); // unseen round 4 -> !expired - let vote = dummy_vote(3); - let mut encoded_vote = vote.encode(); + let vote = dummy_vote(4); + let mut encoded_vote = GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); + let proof = dummy_proof(4, &validator_set); + let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); + assert!(!expired(topic, &mut encoded_proof)); + + // future round 11 -> expired + let vote = dummy_vote(11); + let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + assert!(!allowed(&sender, intent, &topic, &mut encoded_vote)); + assert!(expired(topic, &mut encoded_vote)); + // future proofs allowed while same set_id -> allowed + let proof = dummy_proof(11, &validator_set); + let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); + assert!(!expired(topic, &mut encoded_proof)); } #[test] fn messages_rebroadcast() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let keys = 
vec![Keyring::Alice.public()]; + let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); + let (gv, _) = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set }); let sender = sc_network::PeerId::random(); let topic = Default::default(); diff --git a/client/consensus/beefy/src/communication/mod.rs b/client/consensus/beefy/src/communication/mod.rs index 295d549bb1ba8..d8e4d22053628 100644 --- a/client/consensus/beefy/src/communication/mod.rs +++ b/client/consensus/beefy/src/communication/mod.rs @@ -29,7 +29,7 @@ pub(crate) mod beefy_protocol_name { use sc_network::ProtocolName; /// BEEFY votes gossip protocol name suffix. - const GOSSIP_NAME: &str = "/beefy/1"; + const GOSSIP_NAME: &str = "/beefy/2"; /// BEEFY justifications protocol name suffix. const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; @@ -73,6 +73,39 @@ pub fn beefy_peers_set_config( cfg } +// cost scalars for reporting peers. +mod cost { + use sc_network::ReputationChange as Rep; + // Message that's for an outdated round. + pub(super) const OUTDATED_MESSAGE: Rep = Rep::new(-50, "BEEFY: Past message"); + // Message that's from the future relative to our current set-id. + pub(super) const FUTURE_MESSAGE: Rep = Rep::new(-100, "BEEFY: Future message"); + // Vote message containing bad signature. + pub(super) const BAD_SIGNATURE: Rep = Rep::new(-100, "BEEFY: Bad signature"); + // Message received with vote from voter not in validator set. + pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "BEEFY: Unknown voter"); + // A message received that cannot be evaluated relative to our current state. + pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "BEEFY: Out-of-scope message"); + // Message containing invalid proof. + pub(super) const INVALID_PROOF: Rep = Rep::new(-5000, "BEEFY: Invalid commit"); + // Reputation cost per signature checked for invalid proof. + pub(super) const PER_SIGNATURE_CHECKED: i32 = -25; + // Reputation cost per byte for un-decodable message. + pub(super) const PER_UNDECODABLE_BYTE: i32 = -5; + // On-demand request was refused by peer. + pub(super) const REFUSAL_RESPONSE: Rep = Rep::new(-100, "BEEFY: Proof request refused"); + // On-demand request for a proof that can't be found in the backend. + pub(super) const UNKOWN_PROOF_REQUEST: Rep = Rep::new(-150, "BEEFY: Unknown proof request"); +} + +// benefit scalars for reporting peers. 
+mod benefit { + use sc_network::ReputationChange as Rep; + pub(super) const VOTE_MESSAGE: Rep = Rep::new(100, "BEEFY: Round vote message"); + pub(super) const KNOWN_VOTE_MESSAGE: Rep = Rep::new(50, "BEEFY: Known vote"); + pub(super) const VALIDATED_PROOF: Rep = Rep::new(100, "BEEFY: Justification"); +} + #[cfg(test)] mod tests { use super::*; @@ -86,7 +119,7 @@ mod tests { let genesis_hash = H256::random(); let genesis_hex = array_bytes::bytes2hex("", genesis_hash.as_ref()); - let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let expected_gossip_name = format!("/{}/beefy/2", genesis_hex); let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); @@ -101,7 +134,7 @@ mod tests { ]; let genesis_hex = "32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93"; - let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let expected_gossip_name = format!("/{}/beefy/2", genesis_hex); let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); diff --git a/client/consensus/beefy/src/communication/peers.rs b/client/consensus/beefy/src/communication/peers.rs index c2fb06faddf0c..4704b8dcf4576 100644 --- a/client/consensus/beefy/src/communication/peers.rs +++ b/client/consensus/beefy/src/communication/peers.rs @@ -18,13 +18,17 @@ //! Logic for keeping track of BEEFY peers. -// TODO (issue #12296): replace this naive peer tracking with generic one that infers data -// from multiple network protocols. - -use sc_network::PeerId; +use sc_network::{PeerId, ReputationChange}; use sp_runtime::traits::{Block, NumberFor, Zero}; use std::collections::{HashMap, VecDeque}; +/// Report specifying a reputation change for a given peer. +#[derive(Debug, PartialEq)] +pub(crate) struct PeerReport { + pub who: PeerId, + pub cost_benefit: ReputationChange, +} + struct PeerData { last_voted_on: NumberFor, } diff --git a/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs index 1670e99828831..d4f4b59f0195e 100644 --- a/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -32,9 +32,12 @@ use sp_runtime::traits::Block; use std::{marker::PhantomData, sync::Arc}; use crate::{ - communication::request_response::{ - on_demand_justifications_protocol_config, Error, JustificationRequest, - BEEFY_SYNC_LOG_TARGET, + communication::{ + cost, + request_response::{ + on_demand_justifications_protocol_config, Error, JustificationRequest, + BEEFY_SYNC_LOG_TARGET, + }, }, metric_inc, metrics::{register_metrics, OnDemandIncomingRequestsMetrics}, @@ -69,17 +72,20 @@ impl IncomingRequest { /// Params: /// - The raw request to decode /// - Reputation changes to apply for the peer in case decoding fails. 
- pub fn try_from_raw( + pub fn try_from_raw( raw: netconfig::IncomingRequest, - reputation_changes: Vec, - ) -> Result { + reputation_changes_on_err: F, + ) -> Result + where + F: FnOnce(usize) -> Vec, + { let netconfig::IncomingRequest { payload, peer, pending_response } = raw; let payload = match JustificationRequest::decode(&mut payload.as_ref()) { Ok(payload) => payload, Err(err) => { let response = netconfig::OutgoingResponse { result: Err(()), - reputation_changes, + reputation_changes: reputation_changes_on_err(payload.len()), sent_feedback: None, }; if let Err(_) = pending_response.send(response) { @@ -111,11 +117,11 @@ impl IncomingRequestReceiver { pub async fn recv(&mut self, reputation_changes: F) -> Result, Error> where B: Block, - F: FnOnce() -> Vec, + F: FnOnce(usize) -> Vec, { let req = match self.raw.next().await { None => return Err(Error::RequestChannelExhausted), - Some(raw) => IncomingRequest::::try_from_raw(raw, reputation_changes())?, + Some(raw) => IncomingRequest::::try_from_raw(raw, reputation_changes)?, }; Ok(req) } @@ -159,26 +165,20 @@ where // Sends back justification response if justification found in client backend. fn handle_request(&self, request: IncomingRequest) -> Result<(), Error> { - // TODO (issue #12293): validate `request` and change peer reputation for invalid requests. - - let maybe_encoded_proof = if let Some(hash) = - self.client.block_hash(request.payload.begin).map_err(Error::Client)? - { - self.client - .justifications(hash) - .map_err(Error::Client)? - .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) - // No BEEFY justification present. - .ok_or(()) - } else { - Err(()) - }; - + let mut reputation_changes = vec![]; + let maybe_encoded_proof = self + .client + .block_hash(request.payload.begin) + .ok() + .flatten() + .and_then(|hash| self.client.justifications(hash).ok().flatten()) + .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) + .ok_or_else(|| reputation_changes.push(cost::UNKOWN_PROOF_REQUEST)); request .pending_response .send(netconfig::OutgoingResponse { result: maybe_encoded_proof, - reputation_changes: Vec::new(), + reputation_changes, sent_feedback: None, }) .map_err(|_| Error::SendResponse) @@ -188,7 +188,17 @@ where pub async fn run(mut self) { trace!(target: BEEFY_SYNC_LOG_TARGET, "🥩 Running BeefyJustifsRequestHandler"); - while let Ok(request) = self.request_receiver.recv(|| vec![]).await { + while let Ok(request) = self + .request_receiver + .recv(|bytes| { + let bytes = bytes.min(i32::MAX as usize) as i32; + vec![ReputationChange::new( + bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), + "BEEFY: Bad request payload", + )] + }) + .await + { let peer = request.peer; match self.handle_request(request) { Ok(()) => { @@ -199,8 +209,8 @@ where ) }, Err(e) => { + // peer reputation changes already applied in `self.handle_request()` metric_inc!(self, beefy_failed_justification_responses); - // TODO (issue #12293): apply reputation changes here based on error type. 
debug!( target: BEEFY_SYNC_LOG_TARGET, "🥩 Failed to handle BEEFY justification request from {:?}: {}", peer, e, diff --git a/client/consensus/beefy/src/communication/request_response/mod.rs b/client/consensus/beefy/src/communication/request_response/mod.rs index c528d06bbe0c5..545ab18cf1d34 100644 --- a/client/consensus/beefy/src/communication/request_response/mod.rs +++ b/client/consensus/beefy/src/communication/request_response/mod.rs @@ -30,7 +30,7 @@ use codec::{Decode, Encode, Error as CodecError}; use sc_network::{config::RequestResponseConfig, PeerId}; use sp_runtime::traits::{Block, NumberFor}; -use crate::communication::beefy_protocol_name::justifications_protocol_name; +use crate::communication::{beefy_protocol_name::justifications_protocol_name, peers::PeerReport}; use incoming_requests_handler::IncomingRequestReceiver; // 10 seems reasonable, considering justifs are explicitly requested only @@ -76,7 +76,7 @@ pub struct JustificationRequest { } #[derive(Debug, thiserror::Error)] -pub enum Error { +pub(crate) enum Error { #[error(transparent)] Client(#[from] sp_blockchain::Error), @@ -99,5 +99,8 @@ pub enum Error { SendResponse, #[error("Received invalid response.")] - InvalidResponse, + InvalidResponse(PeerReport), + + #[error("Internal error while getting response.")] + ResponseError, } diff --git a/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs index fbf464bd639d9..10105ff2d417d 100644 --- a/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -31,7 +31,11 @@ use sp_runtime::traits::{Block, NumberFor}; use std::{collections::VecDeque, result::Result, sync::Arc}; use crate::{ - communication::request_response::{Error, JustificationRequest, BEEFY_SYNC_LOG_TARGET}, + communication::{ + benefit, cost, + peers::PeerReport, + request_response::{Error, JustificationRequest, BEEFY_SYNC_LOG_TARGET}, + }, justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, metric_inc, metrics::{register_metrics, OnDemandOutgoingRequestsMetrics}, @@ -54,6 +58,16 @@ enum State { AwaitingResponse(PeerId, RequestInfo, ResponseReceiver), } +/// Possible engine responses. +pub(crate) enum ResponseInfo { + /// No peer response available yet. + Pending, + /// Valid justification provided alongside peer reputation changes. + ValidProof(BeefyVersionedFinalityProof, PeerReport), + /// No justification yet, only peer reputation changes. + PeerReport(PeerReport), +} + pub struct OnDemandJustificationsEngine { network: Arc, protocol_name: ProtocolName, @@ -84,12 +98,10 @@ impl OnDemandJustificationsEngine { } fn reset_peers_cache_for_block(&mut self, block: NumberFor) { - // TODO (issue #12296): replace peer selection with generic one that involves all protocols. self.peers_cache = self.live_peers.lock().further_than(block); } fn try_next_peer(&mut self) -> Option { - // TODO (issue #12296): replace peer selection with generic one that involves all protocols. 
let live = self.live_peers.lock(); while let Some(peer) = self.peers_cache.pop_front() { if live.contains(&peer) { @@ -159,24 +171,19 @@ impl OnDemandJustificationsEngine { fn process_response( &mut self, - peer: PeerId, + peer: &PeerId, req_info: &RequestInfo, response: Result, ) -> Result, Error> { response .map_err(|e| { - metric_inc!(self, beefy_on_demand_justification_peer_hang_up); debug!( target: BEEFY_SYNC_LOG_TARGET, - "🥩 for on demand justification #{:?}, peer {:?} hung up: {:?}", - req_info.block, - peer, - e + "🥩 on-demand sc-network channel sender closed, err: {:?}", e ); - Error::InvalidResponse + Error::ResponseError })? .map_err(|e| { - metric_inc!(self, beefy_on_demand_justification_peer_error); debug!( target: BEEFY_SYNC_LOG_TARGET, "🥩 for on demand justification #{:?}, peer {:?} error: {:?}", @@ -184,7 +191,18 @@ impl OnDemandJustificationsEngine { peer, e ); - Error::InvalidResponse + match e { + RequestFailure::Refused => { + metric_inc!(self, beefy_on_demand_justification_peer_refused); + let peer_report = + PeerReport { who: *peer, cost_benefit: cost::REFUSAL_RESPONSE }; + Error::InvalidResponse(peer_report) + }, + _ => { + metric_inc!(self, beefy_on_demand_justification_peer_error); + Error::ResponseError + }, + } }) .and_then(|encoded| { decode_and_verify_finality_proof::( @@ -192,23 +210,26 @@ impl OnDemandJustificationsEngine { req_info.block, &req_info.active_set, ) - .map_err(|e| { + .map_err(|(err, signatures_checked)| { metric_inc!(self, beefy_on_demand_justification_invalid_proof); debug!( target: BEEFY_SYNC_LOG_TARGET, "🥩 for on demand justification #{:?}, peer {:?} responded with invalid proof: {:?}", - req_info.block, peer, e + req_info.block, peer, err ); - Error::InvalidResponse + let mut cost = cost::INVALID_PROOF; + cost.value += + cost::PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked as i32); + Error::InvalidResponse(PeerReport { who: *peer, cost_benefit: cost }) }) }) } - pub async fn next(&mut self) -> Option> { + pub(crate) async fn next(&mut self) -> ResponseInfo { let (peer, req_info, resp) = match &mut self.state { State::Idle => { futures::future::pending::<()>().await; - return None + return ResponseInfo::Pending }, State::AwaitingResponse(peer, req_info, receiver) => { let resp = receiver.await; @@ -220,8 +241,8 @@ impl OnDemandJustificationsEngine { self.state = State::Idle; let block = req_info.block; - self.process_response(peer, &req_info, resp) - .map_err(|_| { + match self.process_response(&peer, &req_info, resp) { + Err(err) => { // No valid justification received, try next peer in our set. if let Some(peer) = self.try_next_peer() { self.request_from_peer(peer, req_info); @@ -231,15 +252,22 @@ impl OnDemandJustificationsEngine { "🥩 ran out of peers to request justif #{:?} from", block ); } - }) - .map(|proof| { + // Report peer based on error type. 
+ if let Error::InvalidResponse(peer_report) = err { + ResponseInfo::PeerReport(peer_report) + } else { + ResponseInfo::Pending + } + }, + Ok(proof) => { metric_inc!(self, beefy_on_demand_justification_good_proof); debug!( target: BEEFY_SYNC_LOG_TARGET, "🥩 received valid on-demand justif #{:?} from {:?}", block, peer ); - proof - }) - .ok() + let peer_report = PeerReport { who: peer, cost_benefit: benefit::VALIDATED_PROOF }; + ResponseInfo::ValidProof(proof, peer_report) + }, + } } } diff --git a/client/consensus/beefy/src/import.rs b/client/consensus/beefy/src/import.rs index dd2ed92ef8353..bda8169d95013 100644 --- a/client/consensus/beefy/src/import.rs +++ b/client/consensus/beefy/src/import.rs @@ -109,6 +109,7 @@ where .ok_or_else(|| ImportError("Unknown validator set".to_string()))?; decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) + .map_err(|(err, _)| err) } } diff --git a/client/consensus/beefy/src/justification.rs b/client/consensus/beefy/src/justification.rs index 1bd250b2a25f3..731acdfa63389 100644 --- a/client/consensus/beefy/src/justification.rs +++ b/client/consensus/beefy/src/justification.rs @@ -21,38 +21,47 @@ use codec::{Decode, Encode}; use sp_consensus::Error as ConsensusError; use sp_consensus_beefy::{ crypto::{AuthorityId, Signature}, - ValidatorSet, VersionedFinalityProof, + ValidatorSet, ValidatorSetId, VersionedFinalityProof, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// A finality proof with matching BEEFY authorities' signatures. -pub type BeefyVersionedFinalityProof = - sp_consensus_beefy::VersionedFinalityProof, Signature>; +pub type BeefyVersionedFinalityProof = VersionedFinalityProof, Signature>; + +pub(crate) fn proof_block_num_and_set_id( + proof: &BeefyVersionedFinalityProof, +) -> (NumberFor, ValidatorSetId) { + match proof { + VersionedFinalityProof::V1(sc) => + (sc.commitment.block_number, sc.commitment.validator_set_id), + } +} /// Decode and verify a Beefy FinalityProof. pub(crate) fn decode_and_verify_finality_proof( encoded: &[u8], target_number: NumberFor, validator_set: &ValidatorSet, -) -> Result, ConsensusError> { +) -> Result, (ConsensusError, u32)> { let proof = >::decode(&mut &*encoded) - .map_err(|_| ConsensusError::InvalidJustification)?; + .map_err(|_| (ConsensusError::InvalidJustification, 0))?; verify_with_validator_set::(target_number, validator_set, &proof).map(|_| proof) } /// Verify the Beefy finality proof against the validator set at the block it was generated. 
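
`decode_and_verify_finality_proof` and `verify_with_validator_set` below now return the number of signatures that were actually checked alongside the error, which is what lets `process_response` scale the peer penalty for an invalid proof. A self-contained sketch of that accumulation, with stand-in values for `cost::INVALID_PROOF` and `cost::PER_SIGNATURE_CHECKED` (the real constants live in the `cost` module and are not shown here):

```rust
// Sketch only: `Rep` stands in for `sc_network::ReputationChange`; the numeric
// values are placeholders for `cost::INVALID_PROOF` and `cost::PER_SIGNATURE_CHECKED`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Rep {
    value: i32,
    reason: &'static str,
}

const INVALID_PROOF: Rep = Rep { value: -5000, reason: "BEEFY: Invalid proof" }; // placeholder
const PER_SIGNATURE_CHECKED: i32 = -25; // placeholder

/// Scale the base penalty for an invalid proof by the number of signatures the
/// verifier had to check before rejecting it, mirroring `process_response`.
fn invalid_proof_cost(signatures_checked: u32) -> Rep {
    let mut cost = INVALID_PROOF;
    cost.value = cost
        .value
        .saturating_add(PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked as i32));
    cost
}

fn main() {
    // Proof rejected before any signature was verified (e.g. decode failure).
    assert_eq!(invalid_proof_cost(0), Rep { value: -5000, reason: "BEEFY: Invalid proof" });
    // Three signatures were checked before the proof turned out to be invalid.
    assert_eq!(invalid_proof_cost(3).value, -5075);
}
```
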
-fn verify_with_validator_set( +pub(crate) fn verify_with_validator_set( target_number: NumberFor, validator_set: &ValidatorSet, proof: &BeefyVersionedFinalityProof, -) -> Result<(), ConsensusError> { +) -> Result<(), (ConsensusError, u32)> { + let mut signatures_checked = 0u32; match proof { VersionedFinalityProof::V1(signed_commitment) => { if signed_commitment.signatures.len() != validator_set.len() || signed_commitment.commitment.validator_set_id != validator_set.id() || signed_commitment.commitment.block_number != target_number { - return Err(ConsensusError::InvalidJustification) + return Err((ConsensusError::InvalidJustification, 0)) } // Arrangement of signatures in the commitment should be in the same order @@ -65,14 +74,17 @@ fn verify_with_validator_set( .filter(|(id, signature)| { signature .as_ref() - .map(|sig| BeefyKeystore::verify(id, sig, &message[..])) + .map(|sig| { + signatures_checked += 1; + BeefyKeystore::verify(id, sig, &message[..]) + }) .unwrap_or(false) }) .count(); if valid_signatures >= crate::round::threshold(validator_set.len()) { Ok(()) } else { - Err(ConsensusError::InvalidJustification) + Err((ConsensusError::InvalidJustification, signatures_checked)) } }, } @@ -119,16 +131,16 @@ pub(crate) mod tests { // wrong block number -> should fail verification let good_proof = proof.clone().into(); match verify_with_validator_set::(block_num + 1, &validator_set, &good_proof) { - Err(ConsensusError::InvalidJustification) => (), - _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + Err((ConsensusError::InvalidJustification, 0)) => (), + e => assert!(false, "Got unexpected {:?}", e), }; // wrong validator set id -> should fail verification let good_proof = proof.clone().into(); let other = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); match verify_with_validator_set::(block_num, &other, &good_proof) { - Err(ConsensusError::InvalidJustification) => (), - _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + Err((ConsensusError::InvalidJustification, 0)) => (), + e => assert!(false, "Got unexpected {:?}", e), }; // wrong signatures length -> should fail verification @@ -139,8 +151,8 @@ pub(crate) mod tests { }; bad_signed_commitment.signatures.pop().flatten().unwrap(); match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { - Err(ConsensusError::InvalidJustification) => (), - _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + Err((ConsensusError::InvalidJustification, 0)) => (), + e => assert!(false, "Got unexpected {:?}", e), }; // not enough signatures -> should fail verification @@ -150,9 +162,9 @@ pub(crate) mod tests { }; // remove a signature (but same length) *bad_signed_commitment.signatures.first_mut().unwrap() = None; - match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { - Err(ConsensusError::InvalidJustification) => (), - _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + match verify_with_validator_set::(block_num, &validator_set, &bad_proof.into()) { + Err((ConsensusError::InvalidJustification, 2)) => (), + e => assert!(false, "Got unexpected {:?}", e), }; // not enough _correct_ signatures -> should fail verification @@ -163,9 +175,9 @@ pub(crate) mod tests { // change a signature to a different key *bad_signed_commitment.signatures.first_mut().unwrap() = Some(Keyring::Dave.sign(&bad_signed_commitment.commitment.encode())); - match verify_with_validator_set::(block_num + 1, &validator_set, 
&bad_proof.into()) { - Err(ConsensusError::InvalidJustification) => (), - _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + match verify_with_validator_set::(block_num, &validator_set, &bad_proof.into()) { + Err((ConsensusError::InvalidJustification, 3)) => (), + e => assert!(false, "Got unexpected {:?}", e), }; } diff --git a/client/consensus/beefy/src/keystore.rs b/client/consensus/beefy/src/keystore.rs index 421f7149018c8..795d4cc8ade10 100644 --- a/client/consensus/beefy/src/keystore.rs +++ b/client/consensus/beefy/src/keystore.rs @@ -18,7 +18,7 @@ use sp_application_crypto::RuntimeAppPublic; use sp_core::keccak_256; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; use log::warn; @@ -33,9 +33,9 @@ use crate::{error, LOG_TARGET}; pub(crate) type BeefySignatureHasher = sp_runtime::traits::Keccak256; /// A BEEFY specific keystore implemented as a `Newtype`. This is basically a -/// wrapper around [`sp_keystore::SyncCryptoStore`] and allows to customize +/// wrapper around [`sp_keystore::Keystore`] and allows to customize /// common cryptographic functionality. -pub(crate) struct BeefyKeystore(Option); +pub(crate) struct BeefyKeystore(Option); impl BeefyKeystore { /// Check if the keystore contains a private key for one of the public keys @@ -50,7 +50,7 @@ impl BeefyKeystore { // we do check for multiple private keys as a key store sanity check. let public: Vec = keys .iter() - .filter(|k| SyncCryptoStore::has_keys(&*store, &[(k.to_raw_vec(), KEY_TYPE)])) + .filter(|k| store.has_keys(&[(k.to_raw_vec(), KEY_TYPE)])) .cloned() .collect(); @@ -77,7 +77,8 @@ impl BeefyKeystore { let msg = keccak_256(message); let public = public.as_ref(); - let sig = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, public, &msg) + let sig = store + .ecdsa_sign_prehashed(KEY_TYPE, public, &msg) .map_err(|e| error::Error::Keystore(e.to_string()))? .ok_or_else(|| error::Error::Signature("ecdsa_sign_prehashed() failed".to_string()))?; @@ -94,10 +95,8 @@ impl BeefyKeystore { pub fn public_keys(&self) -> Result, error::Error> { let store = self.0.clone().ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; - let pk: Vec = SyncCryptoStore::ecdsa_public_keys(&*store, KEY_TYPE) - .drain(..) 
- .map(Public::from) - .collect(); + let pk: Vec = + store.ecdsa_public_keys(KEY_TYPE).drain(..).map(Public::from).collect(); Ok(pk) } @@ -110,26 +109,23 @@ impl BeefyKeystore { } } -impl From> for BeefyKeystore { - fn from(store: Option) -> BeefyKeystore { +impl From> for BeefyKeystore { + fn from(store: Option) -> BeefyKeystore { BeefyKeystore(store) } } #[cfg(test)] pub mod tests { - use std::sync::Arc; - - use sc_keystore::LocalKeystore; - use sp_core::{ecdsa, Pair}; - use sp_consensus_beefy::{crypto, Keyring}; + use sp_core::{ecdsa, Pair}; + use sp_keystore::testing::MemoryKeystore; use super::*; use crate::error::Error; - fn keystore() -> SyncCryptoStorePtr { - Arc::new(LocalKeystore::in_memory()) + fn keystore() -> KeystorePtr { + MemoryKeystore::new().into() } #[test] @@ -197,11 +193,11 @@ pub mod tests { fn authority_id_works() { let store = keystore(); - let alice: crypto::Public = - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) - .ok() - .unwrap() - .into(); + let alice: crypto::Public = store + .ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); let bob = Keyring::Bob.public(); let charlie = Keyring::Charlie.public(); @@ -223,11 +219,11 @@ pub mod tests { fn sign_works() { let store = keystore(); - let alice: crypto::Public = - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) - .ok() - .unwrap() - .into(); + let alice: crypto::Public = store + .ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); let store: BeefyKeystore = Some(store).into(); @@ -243,10 +239,7 @@ pub mod tests { fn sign_error() { let store = keystore(); - let _ = - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Bob.to_seed())) - .ok() - .unwrap(); + store.ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Bob.to_seed())).ok().unwrap(); let store: BeefyKeystore = Some(store).into(); @@ -275,11 +268,11 @@ pub mod tests { fn verify_works() { let store = keystore(); - let alice: crypto::Public = - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) - .ok() - .unwrap() - .into(); + let alice: crypto::Public = store + .ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); let store: BeefyKeystore = Some(store).into(); @@ -301,9 +294,8 @@ pub mod tests { let store = keystore(); - let add_key = |key_type, seed: Option<&str>| { - SyncCryptoStore::ecdsa_generate_new(&*store, key_type, seed).unwrap() - }; + let add_key = + |key_type, seed: Option<&str>| store.ecdsa_generate_new(key_type, seed).unwrap(); // test keys let _ = add_key(TEST_TYPE, Some(Keyring::Alice.to_seed().as_str())); diff --git a/client/consensus/beefy/src/lib.rs b/client/consensus/beefy/src/lib.rs index eb56a97de1dd9..d3e5e4bc68936 100644 --- a/client/consensus/beefy/src/lib.rs +++ b/client/consensus/beefy/src/lib.rs @@ -49,10 +49,14 @@ use sp_consensus_beefy::{ crypto::AuthorityId, BeefyApi, MmrRootHash, PayloadProvider, ValidatorSet, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, }; -use sp_keystore::SyncCryptoStorePtr; +use sp_keystore::KeystorePtr; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::{Block, Zero}; -use std::{collections::VecDeque, marker::PhantomData, sync::Arc}; +use std::{ + collections::{BTreeMap, VecDeque}, + marker::PhantomData, + sync::Arc, +}; mod aux_schema; mod error; @@ -197,7 +201,7 @@ pub struct BeefyParams { /// Runtime Api Provider pub runtime: Arc, /// Local key store - 
pub key_store: Option, + pub key_store: Option, /// BEEFY voter network params pub network_params: BeefyNetworkParams, /// Minimal delta between blocks, BEEFY should vote for @@ -247,9 +251,12 @@ pub async fn start_beefy_gadget( } = network_params; let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let gossip_validator = - Arc::new(communication::gossip::GossipValidator::new(known_peers.clone())); - let mut gossip_engine = sc_network_gossip::GossipEngine::new( + // Default votes filter is to discard everything. + // Validator is updated later with correct starting round and set id. + let (gossip_validator, gossip_report_stream) = + communication::gossip::GossipValidator::new(known_peers.clone()); + let gossip_validator = Arc::new(gossip_validator); + let mut gossip_engine = GossipEngine::new( network.clone(), sync.clone(), gossip_protocol_name, @@ -284,8 +291,16 @@ pub async fn start_beefy_gadget( return }, }; + // Update the gossip validator with the right starting round and set id. + if let Err(e) = persisted_state + .gossip_filter_config() + .map(|f| gossip_validator.update_filter(f)) + { + error!(target: LOG_TARGET, "Error: {:?}. Terminating.", e); + return + } - let worker_params = worker::WorkerParams { + let worker = worker::BeefyWorker { backend, payload_provider, runtime, @@ -293,14 +308,14 @@ pub async fn start_beefy_gadget( key_store: key_store.into(), gossip_engine, gossip_validator, + gossip_report_stream, on_demand_justifications, links, metrics, + pending_justifications: BTreeMap::new(), persisted_state, }; - let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); - futures::future::join( worker.run(block_import_justif, finality_notifications), on_demand_justifications_handler.run(), diff --git a/client/consensus/beefy/src/metrics.rs b/client/consensus/beefy/src/metrics.rs index 0ce48e60ebc84..031748bdceab5 100644 --- a/client/consensus/beefy/src/metrics.rs +++ b/client/consensus/beefy/src/metrics.rs @@ -45,10 +45,6 @@ pub struct VoterMetrics { pub beefy_lagging_sessions: Counter, /// Number of times no Authority public key found in store pub beefy_no_authority_found_in_store: Counter, - /// Number of currently buffered votes - pub beefy_buffered_votes: Gauge, - /// Number of votes dropped due to full buffers - pub beefy_buffered_votes_dropped: Counter, /// Number of good votes successfully handled pub beefy_good_votes_processed: Counter, /// Number of equivocation votes received @@ -65,8 +61,6 @@ pub struct VoterMetrics { pub beefy_imported_justifications: Counter, /// Number of justifications dropped due to full buffers pub beefy_buffered_justifications_dropped: Counter, - /// Trying to set Best Beefy block to old block - pub beefy_best_block_set_last_failure: Gauge, } impl PrometheusRegister for VoterMetrics { @@ -110,17 +104,6 @@ impl PrometheusRegister for VoterMetrics { )?, registry, )?, - beefy_buffered_votes: register( - Gauge::new("substrate_beefy_buffered_votes", "Number of currently buffered votes")?, - registry, - )?, - beefy_buffered_votes_dropped: register( - Counter::new( - "substrate_beefy_buffered_votes_dropped", - "Number of votes dropped due to full buffers", - )?, - registry, - )?, beefy_good_votes_processed: register( Counter::new( "substrate_beefy_successful_handled_votes", @@ -174,13 +157,6 @@ impl PrometheusRegister for VoterMetrics { )?, registry, )?, - beefy_best_block_set_last_failure: register( - Gauge::new( - "substrate_beefy_best_block_to_old_block", - "Trying to set Best Beefy block to old block", - )?, - registry, - 
)?, }) } } @@ -252,8 +228,8 @@ impl PrometheusRegister for OnDemandIncomingRequestsMetrics { pub struct OnDemandOutgoingRequestsMetrics { /// Number of times there was no good peer to request justification from pub beefy_on_demand_justification_no_peer_to_request_from: Counter, - /// Number of on-demand justification peer hang up - pub beefy_on_demand_justification_peer_hang_up: Counter, + /// Number of on-demand justification peer refused valid requests + pub beefy_on_demand_justification_peer_refused: Counter, /// Number of on-demand justification peer error pub beefy_on_demand_justification_peer_error: Counter, /// Number of on-demand justification invalid proof @@ -273,10 +249,10 @@ impl PrometheusRegister for OnDemandOutgoingRequestsMetrics { )?, registry, )?, - beefy_on_demand_justification_peer_hang_up: register( + beefy_on_demand_justification_peer_refused: register( Counter::new( - "substrate_beefy_on_demand_justification_peer_hang_up", - "Number of on-demand justification peer hang up", + "beefy_on_demand_justification_peer_refused", + "Number of on-demand justification peer refused valid requests", )?, registry, )?, diff --git a/client/consensus/beefy/src/round.rs b/client/consensus/beefy/src/round.rs index 64d03beeee854..d8948ff98c552 100644 --- a/client/consensus/beefy/src/round.rs +++ b/client/consensus/beefy/src/round.rs @@ -21,7 +21,7 @@ use crate::LOG_TARGET; use codec::{Decode, Encode}; use log::debug; use sp_consensus_beefy::{ - crypto::{AuthorityId, Public, Signature}, + crypto::{AuthorityId, Signature}, Commitment, EquivocationProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, }; use sp_runtime::traits::{Block, NumberFor}; @@ -33,11 +33,11 @@ use std::collections::BTreeMap; /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). 
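
The `signatures_checked` values asserted in the justification tests above (2 and 3 with a three-validator set) follow from BEEFY's supermajority rule: a commitment needs at least `authorities - (authorities - 1) / 3` valid signatures. The `threshold` function itself is only referenced, not modified, in this diff, so the version below is an illustrative restatement rather than a copy:

```rust
/// Illustrative restatement of the supermajority rule behind
/// `valid_signatures >= crate::round::threshold(validator_set.len())`:
/// tolerate `f = (n - 1) / 3` faulty authorities, require the remaining `n - f`.
fn threshold(authorities: usize) -> usize {
    let faulty = authorities.saturating_sub(1) / 3;
    authorities - faulty
}

fn main() {
    // With the 3 test authorities (Alice, Bob, Charlie) every signature must
    // verify, which is why dropping or corrupting a single one fails the proof.
    assert_eq!(threshold(3), 3);
    assert_eq!(threshold(4), 3);
    assert_eq!(threshold(7), 5);
}
```
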
#[derive(Debug, Decode, Default, Encode, PartialEq)] pub(crate) struct RoundTracker { - votes: BTreeMap, + votes: BTreeMap, } impl RoundTracker { - fn add_vote(&mut self, vote: (Public, Signature)) -> bool { + fn add_vote(&mut self, vote: (AuthorityId, Signature)) -> bool { if self.votes.contains_key(&vote.0) { return false } @@ -61,7 +61,7 @@ pub fn threshold(authorities: usize) -> usize { pub enum VoteImportResult { Ok, RoundConcluded(SignedCommitment, Signature>), - Equivocation(EquivocationProof, Public, Signature>), + Equivocation(EquivocationProof, AuthorityId, Signature>), Invalid, Stale, } @@ -73,9 +73,10 @@ pub enum VoteImportResult { #[derive(Debug, Decode, Encode, PartialEq)] pub(crate) struct Rounds { rounds: BTreeMap>, RoundTracker>, - previous_votes: BTreeMap<(Public, NumberFor), VoteMessage, Public, Signature>>, + previous_votes: + BTreeMap<(AuthorityId, NumberFor), VoteMessage, AuthorityId, Signature>>, session_start: NumberFor, - validator_set: ValidatorSet, + validator_set: ValidatorSet, mandatory_done: bool, best_done: Option>, } @@ -84,7 +85,10 @@ impl Rounds where B: Block, { - pub(crate) fn new(session_start: NumberFor, validator_set: ValidatorSet) -> Self { + pub(crate) fn new( + session_start: NumberFor, + validator_set: ValidatorSet, + ) -> Self { Rounds { rounds: BTreeMap::new(), previous_votes: BTreeMap::new(), @@ -95,7 +99,7 @@ where } } - pub(crate) fn validator_set(&self) -> &ValidatorSet { + pub(crate) fn validator_set(&self) -> &ValidatorSet { &self.validator_set } @@ -103,7 +107,7 @@ where self.validator_set.id() } - pub(crate) fn validators(&self) -> &[Public] { + pub(crate) fn validators(&self) -> &[AuthorityId] { self.validator_set.validators() } @@ -199,11 +203,11 @@ mod tests { use sc_network_test::Block; use sp_consensus_beefy::{ - crypto::Public, known_payloads::MMR_ROOT_ID, Commitment, EquivocationProof, Keyring, - Payload, SignedCommitment, ValidatorSet, VoteMessage, + known_payloads::MMR_ROOT_ID, Commitment, EquivocationProof, Keyring, Payload, + SignedCommitment, ValidatorSet, VoteMessage, }; - use super::{threshold, Block as BlockT, RoundTracker, Rounds}; + use super::{threshold, AuthorityId, Block as BlockT, RoundTracker, Rounds}; use crate::round::VoteImportResult; impl Rounds @@ -251,7 +255,7 @@ mod tests { fn new_rounds() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], 42, ) @@ -272,7 +276,7 @@ mod tests { fn add_and_conclude_votes() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![ Keyring::Alice.public(), Keyring::Bob.public(), @@ -338,7 +342,7 @@ mod tests { fn old_rounds_not_accepted() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], 42, ) @@ -384,7 +388,7 @@ mod tests { fn multiple_rounds() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], Default::default(), ) @@ -459,7 +463,7 @@ mod tests { fn should_provide_equivocation_proof() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public()], Default::default(), ) diff --git 
a/client/consensus/beefy/src/tests.rs b/client/consensus/beefy/src/tests.rs index 27dc8d81915aa..48ecebdac3581 100644 --- a/client/consensus/beefy/src/tests.rs +++ b/client/consensus/beefy/src/tests.rs @@ -21,15 +21,19 @@ use crate::{ aux_schema::{load_persistent, tests::verify_persisted_version}, beefy_block_import_and_links, - communication::request_response::{ - on_demand_justifications_protocol_config, BeefyJustifsRequestHandler, + communication::{ + gossip::{ + proofs_topic, tests::sign_commitment, votes_topic, GossipFilterCfg, GossipMessage, + GossipValidator, + }, + request_response::{on_demand_justifications_protocol_config, BeefyJustifsRequestHandler}, }, gossip_protocol_name, justification::*, load_or_init_voter_state, wait_for_runtime_pallet, BeefyRPCLinks, BeefyVoterLinks, KnownPeers, PersistedState, }; -use futures::{future, stream::FuturesUnordered, Future, StreamExt}; +use futures::{future, stream::FuturesUnordered, Future, FutureExt, StreamExt}; use parking_lot::Mutex; use sc_client_api::{Backend as BackendT, BlockchainEvents, FinalityNotifications, HeaderBackend}; use sc_consensus::{ @@ -48,16 +52,16 @@ use sp_consensus::BlockOrigin; use sp_consensus_beefy::{ crypto::{AuthorityId, Signature}, known_payloads, - mmr::MmrRootProvider, + mmr::{find_mmr_root_digest, MmrRootProvider}, BeefyApi, Commitment, ConsensusLog, EquivocationProof, Keyring as BeefyKeyring, MmrRootHash, OpaqueKeyOwnershipProof, Payload, SignedCommitment, ValidatorSet, ValidatorSetId, - VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, + VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; use sp_core::H256; -use sp_keystore::{testing::KeyStore as TestKeystore, SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; use sp_mmr_primitives::{Error as MmrError, MmrApi}; use sp_runtime::{ - codec::Encode, + codec::{Decode, Encode}, traits::{Header as HeaderT, NumberFor}, BuildStorage, DigestItem, EncodedJustification, Justifications, Storage, }; @@ -339,11 +343,12 @@ pub(crate) fn make_beefy_ids(keys: &[BeefyKeyring]) -> Vec { keys.iter().map(|&key| key.public().into()).collect() } -pub(crate) fn create_beefy_keystore(authority: BeefyKeyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(TestKeystore::new()); - SyncCryptoStore::ecdsa_generate_new(&*keystore, BeefyKeyType, Some(&authority.to_seed())) - .expect("Creates authority key"); +pub(crate) fn create_beefy_keystore(authority: BeefyKeyring) -> KeystorePtr { + let keystore = MemoryKeystore::new(); keystore + .ecdsa_generate_new(BeefyKeyType, Some(&authority.to_seed())) + .expect("Creates authority key"); + keystore.into() } async fn voter_init_setup( @@ -353,8 +358,8 @@ async fn voter_init_setup( ) -> sp_blockchain::Result> { let backend = net.peer(0).client().as_backend(); let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let gossip_validator = - Arc::new(crate::communication::gossip::GossipValidator::new(known_peers)); + let (gossip_validator, _) = GossipValidator::new(known_peers); + let gossip_validator = Arc::new(gossip_validator); let mut gossip_engine = sc_network_gossip::GossipEngine::new( net.peer(0).network_service().clone(), net.peer(0).sync_service().clone(), @@ -502,16 +507,15 @@ async fn wait_for_beefy_signed_commitments( run_until(wait_for, net).await; } -async fn streams_empty_after_timeout( +async fn streams_empty_after_future( streams: Vec>, - net: &Arc>, - timeout: Option, + future: Option, ) where T: std::fmt::Debug, T: 
std::cmp::PartialEq, { - if let Some(timeout) = timeout { - run_for(timeout, net).await; + if let Some(future) = future { + future.await; } for mut stream in streams.into_iter() { future::poll_fn(move |cx| { @@ -522,21 +526,31 @@ async fn streams_empty_after_timeout( } } +async fn streams_empty_after_timeout( + streams: Vec>, + net: &Arc>, + timeout: Option, +) where + T: std::fmt::Debug, + T: std::cmp::PartialEq, +{ + let timeout = timeout.map(|timeout| Box::pin(run_for(timeout, net))); + streams_empty_after_future(streams, timeout).await; +} + async fn finalize_block_and_wait_for_beefy( net: &Arc>, // peer index and key peers: impl Iterator + Clone, - finalize_targets: &[H256], + finalize_target: &H256, expected_beefy: &[u64], ) { let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - for block in finalize_targets { - peers.clone().for_each(|(index, _)| { - let client = net.lock().peer(index).client().as_client(); - client.finalize_block(*block, None).unwrap(); - }) - } + peers.clone().for_each(|(index, _)| { + let client = net.lock().peer(index).client().as_client(); + client.finalize_block(*finalize_target, None).unwrap(); + }); if expected_beefy.is_empty() { // run for quarter second then verify no new best beefy block available @@ -574,31 +588,32 @@ async fn beefy_finalizing_blocks() { let peers = peers.into_iter().enumerate(); // finalize block #5 -> BEEFY should finalize #1 (mandatory) and #5 from diff-power-of-two rule. - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[1], hashes[5]], &[1, 5]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[1], &[1]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[5], &[5]).await; // GRANDPA finalize #10 -> BEEFY finalize #10 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[10]], &[10]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[10], &[10]).await; // GRANDPA finalize #18 -> BEEFY finalize #14, then #18 (diff-power-of-two rule) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[18]], &[14, 18]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[18], &[14, 18]).await; // GRANDPA finalize #20 -> BEEFY finalize #20 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[20]], &[20]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[20], &[20]).await; // GRANDPA finalize #21 -> BEEFY finalize nothing (yet) because min delta is 4 - finalize_block_and_wait_for_beefy(&net, peers, &[hashes[21]], &[]).await; + finalize_block_and_wait_for_beefy(&net, peers, &hashes[21], &[]).await; } #[tokio::test] async fn lagging_validators() { sp_tracing::try_init_simple(); - let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 30; let min_block_delta = 1; - let mut net = BeefyTestNet::new(2); + let mut net = BeefyTestNet::new(3); let api = Arc::new(TestApi::with_validator_set(&validator_set)); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); tokio::spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); @@ -611,55 +626,47 @@ async fn lagging_validators() { let peers = peers.into_iter().enumerate(); // finalize block #15 -> BEEFY should finalize #1 (mandatory) and #9, #13, #14, #15 from // diff-power-of-two 
rule. - finalize_block_and_wait_for_beefy( - &net, - peers.clone(), - &[hashes[1], hashes[15]], - &[1, 9, 13, 14, 15], - ) - .await; - - // Alice finalizes #25, Bob lags behind + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[1], &[1]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[15], &[9, 13, 14, 15]).await; + + // Alice and Bob finalize #25, Charlie lags behind let finalize = hashes[25]; let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY - let timeout = Some(Duration::from_millis(250)); + let timeout = Some(Duration::from_millis(100)); streams_empty_after_timeout(best_blocks, &net, timeout).await; streams_empty_after_timeout(versioned_finality_proof, &net, None).await; - // Bob catches up and also finalizes #25 + // Charlie catches up and also finalizes #25 let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); - // expected beefy finalizes block #17 from diff-power-of-two + net.lock().peer(2).client().as_client().finalize_block(finalize, None).unwrap(); + // expected beefy finalizes blocks 23, 24, 25 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &[23, 24, 25]).await; wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &[23, 24, 25]).await; // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 - finalize_block_and_wait_for_beefy( - &net, - peers.clone(), - &[hashes[30], hashes[32]], - &[30, 31, 32], - ) - .await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[30], &[30]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[32], &[31, 32]).await; // Verify that session-boundary votes get buffered by client and only processed once // session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session // validator set). 
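
The "diff-power-of-two rule" referenced by these tests describes how the voter picks its next target once the mandatory session block is done: step from the best BEEFY block towards the GRANDPA head by roughly half the gap, rounded up to a power of two and bounded below by `min_block_delta`. The actual target selection lives in `worker.rs` and is untouched by this hunk; the model below is only an illustration, chosen because it reproduces the block numbers expected in `lagging_validators` (with `min_block_delta = 1`):

```rust
/// Illustrative model (not the code under test) of a "diff-power-of-two" stepping
/// rule: move from the best BEEFY block towards the GRANDPA head by roughly half
/// the gap, rounded up to a power of two, at least `min_block_delta`, and never
/// past the GRANDPA head.
fn next_target(best_beefy: u64, best_grandpa: u64, min_block_delta: u64) -> Option<u64> {
    let diff = best_grandpa.saturating_sub(best_beefy).saturating_add(1);
    let step = (diff / 2).next_power_of_two().max(min_block_delta);
    let target = best_beefy + step;
    (target <= best_grandpa).then_some(target)
}

fn main() {
    // Reproduces the `lagging_validators` expectation: GRANDPA at #15, mandatory
    // #1 already concluded -> BEEFY concludes #9, #13, #14 and #15 in turn.
    let mut best_beefy = 1;
    let mut concluded = vec![];
    while let Some(target) = next_target(best_beefy, 15, 1) {
        concluded.push(target);
        best_beefy = target;
    }
    assert_eq!(concluded, vec![9, 13, 14, 15]);
}
```
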
- // Alice finalizes session-boundary mandatory block #60, Bob lags behind + // Alice and Bob finalize session-boundary mandatory block #60, Charlie lags behind let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); let finalize = hashes[60]; net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY - let timeout = Some(Duration::from_millis(250)); + let timeout = Some(Duration::from_millis(100)); streams_empty_after_timeout(best_blocks, &net, timeout).await; streams_empty_after_timeout(versioned_finality_proof, &net, None).await; - // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) + // Charlie catches up and also finalizes #60 (and should have buffered Alice's vote on #60) let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); - net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(2).client().as_client().finalize_block(finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 wait_for_best_beefy_blocks(best_blocks, &net, &[60]).await; wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &[60]).await; @@ -696,7 +703,8 @@ async fn correct_beefy_payload() { let net = Arc::new(Mutex::new(net)); let peers = peers.into_iter().enumerate(); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. - finalize_block_and_wait_for_beefy(&net, peers, &[hashes[1], hashes[10]], &[1, 9]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[1], &[1]).await; + finalize_block_and_wait_for_beefy(&net, peers, &hashes[10], &[9]).await; let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); @@ -708,7 +716,7 @@ async fn correct_beefy_payload() { net.lock().peer(3).client().as_client().finalize_block(hashof11, None).unwrap(); // verify consensus is _not_ reached - let timeout = Some(Duration::from_millis(250)); + let timeout = Some(Duration::from_millis(100)); streams_empty_after_timeout(best_blocks, &net, timeout).await; streams_empty_after_timeout(versioned_finality_proof, &net, None).await; @@ -874,39 +882,6 @@ async fn beefy_importing_justifications() { } } -#[tokio::test] -async fn voter_initialization() { - sp_tracing::try_init_simple(); - // Regression test for voter initialization where finality notifications were dropped - // after waiting for BEEFY pallet availability. - - let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); - let session_len = 5; - // Should vote on all mandatory blocks no matter the `min_block_delta`. - let min_block_delta = 10; - - let mut net = BeefyTestNet::new(2); - let api = Arc::new(TestApi::with_validator_set(&validator_set)); - let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); - tokio::spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); - - // push 26 blocks - let hashes = net.generate_blocks_and_sync(26, session_len, &validator_set, false).await; - let net = Arc::new(Mutex::new(net)); - - // Finalize multiple blocks at once to get a burst of finality notifications right from start. - // Need to finalize at least one block in each session, choose randomly. 
- // Expect voters to pick up all of them and BEEFY-finalize the mandatory blocks of each session. - finalize_block_and_wait_for_beefy( - &net, - peers.into_iter().enumerate(), - &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[24], hashes[26]], - &[1, 5, 10, 15, 20, 25], - ) - .await; -} - #[tokio::test] async fn on_demand_beefy_justification_sync() { sp_tracing::try_init_simple(); @@ -940,13 +915,11 @@ async fn on_demand_beefy_justification_sync() { let net = Arc::new(Mutex::new(net)); // With 3 active voters and one inactive, consensus should happen and blocks BEEFY-finalized. // Need to finalize at least one block in each session, choose randomly. - finalize_block_and_wait_for_beefy( - &net, - fast_peers.clone(), - &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[23]], - &[1, 5, 10, 15, 20], - ) - .await; + finalize_block_and_wait_for_beefy(&net, fast_peers.clone(), &hashes[1], &[1]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers.clone(), &hashes[6], &[5]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers.clone(), &hashes[10], &[10]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers.clone(), &hashes[17], &[15]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers.clone(), &hashes[24], &[20]).await; // Spawn Dave, they are now way behind voting and can only catch up through on-demand justif // sync. @@ -971,7 +944,8 @@ async fn on_demand_beefy_justification_sync() { // freshly spun up Dave now needs to listen for gossip to figure out the state of their peers. // Have the other peers do some gossip so Dave finds out about their progress. - finalize_block_and_wait_for_beefy(&net, fast_peers, &[hashes[25], hashes[29]], &[25, 29]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers.clone(), &hashes[25], &[25]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers, &hashes[29], &[29]).await; // Kick Dave's async loop by finalizing another block. client.finalize_block(hashes[2], None).unwrap(); @@ -982,13 +956,14 @@ async fn on_demand_beefy_justification_sync() { // Give all tasks some cpu cycles to burn through their events queues, run_for(Duration::from_millis(100), &net).await; // then verify Dave catches up through on-demand justification requests. 
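
Dave's catch-up relies on the on-demand justifications engine introduced above: its `next()` now yields a `ResponseInfo` instead of a plain `Option`, so the caller can both import valid proofs and report misbehaving peers. The worker's full select loop is not shown in this diff; the sketch below only illustrates the three-way dispatch, using simplified stand-ins for `ResponseInfo` and `PeerReport` with a string peer id and raw bytes in place of the real proof type:

```rust
// Sketch only: simplified stand-ins for the engine's `ResponseInfo` and `PeerReport`
// types, with a string peer id and raw bytes in place of the real proof type.
struct PeerReport {
    who: &'static str,
    cost_benefit: i32,
}

enum ResponseInfo {
    /// No peer response available yet.
    Pending,
    /// Valid justification plus a positive reputation change for the peer.
    ValidProof(Vec<u8>, PeerReport),
    /// No justification, only a reputation change (refusal, invalid proof, ...).
    PeerReport(PeerReport),
}

/// One way a consumer could dispatch on the three outcomes; the real handling
/// happens in the BEEFY worker's select loop, which this diff only shows in part.
fn handle(resp: ResponseInfo, reports: &mut Vec<(&'static str, i32)>) -> Option<Vec<u8>> {
    match resp {
        ResponseInfo::ValidProof(proof, report) => {
            // Good proof: reward the peer and hand the proof to the import path.
            reports.push((report.who, report.cost_benefit));
            Some(proof)
        },
        ResponseInfo::PeerReport(report) => {
            // No proof, but the peer misbehaved: apply the penalty.
            reports.push((report.who, report.cost_benefit));
            None
        },
        // Engine idle or still waiting on a peer: nothing to do yet.
        ResponseInfo::Pending => None,
    }
}

fn main() {
    let mut reports = Vec::new();
    let good = PeerReport { who: "dave", cost_benefit: 100 };
    assert_eq!(
        handle(ResponseInfo::ValidProof(vec![1, 2, 3], good), &mut reports),
        Some(vec![1, 2, 3])
    );
    assert!(handle(ResponseInfo::Pending, &mut reports).is_none());
    assert_eq!(reports, vec![("dave", 100)]);
}
```
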
- finalize_block_and_wait_for_beefy( - &net, - [(dave_index, BeefyKeyring::Dave)].into_iter(), - &[hashes[6], hashes[10], hashes[17], hashes[24], hashes[26]], - &[5, 10, 15, 20, 25], - ) - .await; + let (dave_best_blocks, _) = + get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); + client.finalize_block(hashes[6], None).unwrap(); + client.finalize_block(hashes[10], None).unwrap(); + client.finalize_block(hashes[17], None).unwrap(); + client.finalize_block(hashes[24], None).unwrap(); + client.finalize_block(hashes[26], None).unwrap(); + wait_for_best_beefy_blocks(dave_best_blocks, &net, &[5, 10, 15, 20, 25]).await; } #[tokio::test] @@ -1022,13 +997,8 @@ async fn should_initialize_voter_at_genesis() { // verify next vote target is mandatory block 1 assert_eq!(persisted_state.best_beefy_block(), 0); - assert_eq!(persisted_state.best_grandpa_block(), 13); - assert_eq!( - persisted_state - .voting_oracle() - .voting_target(persisted_state.best_beefy_block(), 13), - Some(1) - ); + assert_eq!(persisted_state.best_grandpa_number(), 13); + assert_eq!(persisted_state.voting_oracle().voting_target(), Some(1)); // verify state also saved to db assert!(verify_persisted_version(&*backend)); @@ -1070,13 +1040,8 @@ async fn should_initialize_voter_at_custom_genesis() { // verify next vote target is mandatory block 7 assert_eq!(persisted_state.best_beefy_block(), 0); - assert_eq!(persisted_state.best_grandpa_block(), 8); - assert_eq!( - persisted_state - .voting_oracle() - .voting_target(persisted_state.best_beefy_block(), 13), - Some(custom_pallet_genesis) - ); + assert_eq!(persisted_state.best_grandpa_number(), 8); + assert_eq!(persisted_state.voting_oracle().voting_target(), Some(custom_pallet_genesis)); // verify state also saved to db assert!(verify_persisted_version(&*backend)); @@ -1128,14 +1093,9 @@ async fn should_initialize_voter_when_last_final_is_session_boundary() { // verify block 10 is correctly marked as finalized assert_eq!(persisted_state.best_beefy_block(), 10); - assert_eq!(persisted_state.best_grandpa_block(), 13); + assert_eq!(persisted_state.best_grandpa_number(), 13); // verify next vote target is diff-power-of-two block 12 - assert_eq!( - persisted_state - .voting_oracle() - .voting_target(persisted_state.best_beefy_block(), 13), - Some(12) - ); + assert_eq!(persisted_state.voting_oracle().voting_target(), Some(12)); // verify state also saved to db assert!(verify_persisted_version(&*backend)); @@ -1186,13 +1146,8 @@ async fn should_initialize_voter_at_latest_finalized() { // verify next vote target is 13 assert_eq!(persisted_state.best_beefy_block(), 12); - assert_eq!(persisted_state.best_grandpa_block(), 13); - assert_eq!( - persisted_state - .voting_oracle() - .voting_target(persisted_state.best_beefy_block(), 13), - Some(13) - ); + assert_eq!(persisted_state.best_grandpa_number(), 13); + assert_eq!(persisted_state.voting_oracle().voting_target(), Some(13)); // verify state also saved to db assert!(verify_persisted_version(&*backend)); @@ -1225,13 +1180,13 @@ async fn beefy_finalizing_after_pallet_genesis() { // Minimum BEEFY block delta is 1. // GRANDPA finalize blocks leading up to BEEFY pallet genesis -> BEEFY should finalize nothing. - finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[1..14], &[]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[14], &[]).await; // GRANDPA finalize block #16 -> BEEFY should finalize #15 (genesis mandatory) and #16. 
- finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[16]], &[15, 16]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[16], &[15, 16]).await; // GRANDPA finalize #21 -> BEEFY finalize #20 (mandatory) and #21 - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[21]], &[20, 21]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[21], &[20, 21]).await; } #[tokio::test] @@ -1249,14 +1204,14 @@ async fn beefy_reports_equivocations() { let mut api_alice = TestApi::with_validator_set(&validator_set); api_alice.allow_equivocations(); let api_alice = Arc::new(api_alice); - let alice = (0, &peers[0], api_alice.clone()); + let alice = (0, &BeefyKeyring::Alice, api_alice.clone()); tokio::spawn(initialize_beefy(&mut net, vec![alice], min_block_delta)); // Bob votes on bad MMR roots, equivocations are allowed/expected. let mut api_bob = TestApi::new(1, &validator_set, BAD_MMR_ROOT); api_bob.allow_equivocations(); let api_bob = Arc::new(api_bob); - let bob = (1, &peers[1], api_bob.clone()); + let bob = (1, &BeefyKeyring::Bob, api_bob.clone()); tokio::spawn(initialize_beefy(&mut net, vec![bob], min_block_delta)); // We spawn another node voting with Bob key, on alternate bad MMR roots (equivocating). @@ -1276,7 +1231,7 @@ async fn beefy_reports_equivocations() { let peers = peers.into_iter().enumerate(); // finalize block #1 -> BEEFY should not finalize anything (each node votes on different MMR). - finalize_block_and_wait_for_beefy(&net, peers, &[hashes[1]], &[]).await; + finalize_block_and_wait_for_beefy(&net, peers, &hashes[1], &[]).await; // Verify neither Bob or Bob_Prime report themselves as equivocating. assert!(api_bob.reported_equivocations.as_ref().unwrap().lock().is_empty()); @@ -1289,3 +1244,143 @@ async fn beefy_reports_equivocations() { assert_eq!(equivocation_proof.first.id, BeefyKeyring::Bob.public()); assert_eq!(equivocation_proof.first.commitment.block_number, 1); } + +#[tokio::test] +async fn gossipped_finality_proofs() { + sp_tracing::try_init_simple(); + + let validators = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; + // Only Alice and Bob are running the voter -> finality threshold not reached + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&validators), 0).unwrap(); + let session_len = 30; + let min_block_delta = 1; + + let mut net = BeefyTestNet::new(3); + let api = Arc::new(TestApi::with_validator_set(&validator_set)); + let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); + + let charlie = &net.peers[2]; + let known_peers = Arc::new(Mutex::new(KnownPeers::::new())); + // Charlie will run just the gossip engine and not the full voter. + let (gossip_validator, _) = GossipValidator::new(known_peers); + let charlie_gossip_validator = Arc::new(gossip_validator); + charlie_gossip_validator.update_filter(GossipFilterCfg:: { + start: 1, + end: 10, + validator_set: &validator_set, + }); + let mut charlie_gossip_engine = sc_network_gossip::GossipEngine::new( + charlie.network_service().clone(), + charlie.sync_service().clone(), + beefy_gossip_proto_name(), + charlie_gossip_validator.clone(), + None, + ); + + // Alice and Bob run full voter. + tokio::spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); + + let net = Arc::new(Mutex::new(net)); + + // Pump net + Charlie gossip to see peers. 
+ let timeout = Box::pin(tokio::time::sleep(Duration::from_millis(200))); + let gossip_engine_pump = &mut charlie_gossip_engine; + let pump_with_timeout = future::select(gossip_engine_pump, timeout); + run_until(pump_with_timeout, &net).await; + + // push 10 blocks + let hashes = net.lock().generate_blocks_and_sync(10, session_len, &validator_set, true).await; + + let peers = peers.into_iter().enumerate(); + + // Alice, Bob and Charlie finalize #1, Alice and Bob vote on it, but not Charlie. + let finalize = hashes[1]; + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); + net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(2).client().as_client().finalize_block(finalize, None).unwrap(); + // verify nothing gets finalized by BEEFY + let timeout = Box::pin(tokio::time::sleep(Duration::from_millis(100))); + let pump_net = futures::future::poll_fn(|cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }); + let pump_gossip = &mut charlie_gossip_engine; + let pump_with_timeout = future::select(pump_gossip, future::select(pump_net, timeout)); + streams_empty_after_future(best_blocks, Some(pump_with_timeout)).await; + streams_empty_after_timeout(versioned_finality_proof, &net, None).await; + + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); + // Charlie gossips finality proof for #1 -> Alice and Bob also finalize. + let proof = crate::communication::gossip::tests::dummy_proof(1, &validator_set); + let gossip_proof = GossipMessage::::FinalityProof(proof); + let encoded_proof = gossip_proof.encode(); + charlie_gossip_engine.gossip_message(proofs_topic::(), encoded_proof, true); + // Expect #1 is finalized. + wait_for_best_beefy_blocks(best_blocks, &net, &[1]).await; + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &[1]).await; + + // Code above verifies gossipped finality proofs are correctly imported and consumed by voters. + // Next, let's verify finality proofs are correctly generated and gossipped by voters. + + // Everyone finalizes #2 + let block_number = 2u64; + let finalize = hashes[block_number as usize]; + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); + net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(2).client().as_client().finalize_block(finalize, None).unwrap(); + + // Simulate Charlie vote on #2 + let header = net.lock().peer(2).client().as_client().expect_header(finalize).unwrap(); + let mmr_root = find_mmr_root_digest::(&header).unwrap(); + let payload = Payload::from_single_entry(known_payloads::MMR_ROOT_ID, mmr_root.encode()); + let commitment = Commitment { payload, block_number, validator_set_id: validator_set.id() }; + let signature = sign_commitment(&BeefyKeyring::Charlie, &commitment); + let vote_message = VoteMessage { commitment, id: BeefyKeyring::Charlie.public(), signature }; + let encoded_vote = GossipMessage::::Vote(vote_message).encode(); + charlie_gossip_engine.gossip_message(votes_topic::(), encoded_vote, true); + + // Expect #2 is finalized. 
+ wait_for_best_beefy_blocks(best_blocks, &net, &[2]).await; + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &[2]).await; + + // Now verify Charlie also sees the gossipped proof generated by either Alice or Bob. + let mut charlie_gossip_proofs = Box::pin( + charlie_gossip_engine + .messages_for(proofs_topic::()) + .filter_map(|notification| async move { + GossipMessage::::decode(&mut ¬ification.message[..]).ok().and_then( + |message| match message { + GossipMessage::::Vote(_) => unreachable!(), + GossipMessage::::FinalityProof(proof) => Some(proof), + }, + ) + }) + .fuse(), + ); + loop { + let pump_net = futures::future::poll_fn(|cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }); + let mut gossip_engine = &mut charlie_gossip_engine; + futures::select! { + // pump gossip engine + _ = gossip_engine => unreachable!(), + // pump network + _ = pump_net.fuse() => unreachable!(), + // verify finality proof has been gossipped + proof = charlie_gossip_proofs.next() => { + let proof = proof.unwrap(); + let (round, _) = proof_block_num_and_set_id::(&proof); + match round { + 1 => continue, // finality proof generated by Charlie in the previous round + 2 => break, // finality proof generated by Alice or Bob and gossiped to Charlie + _ => panic!("Charlie got unexpected finality proof"), + } + }, + } + } +} diff --git a/client/consensus/beefy/src/worker.rs b/client/consensus/beefy/src/worker.rs index 0abb38d022ef6..c05de197d58fd 100644 --- a/client/consensus/beefy/src/worker.rs +++ b/client/consensus/beefy/src/worker.rs @@ -18,13 +18,14 @@ use crate::{ communication::{ - gossip::{topic, GossipValidator}, - request_response::outgoing_requests_engine::OnDemandJustificationsEngine, + gossip::{proofs_topic, votes_topic, GossipFilterCfg, GossipMessage, GossipValidator}, + peers::PeerReport, + request_response::outgoing_requests_engine::{OnDemandJustificationsEngine, ResponseInfo}, }, error::Error, justification::BeefyVersionedFinalityProof, keystore::{BeefyKeystore, BeefySignatureHasher}, - metric_get, metric_inc, metric_set, + metric_inc, metric_set, metrics::VoterMetrics, round::{Rounds, VoteImportResult}, BeefyVoterLinks, LOG_TARGET, @@ -34,7 +35,7 @@ use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_network_gossip::GossipEngine; -use sc_utils::notification::NotificationReceiver; +use sc_utils::{mpsc::TracingUnboundedReceiver, notification::NotificationReceiver}; use sp_api::{BlockId, ProvideRuntimeApi}; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; @@ -46,20 +47,15 @@ use sp_consensus_beefy::{ }; use sp_runtime::{ generic::OpaqueDigestItemId, - traits::{Block, ConstU32, Header, NumberFor, Zero}, - BoundedVec, SaturatedConversion, + traits::{Block, Header, NumberFor, Zero}, + SaturatedConversion, }; use std::{ collections::{BTreeMap, BTreeSet, VecDeque}, fmt::Debug, - marker::PhantomData, sync::Arc, }; -/// Bound for the number of buffered future voting rounds. -const MAX_BUFFERED_VOTE_ROUNDS: usize = 600; -/// Bound for the number of buffered votes per round number. -const MAX_BUFFERED_VOTES_PER_ROUND: u32 = 1000; /// Bound for the number of pending justifications - use 2400 - the max number /// of justifications possible in a single session. 
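
`MAX_BUFFERED_JUSTIFICATIONS` below caps the worker's new `pending_justifications` map (a `BTreeMap` keyed by block number) at one session's worth of entries; justifications that do not fit are dropped and counted by the `beefy_buffered_justifications_dropped` metric. The exact insertion path is not part of the shown hunks, so the following is only a rough model of how such a bounded buffer behaves:

```rust
use std::collections::BTreeMap;

/// From this diff: at most one session's worth of justifications is kept pending.
const MAX_BUFFERED_JUSTIFICATIONS: usize = 2400;

/// Rough model (not the diff's code) of a bounded pending-justifications buffer
/// keyed by block number; the real insertion and drop policy lives in `worker.rs`.
fn buffer_justification(
    pending: &mut BTreeMap<u64, Vec<u8>>,
    block: u64,
    encoded_proof: Vec<u8>,
) -> bool {
    if pending.len() < MAX_BUFFERED_JUSTIFICATIONS {
        // Keep at most one justification per block number.
        pending.entry(block).or_insert(encoded_proof);
        true
    } else {
        // Buffer full: drop it (the voter metrics count such drops).
        false
    }
}

fn main() {
    let mut pending = BTreeMap::new();
    assert!(buffer_justification(&mut pending, 42, vec![0u8; 8]));
    // `BTreeMap` keeps entries ordered by block number, so draining pending
    // justifications naturally proceeds from the lowest block upwards.
    assert_eq!(pending.keys().next(), Some(&42));
}
```
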
const MAX_BUFFERED_JUSTIFICATIONS: usize = 2400; @@ -87,23 +83,34 @@ pub(crate) struct VoterOracle { sessions: VecDeque>, /// Min delta in block numbers between two blocks, BEEFY should vote on. min_block_delta: u32, + /// Best block we received a GRANDPA finality for. + best_grandpa_block_header: ::Header, + /// Best block a BEEFY voting round has been concluded for. + best_beefy_block: NumberFor, } impl VoterOracle { /// Verify provided `sessions` satisfies requirements, then build `VoterOracle`. - pub fn checked_new(sessions: VecDeque>, min_block_delta: u32) -> Option { + pub fn checked_new( + sessions: VecDeque>, + min_block_delta: u32, + grandpa_header: ::Header, + best_beefy: NumberFor, + ) -> Option { let mut prev_start = Zero::zero(); let mut prev_validator_id = None; // verifies the let mut validate = || -> bool { - if sessions.is_empty() { + let best_grandpa = *grandpa_header.number(); + if sessions.is_empty() || best_beefy > best_grandpa { return false } for (idx, session) in sessions.iter().enumerate() { + let start = session.session_start(); if session.validators().is_empty() { return false } - if session.session_start() <= prev_start { + if start > best_grandpa || start <= prev_start { return false } #[cfg(not(test))] @@ -125,23 +132,35 @@ impl VoterOracle { sessions, // Always target at least one block better than current best beefy. min_block_delta: min_block_delta.max(1), + best_grandpa_block_header: grandpa_header, + best_beefy_block: best_beefy, }) } else { - error!(target: LOG_TARGET, "🥩 Invalid sessions queue: {:?}.", sessions); + error!( + target: LOG_TARGET, + "🥩 Invalid sessions queue: {:?}; best-beefy {:?} best-grandpa-header {:?}.", + sessions, + best_beefy, + grandpa_header + ); None } } // Return reference to rounds pertaining to first session in the queue. // Voting will always happen at the head of the queue. - fn active_rounds(&self) -> Option<&Rounds> { - self.sessions.front() + fn active_rounds(&self) -> Result<&Rounds, Error> { + self.sessions.front().ok_or(Error::UninitSession) } // Return mutable reference to rounds pertaining to first session in the queue. // Voting will always happen at the head of the queue. - fn active_rounds_mut(&mut self) -> Option<&mut Rounds> { - self.sessions.front_mut() + fn active_rounds_mut(&mut self) -> Result<&mut Rounds, Error> { + self.sessions.front_mut().ok_or(Error::UninitSession) + } + + fn current_validator_set(&self) -> Result<&ValidatorSet, Error> { + self.active_rounds().map(|r| r.validator_set()) } // Prune the sessions queue to keep the Oracle in one of the expected three states. @@ -164,7 +183,7 @@ impl VoterOracle { /// Finalize a particular block. pub fn finalize(&mut self, block: NumberFor) -> Result<(), Error> { // Conclude voting round for this block. - self.active_rounds_mut().ok_or(Error::UninitSession)?.conclude(block); + self.active_rounds_mut()?.conclude(block); // Prune any now "finalized" sessions from queue. self.try_prune(); Ok(()) @@ -182,30 +201,26 @@ impl VoterOracle { } /// Return `(A, B)` tuple representing inclusive [A, B] interval of votes to accept. - pub fn accepted_interval( - &self, - best_grandpa: NumberFor, - ) -> Result<(NumberFor, NumberFor), Error> { + pub fn accepted_interval(&self) -> Result<(NumberFor, NumberFor), Error> { let rounds = self.sessions.front().ok_or(Error::UninitSession)?; if rounds.mandatory_done() { // There's only one session active and its mandatory is done. - // Accept any GRANDPA finalized vote. 
- Ok((rounds.session_start(), best_grandpa.into())) + // Accept any vote for a GRANDPA finalized block in a better round. + Ok(( + rounds.session_start().max(self.best_beefy_block), + (*self.best_grandpa_block_header.number()).into(), + )) } else { - // There's at least one session with mandatory not done. + // Current session has mandatory not done. // Only accept votes for the mandatory block in the front of queue. Ok((rounds.session_start(), rounds.session_start())) } } /// Utility function to quickly decide what to do for each round. - pub fn triage_round( - &self, - round: NumberFor, - best_grandpa: NumberFor, - ) -> Result { - let (start, end) = self.accepted_interval(best_grandpa)?; + pub fn triage_round(&self, round: NumberFor) -> Result { + let (start, end) = self.accepted_interval()?; if start <= round && round <= end { Ok(RoundAction::Process) } else if round > end { @@ -217,17 +232,15 @@ impl VoterOracle { /// Return `Some(number)` if we should be voting on block `number`, /// return `None` if there is no block we should vote on. - pub fn voting_target( - &self, - best_beefy: NumberFor, - best_grandpa: NumberFor, - ) -> Option> { + pub fn voting_target(&self) -> Option> { let rounds = if let Some(r) = self.sessions.front() { r } else { debug!(target: LOG_TARGET, "🥩 No voting round started"); return None }; + let best_grandpa = *self.best_grandpa_block_header.number(); + let best_beefy = self.best_beefy_block; // `target` is guaranteed > `best_beefy` since `min_block_delta` is at least `1`. let target = @@ -243,26 +256,8 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { - pub backend: Arc, - pub payload_provider: P, - pub runtime: Arc, - pub sync: Arc, - pub key_store: BeefyKeystore, - pub gossip_engine: GossipEngine, - pub gossip_validator: Arc>, - pub on_demand_justifications: OnDemandJustificationsEngine, - pub links: BeefyVoterLinks, - pub metrics: Option, - pub persisted_state: PersistedState, -} - #[derive(Debug, Decode, Encode, PartialEq)] pub(crate) struct PersistedState { - /// Best block we received a GRANDPA finality for. - best_grandpa_block_header: ::Header, - /// Best block a BEEFY voting round has been concluded for. - best_beefy_block: NumberFor, /// Best block we voted on. best_voted: NumberFor, /// Chooses which incoming votes to accept and which votes to generate. 
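
The interval logic above is the heart of the new vote filtering: once the head session's mandatory block is concluded, any round between the best BEEFY block (or the session start, whichever is higher) and the GRANDPA head is accepted, otherwise only the session-start block itself; `triage_round` then classifies each incoming round against that window. A condensed restatement with plain `u64` block numbers in place of `NumberFor<B>`; the `Enqueue` and `Drop` variants are assumed from the surrounding, unmodified `RoundAction` enum, which this diff does not show:

```rust
#[derive(Debug, PartialEq)]
enum RoundAction {
    Process,
    Enqueue,
    Drop,
}

/// Condensed restatement of `VoterOracle::accepted_interval` for the head session,
/// using plain `u64` block numbers instead of `NumberFor<B>`.
fn accepted_interval(
    session_start: u64,
    mandatory_done: bool,
    best_beefy: u64,
    best_grandpa: u64,
) -> (u64, u64) {
    if mandatory_done {
        // Mandatory block concluded: accept votes for any GRANDPA-finalized block
        // in a round no older than the best BEEFY block.
        (session_start.max(best_beefy), best_grandpa)
    } else {
        // Only the mandatory session-start block is accepted until it concludes.
        (session_start, session_start)
    }
}

/// Mirror of `VoterOracle::triage_round`: inside the interval -> process now,
/// beyond it -> enqueue for later, behind it -> drop as stale.
fn triage_round(round: u64, interval: (u64, u64)) -> RoundAction {
    let (start, end) = interval;
    if start <= round && round <= end {
        RoundAction::Process
    } else if round > end {
        RoundAction::Enqueue
    } else {
        RoundAction::Drop
    }
}

fn main() {
    // Session starting at #30 with its mandatory block still pending:
    let interval = accepted_interval(30, false, 25, 60);
    assert_eq!(interval, (30, 30));
    assert_eq!(triage_round(30, interval), RoundAction::Process);
    assert_eq!(triage_round(45, interval), RoundAction::Enqueue);
    assert_eq!(triage_round(10, interval), RoundAction::Drop);
    // Once the mandatory block is done, the window opens up to the GRANDPA head.
    assert_eq!(accepted_interval(30, true, 35, 60), (35, 60));
}
```
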
@@ -277,56 +272,55 @@ impl PersistedState { sessions: VecDeque>, min_block_delta: u32, ) -> Option { - VoterOracle::checked_new(sessions, min_block_delta).map(|voting_oracle| PersistedState { - best_grandpa_block_header: grandpa_header, - best_beefy_block: best_beefy, - best_voted: Zero::zero(), - voting_oracle, - }) + VoterOracle::checked_new(sessions, min_block_delta, grandpa_header, best_beefy) + .map(|voting_oracle| PersistedState { best_voted: Zero::zero(), voting_oracle }) } pub(crate) fn set_min_block_delta(&mut self, min_block_delta: u32) { self.voting_oracle.min_block_delta = min_block_delta.max(1); } + pub(crate) fn set_best_beefy(&mut self, best_beefy: NumberFor) { + self.voting_oracle.best_beefy_block = best_beefy; + } + pub(crate) fn set_best_grandpa(&mut self, best_grandpa: ::Header) { - self.best_grandpa_block_header = best_grandpa; + self.voting_oracle.best_grandpa_block_header = best_grandpa; + } + + pub(crate) fn gossip_filter_config(&self) -> Result, Error> { + let (start, end) = self.voting_oracle.accepted_interval()?; + let validator_set = self.voting_oracle.current_validator_set()?; + Ok(GossipFilterCfg { start, end, validator_set }) } } /// A BEEFY worker plays the BEEFY protocol pub(crate) struct BeefyWorker { // utilities - backend: Arc, - payload_provider: P, - runtime: Arc, - sync: Arc, - key_store: BeefyKeystore, + pub backend: Arc, + pub payload_provider: P, + pub runtime: Arc, + pub sync: Arc, + pub key_store: BeefyKeystore, // communication - gossip_engine: GossipEngine, - gossip_validator: Arc>, - on_demand_justifications: OnDemandJustificationsEngine, + pub gossip_engine: GossipEngine, + pub gossip_validator: Arc>, + pub gossip_report_stream: TracingUnboundedReceiver, + pub on_demand_justifications: OnDemandJustificationsEngine, // channels /// Links between the block importer, the background voter and the RPC layer. - links: BeefyVoterLinks, + pub links: BeefyVoterLinks, // voter state /// BEEFY client metrics. - metrics: Option, - /// Buffer holding votes for future processing. - pending_votes: BTreeMap< - NumberFor, - BoundedVec< - VoteMessage, AuthorityId, Signature>, - ConstU32, - >, - >, + pub metrics: Option, /// Buffer holding justifications for future processing. - pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + pub pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, /// Persisted voter state. - persisted_state: PersistedState, + pub persisted_state: PersistedState, } impl BeefyWorker @@ -338,57 +332,15 @@ where R: ProvideRuntimeApi, R::Api: BeefyApi, { - /// Return a new BEEFY worker instance. - /// - /// Note that a BEEFY worker is only fully functional if a corresponding - /// BEEFY pallet has been deployed on-chain. - /// - /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. 
- pub(crate) fn new(worker_params: WorkerParams) -> Self { - let WorkerParams { - backend, - payload_provider, - runtime, - key_store, - sync, - gossip_engine, - gossip_validator, - on_demand_justifications, - links, - metrics, - persisted_state, - } = worker_params; - - BeefyWorker { - backend, - payload_provider, - runtime, - sync, - key_store, - gossip_engine, - gossip_validator, - on_demand_justifications, - links, - metrics, - pending_votes: BTreeMap::new(), - pending_justifications: BTreeMap::new(), - persisted_state, - } - } - fn best_grandpa_block(&self) -> NumberFor { - *self.persisted_state.best_grandpa_block_header.number() - } - - fn best_beefy_block(&self) -> NumberFor { - self.persisted_state.best_beefy_block + *self.persisted_state.voting_oracle.best_grandpa_block_header.number() } fn voting_oracle(&self) -> &VoterOracle { &self.persisted_state.voting_oracle } - fn active_rounds(&mut self) -> Option<&Rounds> { + fn active_rounds(&mut self) -> Result<&Rounds, Error> { self.persisted_state.voting_oracle.active_rounds() } @@ -429,7 +381,7 @@ where debug!(target: LOG_TARGET, "🥩 New active validator set: {:?}", validator_set); // BEEFY should finalize a mandatory block during each session. - if let Some(active_session) = self.active_rounds() { + if let Ok(active_session) = self.active_rounds() { if !active_session.mandatory_done() { debug!( target: LOG_TARGET, @@ -460,12 +412,17 @@ where } fn handle_finality_notification(&mut self, notification: &FinalityNotification) { - debug!(target: LOG_TARGET, "🥩 Finality notification: {:?}", notification); + debug!( + target: LOG_TARGET, + "🥩 Finality notification: header {:?} tree_route {:?}", + notification.header, + notification.tree_route, + ); let header = ¬ification.header; if *header.number() > self.best_grandpa_block() { // update best GRANDPA finalized block we have seen - self.persisted_state.best_grandpa_block_header = header.clone(); + self.persisted_state.set_best_grandpa(header.clone()); // Check all (newly) finalized blocks for new session(s). let backend = self.backend.clone(); @@ -484,38 +441,33 @@ where self.init_session_at(new_validator_set, *header.number()); } } + + // Update gossip validator votes filter. + if let Err(e) = self + .persisted_state + .gossip_filter_config() + .map(|filter| self.gossip_validator.update_filter(filter)) + { + error!(target: LOG_TARGET, "🥩 Voter error: {:?}", e); + } } } - /// Based on [VoterOracle] this vote is either processed here or enqueued for later. + /// Based on [VoterOracle] this vote is either processed here or discarded. fn triage_incoming_vote( &mut self, vote: VoteMessage, AuthorityId, Signature>, ) -> Result<(), Error> { let block_num = vote.commitment.block_number; - let best_grandpa = self.best_grandpa_block(); - self.gossip_validator.note_round(block_num); - match self.voting_oracle().triage_round(block_num, best_grandpa)? 
{ - RoundAction::Process => self.handle_vote(vote)?, - RoundAction::Enqueue => { - debug!(target: LOG_TARGET, "🥩 Buffer vote for round: {:?}.", block_num); - if self.pending_votes.len() < MAX_BUFFERED_VOTE_ROUNDS { - let votes_vec = self.pending_votes.entry(block_num).or_default(); - if votes_vec.try_push(vote).is_ok() { - metric_inc!(self, beefy_buffered_votes); - } else { - warn!( - target: LOG_TARGET, - "🥩 Buffer vote dropped for round: {:?}", block_num - ); - metric_inc!(self, beefy_buffered_votes_dropped); - } - } else { - warn!(target: LOG_TARGET, "🥩 Buffer vote dropped for round: {:?}.", block_num); - metric_inc!(self, beefy_buffered_votes_dropped); - } - }, + match self.voting_oracle().triage_round(block_num)? { + RoundAction::Process => + if let Some(finality_proof) = self.handle_vote(vote)? { + let gossip_proof = GossipMessage::::FinalityProof(finality_proof); + let encoded_proof = gossip_proof.encode(); + self.gossip_engine.gossip_message(proofs_topic::(), encoded_proof, true); + }, RoundAction::Drop => metric_inc!(self, beefy_stale_votes), + RoundAction::Enqueue => error!(target: LOG_TARGET, "🥩 unexpected vote: {:?}.", vote), }; Ok(()) } @@ -531,8 +483,7 @@ where VersionedFinalityProof::V1(ref sc) => sc, }; let block_num = signed_commitment.commitment.block_number; - let best_grandpa = self.best_grandpa_block(); - match self.voting_oracle().triage_round(block_num, best_grandpa)? { + match self.voting_oracle().triage_round(block_num)? { RoundAction::Process => { debug!(target: LOG_TARGET, "🥩 Process justification for round: {:?}.", block_num); metric_inc!(self, beefy_imported_justifications); @@ -559,12 +510,8 @@ where fn handle_vote( &mut self, vote: VoteMessage, AuthorityId, Signature>, - ) -> Result<(), Error> { - let rounds = self - .persisted_state - .voting_oracle - .active_rounds_mut() - .ok_or(Error::UninitSession)?; + ) -> Result>, Error> { + let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; let block_number = vote.commitment.block_number; match rounds.add_vote(vote) { @@ -576,8 +523,9 @@ where ); // We created the `finality_proof` and know to be valid. // New state is persisted after finalization. - self.finalize(finality_proof)?; + self.finalize(finality_proof.clone())?; metric_inc!(self, beefy_good_votes_processed); + return Ok(Some(finality_proof)) }, VoteImportResult::Ok => { // Persist state after handling mandatory block vote. @@ -599,7 +547,7 @@ where VoteImportResult::Invalid => metric_inc!(self, beefy_invalid_votes), VoteImportResult::Stale => metric_inc!(self, beefy_stale_votes), }; - Ok(()) + Ok(None) } /// Provide BEEFY finality for block based on `finality_proof`: @@ -608,7 +556,7 @@ where /// 3. Persist voter state, /// 4. Send best block hash and `finality_proof` to RPC worker. /// - /// Expects `finality proof` to be valid. + /// Expects `finality proof` to be valid and for a block > current-best-beefy. fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) -> Result<(), Error> { let block_num = match finality_proof { VersionedFinalityProof::V1(ref sc) => sc.commitment.block_number, @@ -616,74 +564,62 @@ where // Finalize inner round and update voting_oracle state. self.persisted_state.voting_oracle.finalize(block_num)?; - self.gossip_validator.conclude_round(block_num); - if block_num > self.best_beefy_block() { - // Set new best BEEFY block number. 
- self.persisted_state.best_beefy_block = block_num; - crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) - .map_err(|e| Error::Backend(e.to_string()))?; + // Set new best BEEFY block number. + self.persisted_state.set_best_beefy(block_num); + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) + .map_err(|e| Error::Backend(e.to_string()))?; - metric_set!(self, beefy_best_block, block_num); + metric_set!(self, beefy_best_block, block_num); - self.on_demand_justifications.cancel_requests_older_than(block_num); + self.on_demand_justifications.cancel_requests_older_than(block_num); - if let Err(e) = self - .backend - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(block_num)) - .and_then(|hash| { - self.links - .to_rpc_best_block_sender - .notify(|| Ok::<_, ()>(hash)) - .expect("forwards closure result; the closure always returns Ok; qed."); - - self.backend - .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) - }) { - error!( - target: LOG_TARGET, - "🥩 Error {:?} on appending justification: {:?}", e, finality_proof - ); - } - - self.links - .to_rpc_justif_sender - .notify(|| Ok::<_, ()>(finality_proof)) - .expect("forwards closure result; the closure always returns Ok; qed."); - } else { - debug!(target: LOG_TARGET, "🥩 Can't set best beefy to old: {}", block_num); - metric_set!(self, beefy_best_block_set_last_failure, block_num); + if let Err(e) = self + .backend + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(block_num)) + .and_then(|hash| { + self.links + .to_rpc_best_block_sender + .notify(|| Ok::<_, ()>(hash)) + .expect("forwards closure result; the closure always returns Ok; qed."); + + self.backend + .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) + }) { + error!( + target: LOG_TARGET, + "🥩 Error {:?} on appending justification: {:?}", e, finality_proof + ); } + + self.links + .to_rpc_justif_sender + .notify(|| Ok::<_, ()>(finality_proof)) + .expect("forwards closure result; the closure always returns Ok; qed."); + + // Update gossip validator votes filter. + self.persisted_state + .gossip_filter_config() + .map(|filter| self.gossip_validator.update_filter(filter))?; Ok(()) } - /// Handle previously buffered justifications and votes that now land in the voting interval. - fn try_pending_justif_and_votes(&mut self) -> Result<(), Error> { - let best_grandpa = self.best_grandpa_block(); - let _ph = PhantomData::::default(); - - fn to_process_for( - pending: &mut BTreeMap, T>, - (start, end): (NumberFor, NumberFor), - _: PhantomData, - ) -> BTreeMap, T> { + /// Handle previously buffered justifications, that now land in the voting interval. + fn try_pending_justififactions(&mut self) -> Result<(), Error> { + // Interval of blocks for which we can process justifications and votes right now. + let (start, end) = self.voting_oracle().accepted_interval()?; + // Process pending justifications. + if !self.pending_justifications.is_empty() { // These are still pending. - let still_pending = pending.split_off(&end.saturating_add(1u32.into())); + let still_pending = + self.pending_justifications.split_off(&end.saturating_add(1u32.into())); // These can be processed. - let to_handle = pending.split_off(&start); + let justifs_to_process = self.pending_justifications.split_off(&start); // The rest can be dropped. - *pending = still_pending; - // Return ones to process. - to_handle - } - // Interval of blocks for which we can process justifications and votes right now. 
- let mut interval = self.voting_oracle().accepted_interval(best_grandpa)?; + self.pending_justifications = still_pending; - // Process pending justifications. - if !self.pending_justifications.is_empty() { - let justifs_to_handle = to_process_for(&mut self.pending_justifications, interval, _ph); - for (num, justification) in justifs_to_handle.into_iter() { + for (num, justification) in justifs_to_process.into_iter() { debug!(target: LOG_TARGET, "🥩 Handle buffered justification for: {:?}.", num); metric_inc!(self, beefy_imported_justifications); if let Err(err) = self.finalize(justification) { @@ -691,27 +627,6 @@ where } } metric_set!(self, beefy_buffered_justifications, self.pending_justifications.len()); - // Possibly new interval after processing justifications. - interval = self.voting_oracle().accepted_interval(best_grandpa)?; - } - - // Process pending votes. - if !self.pending_votes.is_empty() { - let mut processed = 0u64; - let votes_to_handle = to_process_for(&mut self.pending_votes, interval, _ph); - for (num, votes) in votes_to_handle.into_iter() { - debug!(target: LOG_TARGET, "🥩 Handle buffered votes for: {:?}.", num); - processed += votes.len() as u64; - for v in votes.into_iter() { - if let Err(err) = self.handle_vote(v) { - error!(target: LOG_TARGET, "🥩 Error handling buffered vote: {}", err); - }; - } - } - if let Some(previous) = metric_get!(self, beefy_buffered_votes) { - previous.sub(processed); - metric_set!(self, beefy_buffered_votes, previous.get()); - } } Ok(()) } @@ -719,10 +634,7 @@ where /// Decide if should vote, then vote.. or don't.. fn try_to_vote(&mut self) -> Result<(), Error> { // Vote if there's now a new vote target. - if let Some(target) = self - .voting_oracle() - .voting_target(self.best_beefy_block(), self.best_grandpa_block()) - { + if let Some(target) = self.voting_oracle().voting_target() { metric_set!(self, beefy_should_vote_on, target); if target > self.persisted_state.best_voted { self.do_vote(target)?; @@ -740,7 +652,7 @@ where // Most of the time we get here, `target` is actually `best_grandpa`, // avoid getting header from backend in that case. let target_header = if target_number == self.best_grandpa_block() { - self.persisted_state.best_grandpa_block_header.clone() + self.persisted_state.voting_oracle.best_grandpa_block_header.clone() } else { let hash = self .backend @@ -771,11 +683,7 @@ where return Ok(()) }; - let rounds = self - .persisted_state - .voting_oracle - .active_rounds_mut() - .ok_or(Error::UninitSession)?; + let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); let authority_id = if let Some(id) = self.key_store.authority_id(validators) { @@ -807,20 +715,20 @@ where BeefyKeystore::verify(&authority_id, &signature, &encoded_commitment) ); - let message = VoteMessage { commitment, id: authority_id, signature }; - - let encoded_message = message.encode(); - - metric_inc!(self, beefy_votes_sent); - - debug!(target: LOG_TARGET, "🥩 Sent vote message: {:?}", message); - - if let Err(err) = self.handle_vote(message) { + let vote = VoteMessage { commitment, id: authority_id, signature }; + if let Some(finality_proof) = self.handle_vote(vote.clone()).map_err(|err| { error!(target: LOG_TARGET, "🥩 Error handling self vote: {}", err); + err + })? 
{ + let encoded_proof = GossipMessage::::FinalityProof(finality_proof).encode(); + self.gossip_engine.gossip_message(proofs_topic::(), encoded_proof, true); + } else { + metric_inc!(self, beefy_votes_sent); + debug!(target: LOG_TARGET, "🥩 Sent vote message: {:?}", vote); + let encoded_vote = GossipMessage::::Vote(vote).encode(); + self.gossip_engine.gossip_message(votes_topic::(), encoded_vote, false); } - self.gossip_engine.gossip_message(topic::(), encoded_message, false); - // Persist state after vote to avoid double voting in case of voter restarts. self.persisted_state.best_voted = target_number; metric_set!(self, beefy_best_voted, target_number); @@ -830,7 +738,7 @@ where fn process_new_state(&mut self) { // Handle pending justifications and/or votes for now GRANDPA finalized blocks. - if let Err(err) = self.try_pending_justif_and_votes() { + if let Err(err) = self.try_pending_justififactions() { debug!(target: LOG_TARGET, "🥩 {}", err); } @@ -865,14 +773,25 @@ where let mut votes = Box::pin( self.gossip_engine - .messages_for(topic::()) + .messages_for(votes_topic::()) .filter_map(|notification| async move { - trace!(target: LOG_TARGET, "🥩 Got vote message: {:?}", notification); - - VoteMessage::, AuthorityId, Signature>::decode( - &mut ¬ification.message[..], - ) - .ok() + let vote = GossipMessage::::decode(&mut ¬ification.message[..]) + .ok() + .and_then(|message| message.unwrap_vote()); + trace!(target: LOG_TARGET, "🥩 Got vote message: {:?}", vote); + vote + }) + .fuse(), + ); + let mut gossip_proofs = Box::pin( + self.gossip_engine + .messages_for(proofs_topic::()) + .filter_map(|notification| async move { + let proof = GossipMessage::::decode(&mut ¬ification.message[..]) + .ok() + .and_then(|message| message.unwrap_finality_proof()); + trace!(target: LOG_TARGET, "🥩 Got gossip proof message: {:?}", proof); + proof }) .fuse(), ); @@ -881,7 +800,12 @@ where // Act on changed 'state'. self.process_new_state(); + // Mutable reference used to drive the gossip engine. let mut gossip_engine = &mut self.gossip_engine; + // Use temp val and report after async section, + // to avoid having to Mutex-wrap `gossip_engine`. + let mut gossip_report: Option = None; + // Wait for, and handle external events. // The branches below only change 'state', actual voting happens afterwards, // based on the new resulting 'state'. @@ -902,11 +826,16 @@ where return; }, // Process incoming justifications as these can make some in-flight votes obsolete. - justif = self.on_demand_justifications.next().fuse() => { - if let Some(justif) = justif { - if let Err(err) = self.triage_incoming_justif(justif) { - debug!(target: LOG_TARGET, "🥩 {}", err); - } + response_info = self.on_demand_justifications.next().fuse() => { + match response_info { + ResponseInfo::ValidProof(justif, peer_report) => { + if let Err(err) = self.triage_incoming_justif(justif) { + debug!(target: LOG_TARGET, "🥩 {}", err); + } + gossip_report = Some(peer_report); + }, + ResponseInfo::PeerReport(peer_report) => gossip_report = Some(peer_report), + ResponseInfo::Pending => (), } }, justif = block_import_justif.next() => { @@ -921,6 +850,20 @@ where return; } }, + justif = gossip_proofs.next() => { + if let Some(justif) = justif { + // Gossiped justifications have already been verified by `GossipValidator`. + if let Err(err) = self.triage_incoming_justif(justif) { + debug!(target: LOG_TARGET, "🥩 {}", err); + } + } else { + error!( + target: LOG_TARGET, + "🥩 Finality proofs gossiping stream terminated, closing worker." 
+ ); + return; + } + }, // Finally process incoming votes. vote = votes.next() => { if let Some(vote) = vote { @@ -929,10 +872,20 @@ where debug!(target: LOG_TARGET, "🥩 {}", err); } } else { - error!(target: LOG_TARGET, "🥩 Votes gossiping stream terminated, closing worker."); + error!( + target: LOG_TARGET, + "🥩 Votes gossiping stream terminated, closing worker." + ); return; } }, + // Process peer reports. + report = self.gossip_report_stream.next() => { + gossip_report = report; + }, + } + if let Some(PeerReport { who, cost_benefit }) = gossip_report { + self.gossip_engine.report(who, cost_benefit); } } } @@ -946,8 +899,7 @@ where &self, proof: EquivocationProof, AuthorityId, Signature>, ) -> Result<(), Error> { - let rounds = - self.persisted_state.voting_oracle.active_rounds().ok_or(Error::UninitSession)?; + let rounds = self.persisted_state.voting_oracle.active_rounds()?; let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); let offender_id = proof.offender_id().clone(); @@ -1083,16 +1035,16 @@ pub(crate) mod tests { &self.voting_oracle } - pub fn active_round(&self) -> Option<&Rounds> { + pub fn active_round(&self) -> Result<&Rounds, Error> { self.voting_oracle.active_rounds() } pub fn best_beefy_block(&self) -> NumberFor { - self.best_beefy_block + self.voting_oracle.best_beefy_block } - pub fn best_grandpa_block(&self) -> NumberFor { - *self.best_grandpa_block_header.number() + pub fn best_grandpa_number(&self) -> NumberFor { + *self.voting_oracle.best_grandpa_block_header.number() } } @@ -1103,7 +1055,7 @@ pub(crate) mod tests { } fn create_beefy_worker( - peer: &BeefyPeer, + peer: &mut BeefyPeer, key: &Keyring, min_block_delta: u32, genesis_validator_set: ValidatorSet, @@ -1138,7 +1090,8 @@ pub(crate) mod tests { let network = peer.network_service().clone(); let sync = peer.sync_service().clone(); let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let gossip_validator = Arc::new(GossipValidator::new(known_peers.clone())); + let (gossip_validator, gossip_report_stream) = GossipValidator::new(known_peers.clone()); + let gossip_validator = Arc::new(gossip_validator); let gossip_engine = GossipEngine::new( network.clone(), sync.clone(), @@ -1153,19 +1106,22 @@ pub(crate) mod tests { known_peers, None, ); - let genesis_header = backend + // Push 1 block - will start first session. 
+ let hashes = peer.push_blocks(1, false); + backend.finalize_block(hashes[0], None).unwrap(); + let first_header = backend .blockchain() - .expect_header(backend.blockchain().info().genesis_hash) + .expect_header(backend.blockchain().info().best_hash) .unwrap(); let persisted_state = PersistedState::checked_new( - genesis_header, + first_header, Zero::zero(), vec![Rounds::new(One::one(), genesis_validator_set)].into(), min_block_delta, ) .unwrap(); let payload_provider = MmrRootProvider::new(api.clone()); - let worker_params = crate::worker::WorkerParams { + BeefyWorker { backend, payload_provider, runtime: api, @@ -1173,12 +1129,13 @@ pub(crate) mod tests { links, gossip_engine, gossip_validator, + gossip_report_stream, metrics, sync: Arc::new(sync), on_demand_justifications, + pending_justifications: BTreeMap::new(), persisted_state, - }; - BeefyWorker::<_, _, _, _, _>::new(worker_params) + } } #[test] @@ -1275,10 +1232,30 @@ pub(crate) mod tests { #[test] fn should_vote_target() { - let mut oracle = VoterOracle:: { min_block_delta: 1, sessions: VecDeque::new() }; + let header = Header::new( + 1u32.into(), + Default::default(), + Default::default(), + Default::default(), + Digest::default(), + ); + let mut oracle = VoterOracle:: { + best_beefy_block: 0, + best_grandpa_block_header: header, + min_block_delta: 1, + sessions: VecDeque::new(), + }; + let voting_target_with = |oracle: &mut VoterOracle, + best_beefy: NumberFor, + best_grandpa: NumberFor| + -> Option> { + oracle.best_beefy_block = best_beefy; + oracle.best_grandpa_block_header.number = best_grandpa; + oracle.voting_target() + }; // rounds not initialized -> should vote: `None` - assert_eq!(oracle.voting_target(0, 1), None); + assert_eq!(voting_target_with(&mut oracle, 0, 1), None); let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); @@ -1287,29 +1264,29 @@ pub(crate) mod tests { // under min delta oracle.min_block_delta = 4; - assert_eq!(oracle.voting_target(1, 1), None); - assert_eq!(oracle.voting_target(2, 5), None); + assert_eq!(voting_target_with(&mut oracle, 1, 1), None); + assert_eq!(voting_target_with(&mut oracle, 2, 5), None); // vote on min delta - assert_eq!(oracle.voting_target(4, 9), Some(8)); + assert_eq!(voting_target_with(&mut oracle, 4, 9), Some(8)); oracle.min_block_delta = 8; - assert_eq!(oracle.voting_target(10, 18), Some(18)); + assert_eq!(voting_target_with(&mut oracle, 10, 18), Some(18)); // vote on power of two oracle.min_block_delta = 1; - assert_eq!(oracle.voting_target(1000, 1008), Some(1004)); - assert_eq!(oracle.voting_target(1000, 1016), Some(1008)); + assert_eq!(voting_target_with(&mut oracle, 1000, 1008), Some(1004)); + assert_eq!(voting_target_with(&mut oracle, 1000, 1016), Some(1008)); // nothing new to vote on - assert_eq!(oracle.voting_target(1000, 1000), None); + assert_eq!(voting_target_with(&mut oracle, 1000, 1000), None); // vote on mandatory oracle.sessions.clear(); oracle.add_session(Rounds::new(1000, validator_set.clone())); - assert_eq!(oracle.voting_target(0, 1008), Some(1000)); + assert_eq!(voting_target_with(&mut oracle, 0, 1008), Some(1000)); oracle.sessions.clear(); oracle.add_session(Rounds::new(1001, validator_set.clone())); - assert_eq!(oracle.voting_target(1000, 1008), Some(1001)); + assert_eq!(voting_target_with(&mut oracle, 1000, 1008), Some(1001)); } #[test] @@ -1317,16 +1294,34 @@ pub(crate) mod tests { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut oracle 
= VoterOracle:: { min_block_delta: 1, sessions: VecDeque::new() }; + let header = Header::new( + 1u32.into(), + Default::default(), + Default::default(), + Default::default(), + Digest::default(), + ); + let mut oracle = VoterOracle:: { + best_beefy_block: 0, + best_grandpa_block_header: header, + min_block_delta: 1, + sessions: VecDeque::new(), + }; + let accepted_interval_with = |oracle: &mut VoterOracle, + best_grandpa: NumberFor| + -> Result<(NumberFor, NumberFor), Error> { + oracle.best_grandpa_block_header.number = best_grandpa; + oracle.accepted_interval() + }; // rounds not initialized -> should accept votes: `None` - assert!(oracle.accepted_interval(1).is_err()); + assert!(accepted_interval_with(&mut oracle, 1).is_err()); let session_one = 1; oracle.add_session(Rounds::new(session_one, validator_set.clone())); // mandatory not done, only accept mandatory for i in 0..15 { - assert_eq!(oracle.accepted_interval(i), Ok((session_one, session_one))); + assert_eq!(accepted_interval_with(&mut oracle, i), Ok((session_one, session_one))); } // add more sessions, nothing changes @@ -1336,7 +1331,7 @@ pub(crate) mod tests { oracle.add_session(Rounds::new(session_three, validator_set.clone())); // mandatory not done, should accept mandatory for session_one for i in session_three..session_three + 15 { - assert_eq!(oracle.accepted_interval(i), Ok((session_one, session_one))); + assert_eq!(accepted_interval_with(&mut oracle, i), Ok((session_one, session_one))); } // simulate finish mandatory for session one, prune oracle @@ -1344,7 +1339,7 @@ pub(crate) mod tests { oracle.try_prune(); // session_one pruned, should accept mandatory for session_two for i in session_three..session_three + 15 { - assert_eq!(oracle.accepted_interval(i), Ok((session_two, session_two))); + assert_eq!(accepted_interval_with(&mut oracle, i), Ok((session_two, session_two))); } // simulate finish mandatory for session two, prune oracle @@ -1352,26 +1347,29 @@ pub(crate) mod tests { oracle.try_prune(); // session_two pruned, should accept mandatory for session_three for i in session_three..session_three + 15 { - assert_eq!(oracle.accepted_interval(i), Ok((session_three, session_three))); + assert_eq!(accepted_interval_with(&mut oracle, i), Ok((session_three, session_three))); } // simulate finish mandatory for session three oracle.sessions.front_mut().unwrap().test_set_mandatory_done(true); // verify all other blocks in this session are now open to voting for i in session_three..session_three + 15 { - assert_eq!(oracle.accepted_interval(i), Ok((session_three, i))); + assert_eq!(accepted_interval_with(&mut oracle, i), Ok((session_three, i))); } // pruning does nothing in this case oracle.try_prune(); for i in session_three..session_three + 15 { - assert_eq!(oracle.accepted_interval(i), Ok((session_three, i))); + assert_eq!(accepted_interval_with(&mut oracle, i), Ok((session_three, i))); } // adding new session automatically prunes "finalized" previous session let session_four = 31; oracle.add_session(Rounds::new(session_four, validator_set.clone())); assert_eq!(oracle.sessions.front().unwrap().session_start(), session_four); - assert_eq!(oracle.accepted_interval(session_four + 10), Ok((session_four, session_four))); + assert_eq!( + accepted_interval_with(&mut oracle, session_four + 10), + Ok((session_four, session_four)) + ); } #[test] @@ -1405,7 +1403,7 @@ pub(crate) mod tests { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1); - let mut 
worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone()); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); // keystore doesn't contain other keys than validators' assert_eq!(worker.verify_validator_set(&1, &validator_set), Ok(())); @@ -1429,7 +1427,7 @@ pub(crate) mod tests { let validator_set = ValidatorSet::new(make_beefy_ids(&keys), 0).unwrap(); let mut net = BeefyTestNet::new(1); let backend = net.peer(0).client().as_backend(); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone()); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); // remove default session, will manually add custom one. worker.persisted_state.voting_oracle.sessions.clear(); @@ -1449,7 +1447,7 @@ pub(crate) mod tests { }; // no 'best beefy block' or finality proofs - assert_eq!(worker.best_beefy_block(), 0); + assert_eq!(worker.persisted_state.best_beefy_block(), 0); poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); assert_eq!(finality_proof.poll_next_unpin(cx), Poll::Pending); @@ -1457,6 +1455,7 @@ pub(crate) mod tests { }) .await; + let client = net.peer(0).client().as_client(); // unknown hash for block #1 let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys.clone()); @@ -1471,10 +1470,16 @@ pub(crate) mod tests { // try to finalize block #1 worker.finalize(justif.clone()).unwrap(); // verify block finalized - assert_eq!(worker.best_beefy_block(), 1); + assert_eq!(worker.persisted_state.best_beefy_block(), 1); poll_fn(move |cx| { - // unknown hash -> nothing streamed - assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); + // expect Some(hash-of-block-1) + match best_block_stream.poll_next_unpin(cx) { + Poll::Ready(Some(hash)) => { + let block_num = client.number(hash).unwrap(); + assert_eq!(block_num, Some(1)); + }, + v => panic!("unexpected value: {:?}", v), + } // commitment streamed match finality_proof.poll_next_unpin(cx) { // expect justification @@ -1488,11 +1493,9 @@ pub(crate) mod tests { // generate 2 blocks, try again expect success let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - let hashes = net.peer(0).push_blocks(2, false); + let hashes = net.peer(0).push_blocks(1, false); // finalize 1 and 2 without justifications (hashes does not contain genesis) - let hashof1 = hashes[0]; - let hashof2 = hashes[1]; - backend.finalize_block(hashof1, None).unwrap(); + let hashof2 = hashes[0]; backend.finalize_block(hashof2, None).unwrap(); let justif = create_finality_proof(2); @@ -1504,7 +1507,7 @@ pub(crate) mod tests { // new session starting at #2 is in front assert_eq!(worker.active_rounds().unwrap().session_start(), 2); // verify block finalized - assert_eq!(worker.best_beefy_block(), 2); + assert_eq!(worker.persisted_state.best_beefy_block(), 2); poll_fn(move |cx| { match best_block_stream.poll_next_unpin(cx) { // expect Some(hash-of-block-2) @@ -1528,7 +1531,7 @@ pub(crate) mod tests { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone()); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); let worker_rounds = worker.active_rounds().unwrap(); 
assert_eq!(worker_rounds.session_start(), 1); @@ -1554,77 +1557,6 @@ pub(crate) mod tests { assert_eq!(rounds.validator_set_id(), new_validator_set.id()); } - #[tokio::test] - async fn should_triage_votes_and_process_later() { - let keys = &[Keyring::Alice, Keyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone()); - // remove default session, will manually add custom one. - worker.persisted_state.voting_oracle.sessions.clear(); - - fn new_vote( - block_number: NumberFor, - ) -> VoteMessage, AuthorityId, Signature> { - let commitment = Commitment { - payload: Payload::from_single_entry(*b"BF", vec![]), - block_number, - validator_set_id: 0, - }; - VoteMessage { - commitment, - id: Keyring::Alice.public(), - signature: Keyring::Alice.sign(b"I am committed"), - } - } - - // best grandpa is 20 - let best_grandpa_header = Header::new( - 20u32.into(), - Default::default(), - Default::default(), - Default::default(), - Digest::default(), - ); - - worker - .persisted_state - .voting_oracle - .add_session(Rounds::new(10, validator_set.clone())); - worker.persisted_state.best_grandpa_block_header = best_grandpa_header; - - // triage votes for blocks 10..13 - worker.triage_incoming_vote(new_vote(10)).unwrap(); - worker.triage_incoming_vote(new_vote(11)).unwrap(); - worker.triage_incoming_vote(new_vote(12)).unwrap(); - // triage votes for blocks 20..23 - worker.triage_incoming_vote(new_vote(20)).unwrap(); - worker.triage_incoming_vote(new_vote(21)).unwrap(); - worker.triage_incoming_vote(new_vote(22)).unwrap(); - - // vote for 10 should have been handled, while the rest buffered for later processing - let mut votes = worker.pending_votes.values(); - assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 11); - assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 12); - assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 20); - assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); - assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); - assert!(votes.next().is_none()); - - // simulate mandatory done, and retry buffered votes - worker - .persisted_state - .voting_oracle - .active_rounds_mut() - .unwrap() - .test_set_mandatory_done(true); - worker.try_pending_justif_and_votes().unwrap(); - // all blocks <= grandpa finalized should have been handled, rest still buffered - let mut votes = worker.pending_votes.values(); - assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); - assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); - } - #[tokio::test] async fn should_not_report_bad_old_or_self_equivocations() { let block_num = 1; @@ -1637,7 +1569,7 @@ pub(crate) mod tests { let api_alice = Arc::new(api_alice); let mut net = BeefyTestNet::new(1); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone()); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); worker.runtime = api_alice.clone(); // let there be a block with num = 1: diff --git a/client/consensus/grandpa/Cargo.toml b/client/consensus/grandpa/Cargo.toml index 511e546aa7256..67c58d37a2cf5 100644 --- a/client/consensus/grandpa/Cargo.toml +++ b/client/consensus/grandpa/Cargo.toml @@ -18,7 +18,7 @@ ahash = "0.8.2" array-bytes = "4.1" async-trait = "0.1.57" 
dyn-clone = "1.0" -finality-grandpa = { version = "0.16.1", features = ["derive-codec"] } +finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" @@ -50,7 +50,7 @@ sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3.0" -finality-grandpa = { version = "0.16.1", features = ["derive-codec", "test-helpers"] } +finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } serde = "1.0.136" tokio = "1.22.0" sc-network = { version = "0.10.0-dev", path = "../../network" } diff --git a/client/consensus/grandpa/rpc/Cargo.toml b/client/consensus/grandpa/rpc/Cargo.toml index 4880b50d30630..76a06ad298f77 100644 --- a/client/consensus/grandpa/rpc/Cargo.toml +++ b/client/consensus/grandpa/rpc/Cargo.toml @@ -10,7 +10,7 @@ readme = "README.md" homepage = "https://substrate.io" [dependencies] -finality-grandpa = { version = "0.16.1", features = ["derive-codec"] } +finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.16" jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } log = "0.4.8" diff --git a/client/consensus/grandpa/src/communication/mod.rs b/client/consensus/grandpa/src/communication/mod.rs index 3896bc36031ba..9d90035d71cbb 100644 --- a/client/consensus/grandpa/src/communication/mod.rs +++ b/client/consensus/grandpa/src/communication/mod.rs @@ -49,7 +49,7 @@ use parity_scale_codec::{Decode, Encode}; use sc_network::{NetworkBlock, NetworkSyncForkRequest, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; -use sp_keystore::SyncCryptoStorePtr; +use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; use crate::{ @@ -136,7 +136,7 @@ mod benefit { /// A type that ties together our local authority id and a keystore where it is /// available for signing. -pub struct LocalIdKeystore((AuthorityId, SyncCryptoStorePtr)); +pub struct LocalIdKeystore((AuthorityId, KeystorePtr)); impl LocalIdKeystore { /// Returns a reference to our local authority id. @@ -145,13 +145,13 @@ impl LocalIdKeystore { } /// Returns a reference to the keystore. - fn keystore(&self) -> SyncCryptoStorePtr { + fn keystore(&self) -> KeystorePtr { (self.0).1.clone() } } -impl From<(AuthorityId, SyncCryptoStorePtr)> for LocalIdKeystore { - fn from(inner: (AuthorityId, SyncCryptoStorePtr)) -> LocalIdKeystore { +impl From<(AuthorityId, KeystorePtr)> for LocalIdKeystore { + fn from(inner: (AuthorityId, KeystorePtr)) -> LocalIdKeystore { LocalIdKeystore(inner) } } @@ -852,9 +852,7 @@ fn check_compact_commit( // check signatures on all contained precommits. 
let mut buf = Vec::new(); - for (i, (precommit, &(ref sig, ref id))) in - msg.precommits.iter().zip(&msg.auth_data).enumerate() - { + for (i, (precommit, (sig, id))) in msg.precommits.iter().zip(&msg.auth_data).enumerate() { use crate::communication::gossip::Misbehavior; use finality_grandpa::Message as GrandpaMessage; diff --git a/client/consensus/grandpa/src/lib.rs b/client/consensus/grandpa/src/lib.rs index 2baa135081c55..90cb8a51fa6f4 100644 --- a/client/consensus/grandpa/src/lib.rs +++ b/client/consensus/grandpa/src/lib.rs @@ -72,14 +72,14 @@ use sc_network::types::ProtocolName; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_api::ProvideRuntimeApi; -use sp_application_crypto::AppKey; +use sp_application_crypto::AppCrypto; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; use sp_consensus::SelectChain; use sp_consensus_grandpa::{ AuthorityList, AuthoritySignature, SetId, CLIENT_LOG_TARGET as LOG_TARGET, }; use sp_core::{crypto::ByteArray, traits::CallContext}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor, Zero}, @@ -228,7 +228,7 @@ pub struct Config { /// Some local identifier of the voter. pub name: Option, /// The keystore that manages the keys of this node. - pub keystore: Option, + pub keystore: Option, /// TelemetryHandle instance. pub telemetry: Option, /// Chain specific GRANDPA protocol name. See [`crate::protocol_standard_name`]. @@ -623,7 +623,7 @@ fn global_communication( voters: &Arc>, client: Arc, network: &NetworkBridge, - keystore: Option<&SyncCryptoStorePtr>, + keystore: Option<&KeystorePtr>, metrics: Option, ) -> ( impl Stream< @@ -1136,14 +1136,12 @@ where /// available. 
fn local_authority_id( voters: &VoterSet, - keystore: Option<&SyncCryptoStorePtr>, + keystore: Option<&KeystorePtr>, ) -> Option { keystore.and_then(|keystore| { voters .iter() - .find(|(p, _)| { - SyncCryptoStore::has_keys(&**keystore, &[(p.to_raw_vec(), AuthorityId::ID)]) - }) + .find(|(p, _)| keystore.has_keys(&[(p.to_raw_vec(), AuthorityId::ID)])) .map(|(p, _)| p.clone()) }) } diff --git a/client/consensus/grandpa/src/observer.rs b/client/consensus/grandpa/src/observer.rs index 53672c1f02225..8541baa822bb4 100644 --- a/client/consensus/grandpa/src/observer.rs +++ b/client/consensus/grandpa/src/observer.rs @@ -33,7 +33,7 @@ use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; use sp_consensus::SelectChain; use sp_consensus_grandpa::AuthorityId; -use sp_keystore::SyncCryptoStorePtr; +use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, NumberFor}; use crate::{ @@ -220,7 +220,7 @@ struct ObserverWork, S: SyncingT> { client: Arc, network: NetworkBridge, persistent_data: PersistentData, - keystore: Option, + keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, justification_sender: Option>, telemetry: Option, @@ -240,7 +240,7 @@ where client: Arc, network: NetworkBridge, persistent_data: PersistentData, - keystore: Option, + keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, justification_sender: Option>, telemetry: Option, diff --git a/client/consensus/grandpa/src/tests.rs b/client/consensus/grandpa/src/tests.rs index f5e5c45e74207..7a3f862d3602b 100644 --- a/client/consensus/grandpa/src/tests.rs +++ b/client/consensus/grandpa/src/tests.rs @@ -40,7 +40,7 @@ use sp_consensus_grandpa::{ }; use sp_core::H256; use sp_keyring::Ed25519Keyring; -use sp_keystore::{testing::KeyStore as TestKeyStore, SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; use sp_runtime::{ codec::Encode, generic::{BlockId, DigestItem}, @@ -280,11 +280,12 @@ fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() } -fn create_keystore(authority: Ed25519Keyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(TestKeyStore::new()); - SyncCryptoStore::ed25519_generate_new(&*keystore, GRANDPA, Some(&authority.to_seed())) - .expect("Creates authority key"); +fn create_keystore(authority: Ed25519Keyring) -> KeystorePtr { + let keystore = MemoryKeystore::new(); keystore + .ed25519_generate_new(GRANDPA, Some(&authority.to_seed())) + .expect("Creates authority key"); + keystore.into() } async fn run_until_complete(future: impl Future + Unpin, net: &Arc>) { @@ -1376,7 +1377,7 @@ type TestEnvironment = fn test_environment_with_select_chain( link: &TestLinkHalf, - keystore: Option, + keystore: Option, network_service: N, sync_service: S, select_chain: SC, @@ -1428,7 +1429,7 @@ where fn test_environment( link: &TestLinkHalf, - keystore: Option, + keystore: Option, network_service: N, sync_service: S, voting_rule: VR, diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 3d21fd73d95ae..2485bd603e785 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -29,7 +29,7 @@ use sc_consensus_babe::{ use sc_consensus_epochs::{ descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, }; -use sp_keystore::SyncCryptoStorePtr; +use sp_keystore::KeystorePtr; use std::{marker::PhantomData, 
sync::Arc}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; @@ -53,7 +53,7 @@ use sp_timestamp::TimestampInherentData; /// Intended for use with BABE runtimes. pub struct BabeConsensusDataProvider { /// shared reference to keystore - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, /// Shared reference to the client. client: Arc, @@ -143,7 +143,7 @@ where { pub fn new( client: Arc, - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, epoch_changes: SharedEpochChanges, authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, ) -> Result { diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 4adca0b56700e..0f208a364c0c0 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -42,7 +42,7 @@ use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_core::sr25519; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{Keystore, KeystorePtr}; use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; use sp_ver::RandomSeedInherentDataProvider; use std::{ @@ -82,14 +82,14 @@ pub trait SlotWorker { } async fn inject_inherents<'a, B: BlockT>( - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, public: &'a sr25519::Public, slot_info: &'a SlotInfo, in_data: &'a mut InherentData, ) -> Result<(), sp_consensus::Error> { let prev_seed = slot_info.chain_head.seed(); - let seed = sp_ver::calculate_next_seed::(&(*keystore), public, prev_seed) + let seed = sp_ver::calculate_next_seed::(&(*keystore), public, prev_seed) .ok_or(sp_consensus::Error::StateUnavailable(String::from("signing seed failure")))?; RandomSeedInherentDataProvider(seed) @@ -179,7 +179,7 @@ pub trait SimpleSlotWorker { body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - epoch: Self::AuxData, + aux_data: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, @@ -239,7 +239,6 @@ pub trait SimpleSlotWorker { let key = self.get_key(&claim); inject_inherents(keystore, &key, &slot_info, &mut inherent_data).await.ok()?; - let proposing_remaining_duration = end_proposing_at.saturating_duration_since(Instant::now()); let logs = self.pre_digest_data(slot, claim); @@ -252,7 +251,7 @@ pub trait SimpleSlotWorker { inherent_data, sp_runtime::generic::Digest { logs }, proposing_remaining_duration.mul_f32(0.98), - None, + slot_info.block_size_limit, ) .map_err(|e| sp_consensus::Error::ClientImport(e.to_string())); @@ -495,7 +494,7 @@ pub trait SimpleSlotWorker { } /// keystore handle - fn keystore(&self) -> SyncCryptoStorePtr; + fn keystore(&self) -> KeystorePtr; } /// A type that implements [`SlotWorker`] for a type that implements [`SimpleSlotWorker`]. 
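Taken together, the keystore changes above replace the removed `SyncCryptoStore`/`SyncCryptoStorePtr` free-function style with plain `Keystore` trait methods behind a `KeystorePtr`. A minimal sketch of the new call style, assuming a GRANDPA ed25519 authority key (crate setup and error handling elided):

use sp_application_crypto::AppCrypto;
use sp_consensus_grandpa::AuthorityId;
use sp_core::crypto::{key_types::GRANDPA, ByteArray};
use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr};

fn main() {
    // Generate a key with a trait method on the keystore itself...
    let store = MemoryKeystore::new();
    let public = store
        .ed25519_generate_new(GRANDPA, Some("//Alice"))
        .expect("generates an ed25519 authority key");
    // ...then hand out the shared pointer used by the voter components.
    let keystore: KeystorePtr = store.into();

    // Checking for a local authority key is now a plain method call,
    // mirroring `local_authority_id` in the GRANDPA diff above.
    let id: AuthorityId = public.into();
    assert!(keystore.has_keys(&[(id.to_raw_vec(), AuthorityId::ID)]));
}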
diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index c96e5c7405e74..cd3a73dd4dab4 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -19,10 +19,10 @@ codec = { package = "parity-scale-codec", version = "3.2.2", features = [ hash-db = "0.16.0" kvdb = "0.13.0" kvdb-memorydb = "0.13.0" -kvdb-rocksdb = { version = "0.17.0", optional = true } +kvdb-rocksdb = { version = "0.18.0", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" -parity-db = "0.4.4" +parity-db = "0.4.6" parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } @@ -37,7 +37,7 @@ sp-trie = { version = "7.0.0", path = "../../primitives/trie" } [dev-dependencies] criterion = "0.4.0" -kvdb-rocksdb = "0.17.0" +kvdb-rocksdb = "0.18.0" rand = "0.8.5" tempfile = "3.1.0" quickcheck = { version = "1.0.3", default-features = false } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 4a8b0fe7cb906..c62b9ce0e7a61 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -42,7 +42,7 @@ mod upgrade; mod utils; use linked_hash_map::LinkedHashMap; -use log::{debug, info, trace, warn}; +use log::{debug, trace, warn}; use parking_lot::{Mutex, RwLock}; use std::{ collections::{HashMap, HashSet}, @@ -653,7 +653,6 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha if let Some(result) = cache.get_refresh(&hash) { return Ok(result.clone()) } - info!("HASH inside header function {}", hash); let header = utils::read_header( &*self.db, columns::KEY_LOOKUP, @@ -3536,50 +3535,6 @@ pub(crate) mod tests { assert_eq!(Some(vec![0x3a.into()]), bc.body(block_3a).unwrap()); } - #[test] - fn prune_blocks_on_finalize_and_reorg() { - // 0 - 1b - // \ - 1a - 2a - 3a - // \ - 2b - - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(10), 10); - - let make_block = |index, parent, val: u64| { - insert_block(&backend, index, parent, None, H256::random(), vec![val.into()], None) - .unwrap() - }; - - let block_0 = make_block(0, Default::default(), 0x00); - let block_1a = make_block(1, block_0, 0x1a); - let block_1b = make_block(1, block_0, 0x1b); - let block_2a = make_block(2, block_1a, 0x2a); - let block_2b = make_block(2, block_1a, 0x2b); - let block_3a = make_block(3, block_2a, 0x3a); - - // Make sure 1b is head - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_0).unwrap(); - op.mark_head(block_1b).unwrap(); - backend.commit_operation(op).unwrap(); - - // Finalize 3a - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_0).unwrap(); - op.mark_head(block_3a).unwrap(); - op.mark_finalized(block_1a, None).unwrap(); - op.mark_finalized(block_2a, None).unwrap(); - op.mark_finalized(block_3a, None).unwrap(); - backend.commit_operation(op).unwrap(); - - let bc = backend.blockchain(); - assert_eq!(None, bc.body(block_1b).unwrap()); - assert_eq!(None, bc.body(block_2b).unwrap()); - assert_eq!(Some(vec![0x00.into()]), bc.body(block_0).unwrap()); - assert_eq!(Some(vec![0x1a.into()]), bc.body(block_1a).unwrap()); - assert_eq!(Some(vec![0x2a.into()]), bc.body(block_2a).unwrap()); - assert_eq!(Some(vec![0x3a.into()]), bc.body(block_3a).unwrap()); - } - #[test] fn indexed_data_block_body() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); diff --git a/client/db/src/pinned_blocks_cache.rs b/client/db/src/pinned_blocks_cache.rs index f50a7081df6a8..7b346b4631eee 100644 --- 
a/client/db/src/pinned_blocks_cache.rs +++ b/client/db/src/pinned_blocks_cache.rs @@ -149,8 +149,9 @@ impl PinnedBlocksCache { self.cache.len() ); }, - None => - log::warn!(target: LOG_TARGET, "Unable to bump reference count. hash = {}", hash), + None => { + log::warn!(target: LOG_TARGET, "Unable to bump reference count. hash = {}", hash) + }, }; } diff --git a/client/executor/benches/bench.rs b/client/executor/benches/bench.rs index 10425ea461c21..772898b8c76b3 100644 --- a/client/executor/benches/bench.rs +++ b/client/executor/benches/bench.rs @@ -21,7 +21,7 @@ use codec::Encode; use sc_executor_common::{ runtime_blob::RuntimeBlob, - wasm_runtime::{HeapAllocStrategy, WasmInstance, WasmModule}, + wasm_runtime::{WasmInstance, WasmModule, DEFAULT_HEAP_ALLOC_STRATEGY}, }; use sc_executor_wasmtime::InstantiationStrategy; use sc_runtime_test::wasm_binary_unwrap as test_runtime; @@ -48,30 +48,33 @@ fn initialize( _tmpdir: &mut Option, runtime: &[u8], method: Method, -) -> Arc { +) -> Box { let blob = RuntimeBlob::uncompress_if_needed(runtime).unwrap(); let host_functions = sp_io::SubstrateHostFunctions::host_functions(); - let extra_pages = 2048; let allow_missing_func_imports = true; match method { Method::Interpreted => sc_executor_wasmi::create_runtime( blob, - HeapAllocStrategy::Static { extra_pages }, + DEFAULT_HEAP_ALLOC_STRATEGY, host_functions, allow_missing_func_imports, ) - .map(|runtime| -> Arc { Arc::new(runtime) }), + .map(|runtime| -> Box { Box::new(runtime) }), Method::Compiled { instantiation_strategy, precompile } => { let config = sc_executor_wasmtime::Config { allow_missing_func_imports, cache_path: None, semantics: sc_executor_wasmtime::Semantics { - heap_alloc_strategy: HeapAllocStrategy::Static { extra_pages }, + heap_alloc_strategy: DEFAULT_HEAP_ALLOC_STRATEGY, instantiation_strategy, deterministic_stack_limit: None, canonicalize_nans: false, parallel_compilation: true, + wasm_multi_value: false, + wasm_bulk_memory: false, + wasm_reference_types: false, + wasm_simd: false, }, }; @@ -95,7 +98,7 @@ fn initialize( } else { sc_executor_wasmtime::create_runtime::(blob, config) } - .map(|runtime| -> Arc { Arc::new(runtime) }) + .map(|runtime| -> Box { Box::new(runtime) }) }, } .unwrap() diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index c47164a60655f..2dfe0bf02df2f 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -104,6 +104,9 @@ pub enum Error { #[error("Execution aborted due to trap: {0}")] AbortedDueToTrap(MessageWithBacktrace), + + #[error("Output exceeds bounds of wasm memory")] + OutputExceedsBounds, } impl wasmi::HostError for Error {} @@ -153,7 +156,7 @@ pub enum WasmError { Instantiation(String), /// Other error happenend. - #[error("{0}")] + #[error("Other error happened while constructing the runtime: {0}")] Other(String), } diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index e3db7e52fc7e7..5dac77e59fa74 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -23,6 +23,13 @@ use sp_wasm_interface::Value; pub use sc_allocator::AllocationStats; +/// Default heap allocation strategy. +pub const DEFAULT_HEAP_ALLOC_STRATEGY: HeapAllocStrategy = + HeapAllocStrategy::Static { extra_pages: DEFAULT_HEAP_ALLOC_PAGES }; + +/// Default heap allocation pages. 
+pub const DEFAULT_HEAP_ALLOC_PAGES: u32 = 2048; + /// A method to be used to find the entrypoint when calling into the runtime /// /// Contains variants on how to resolve wasm function that will be invoked. diff --git a/client/executor/src/native_executor.rs b/client/executor/src/executor.rs similarity index 94% rename from client/executor/src/native_executor.rs rename to client/executor/src/executor.rs index c72cf3c9c91df..a3717f4d29002 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/executor.rs @@ -32,16 +32,14 @@ use std::{ use codec::Encode; use sc_executor_common::{ runtime_blob::RuntimeBlob, - wasm_runtime::{AllocationStats, HeapAllocStrategy, WasmInstance, WasmModule}, + wasm_runtime::{ + AllocationStats, HeapAllocStrategy, WasmInstance, WasmModule, DEFAULT_HEAP_ALLOC_STRATEGY, + }, }; use sp_core::traits::{CallContext, CodeExecutor, Externalities, RuntimeCode}; use sp_version::{GetNativeVersion, NativeVersion, RuntimeVersion}; use sp_wasm_interface::{ExtendedHostFunctions, HostFunctions}; -/// Default heap allocation strategy. -const DEFAULT_HEAP_ALLOC_STRATEGY: HeapAllocStrategy = - HeapAllocStrategy::Static { extra_pages: 2048 }; - /// Set up the externalities and safe calling environment to execute runtime calls. /// /// If the inner closure panics, it will be caught and return an error. @@ -100,10 +98,10 @@ impl WasmExecutorBuilder { /// Create a new instance of `Self` /// /// - `method`: The wasm execution method that should be used by the executor. - pub fn new(method: WasmExecutionMethod) -> Self { + pub fn new() -> Self { Self { _phantom: PhantomData, - method, + method: WasmExecutionMethod::default(), onchain_heap_alloc_strategy: None, offchain_heap_alloc_strategy: None, max_runtime_instances: 2, @@ -113,6 +111,12 @@ impl WasmExecutorBuilder { } } + /// Create the wasm executor with execution method that should be used by the executor. + pub fn with_execution_method(mut self, method: WasmExecutionMethod) -> Self { + self.method = method; + self + } + /// Create the wasm executor with the given number of `heap_alloc_strategy` for onchain runtime /// calls. pub fn with_onchain_heap_alloc_strategy( @@ -256,6 +260,7 @@ where /// compiled execution method is used. /// /// `runtime_cache_size` - The capacity of runtime cache. + #[deprecated(note = "use `Self::builder` method instead of it")] pub fn new( method: WasmExecutionMethod, default_heap_pages: Option, @@ -283,11 +288,12 @@ where } /// Instantiate a builder for creating an instance of `Self`. - pub fn builder(method: WasmExecutionMethod) -> WasmExecutorBuilder { - WasmExecutorBuilder::new(method) + pub fn builder() -> WasmExecutorBuilder { + WasmExecutorBuilder::new() } /// Ignore missing function imports if set true. + #[deprecated(note = "use `Self::builder` method instead of it")] pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { self.allow_missing_host_functions = allow_missing_host_functions } @@ -314,7 +320,7 @@ where ) -> Result where F: FnOnce( - AssertUnwindSafe<&Arc>, + AssertUnwindSafe<&dyn WasmModule>, AssertUnwindSafe<&mut dyn WasmInstance>, Option<&RuntimeVersion>, AssertUnwindSafe<&mut dyn Externalities>, @@ -539,6 +545,7 @@ pub struct NativeElseWasmExecutor { } impl NativeElseWasmExecutor { + /// /// Create new instance. /// /// # Parameters @@ -553,19 +560,23 @@ impl NativeElseWasmExecutor { /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. 
/// /// `runtime_cache_size` - The capacity of runtime cache. + #[deprecated(note = "use `Self::new_with_wasm_executor` method instead of it")] pub fn new( fallback_method: WasmExecutionMethod, default_heap_pages: Option, max_runtime_instances: usize, runtime_cache_size: u8, ) -> Self { - let wasm = WasmExecutor::new( - fallback_method, - default_heap_pages, - max_runtime_instances, - None, - runtime_cache_size, - ); + let heap_pages = default_heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| { + HeapAllocStrategy::Static { extra_pages: h as _ } + }); + let wasm = WasmExecutor::builder() + .with_execution_method(fallback_method) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .with_max_runtime_instances(max_runtime_instances) + .with_runtime_cache_size(runtime_cache_size) + .build(); NativeElseWasmExecutor { native_version: D::native_version(), wasm } } @@ -580,6 +591,7 @@ impl NativeElseWasmExecutor { } /// Ignore missing function imports if set true. + #[deprecated(note = "use `Self::new_with_wasm_executor` method instead of it")] pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { self.wasm.allow_missing_host_functions = allow_missing_host_functions } @@ -714,11 +726,8 @@ mod tests { #[test] fn native_executor_registers_custom_interface() { - let executor = NativeElseWasmExecutor::::new( - WasmExecutionMethod::Interpreted, - None, - 8, - 2, + let executor = NativeElseWasmExecutor::::new_with_wasm_executor( + WasmExecutor::builder().build(), ); fn extract_host_functions( diff --git a/client/executor/src/integration_tests/linux.rs b/client/executor/src/integration_tests/linux.rs index 38d75da4eb7e8..434cb69bfdd32 100644 --- a/client/executor/src/integration_tests/linux.rs +++ b/client/executor/src/integration_tests/linux.rs @@ -21,7 +21,7 @@ use super::mk_test_runtime; use crate::WasmExecutionMethod; use codec::Encode as _; -use sc_executor_common::wasm_runtime::HeapAllocStrategy; +use sc_executor_common::wasm_runtime::DEFAULT_HEAP_ALLOC_STRATEGY; mod smaps; @@ -74,7 +74,7 @@ fn memory_consumption(wasm_method: WasmExecutionMethod) { // For that we make a series of runtime calls, probing the RSS for the VMA matching the linear // memory. After the call we expect RSS to be equal to 0. 
- let runtime = mk_test_runtime(wasm_method, HeapAllocStrategy::Static { extra_pages: 1024 }); + let runtime = mk_test_runtime(wasm_method, DEFAULT_HEAP_ALLOC_STRATEGY); let mut instance = runtime.new_instance().unwrap(); let heap_base = instance diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 066b1497fb6ec..b65aeb8d01109 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -22,9 +22,9 @@ mod linux; use assert_matches::assert_matches; use codec::{Decode, Encode}; use sc_executor_common::{ - error::{Error, WasmError}, + error::Error, runtime_blob::RuntimeBlob, - wasm_runtime::{HeapAllocStrategy, WasmModule}, + wasm_runtime::{HeapAllocStrategy, WasmModule, DEFAULT_HEAP_ALLOC_STRATEGY}, }; use sc_runtime_test::wasm_binary_unwrap; use sp_core::{ @@ -114,8 +114,10 @@ fn call_in_wasm( execution_method: WasmExecutionMethod, ext: &mut E, ) -> Result, Error> { - let executor = - crate::WasmExecutor::::new(execution_method, Some(1024), 8, None, 2); + let executor = crate::WasmExecutor::::builder() + .with_execution_method(execution_method) + .build(); + executor.uncached_call( RuntimeBlob::uncompress_if_needed(wasm_binary_unwrap()).unwrap(), ext, @@ -446,13 +448,11 @@ test_wasm_execution!(should_trap_when_heap_exhausted); fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - let executor = crate::WasmExecutor::::new( - wasm_method, - Some(17), // `17` is the initial number of pages compiled into the binary. - 8, - None, - 2, - ); + let executor = crate::WasmExecutor::::builder() + .with_execution_method(wasm_method) + // `17` is the initial number of pages compiled into the binary. + .with_onchain_heap_alloc_strategy(HeapAllocStrategy::Static { extra_pages: 17 }) + .build(); let err = executor .uncached_call( @@ -483,7 +483,7 @@ fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { fn mk_test_runtime( wasm_method: WasmExecutionMethod, pages: HeapAllocStrategy, -) -> Arc { +) -> Box { let blob = RuntimeBlob::uncompress_if_needed(wasm_binary_unwrap()) .expect("failed to create a runtime blob out of test runtime"); @@ -560,7 +560,7 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { test_wasm_execution!(interpreted_only heap_is_reset_between_calls); fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { - let runtime = mk_test_runtime(wasm_method, HeapAllocStrategy::Static { extra_pages: 1024 }); + let runtime = mk_test_runtime(wasm_method, DEFAULT_HEAP_ALLOC_STRATEGY); let mut instance = runtime.new_instance().unwrap(); let heap_base = instance @@ -579,13 +579,11 @@ fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { test_wasm_execution!(parallel_execution); fn parallel_execution(wasm_method: WasmExecutionMethod) { - let executor = std::sync::Arc::new(crate::WasmExecutor::::new( - wasm_method, - Some(1024), - 8, - None, - 2, - )); + let executor = Arc::new( + crate::WasmExecutor::::builder() + .with_execution_method(wasm_method) + .build(), + ); let threads: Vec<_> = (0..8) .map(|_| { let executor = executor.clone(); @@ -819,9 +817,8 @@ fn return_huge_len(wasm_method: WasmExecutionMethod) { Error::Runtime => { assert_matches!(wasm_method, WasmExecutionMethod::Interpreted); }, - Error::RuntimeConstruction(WasmError::Other(error)) => { + Error::OutputExceedsBounds => { assert_matches!(wasm_method, WasmExecutionMethod::Compiled { .. 
}); - assert_eq!(error, "output exceeds bounds of wasm memory"); }, error => panic!("unexpected error: {:?}", error), } @@ -849,9 +846,8 @@ fn return_max_memory_offset_plus_one(wasm_method: WasmExecutionMethod) { Error::Runtime => { assert_matches!(wasm_method, WasmExecutionMethod::Interpreted); }, - Error::RuntimeConstruction(WasmError::Other(error)) => { + Error::OutputExceedsBounds => { assert_matches!(wasm_method, WasmExecutionMethod::Compiled { .. }); - assert_eq!(error, "output exceeds bounds of wasm memory"); }, error => panic!("unexpected error: {:?}", error), } @@ -866,9 +862,8 @@ fn return_overflow(wasm_method: WasmExecutionMethod) { Error::Runtime => { assert_matches!(wasm_method, WasmExecutionMethod::Interpreted); }, - Error::RuntimeConstruction(WasmError::Other(error)) => { + Error::OutputExceedsBounds => { assert_matches!(wasm_method, WasmExecutionMethod::Compiled { .. }); - assert_eq!(error, "output exceeds bounds of wasm memory"); }, error => panic!("unexpected error: {:?}", error), } diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index e5bae474e9e25..42e7dc7d16bd8 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -32,24 +32,29 @@ #![recursion_limit = "128"] #[macro_use] -mod native_executor; +mod executor; #[cfg(test)] mod integration_tests; mod wasm_runtime; -pub use codec::Codec; -pub use native_executor::{ - with_externalities_safe, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor, +pub use self::{ + executor::{ + with_externalities_safe, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor, + }, + wasm_runtime::{read_embedded_version, WasmExecutionMethod}, }; +pub use codec::Codec; #[doc(hidden)] pub use sp_core::traits::Externalities; pub use sp_version::{NativeVersion, RuntimeVersion}; #[doc(hidden)] pub use sp_wasm_interface; -pub use wasm_runtime::{read_embedded_version, WasmExecutionMethod}; pub use wasmi; -pub use sc_executor_common::error; +pub use sc_executor_common::{ + error, + wasm_runtime::{HeapAllocStrategy, DEFAULT_HEAP_ALLOC_PAGES, DEFAULT_HEAP_ALLOC_STRATEGY}, +}; pub use sc_executor_wasmtime::InstantiationStrategy as WasmtimeInstantiationStrategy; /// Extracts the runtime version of a given runtime code. @@ -74,13 +79,7 @@ mod tests { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let executor = WasmExecutor::::new( - WasmExecutionMethod::Interpreted, - Some(8), - 8, - None, - 2, - ); + let executor = WasmExecutor::::builder().build(); let res = executor .uncached_call( RuntimeBlob::uncompress_if_needed(wasm_binary_unwrap()).unwrap(), diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 254380dbb3693..41a095081f8d1 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -71,14 +71,14 @@ struct VersionedRuntimeId { /// A Wasm runtime object along with its cached runtime version. struct VersionedRuntime { /// Shared runtime that can spawn instances. - module: Arc, + module: Box, /// Runtime version according to `Core_version` if any. version: Option, // TODO: Remove this once the legacy instance reuse instantiation strategy // for `wasmtime` is gone, as this only makes sense with that particular strategy. /// Cached instance pool. 
- instances: Arc>>>>, + instances: Vec>>>, } impl VersionedRuntime { @@ -86,7 +86,7 @@ impl VersionedRuntime { fn with_instance(&self, ext: &mut dyn Externalities, f: F) -> Result where F: FnOnce( - &Arc, + &dyn WasmModule, &mut dyn WasmInstance, Option<&RuntimeVersion>, &mut dyn Externalities, @@ -106,7 +106,7 @@ impl VersionedRuntime { .map(|r| Ok((r, false))) .unwrap_or_else(|| self.module.new_instance().map(|i| (i, true)))?; - let result = f(&self.module, &mut *instance, self.version.as_ref(), ext); + let result = f(&*self.module, &mut *instance, self.version.as_ref(), ext); if let Err(e) = &result { if new_inst { tracing::warn!( @@ -142,7 +142,7 @@ impl VersionedRuntime { // Allocate a new instance let mut instance = self.module.new_instance()?; - f(&self.module, &mut *instance, self.version.as_ref(), ext) + f(&*self.module, &mut *instance, self.version.as_ref(), ext) }, } } @@ -228,7 +228,7 @@ impl RuntimeCache { where H: HostFunctions, F: FnOnce( - &Arc, + &dyn WasmModule, &mut dyn WasmInstance, Option<&RuntimeVersion>, &mut dyn Externalities, @@ -294,7 +294,7 @@ pub fn create_wasm_runtime_with_code( blob: RuntimeBlob, allow_missing_func_imports: bool, cache_path: Option<&Path>, -) -> Result, WasmError> +) -> Result, WasmError> where H: HostFunctions, { @@ -312,7 +312,7 @@ where H::host_functions(), allow_missing_func_imports, ) - .map(|runtime| -> Arc { Arc::new(runtime) }) + .map(|runtime| -> Box { Box::new(runtime) }) }, WasmExecutionMethod::Compiled { instantiation_strategy } => sc_executor_wasmtime::create_runtime::( @@ -326,10 +326,14 @@ where deterministic_stack_limit: None, canonicalize_nans: false, parallel_compilation: true, + wasm_multi_value: false, + wasm_bulk_memory: false, + wasm_reference_types: false, + wasm_simd: false, }, }, ) - .map(|runtime| -> Arc { Arc::new(runtime) }), + .map(|runtime| -> Box { Box::new(runtime) }), } } @@ -430,7 +434,7 @@ where // The following unwind safety assertion is OK because if the method call panics, the // runtime will be dropped. let runtime = AssertUnwindSafe(runtime.as_ref()); - crate::native_executor::with_externalities_safe(&mut **ext, move || { + crate::executor::with_externalities_safe(&mut **ext, move || { runtime.new_instance()?.call("Core_version".into(), &[]) }) .map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? @@ -444,7 +448,7 @@ where let mut instances = Vec::with_capacity(max_instances); instances.resize_with(max_instances, || Mutex::new(None)); - Ok(VersionedRuntime { module: runtime, version, instances: Arc::new(instances) }) + Ok(VersionedRuntime { module: runtime, version, instances }) } #[cfg(test)] diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index bffccd12badfb..58a1ffde07898 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -19,7 +19,7 @@ libc = "0.2.121" # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! 
-wasmtime = { version = "6.0.1", default-features = false, features = [ +wasmtime = { version = "6.0.2", default-features = false, features = [ "cache", "cranelift", "jitdump", @@ -48,4 +48,4 @@ sp-io = { version = "7.0.0", path = "../../../primitives/io" } tempfile = "3.3.0" paste = "1.0" codec = { package = "parity-scale-codec", version = "3.2.2" } -cargo_metadata = "0.15.2" +cargo_metadata = "0.15.4" diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index de7a2ea0e73df..c9a2c83e0493c 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -26,7 +26,7 @@ use crate::{ use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; use sc_executor_common::{ - error::{Result, WasmError}, + error::{Error, Result, WasmError}, runtime_blob::{ self, DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob, }, @@ -325,10 +325,10 @@ fn common_config(semantics: &Semantics) -> std::result::Result InformantDisplay { let best_number = info.chain.best_number; let best_hash = info.chain.best_hash; let finalized_number = info.chain.finalized_number; - let num_connected_peers = net_status.num_connected_peers; + let num_connected_peers = sync_status.num_connected_peers; let speed = speed::(best_number, self.last_number, self.last_update); let total_bytes_inbound = net_status.total_bytes_inbound; let total_bytes_outbound = net_status.total_bytes_outbound; diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index cd55739a9fac3..3551623f332a2 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -17,22 +17,15 @@ // //! Local keystore implementation -use async_trait::async_trait; use parking_lot::RwLock; -use sp_application_crypto::{ecdsa, ed25519, sr25519, AppKey, AppPair, IsWrappedBy}; +use sp_application_crypto::{AppCrypto, AppPair, IsWrappedBy}; use sp_core::{ - crypto::{ - ByteArray, CryptoTypePublicPair, ExposeSecret, KeyTypeId, Pair as PairT, SecretString, - }, - sr25519::{Pair as Sr25519Pair, Public as Sr25519Public}, - Encode, -}; -use sp_keystore::{ - vrf::{make_transcript, VRFSignature, VRFTranscriptData}, - CryptoStore, Error as TraitError, SyncCryptoStore, SyncCryptoStorePtr, + crypto::{ByteArray, ExposeSecret, KeyTypeId, Pair as CorePair, SecretString, VrfSigner}, + ecdsa, ed25519, sr25519, }; +use sp_keystore::{Error as TraitError, Keystore, KeystorePtr}; use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, fs::{self, File}, io::Write, path::PathBuf, @@ -63,303 +56,181 @@ impl LocalKeystore { /// `Err(_)` when something failed. 
pub fn key_pair( &self, - public: &::Public, + public: &::Public, ) -> Result> { self.0.read().key_pair::(public) } -} - -#[async_trait] -impl CryptoStore for LocalKeystore { - async fn keys( - &self, - id: KeyTypeId, - ) -> std::result::Result, TraitError> { - SyncCryptoStore::keys(self, id) - } - - async fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { - SyncCryptoStore::sr25519_public_keys(self, id) - } - - async fn sr25519_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> std::result::Result { - SyncCryptoStore::sr25519_generate_new(self, id, seed) - } - - async fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { - SyncCryptoStore::ed25519_public_keys(self, id) - } - - async fn ed25519_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> std::result::Result { - SyncCryptoStore::ed25519_generate_new(self, id, seed) - } - async fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec { - SyncCryptoStore::ecdsa_public_keys(self, id) + fn public_keys(&self, key_type: KeyTypeId) -> Vec { + self.0 + .read() + .raw_public_keys(key_type) + .map(|v| { + v.into_iter().filter_map(|k| T::Public::from_slice(k.as_slice()).ok()).collect() + }) + .unwrap_or_default() } - async fn ecdsa_generate_new( + fn generate_new( &self, - id: KeyTypeId, + key_type: KeyTypeId, seed: Option<&str>, - ) -> std::result::Result { - SyncCryptoStore::ecdsa_generate_new(self, id, seed) - } - - async fn insert_unknown( - &self, - id: KeyTypeId, - suri: &str, - public: &[u8], - ) -> std::result::Result<(), ()> { - SyncCryptoStore::insert_unknown(self, id, suri, public) - } - - async fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - SyncCryptoStore::has_keys(self, public_keys) - } - - async fn supported_keys( - &self, - id: KeyTypeId, - keys: Vec, - ) -> std::result::Result, TraitError> { - SyncCryptoStore::supported_keys(self, id, keys) + ) -> std::result::Result { + let pair = match seed { + Some(seed) => self.0.write().insert_ephemeral_from_seed_by_type::(seed, key_type), + None => self.0.write().generate_by_type::(key_type), + } + .map_err(|e| -> TraitError { e.into() })?; + Ok(pair.public()) } - async fn sign_with( + fn sign( &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, + key_type: KeyTypeId, + public: &T::Public, msg: &[u8], - ) -> std::result::Result>, TraitError> { - SyncCryptoStore::sign_with(self, id, key, msg) + ) -> std::result::Result, TraitError> { + let signature = self + .0 + .read() + .key_pair_by_type::(public, key_type)? + .map(|pair| pair.sign(msg)); + Ok(signature) } - async fn sr25519_vrf_sign( + fn vrf_sign( &self, key_type: KeyTypeId, - public: &sr25519::Public, - transcript_data: VRFTranscriptData, - ) -> std::result::Result, TraitError> { - SyncCryptoStore::sr25519_vrf_sign(self, key_type, public, transcript_data) - } - - async fn ecdsa_sign_prehashed( - &self, - id: KeyTypeId, - public: &ecdsa::Public, - msg: &[u8; 32], - ) -> std::result::Result, TraitError> { - SyncCryptoStore::ecdsa_sign_prehashed(self, id, public, msg) + public: &T::Public, + transcript: &T::VrfInput, + ) -> std::result::Result, TraitError> { + let sig = self + .0 + .read() + .key_pair_by_type::(public, key_type)? 
+ .map(|pair| pair.vrf_sign(transcript)); + Ok(sig) } } -impl SyncCryptoStore for LocalKeystore { - fn keys(&self, id: KeyTypeId) -> std::result::Result, TraitError> { - let raw_keys = self.0.read().raw_public_keys(id)?; - Ok(raw_keys.into_iter().fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k)); - v - })) +impl Keystore for LocalKeystore { + fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) } - fn supported_keys( + /// Generate a new pair compatible with the 'ed25519' signature scheme. + /// + /// If the `[seed]` is `Some` then the key will be ephemeral and stored in memory. + fn sr25519_generate_new( &self, - id: KeyTypeId, - keys: Vec, - ) -> std::result::Result, TraitError> { - let all_keys = SyncCryptoStore::keys(self, id)?.into_iter().collect::>(); - Ok(keys.into_iter().filter(|key| all_keys.contains(key)).collect::>()) + key_type: KeyTypeId, + seed: Option<&str>, + ) -> std::result::Result { + self.generate_new::(key_type, seed) } - fn sign_with( + fn sr25519_sign( &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, + key_type: KeyTypeId, + public: &sr25519::Public, msg: &[u8], - ) -> std::result::Result>, TraitError> { - match key.0 { - ed25519::CRYPTO_ID => { - let pub_key = ed25519::Public::from_slice(key.1.as_slice()).map_err(|()| { - TraitError::Other("Corrupted public key - Invalid size".into()) - })?; - let key_pair = self - .0 - .read() - .key_pair_by_type::(&pub_key, id) - .map_err(TraitError::from)?; - key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - }, - sr25519::CRYPTO_ID => { - let pub_key = sr25519::Public::from_slice(key.1.as_slice()).map_err(|()| { - TraitError::Other("Corrupted public key - Invalid size".into()) - })?; - let key_pair = self - .0 - .read() - .key_pair_by_type::(&pub_key, id) - .map_err(TraitError::from)?; - key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - }, - ecdsa::CRYPTO_ID => { - let pub_key = ecdsa::Public::from_slice(key.1.as_slice()).map_err(|()| { - TraitError::Other("Corrupted public key - Invalid size".into()) - })?; - let key_pair = self - .0 - .read() - .key_pair_by_type::(&pub_key, id) - .map_err(TraitError::from)?; - key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - }, - _ => Err(TraitError::KeyNotSupported(id)), - } - } - - fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.0 - .read() - .raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .filter_map(|k| sr25519::Public::from_slice(k.as_slice()).ok()) - .collect() - }) - .unwrap_or_default() + ) -> std::result::Result, TraitError> { + self.sign::(key_type, public, msg) } - fn sr25519_generate_new( + fn sr25519_vrf_sign( &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> std::result::Result { - let pair = match seed { - Some(seed) => - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), - None => self.0.write().generate_by_type::(id), - } - .map_err(|e| -> TraitError { e.into() })?; - - Ok(pair.public()) + key_type: KeyTypeId, + public: &sr25519::Public, + transcript: &sr25519::vrf::VrfTranscript, + ) -> std::result::Result, TraitError> { + self.vrf_sign::(key_type, public, transcript) } fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.0 - .read() - .raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .filter_map(|k| ed25519::Public::from_slice(k.as_slice()).ok()) - .collect() - }) - 
.unwrap_or_default() + self.public_keys::(key_type) } + /// Generate a new pair compatible with the 'sr25519' signature scheme. + /// + /// If the `[seed]` is `Some` then the key will be ephemeral and stored in memory. fn ed25519_generate_new( &self, - id: KeyTypeId, + key_type: KeyTypeId, seed: Option<&str>, ) -> std::result::Result { - let pair = match seed { - Some(seed) => - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), - None => self.0.write().generate_by_type::(id), - } - .map_err(|e| -> TraitError { e.into() })?; + self.generate_new::(key_type, seed) + } - Ok(pair.public()) + fn ed25519_sign( + &self, + key_type: KeyTypeId, + public: &ed25519::Public, + msg: &[u8], + ) -> std::result::Result, TraitError> { + self.sign::(key_type, public, msg) } fn ecdsa_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.0 - .read() - .raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .filter_map(|k| ecdsa::Public::from_slice(k.as_slice()).ok()) - .collect() - }) - .unwrap_or_default() + self.public_keys::(key_type) } + /// Generate a new pair compatible with the 'ecdsa' signature scheme. + /// + /// If the `[seed]` is `Some` then the key will be ephemeral and stored in memory. fn ecdsa_generate_new( &self, - id: KeyTypeId, + key_type: KeyTypeId, seed: Option<&str>, ) -> std::result::Result { - let pair = match seed { - Some(seed) => - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), - None => self.0.write().generate_by_type::(id), - } - .map_err(|e| -> TraitError { e.into() })?; - - Ok(pair.public()) - } - - fn insert_unknown( - &self, - key_type: KeyTypeId, - suri: &str, - public: &[u8], - ) -> std::result::Result<(), ()> { - self.0.write().insert_unknown(key_type, suri, public).map_err(|_| ()) + self.generate_new::(key_type, seed) } - fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys - .iter() - .all(|(p, t)| self.0.read().key_phrase_by_type(p, *t).ok().flatten().is_some()) - } - - fn sr25519_vrf_sign( + fn ecdsa_sign( &self, key_type: KeyTypeId, - public: &Sr25519Public, - transcript_data: VRFTranscriptData, - ) -> std::result::Result, TraitError> { - let transcript = make_transcript(transcript_data); - let pair = self.0.read().key_pair_by_type::(public, key_type)?; - - if let Some(pair) = pair { - let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(Some(VRFSignature { output: inout.to_output(), proof })) - } else { - Ok(None) - } + public: &ecdsa::Public, + msg: &[u8], + ) -> std::result::Result, TraitError> { + self.sign::(key_type, public, msg) } fn ecdsa_sign_prehashed( &self, - id: KeyTypeId, + key_type: KeyTypeId, public: &ecdsa::Public, msg: &[u8; 32], ) -> std::result::Result, TraitError> { - let pair = self.0.read().key_pair_by_type::(public, id)?; + let sig = self + .0 + .read() + .key_pair_by_type::(public, key_type)? 
+ .map(|pair| pair.sign_prehashed(msg)); + Ok(sig) + } - pair.map(|k| k.sign_prehashed(msg)).map(Ok).transpose() + fn insert( + &self, + key_type: KeyTypeId, + suri: &str, + public: &[u8], + ) -> std::result::Result<(), ()> { + self.0.write().insert(key_type, suri, public).map_err(|_| ()) } -} -impl Into for LocalKeystore { - fn into(self) -> SyncCryptoStorePtr { - Arc::new(self) + fn keys(&self, key_type: KeyTypeId) -> std::result::Result>, TraitError> { + self.0.read().raw_public_keys(key_type).map_err(|e| e.into()) + } + + fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { + public_keys + .iter() + .all(|(p, t)| self.0.read().key_phrase_by_type(p, *t).ok().flatten().is_some()) } } -impl Into> for LocalKeystore { - fn into(self) -> Arc { +impl Into for LocalKeystore { + fn into(self) -> KeystorePtr { Arc::new(self) } } @@ -406,7 +277,12 @@ impl KeystoreInner { /// Insert the given public/private key pair with the given key type. /// /// Does not place it into the file system store. - fn insert_ephemeral_pair(&mut self, pair: &Pair, seed: &str, key_type: KeyTypeId) { + fn insert_ephemeral_pair( + &mut self, + pair: &Pair, + seed: &str, + key_type: KeyTypeId, + ) { let key = (key_type, pair.public().to_raw_vec()); self.additional.insert(key, seed.into()); } @@ -414,7 +290,7 @@ impl KeystoreInner { /// Insert a new key with anonymous crypto. /// /// Places it into the file system store, if a path is configured. - fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { + fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { if let Some(path) = self.key_file_path(public, key_type) { Self::write_to_file(path, suri)?; } @@ -426,7 +302,7 @@ impl KeystoreInner { /// /// Places it into the file system store, if a path is configured. Otherwise insert /// it into the memory cache only. - fn generate_by_type(&mut self, key_type: KeyTypeId) -> Result { + fn generate_by_type(&mut self, key_type: KeyTypeId) -> Result { let (pair, phrase, _) = Pair::generate_with_phrase(self.password()); if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { Self::write_to_file(path, &phrase)?; @@ -455,7 +331,7 @@ impl KeystoreInner { /// Create a new key from seed. /// /// Does not place it into the file system store. - fn insert_ephemeral_from_seed_by_type( + fn insert_ephemeral_from_seed_by_type( &mut self, seed: &str, key_type: KeyTypeId, @@ -487,7 +363,7 @@ impl KeystoreInner { } /// Get a key pair for the given public key and key type. - fn key_pair_by_type( + fn key_pair_by_type( &self, public: &Pair::Public, key_type: KeyTypeId, @@ -519,12 +395,12 @@ impl KeystoreInner { } /// Returns a list of raw public keys filtered by `KeyTypeId` - fn raw_public_keys(&self, id: KeyTypeId) -> Result>> { + fn raw_public_keys(&self, key_type: KeyTypeId) -> Result>> { let mut public_keys: Vec> = self .additional .keys() .into_iter() - .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) + .filter_map(|k| if k.0 == key_type { Some(k.1.clone()) } else { None }) .collect(); if let Some(path) = &self.path { @@ -536,7 +412,7 @@ impl KeystoreInner { if let Some(name) = path.file_name().and_then(|n| n.to_str()) { match array_bytes::hex2bytes(name) { Ok(ref hex) if hex.len() > 4 => { - if hex[0..4] != id.0 { + if hex[0..4] != key_type.0 { continue } let public = hex[4..].to_vec(); @@ -557,7 +433,7 @@ impl KeystoreInner { /// when something failed. 
pub fn key_pair( &self, - public: &::Public, + public: &::Public, ) -> Result> { self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID) .map(|v| v.map(Into::into)) @@ -614,23 +490,14 @@ mod tests { let key: ed25519::AppPair = store.0.write().generate().unwrap(); let key2 = ed25519::Pair::generate().0; - assert!(!SyncCryptoStore::has_keys( - &store, - &[(key2.public().to_vec(), ed25519::AppPublic::ID)] - )); + assert!(!store.has_keys(&[(key2.public().to_vec(), ed25519::AppPublic::ID)])); - assert!(!SyncCryptoStore::has_keys( - &store, - &[ - (key2.public().to_vec(), ed25519::AppPublic::ID), - (key.public().to_raw_vec(), ed25519::AppPublic::ID), - ], - )); + assert!(!store.has_keys(&[ + (key2.public().to_vec(), ed25519::AppPublic::ID), + (key.public().to_raw_vec(), ed25519::AppPublic::ID), + ],)); - assert!(SyncCryptoStore::has_keys( - &store, - &[(key.public().to_raw_vec(), ed25519::AppPublic::ID)] - )); + assert!(store.has_keys(&[(key.public().to_raw_vec(), ed25519::AppPublic::ID)])); } #[test] @@ -723,7 +590,7 @@ mod tests { let key_pair = sr25519::AppPair::from_string(secret_uri, None).expect("Generates key pair"); store - .insert_unknown(SR25519, secret_uri, key_pair.public().as_ref()) + .insert(SR25519, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let store_key_pair = store @@ -742,31 +609,30 @@ mod tests { let file_name = temp_dir.path().join(array_bytes::bytes2hex("", &SR25519.0[..2])); fs::write(file_name, "test").expect("Invalid file is written"); - assert!(SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty()); + assert!(store.sr25519_public_keys(SR25519).is_empty()); } #[test] fn generate_with_seed_is_not_stored() { let temp_dir = TempDir::new().unwrap(); let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); - let _alice_tmp_key = - SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + let _alice_tmp_key = store.sr25519_generate_new(TEST_KEY_TYPE, Some("//Alice")).unwrap(); - assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); + assert_eq!(store.sr25519_public_keys(TEST_KEY_TYPE).len(), 1); drop(store); let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); - assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 0); + assert_eq!(store.sr25519_public_keys(TEST_KEY_TYPE).len(), 0); } #[test] fn generate_can_be_fetched_in_memory() { let store = LocalKeystore::in_memory(); - SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + store.sr25519_generate_new(TEST_KEY_TYPE, Some("//Alice")).unwrap(); - assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); - SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, None).unwrap(); - assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 2); + assert_eq!(store.sr25519_public_keys(TEST_KEY_TYPE).len(), 1); + store.sr25519_generate_new(TEST_KEY_TYPE, None).unwrap(); + assert_eq!(store.sr25519_public_keys(TEST_KEY_TYPE).len(), 2); } #[test] @@ -777,7 +643,7 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); - let public = SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, None).unwrap(); + let public = store.sr25519_generate_new(TEST_KEY_TYPE, None).unwrap(); let path = store.0.read().key_file_path(public.as_ref(), TEST_KEY_TYPE).unwrap(); let permissions = File::open(path).unwrap().metadata().unwrap().permissions(); diff 
--git a/client/network/Cargo.toml b/client/network/Cargo.toml index 90b5ce871ef1a..25823e67b4fcf 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -51,6 +51,11 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +# Force 0.9.2 as snow release to fix the compilation. +# +# When libp2p also enforces this version, we can get rid off this extra dep here. +snow = "0.9.2" +enum-as-inner = "=0.5.1" [dev-dependencies] assert_matches = "1.3" diff --git a/client/network/README.md b/client/network/README.md index d396747241ab1..b9c0e028ffe77 100644 --- a/client/network/README.md +++ b/client/network/README.md @@ -223,7 +223,7 @@ For each peer the sync maintains the number of our common best block with that p whenever peer announce new blocks or our best block advances. This allows to keep track of peers that have new block data and request new information as soon as it is announced. In keep-up mode, we also track peers that announce blocks on all branches and not just the best branch. The sync algorithm tries to be greedy and download -All data that's announced. +all data that's announced. ## Fast sync @@ -262,8 +262,8 @@ After the latest state has been imported the node is fully operational, but is s data. I.e. it is unable to serve bock bodies and headers other than the most recent one. To make sure all nodes have block history available, a background sync process is started that downloads all the missing blocks. It is run in parallel with the keep-up sync and does not interfere with downloading of the recent blocks. -During this download we also import GRANPA justifications for blocks with authority set changes, so that -The warp-synced node has all the data to serve for other nodes nodes that might want to sync from it with +During this download we also import GRANDPA justifications for blocks with authority set changes, so that +the warp-synced node has all the data to serve for other nodes nodes that might want to sync from it with any method. # Usage diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 130f354b70050..b01091ae01641 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -94,6 +94,8 @@ pub struct SyncStatus { pub best_seen_block: Option>, /// Number of peers participating in syncing. pub num_peers: u32, + /// Number of peers known to `SyncingEngine` (both full and light). + pub num_connected_peers: u32, /// Number of blocks queued for import pub queued_blocks: u32, /// State sync status in progress, if any. @@ -422,9 +424,9 @@ pub trait ChainSync: Send { /// /// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to /// import passed header (call `on_block_data`). The network request isn't sent in this case. - fn poll_block_announce_validation<'a>( + fn poll_block_announce_validation( &mut self, - cx: &mut std::task::Context<'a>, + cx: &mut std::task::Context<'_>, ) -> Poll>; /// Call when a peer has disconnected. 
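With `num_connected_peers` added to `SyncStatus` above, consumers such as the informant (updated earlier in this patch) can take the connected-peer count from the syncing subsystem instead of from the network status. The following is a minimal, self-contained sketch of that usage, not the actual informant code; only the `num_connected_peers` field mirrors the real struct, while `Status` and `format_status` are illustrative stand-ins:

// Stand-in for the relevant part of `SyncStatus`.
struct Status {
    /// Number of peers known to the syncing engine (both full and light).
    num_connected_peers: u32,
    /// Number of blocks queued for import.
    queued_blocks: u32,
}

// A display helper in the spirit of the informant: the peer count now comes
// from the sync status rather than from the network status.
fn format_status(sync_status: &Status) -> String {
    format!(
        "{} peers, {} blocks queued for import",
        sync_status.num_connected_peers, sync_status.queued_blocks
    )
}

fn main() {
    let status = Status { num_connected_peers: 7, queued_blocks: 128 };
    println!("{}", format_status(&status));
}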
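The keystore rewrite earlier in this patch replaces the `CryptoStore`/`SyncCryptoStore` pair with a single `Keystore` trait and folds the per-scheme boilerplate into generic helpers (`public_keys`, `generate_new`, `sign`, `vrf_sign`) that the sr25519/ed25519/ecdsa methods simply forward to. A compressed, self-contained sketch of that pattern follows; `Pair`, `MiniKeystore`, and `DemoPair` are illustrative stand-ins rather than the actual `sp_core`/`sp_keystore` types:

use std::collections::HashMap;

// Minimal key-pair abstraction, in the spirit of `sp_core::crypto::Pair`.
trait Pair {
    type Public: Clone + Eq + std::hash::Hash;
    fn public(&self) -> Self::Public;
    fn sign(&self, msg: &[u8]) -> Vec<u8>;
}

struct MiniKeystore<P: Pair> {
    pairs: HashMap<P::Public, P>,
}

impl<P: Pair> MiniKeystore<P> {
    // Generic signing helper: look the pair up by its public key and sign,
    // returning `None` when the key is not present (mirroring the optional
    // signature returned by the real trait's per-scheme wrappers).
    fn sign(&self, public: &P::Public, msg: &[u8]) -> Option<Vec<u8>> {
        self.pairs.get(public).map(|pair| pair.sign(msg))
    }
}

// Toy scheme so the sketch compiles and runs on its own.
struct DemoPair(u8);

impl Pair for DemoPair {
    type Public = u8;
    fn public(&self) -> u8 {
        self.0
    }
    fn sign(&self, msg: &[u8]) -> Vec<u8> {
        msg.iter().map(|b| b ^ self.0).collect()
    }
}

fn main() {
    let pair = DemoPair(0x2a);
    let public = pair.public();
    let store = MiniKeystore { pairs: HashMap::from([(public, pair)]) };
    assert!(store.sign(&public, b"hello").is_some());
    assert!(store.sign(&0x00, b"hello").is_none());
}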
diff --git a/client/network/common/src/sync/message.rs b/client/network/common/src/sync/message.rs index c651660c89c8d..7cdb14172885e 100644 --- a/client/network/common/src/sync/message.rs +++ b/client/network/common/src/sync/message.rs @@ -185,7 +185,7 @@ pub mod generic { pub blocks: Vec>, } - /// Announce a new complete relay chain block on the network. + /// Announce a new complete block on the network. #[derive(Debug, PartialEq, Eq, Clone)] pub struct BlockAnnounce { /// New block header. diff --git a/client/network/common/src/sync/warp.rs b/client/network/common/src/sync/warp.rs index 5f2cacd2eeae5..aef257af4d057 100644 --- a/client/network/common/src/sync/warp.rs +++ b/client/network/common/src/sync/warp.rs @@ -32,7 +32,7 @@ pub struct WarpProofRequest { /// The different types of warp syncing. pub enum WarpSyncParams { - /// Standard warp sync for the relay chain + /// Standard warp sync for the chain. WithProvider(Arc>), /// Skip downloading proofs and wait for a header of the state that should be downloaded. /// @@ -48,7 +48,7 @@ pub enum VerificationResult { Complete(SetId, AuthorityList, Block::Header), } -/// Warp sync backend. Handles retrieveing and verifying warp sync proofs. +/// Warp sync backend. Handles retrieving and verifying warp sync proofs. pub trait WarpSyncProvider: Send + Sync { /// Generate proof starting at given block hash. The proof is accumulated until maximum proof /// size is reached. diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 925a7795d290f..e00bfac79f650 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -22,6 +22,7 @@ //! See the documentation of [`Params`]. pub use crate::{ + protocol::NotificationsSink, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, @@ -31,9 +32,15 @@ pub use crate::{ use codec::Encode; use libp2p::{identity::Keypair, multiaddr, Multiaddr, PeerId}; use prometheus_endpoint::Registry; -pub use sc_network_common::{role::Role, sync::warp::WarpSyncProvider, ExHashT}; +pub use sc_network_common::{ + role::{Role, Roles}, + sync::warp::WarpSyncProvider, + ExHashT, +}; +use sc_utils::mpsc::TracingUnboundedSender; use zeroize::Zeroize; +use sp_runtime::traits::Block as BlockT; use std::{ error::Error, fmt, fs, @@ -44,7 +51,6 @@ use std::{ path::{Path, PathBuf}, pin::Pin, str::{self, FromStr}, - sync::Arc, }; pub use libp2p::{ @@ -560,6 +566,7 @@ pub struct NetworkConfiguration { /// List of request-response protocols that the node supports. pub request_response_protocols: Vec, + /// Configuration for the default set of nodes used for block syncing and transactions. pub default_peers_set: SetConfig, @@ -584,6 +591,9 @@ pub struct NetworkConfiguration { /// Maximum number of peers to ask the same blocks in parallel. pub max_parallel_downloads: u32, + /// Maximum number of blocks per request. + pub max_blocks_per_request: u32, + /// Initial syncing mode. pub sync_mode: SyncMode, @@ -647,6 +657,7 @@ impl NetworkConfiguration { node_name: node_name.into(), transport: TransportConfig::Normal { enable_mdns: false, allow_private_ip: true }, max_parallel_downloads: 5, + max_blocks_per_request: 64, sync_mode: SyncMode::Full, enable_dht_random_walk: true, allow_non_globals_in_dht: false, @@ -688,7 +699,7 @@ impl NetworkConfiguration { } /// Network initialization parameters. -pub struct Params { +pub struct Params { /// Assigned role for our node (full, light, ...). 
pub role: Role, @@ -698,12 +709,12 @@ pub struct Params { /// Network layer configuration. pub network_config: NetworkConfiguration, - /// Client that contains the blockchain. - pub chain: Arc, - /// Legacy name of the protocol to use on the wire. Should be different for each chain. pub protocol_id: ProtocolId, + /// Genesis hash of the chain + pub genesis_hash: Block::Hash, + /// Fork ID to distinguish protocols of different hard forks. Part of the standard protocol /// name on the wire. pub fork_id: Option, @@ -714,6 +725,9 @@ pub struct Params { /// Block announce protocol configuration pub block_announce_config: NonDefaultSetConfig, + /// TX channel for direct communication with `SyncingEngine` and `Protocol`. + pub tx: TracingUnboundedSender>, + /// Request response protocol configurations pub request_response_protocol_configs: Vec, } diff --git a/client/network/src/event.rs b/client/network/src/event.rs index 3ecd8f9311429..975fde0e40a28 100644 --- a/client/network/src/event.rs +++ b/client/network/src/event.rs @@ -19,12 +19,14 @@ //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. -use crate::types::ProtocolName; +use crate::{types::ProtocolName, NotificationsSink}; use bytes::Bytes; +use futures::channel::oneshot; use libp2p::{core::PeerId, kad::record::Key}; -use sc_network_common::role::ObservedRole; +use sc_network_common::{role::ObservedRole, sync::message::BlockAnnouncesHandshake}; +use sp_runtime::traits::Block as BlockT; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -90,3 +92,44 @@ pub enum Event { messages: Vec<(ProtocolName, Bytes)>, }, } + +/// Event sent to `SyncingEngine` +// TODO: remove once `NotificationService` is implemented. +pub enum SyncEvent { + /// Opened a substream with the given node with the given notifications protocol. + /// + /// The protocol is always one of the notification protocols that have been registered. + NotificationStreamOpened { + /// Node we opened the substream with. + remote: PeerId, + /// Received handshake. + received_handshake: BlockAnnouncesHandshake, + /// Notification sink. + sink: NotificationsSink, + /// Channel for reporting accept/reject of the substream. + tx: oneshot::Sender, + }, + + /// Closed a substream with the given node. Always matches a corresponding previous + /// `NotificationStreamOpened` message. + NotificationStreamClosed { + /// Node we closed the substream with. + remote: PeerId, + }, + + /// Notification sink was replaced. + NotificationSinkReplaced { + /// Node we closed the substream with. + remote: PeerId, + /// Notification sink. + sink: NotificationsSink, + }, + + /// Received one or more messages from the given node using the given protocol. + NotificationsReceived { + /// Node we received the message from. + remote: PeerId, + /// Concerned protocol and associated message. 
+ messages: Vec, + }, +} diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index c290f4b94db53..5374ac13435be 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -259,7 +259,7 @@ pub mod request_responses; pub mod types; pub mod utils; -pub use event::{DhtEvent, Event}; +pub use event::{DhtEvent, Event, SyncEvent}; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use request_responses::{IfDisconnected, RequestFailure, RequestResponseConfig}; @@ -278,8 +278,8 @@ pub use service::{ NetworkStatusProvider, NetworkSyncForkRequest, NotificationSender as NotificationSenderT, NotificationSenderError, NotificationSenderReady, }, - DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure, - PublicKey, + DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, NotificationsSink, + OutboundFailure, PublicKey, }; pub use types::ProtocolName; diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 06ca02c0ca8d5..0075e856e7574 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -24,6 +24,7 @@ use crate::{ use bytes::Bytes; use codec::{DecodeAll, Encode}; +use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; use libp2p::{ core::connection::ConnectionId, swarm::{ @@ -35,11 +36,14 @@ use libp2p::{ use log::{debug, error, warn}; use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake}; +use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::Block as BlockT; use std::{ - collections::{HashMap, HashSet, VecDeque}, + collections::{HashMap, HashSet}, + future::Future, iter, + pin::Pin, task::Poll, }; @@ -68,10 +72,11 @@ mod rep { pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); } +type PendingSyncSubstreamValidation = + Pin> + Send>>; + // Lock must always be taken in order declared here. pub struct Protocol { - /// Pending list of messages to return from `poll` as a priority. - pending_messages: VecDeque, /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, /// Handles opening the unique substream and sending and receiving raw messages. @@ -87,6 +92,8 @@ pub struct Protocol { bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, /// Connected peers. 
peers: HashMap, + sync_substream_validations: FuturesUnordered, + tx: TracingUnboundedSender>, _marker: std::marker::PhantomData, } @@ -96,6 +103,7 @@ impl Protocol { roles: Roles, network_config: &config::NetworkConfiguration, block_announces_protocol: config::NonDefaultSetConfig, + tx: TracingUnboundedSender>, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let mut known_addresses = Vec::new(); @@ -171,7 +179,6 @@ impl Protocol { }; let protocol = Self { - pending_messages: VecDeque::new(), peerset_handle: peerset_handle.clone(), behaviour, notification_protocols: iter::once(block_announces_protocol.notifications_protocol) @@ -179,6 +186,8 @@ impl Protocol { .collect(), bad_handshake_substreams: Default::default(), peers: HashMap::new(), + sync_substream_validations: FuturesUnordered::new(), + tx, // TODO: remove when `BlockAnnouncesHandshake` is moved away from `Protocol` _marker: Default::default(), }; @@ -397,8 +406,21 @@ impl NetworkBehaviour for Protocol { cx: &mut std::task::Context, params: &mut impl PollParameters, ) -> Poll> { - if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) + while let Poll::Ready(Some(validation_result)) = + self.sync_substream_validations.poll_next_unpin(cx) + { + match validation_result { + Ok((peer, roles)) => { + self.peers.insert(peer, roles); + }, + Err(peer) => { + log::debug!( + target: "sub-libp2p", + "`SyncingEngine` rejected stream" + ); + self.behaviour.disconnect_peer(&peer, HARDCODED_PEERSETS_SYNC); + }, + } } let event = match self.behaviour.poll(cx, params) { @@ -440,16 +462,29 @@ impl NetworkBehaviour for Protocol { best_hash: handshake.best_hash, genesis_hash: handshake.genesis_hash, }; - self.peers.insert(peer_id, roles); - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - negotiated_fallback, - received_handshake: handshake.encode(), - roles, - notifications_sink, - } + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send( + crate::SyncEvent::NotificationStreamOpened { + remote: peer_id, + received_handshake: handshake, + sink: notifications_sink, + tx, + }, + ); + self.sync_substream_validations.push(Box::pin(async move { + match rx.await { + Ok(accepted) => + if accepted { + Ok((peer_id, roles)) + } else { + Err(peer_id) + }, + Err(_) => Err(peer_id), + } + })); + + CustomMessageOutcome::None }, Ok(msg) => { debug!( @@ -467,17 +502,28 @@ impl NetworkBehaviour for Protocol { ) { Ok(handshake) => { let roles = handshake.roles; - self.peers.insert(peer_id, roles); - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)] - .clone(), - negotiated_fallback, - received_handshake, - roles, - notifications_sink, - } + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send( + crate::SyncEvent::NotificationStreamOpened { + remote: peer_id, + received_handshake: handshake, + sink: notifications_sink, + tx, + }, + ); + self.sync_substream_validations.push(Box::pin(async move { + match rx.await { + Ok(accepted) => + if accepted { + Ok((peer_id, roles)) + } else { + Err(peer_id) + }, + Err(_) => Err(peer_id), + } + })); + CustomMessageOutcome::None }, Err(err2) => { log::debug!( @@ -535,6 +581,12 @@ impl NetworkBehaviour for Protocol { NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => if 
self.bad_handshake_substreams.contains(&(peer_id, set_id)) { CustomMessageOutcome::None + } else if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationSinkReplaced { + remote: peer_id, + sink: notifications_sink, + }); + CustomMessageOutcome::None } else { CustomMessageOutcome::NotificationStreamReplaced { remote: peer_id, @@ -548,6 +600,12 @@ impl NetworkBehaviour for Protocol { // handshake. The outer layers have never received an opening event about this // substream, and consequently shouldn't receive a closing event either. CustomMessageOutcome::None + } else if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationStreamClosed { + remote: peer_id, + }); + self.peers.remove(&peer_id); + CustomMessageOutcome::None } else { CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, @@ -558,6 +616,12 @@ impl NetworkBehaviour for Protocol { NotificationsOut::Notification { peer_id, set_id, message } => { if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { CustomMessageOutcome::None + } else if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationsReceived { + remote: peer_id, + messages: vec![message.freeze()], + }); + CustomMessageOutcome::None } else { let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); CustomMessageOutcome::NotificationsReceived { @@ -572,10 +636,6 @@ impl NetworkBehaviour for Protocol { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) } - if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) - } - // This block can only be reached if an event was pulled from the behaviour and that // resulted in `CustomMessageOutcome::None`. Since there might be another pending // message from the behaviour, the task is scheduled again. diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 74e27fa17c602..9e93389389d29 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -2628,8 +2628,7 @@ mod tests { conn, NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, ); - if let Some(&PeerState::Incoming { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Incoming { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections.len(), 1); assert_eq!(connections[0], (conn, ConnectionState::OpenDesiredByRemote)); } else { @@ -2647,8 +2646,7 @@ mod tests { }, )); - if let Some(&PeerState::Incoming { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Incoming { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections.len(), 2); assert_eq!(connections[0], (conn, ConnectionState::OpenDesiredByRemote)); assert_eq!(connections[1], (conn2, ConnectionState::Closed)); @@ -2797,8 +2795,7 @@ mod tests { }, )); - if let Some(&PeerState::Disabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Disabled { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections.len(), 1); assert_eq!(connections[0], (conn1, ConnectionState::Closed)); } else { @@ -2884,8 +2881,7 @@ mod tests { )); } - if let Some(&PeerState::Disabled { ref connections, .. 
}) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Disabled { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections[0], (conn1, ConnectionState::Closed)); assert_eq!(connections[1], (conn2, ConnectionState::Closed)); } else { @@ -2900,8 +2896,7 @@ mod tests { NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, ); - if let Some(&PeerState::Enabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Enabled { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections[0], (conn1, ConnectionState::Opening)); assert_eq!(connections[1], (conn2, ConnectionState::Opening)); } else { @@ -3297,8 +3292,7 @@ mod tests { )); } - if let Some(&PeerState::Disabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Disabled { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections[0], (conn1, ConnectionState::Closed)); assert_eq!(connections[1], (conn2, ConnectionState::Closed)); } else { @@ -3356,8 +3350,7 @@ mod tests { )); } - if let Some(&PeerState::Disabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Disabled { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections[0], (conn1, ConnectionState::Closed)); assert_eq!(connections[1], (conn2, ConnectionState::Closed)); } else { @@ -3413,8 +3406,7 @@ mod tests { )); } - if let Some(&PeerState::Disabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Disabled { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections[0], (conn1, ConnectionState::Closed)); assert_eq!(connections[1], (conn2, ConnectionState::Closed)); } else { @@ -3424,8 +3416,7 @@ mod tests { // open substreams on both active connections notif.peerset_report_connect(peer, set_id); - if let Some(&PeerState::Enabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) - { + if let Some(PeerState::Enabled { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections[0], (conn1, ConnectionState::Opening)); assert_eq!(connections[1], (conn2, ConnectionState::Closed)); } else { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 6dc00b36ceb53..9708b24d29b52 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -36,7 +36,7 @@ use crate::{ network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, - protocol::{self, NotificationsSink, NotifsHandlerError, Protocol, Ready}, + protocol::{self, NotifsHandlerError, Protocol, Ready}, request_responses::{IfDisconnected, RequestFailure}, service::{ signature::{Signature, SigningError}, @@ -73,8 +73,7 @@ use parking_lot::Mutex; use sc_network_common::ExHashT; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::HeaderBackend; -use sp_runtime::traits::{Block as BlockT, Zero}; +use sp_runtime::traits::Block as BlockT; use std::{ cmp, @@ -92,6 +91,7 @@ use std::{ pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; +pub use protocol::NotificationsSink; mod metrics; mod out_events; @@ -147,9 +147,7 @@ where /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. 
From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new + 'static>( - mut params: Params, - ) -> Result { + pub fn new(mut params: Params) -> Result { // Private and public keys configuration. let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); @@ -230,6 +228,7 @@ where From::from(¶ms.role), ¶ms.network_config, params.block_announce_config, + params.tx, )?; // List of multiaddresses that we know in the network. @@ -277,13 +276,11 @@ where config.discovery_limit( u64::from(params.network_config.default_peers_set.out_peers) + 15, ); - let genesis_hash = params - .chain - .hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"); - config.with_kademlia(genesis_hash, params.fork_id.as_deref(), ¶ms.protocol_id); + config.with_kademlia( + params.genesis_hash, + params.fork_id.as_deref(), + ¶ms.protocol_id, + ); config.with_dht_random_walk(params.network_config.enable_dht_random_walk); config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); config.use_kademlia_disjoint_query_paths( diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 921efd7def622..ece565aad4b09 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -17,7 +17,10 @@ //! Helper for handling (i.e. answering) block requests from a remote peer via the //! `crate::request_responses::RequestResponsesBehaviour`. -use crate::schema::v1::{block_request::FromBlock, BlockResponse, Direction}; +use crate::{ + schema::v1::{block_request::FromBlock, BlockResponse, Direction}, + MAX_BLOCKS_IN_RESPONSE, +}; use codec::{Decode, Encode}; use futures::{ @@ -50,7 +53,6 @@ use std::{ }; const LOG_TARGET: &str = "sync"; -const MAX_BLOCKS_IN_RESPONSE: usize = 128; const MAX_BODY_BYTES: usize = 8 * 1024 * 1024; const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index 4cc1ca080d177..240c1ca1f8b26 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -89,7 +89,7 @@ impl BlockCollection { Some(&BlockRangeState::Downloading { .. 
}) => { trace!(target: "sync", "Inserting block data still marked as being downloaded: {}", start); }, - Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { + Some(BlockRangeState::Complete(existing)) if existing.len() >= blocks.len() => { trace!(target: "sync", "Ignored block data already downloaded: {}", start); return }, @@ -109,7 +109,7 @@ impl BlockCollection { pub fn needed_blocks( &mut self, who: PeerId, - count: usize, + count: u32, peer_best: NumberFor, common: NumberFor, max_parallel: u32, diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index e6e62101ffcd0..eca0ebfe41a9f 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -24,8 +24,8 @@ use crate::{ ChainSync, ClientError, SyncingService, }; -use codec::{Decode, DecodeAll, Encode}; -use futures::{FutureExt, Stream, StreamExt}; +use codec::{Decode, Encode}; +use futures::{FutureExt, StreamExt}; use futures_timer::Delay; use libp2p::PeerId; use lru::LruCache; @@ -39,9 +39,8 @@ use sc_network::{ config::{ NetworkConfiguration, NonDefaultSetConfig, ProtocolId, SyncMode as SyncOperationMode, }, - event::Event, utils::LruHashSet, - ProtocolName, + NotificationsSink, ProtocolName, }; use sc_network_common::{ role::Roles, @@ -58,45 +57,38 @@ use sc_network_common::{ use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::HeaderMetadata; use sp_consensus::block_validation::BlockAnnounceValidator; -use sp_runtime::{ - traits::{Block as BlockT, CheckedSub, Header, NumberFor, Zero}, - SaturatedConversion, -}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; use std::{ collections::{HashMap, HashSet}, num::NonZeroUsize, - pin::Pin, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, }, task::Poll, + time::{Duration, Instant}, }; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100); -/// When light node connects to the full node and the full node is behind light node -/// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful -/// and disconnect to free connection slot. -const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; - /// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead +/// If the block announces stream to peer has been inactive for two minutes meaning local node +/// has not sent or received block announcements to/from the peer, report the node for inactivity, +/// disconnect it and attempt to establish connection to some other peer. +const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30); + mod rep { use sc_peerset::ReputationChange as Rep; - /// Reputation change when we are a light client and a peer is behind us. - pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - /// We received a message that failed to decode. - pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); /// Peer has different genesis. pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); - /// Peer role does not match (e.g. light peer connecting to another light peer). - pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); /// Peer send us a block announcement that failed at validation. 
pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); + /// Block announce substream with the peer has been inactive too long + pub const INACTIVE_SUBSTREAM: Rep = Rep::new(-(1 << 10), "Inactive block announce substream"); } struct Metrics { @@ -174,6 +166,12 @@ pub struct Peer { pub info: ExtendedPeerInfo, /// Holds a set of blocks known to this peer. pub known_blocks: LruHashSet, + /// Notification sink. + sink: NotificationsSink, + /// Instant when the last notification was sent to peer. + last_notification_sent: Instant, + /// Instant when the last notification was received from peer. + last_notification_received: Instant, } pub struct SyncingEngine { @@ -196,6 +194,9 @@ pub struct SyncingEngine { /// Channel for receiving service commands service_rx: TracingUnboundedReceiver>, + /// Channel for receiving inbound connections from `Protocol`. + rx: sc_utils::mpsc::TracingUnboundedReceiver>, + /// Assigned roles. roles: Roles, @@ -211,6 +212,9 @@ pub struct SyncingEngine { /// All connected peers. Contains both full and light node peers. peers: HashMap>, + /// Evicted peers + evicted: HashSet, + /// List of nodes for which we perform additional logging because they are important for the /// user. important_peers: HashSet, @@ -266,6 +270,7 @@ where block_request_protocol_name: ProtocolName, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, + rx: sc_utils::mpsc::TracingUnboundedReceiver>, ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { let mode = match network_config.sync_mode { SyncOperationMode::Full => SyncMode::Full, @@ -274,6 +279,14 @@ where SyncOperationMode::Warp => SyncMode::Warp, }; let max_parallel_downloads = network_config.max_parallel_downloads; + let max_blocks_per_request = if network_config.max_blocks_per_request > + crate::MAX_BLOCKS_IN_RESPONSE as u32 + { + log::info!(target: "sync", "clamping maximum blocks per request to {}", crate::MAX_BLOCKS_IN_RESPONSE); + crate::MAX_BLOCKS_IN_RESPONSE as u32 + } else { + network_config.max_blocks_per_request + }; let cache_capacity = NonZeroUsize::new( (network_config.default_peers_set.in_peers as usize + network_config.default_peers_set.out_peers as usize) @@ -328,6 +341,7 @@ where roles, block_announce_validator, max_parallel_downloads, + max_blocks_per_request, warp_sync_params, metrics_registry, network_service.clone(), @@ -354,11 +368,13 @@ where chain_sync, network_service, peers: HashMap::new(), + evicted: HashSet::new(), block_announce_data_cache: LruCache::new(cache_capacity), block_announce_protocol_name, num_connected: num_connected.clone(), is_major_syncing: is_major_syncing.clone(), service_rx, + rx, genesis_hash, important_peers, default_peers_set_no_slot_connected_peers: HashSet::new(), @@ -516,6 +532,7 @@ where }, }; peer.known_blocks.insert(hash); + peer.last_notification_received = Instant::now(); if peer.info.roles.is_full() { let is_best = match announce.state.unwrap_or(BlockState::Best) { @@ -566,11 +583,8 @@ where data: Some(data.clone()), }; - self.network_service.write_notification( - *who, - self.block_announce_protocol_name.clone(), - message.encode(), - ); + peer.last_notification_sent = Instant::now(); + peer.sink.send_sync_notification(message.encode()); } } } @@ -587,102 +601,49 @@ where ) } - pub async fn run(mut self, mut stream: Pin + Send>>) { + pub async fn run(mut self) { loop { - futures::future::poll_fn(|cx| self.poll(cx, &mut stream)).await; + futures::future::poll_fn(|cx| self.poll(cx)).await; } } - pub fn 
poll( - &mut self, - cx: &mut std::task::Context, - event_stream: &mut Pin + Send>>, - ) -> Poll<()> { + pub fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()> { self.num_connected.store(self.peers.len(), Ordering::Relaxed); self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); while let Poll::Ready(()) = self.tick_timeout.poll_unpin(cx) { self.report_metrics(); - self.tick_timeout.reset(TICK_TIMEOUT); - } - - while let Poll::Ready(Some(event)) = event_stream.poll_next_unpin(cx) { - match event { - Event::NotificationStreamOpened { - remote, protocol, received_handshake, .. - } => { - if protocol != self.block_announce_protocol_name { - continue - } - match as DecodeAll>::decode_all( - &mut &received_handshake[..], - ) { - Ok(handshake) => { - if self.on_sync_peer_connected(remote, handshake).is_err() { - log::debug!( - target: "sync", - "Failed to register peer {remote:?}: {received_handshake:?}", - ); - } - }, - Err(err) => { - log::debug!( - target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {}", - remote, - received_handshake, - err, - ); - self.network_service.report_peer(remote, rep::BAD_MESSAGE); - }, - } - }, - Event::NotificationStreamClosed { remote, protocol } => { - if protocol != self.block_announce_protocol_name { - continue - } - - if self.on_sync_peer_disconnected(remote).is_err() { - log::trace!( - target: "sync", - "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", - remote - ); - } - }, - Event::NotificationsReceived { remote, messages } => { - for (protocol, message) in messages { - if protocol != self.block_announce_protocol_name { - continue - } + // go over all connected peers and check if any of them have been idle for a while. Idle + // in this case means that we haven't sent or received block announcements to/from this + // peer. If that is the case, because of #5685, it could be that the block announces + // substream is not actually open and this peer is just wasting a slot and should + // be replaced with some other node that is willing to send us block announcements. + for (id, peer) in self.peers.iter() { + // because of a delay between disconnecting a peer in `SyncingEngine` and getting + // the response back from `Protocol`, a peer might be reported and disconnected + // multiple times. To prevent this from happening (until the underlying issue is + // fixed), keep track of evicted peers and report and disconnect them only once. + if self.evicted.contains(id) { + continue + } - if self.peers.contains_key(&remote) { - if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { - self.push_block_announce_validation(remote, announce); + let last_received_late = + peer.last_notification_received.elapsed() > INACTIVITY_EVICT_THRESHOLD; + let last_sent_late = + peer.last_notification_sent.elapsed() > INACTIVITY_EVICT_THRESHOLD; - // Make sure that the newly added block announce validation future - // was polled once to be registered in the task.
- if let Poll::Ready(res) = - self.chain_sync.poll_block_announce_validation(cx) - { - self.process_block_announce_validation_result(res) - } - } else { - log::warn!(target: "sub-libp2p", "Failed to decode block announce"); - } - } else { - log::trace!( - target: "sync", - "Received sync for peer earlier refused by sync layer: {}", - remote - ); - } - } - }, - _ => {}, + if last_received_late && last_sent_late { + log::debug!(target: "sync", "evict peer {id} since it has been idling for too long"); + self.network_service.report_peer(*id, rep::INACTIVE_SUBSTREAM); + self.network_service + .disconnect_peer(*id, self.block_announce_protocol_name.clone()); + self.evicted.insert(*id); + } } + + self.tick_timeout.reset(TICK_TIMEOUT); } while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { @@ -723,7 +684,9 @@ where ToServiceCommand::NewBestBlockImported(hash, number) => self.new_best_block_imported(hash, number), ToServiceCommand::Status(tx) => { - let _ = tx.send(self.chain_sync.status()); + let mut status = self.chain_sync.status(); + status.num_connected_peers = self.peers.len() as u32; + let _ = tx.send(status); }, ToServiceCommand::NumActivePeers(tx) => { let _ = tx.send(self.chain_sync.num_active_peers()); @@ -756,6 +719,71 @@ where } } + while let Poll::Ready(Some(event)) = self.rx.poll_next_unpin(cx) { + match event { + sc_network::SyncEvent::NotificationStreamOpened { + remote, + received_handshake, + sink, + tx, + } => match self.on_sync_peer_connected(remote, &received_handshake, sink) { + Ok(()) => { + let _ = tx.send(true); + }, + Err(()) => { + log::debug!( + target: "sync", + "Failed to register peer {remote:?}: {received_handshake:?}", + ); + let _ = tx.send(false); + }, + }, + sc_network::SyncEvent::NotificationStreamClosed { remote } => { + self.evicted.remove(&remote); + if self.on_sync_peer_disconnected(remote).is_err() { + log::trace!( + target: "sync", + "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", + remote + ); + } + }, + sc_network::SyncEvent::NotificationsReceived { remote, messages } => { + for message in messages { + if self.peers.contains_key(&remote) { + if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { + self.push_block_announce_validation(remote, announce); + + // Make sure that the newly added block announce validation future + // was polled once to be registered in the task. + if let Poll::Ready(res) = + self.chain_sync.poll_block_announce_validation(cx) + { + self.process_block_announce_validation_result(res) + } + } else { + log::warn!(target: "sub-libp2p", "Failed to decode block announce"); + } + } else { + log::trace!( + target: "sync", + "Received sync for peer earlier refused by sync layer: {}", + remote + ); + } + } + }, + sc_network::SyncEvent::NotificationSinkReplaced { remote, sink } => { + if let Some(peer) = self.peers.get_mut(&remote) { + peer.sink = sink; + } + }, + } + } + + // Poll `ChainSync` last: if a block announcement was received through the event stream + // between `SyncingEngine` and `Protocol` and its validation finished right after it was + // queued, the resulting block request (if any) can be sent right away. while let Poll::Ready(result) = self.chain_sync.poll(cx) { self.process_block_announce_validation_result(result); } @@ -767,13 +795,13 @@ where /// /// Returns a result if the handshake of this peer was indeed accepted.
pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> { - if self.important_peers.contains(&peer) { - log::warn!(target: "sync", "Reserved peer {} disconnected", peer); - } else { - log::debug!(target: "sync", "{} disconnected", peer); - } - if self.peers.remove(&peer).is_some() { + if self.important_peers.contains(&peer) { + log::warn!(target: "sync", "Reserved peer {} disconnected", peer); + } else { + log::debug!(target: "sync", "{} disconnected", peer); + } + self.chain_sync.peer_disconnected(&peer); self.default_peers_set_no_slot_connected_peers.remove(&peer); self.event_streams @@ -792,7 +820,8 @@ where pub fn on_sync_peer_connected( &mut self, who: PeerId, - status: BlockAnnouncesHandshake, + status: &BlockAnnouncesHandshake, + sink: NotificationsSink, ) -> Result<(), ()> { log::trace!(target: "sync", "New peer {} {:?}", who, status); @@ -804,8 +833,6 @@ where if status.genesis_hash != self.genesis_hash { self.network_service.report_peer(who, rep::GENESIS_MISMATCH); - self.network_service - .disconnect_peer(who, self.block_announce_protocol_name.clone()); if self.important_peers.contains(&who) { log::error!( @@ -834,31 +861,6 @@ where return Err(()) } - if self.roles.is_light() { - // we're not interested in light peers - if status.roles.is_light() { - log::debug!(target: "sync", "Peer {} is unable to serve light requests", who); - self.network_service.report_peer(who, rep::BAD_ROLE); - self.network_service - .disconnect_peer(who, self.block_announce_protocol_name.clone()); - return Err(()) - } - - // we don't interested in peers that are far behind us - let self_best_block = self.client.info().best_number; - let blocks_difference = self_best_block - .checked_sub(&status.best_number) - .unwrap_or_else(Zero::zero) - .saturated_into::(); - if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { - log::debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); - self.network_service.report_peer(who, rep::PEER_BEHIND_US_LIGHT); - self.network_service - .disconnect_peer(who, self.block_announce_protocol_name.clone()); - return Err(()) - } - } - let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; @@ -869,8 +871,6 @@ where this_peer_reserved_slot { log::debug!(target: "sync", "Too many full nodes, rejecting {}", who); - self.network_service - .disconnect_peer(who, self.block_announce_protocol_name.clone()); return Err(()) } @@ -879,8 +879,6 @@ where { // Make sure that not all slots are occupied by light clients. 
log::debug!(target: "sync", "Too many light nodes, rejecting {}", who); - self.network_service - .disconnect_peer(who, self.block_announce_protocol_name.clone()); return Err(()) } @@ -893,14 +891,15 @@ where known_blocks: LruHashSet::new( NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), ), + sink, + last_notification_sent: Instant::now(), + last_notification_received: Instant::now(), }; let req = if peer.info.roles.is_full() { match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { Ok(req) => req, Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); self.network_service.report_peer(id, repu); return Err(()) }, diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 45d14ffa7bb37..3f1cbebd57255 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -107,9 +107,6 @@ pub mod state_request_handler; pub mod warp; pub mod warp_request_handler; -/// Maximum blocks to request in a single packet. -const MAX_BLOCKS_TO_REQUEST: usize = 64; - /// Maximum blocks to store in the import queue. const MAX_IMPORTING_BLOCKS: usize = 2048; @@ -147,6 +144,9 @@ const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; /// Maximum allowed size for a block announce. const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; +/// Maximum blocks per response. +pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; + mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a @@ -311,6 +311,8 @@ pub struct ChainSync { block_announce_validator: Box + Send>, /// Maximum number of peers to ask the same blocks in parallel. max_parallel_downloads: u32, + /// Maximum blocks per request. + max_blocks_per_request: u32, /// Total number of downloaded blocks. downloaded_blocks: usize, /// All block announcement that are currently being validated. @@ -342,7 +344,7 @@ pub struct ChainSync { /// Protocol name used to send out warp sync requests warp_sync_protocol_name: Option, /// Pending responses - pending_responses: FuturesUnordered>, + pending_responses: HashMap>, /// Handle to import queue. import_queue: Box>, /// Metrics. 
@@ -523,6 +525,7 @@ where state: sync_state, best_seen_block, num_peers: self.peers.len() as u32, + num_connected_peers: 0u32, queued_blocks: self.queue_blocks.len() as u32, state_sync: self.state_sync.as_ref().map(|s| s.progress()), warp_sync: warp_sync_progress, @@ -1235,6 +1238,7 @@ where gap_sync.blocks.clear_peer_download(who) } self.peers.remove(who); + self.pending_responses.remove(who); self.extra_justifications.peer_disconnected(who); self.allowed_requests.set_all(); self.fork_targets.retain(|_, target| { @@ -1357,7 +1361,7 @@ where if self.peers.contains_key(&who) { self.pending_responses - .push(Box::pin(async move { (who, PeerRequest::Block(request), rx.await) })); + .insert(who, Box::pin(async move { (who, PeerRequest::Block(request), rx.await) })); } match self.encode_block_request(&opaque_req) { @@ -1402,6 +1406,7 @@ where roles: Roles, block_announce_validator: Box + Send>, max_parallel_downloads: u32, + max_blocks_per_request: u32, warp_sync_params: Option>, metrics_registry: Option<&Registry>, network_service: service::network::NetworkServiceHandle, @@ -1436,6 +1441,7 @@ where allowed_requests: Default::default(), block_announce_validator, max_parallel_downloads, + max_blocks_per_request, downloaded_blocks: 0, block_announce_validation: Default::default(), block_announce_validation_per_peer_stats: Default::default(), @@ -1452,7 +1458,7 @@ where .notifications_protocol .clone() .into(), - pending_responses: Default::default(), + pending_responses: HashMap::new(), import_queue, metrics: if let Some(r) = &metrics_registry { match SyncingMetrics::register(r) { @@ -1805,6 +1811,9 @@ where return None } + // since the request is not a justification, remove it from pending responses + self.pending_responses.remove(&id); + // handle peers that were in other states. 
match self.new_peer(id, p.best_hash, p.best_number) { Ok(None) => None, @@ -2004,7 +2013,7 @@ where if self.peers.contains_key(&who) { self.pending_responses - .push(Box::pin(async move { (who, PeerRequest::State, rx.await) })); + .insert(who, Box::pin(async move { (who, PeerRequest::State, rx.await) })); } match self.encode_state_request(&request) { @@ -2032,7 +2041,7 @@ where if self.peers.contains_key(&who) { self.pending_responses - .push(Box::pin(async move { (who, PeerRequest::WarpProof, rx.await) })); + .insert(who, Box::pin(async move { (who, PeerRequest::WarpProof, rx.await) })); } match &self.warp_sync_protocol_name { @@ -2170,9 +2179,20 @@ where } fn poll_pending_responses(&mut self, cx: &mut std::task::Context) -> Poll> { - while let Poll::Ready(Some((id, request, response))) = - self.pending_responses.poll_next_unpin(cx) - { + let ready_responses = self + .pending_responses + .values_mut() + .filter_map(|future| match future.poll_unpin(cx) { + Poll::Pending => None, + Poll::Ready(result) => Some(result), + }) + .collect::>(); + + for (id, request, response) in ready_responses { + self.pending_responses + .remove(&id) + .expect("Logic error: peer id from pending response is missing in the map."); + match response { Ok(Ok(resp)) => match request { PeerRequest::Block(req) => { @@ -2364,6 +2384,7 @@ where let queue = &self.queue_blocks; let allowed_requests = self.allowed_requests.take(); let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; + let max_blocks_per_request = self.max_blocks_per_request; let gap_sync = &mut self.gap_sync; self.peers .iter_mut() @@ -2403,6 +2424,7 @@ where blocks, attrs, max_parallel, + max_blocks_per_request, last_finalized, best_queued, ) { @@ -2429,6 +2451,7 @@ where client.block_status(*hash).unwrap_or(BlockStatus::Unknown) } }, + max_blocks_per_request, ) { trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); peer.state = PeerSyncState::DownloadingStale(hash); @@ -2441,6 +2464,7 @@ where attrs, sync.target, sync.best_queued_number, + max_blocks_per_request, ) }) { peer.state = PeerSyncState::DownloadingGap(range.start); @@ -2684,9 +2708,7 @@ where break } - if result.is_err() { - has_error = true; - } + has_error |= result.is_err(); match result { Ok(BlockImportStatus::ImportedKnown(number, who)) => @@ -2697,9 +2719,7 @@ where if aux.clear_justification_requests { trace!( target: "sync", - "Block imported clears all pending justification requests {}: {:?}", - number, - hash, + "Block imported clears all pending justification requests {number}: {hash:?}", ); self.clear_justification_requests(); } @@ -2707,9 +2727,7 @@ where if aux.needs_justification { trace!( target: "sync", - "Block imported but requires justification {}: {:?}", - number, - hash, + "Block imported but requires justification {number}: {hash:?}", ); self.request_justification(&hash, number); } @@ -2769,25 +2787,26 @@ where output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); output.extend(self.restart()); }, - Err(BlockImportError::VerificationFailed(who, e)) => + Err(BlockImportError::VerificationFailed(who, e)) => { + let extra_message = + who.map_or_else(|| "".into(), |peer| format!(" received from ({peer})")); + + warn!( + target: "sync", + "💔 Verification failed for block {hash:?}{extra_message}: {e:?}", + ); + if let Some(peer) = who { - warn!( - target: "sync", - "💔 Verification failed for block {:?} received from peer: {}, {:?}", - hash, - peer, - e, - ); output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); - 
output.extend(self.restart()); - }, + } + + output.extend(self.restart()); + }, Err(BlockImportError::BadBlock(who)) => if let Some(peer) = who { warn!( target: "sync", - "💔 Block {:?} received from peer {} has been blacklisted", - hash, - peer, + "💔 Block {hash:?} received from peer {peer} has been blacklisted", ); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); }, @@ -2795,10 +2814,10 @@ where // This may happen if the chain we were requesting upon has been discarded // in the meantime because other chain has been finalized. // Don't mark it as bad as it still may be synced if explicitly requested. - trace!(target: "sync", "Obsolete block {:?}", hash); + trace!(target: "sync", "Obsolete block {hash:?}"); }, e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { - warn!(target: "sync", "💔 Error importing block {:?}: {}", hash, e.unwrap_err()); + warn!(target: "sync", "💔 Error importing block {hash:?}: {}", e.unwrap_err()); self.state_sync = None; self.warp_sync = None; output.extend(self.restart()); @@ -2909,6 +2928,7 @@ fn peer_block_request( blocks: &mut BlockCollection, attrs: BlockAttributes, max_parallel_downloads: u32, + max_blocks_per_request: u32, finalized: NumberFor, best_num: NumberFor, ) -> Option<(Range>, BlockRequest)> { @@ -2924,7 +2944,7 @@ fn peer_block_request( } let range = blocks.needed_blocks( *id, - MAX_BLOCKS_TO_REQUEST, + max_blocks_per_request, peer.best_number, peer.common_number, max_parallel_downloads, @@ -2959,10 +2979,11 @@ fn peer_gap_block_request( attrs: BlockAttributes, target: NumberFor, common_number: NumberFor, + max_blocks_per_request: u32, ) -> Option<(Range>, BlockRequest)> { let range = blocks.needed_blocks( *id, - MAX_BLOCKS_TO_REQUEST, + max_blocks_per_request, std::cmp::min(peer.best_number, target), common_number, 1, @@ -2991,6 +3012,7 @@ fn fork_sync_request( finalized: NumberFor, attributes: BlockAttributes, check_block: impl Fn(&B::Hash) -> BlockStatus, + max_blocks_per_request: u32, ) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { if r.number <= finalized { @@ -3010,7 +3032,7 @@ fn fork_sync_request( // Download the fork only if it is behind or not too far ahead our tip of the chain // Otherwise it should be downloaded in full sync mode. 
if r.number <= best_num || - (r.number - best_num).saturated_into::() < MAX_BLOCKS_TO_REQUEST as u32 + (r.number - best_num).saturated_into::() < max_blocks_per_request as u32 { let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); let count = if parent_status == BlockStatus::Unknown { @@ -3198,6 +3220,7 @@ mod test { Roles::from(&Role::Full), block_announce_validator, 1, + 64, None, None, chain_sync_network_handle, @@ -3264,6 +3287,7 @@ mod test { Roles::from(&Role::Full), Box::new(DefaultBlockAnnounceValidator), 1, + 64, None, None, chain_sync_network_handle, @@ -3445,6 +3469,7 @@ mod test { Roles::from(&Role::Full), Box::new(DefaultBlockAnnounceValidator), 5, + 64, None, None, chain_sync_network_handle, @@ -3571,6 +3596,7 @@ mod test { Roles::from(&Role::Full), Box::new(DefaultBlockAnnounceValidator), 5, + 64, None, None, chain_sync_network_handle, @@ -3585,6 +3611,7 @@ mod test { let peer_id2 = PeerId::random(); let best_block = blocks.last().unwrap().clone(); + let max_blocks_to_request = sync.max_blocks_per_request; // Connect the node we will sync from sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) .unwrap(); @@ -3594,8 +3621,8 @@ mod test { while best_block_num < MAX_DOWNLOAD_AHEAD { let request = get_block_request( &mut sync, - FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), - MAX_BLOCKS_TO_REQUEST as u32, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, &peer_id1, ); @@ -3609,14 +3636,14 @@ mod test { let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); assert!(matches!( res, - OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + OnBlockData::Import(_, blocks) if blocks.len() == max_blocks_to_request as usize ),); - best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + best_block_num += max_blocks_to_request as u32; let _ = sync.on_blocks_processed( - MAX_BLOCKS_TO_REQUEST as usize, - MAX_BLOCKS_TO_REQUEST as usize, + max_blocks_to_request as usize, + max_blocks_to_request as usize, resp_blocks .iter() .rev() @@ -3674,8 +3701,8 @@ mod test { // peer 2 as well. get_block_request( &mut sync, - FromBlock::Number(peer1_from + MAX_BLOCKS_TO_REQUEST as u64), - MAX_BLOCKS_TO_REQUEST as u32, + FromBlock::Number(peer1_from + max_blocks_to_request as u64), + max_blocks_to_request as u32, &peer_id2, ); } @@ -3727,6 +3754,7 @@ mod test { Roles::from(&Role::Full), Box::new(DefaultBlockAnnounceValidator), 5, + 64, None, None, chain_sync_network_handle, @@ -3772,11 +3800,12 @@ mod test { // Now request and import the fork. 
let mut best_block_num = *finalized_block.header().number() as u32; + let max_blocks_to_request = sync.max_blocks_per_request; while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { let request = get_block_request( &mut sync, - FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), - MAX_BLOCKS_TO_REQUEST as u32, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, &peer_id1, ); @@ -3790,14 +3819,14 @@ mod test { let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); assert!(matches!( res, - OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + OnBlockData::Import(_, blocks) if blocks.len() == sync.max_blocks_per_request as usize ),); - best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + best_block_num += sync.max_blocks_per_request as u32; let _ = sync.on_blocks_processed( - MAX_BLOCKS_TO_REQUEST as usize, - MAX_BLOCKS_TO_REQUEST as usize, + max_blocks_to_request as usize, + max_blocks_to_request as usize, resp_blocks .iter() .rev() @@ -3868,6 +3897,7 @@ mod test { Roles::from(&Role::Full), Box::new(DefaultBlockAnnounceValidator), 5, + 64, None, None, chain_sync_network_handle, @@ -3913,10 +3943,12 @@ mod test { // Now request and import the fork. let mut best_block_num = *finalized_block.header().number() as u32; + let max_blocks_to_request = sync.max_blocks_per_request; + let mut request = get_block_request( &mut sync, - FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), - MAX_BLOCKS_TO_REQUEST as u32, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, &peer_id1, ); let last_block_num = *fork_blocks.last().unwrap().header().number() as u32 - 1; @@ -3931,18 +3963,18 @@ mod test { let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); assert!(matches!( res, - OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + OnBlockData::Import(_, blocks) if blocks.len() == max_blocks_to_request as usize ),); - best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + best_block_num += max_blocks_to_request as u32; if best_block_num < last_block_num { // make sure we're not getting a duplicate request in the time before the blocks are // processed request = get_block_request( &mut sync, - FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), - MAX_BLOCKS_TO_REQUEST as u32, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, &peer_id1, ); } @@ -3964,16 +3996,17 @@ mod test { // The import queue may send notifications in batches of varying size. So we simulate // this here by splitting the batch into 2 notifications. 
+ let max_blocks_to_request = sync.max_blocks_per_request; let second_batch = notify_imported.split_off(notify_imported.len() / 2); let _ = sync.on_blocks_processed( - MAX_BLOCKS_TO_REQUEST as usize, - MAX_BLOCKS_TO_REQUEST as usize, + max_blocks_to_request as usize, + max_blocks_to_request as usize, notify_imported, ); let _ = sync.on_blocks_processed( - MAX_BLOCKS_TO_REQUEST as usize, - MAX_BLOCKS_TO_REQUEST as usize, + max_blocks_to_request as usize, + max_blocks_to_request as usize, second_batch, ); @@ -4009,6 +4042,7 @@ mod test { Roles::from(&Role::Full), Box::new(DefaultBlockAnnounceValidator), 1, + 64, None, None, chain_sync_network_handle, @@ -4054,6 +4088,7 @@ mod test { Roles::from(&Role::Full), Box::new(DefaultBlockAnnounceValidator), 1, + 64, None, None, chain_sync_network_handle, @@ -4091,4 +4126,89 @@ mod test { let state = AncestorSearchState::::BinarySearch(1, 3); assert!(handle_ancestor_search_state(&state, 2, true).is_none()); } + + #[test] + fn sync_restart_removes_block_but_not_justification_requests() { + let mut client = Arc::new(TestClientBuilder::new().build()); + let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); + let (mut sync, _) = ChainSync::new( + SyncMode::Full, + client.clone(), + ProtocolId::from("test-protocol-name"), + &Some(String::from("test-fork-id")), + Roles::from(&Role::Full), + block_announce_validator, + 1, + 64, + None, + None, + chain_sync_network_handle, + import_queue, + ProtocolName::from("block-request"), + ProtocolName::from("state-request"), + None, + ) + .unwrap(); + + let peers = vec![PeerId::random(), PeerId::random()]; + + let mut new_blocks = |n| { + for _ in 0..n { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + } + + let info = client.info(); + (info.best_hash, info.best_number) + }; + + let (b1_hash, b1_number) = new_blocks(50); + + // add new peer and request blocks from them + sync.new_peer(peers[0], Hash::random(), 42).unwrap(); + + // we wil send block requests to these peers + // for these blocks we don't know about + for (peer, request) in sync.block_requests() { + sync.send_block_request(peer, request); + } + + // add a new peer at a known block + sync.new_peer(peers[1], b1_hash, b1_number).unwrap(); + + // we request a justification for a block we have locally + sync.request_justification(&b1_hash, b1_number); + + // the justification request should be scheduled to the + // new peer which is at the given block + let mut requests = sync.justification_requests().collect::>(); + assert_eq!(requests.len(), 1); + let (peer, request) = requests.remove(0); + sync.send_block_request(peer, request); + + assert!(!std::matches!( + sync.peers.get(&peers[0]).unwrap().state, + PeerSyncState::DownloadingJustification(_), + )); + assert_eq!( + sync.peers.get(&peers[1]).unwrap().state, + PeerSyncState::DownloadingJustification(b1_hash), + ); + assert_eq!(sync.pending_responses.len(), 2); + + let requests = sync.restart().collect::>(); + assert!(requests.iter().any(|res| res.as_ref().unwrap().0 == peers[0])); + + assert_eq!(sync.pending_responses.len(), 1); + assert!(sync.pending_responses.get(&peers[1]).is_some()); + assert_eq!( + sync.peers.get(&peers[1]).unwrap().state, + PeerSyncState::DownloadingJustification(b1_hash), 
+ ); + sync.peer_disconnected(&peers[1]); + assert_eq!(sync.pending_responses.len(), 0); + } } diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 8368fa278712a..9763feed5ea54 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -26,6 +26,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network = { version = "0.10.0-dev", path = "../" } sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } sc-network-light = { version = "0.10.0-dev", path = "../light" } sc-network-sync = { version = "0.10.0-dev", path = "../sync" } sc-service = { version = "0.10.0-dev", default-features = false, features = ["test-helpers"], path = "../../service" } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 75b8287b08dcf..f85d6ed63c247 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -55,8 +55,8 @@ use sc_network::{ }, request_responses::ProtocolConfig as RequestResponseConfig, types::ProtocolName, - Multiaddr, NetworkBlock, NetworkEventStream, NetworkService, NetworkStateInfo, - NetworkSyncForkRequest, NetworkWorker, + Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, + NetworkWorker, }; use sc_network_common::{ role::Roles, @@ -83,7 +83,7 @@ use sp_core::H256; use sp_runtime::{ codec::{Decode, Encode}, generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, }; use substrate_test_runtime_client::AccountKeyring; @@ -896,6 +896,7 @@ where let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); + let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (engine, sync_service, block_announce_config) = sc_network_sync::engine::SyncingEngine::new( Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), @@ -911,22 +912,26 @@ where block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), Some(warp_protocol_config.name.clone()), + rx, ) .unwrap(); let sync_service_import_queue = Box::new(sync_service.clone()); let sync_service = Arc::new(sync_service.clone()); + let genesis_hash = + client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: Box::new(|f| { tokio::spawn(f); }), network_config, - chain: client.clone(), + genesis_hash, protocol_id, fork_id, metrics_registry: None, block_announce_config, + tx, request_response_protocol_configs: [ block_request_protocol_config, state_request_protocol_config, @@ -948,9 +953,8 @@ where import_queue.run(sync_service_import_queue).await; }); - let service = network.service().clone(); tokio::spawn(async move { - engine.run(service.event_stream("syncing")).await; + engine.run().await; }); self.mut_peers(move |peers| { diff --git a/client/network/test/src/service.rs b/client/network/test/src/service.rs index b1de2a91ebcc9..5871860a7c4a6 100644 --- a/client/network/test/src/service.rs +++ b/client/network/test/src/service.rs @@ -34,7 +34,8 @@ use sc_network_sync::{ service::network::{NetworkServiceHandle, NetworkServiceProvider}, 
state_request_handler::StateRequestHandler, }; -use sp_runtime::traits::Block as BlockT; +use sp_blockchain::HeaderBackend; +use sp_runtime::traits::{Block as BlockT, Zero}; use substrate_test_runtime_client::{ runtime::{Block as TestBlock, Hash as TestHash}, TestClientBuilder, TestClientBuilderExt as _, @@ -176,6 +177,7 @@ impl TestNetworkBuilder { let (chain_sync_network_provider, chain_sync_network_handle) = self.chain_sync_network.unwrap_or(NetworkServiceProvider::new()); + let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config::Role::Full), @@ -191,20 +193,23 @@ impl TestNetworkBuilder { block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), None, + rx, ) .unwrap(); let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone())); + let genesis_hash = + client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let worker = NetworkWorker::< substrate_test_runtime_client::runtime::Block, substrate_test_runtime_client::runtime::Hash, - >::new(config::Params { + >::new(config::Params:: { block_announce_config, role: config::Role::Full, executor: Box::new(|f| { tokio::spawn(f); }), + genesis_hash, network_config, - chain: client.clone(), protocol_id, fork_id, metrics_registry: None, @@ -214,6 +219,7 @@ impl TestNetworkBuilder { light_client_request_protocol_config, ] .to_vec(), + tx, }) .unwrap(); @@ -231,8 +237,7 @@ impl TestNetworkBuilder { tokio::time::sleep(std::time::Duration::from_millis(250)).await; } }); - let stream = worker.service().event_stream("syncing"); - tokio::spawn(engine.run(stream)); + tokio::spawn(engine.run()); TestNetwork::new(worker) } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index d87b03fb3a78c..af46d15a2bacd 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -414,7 +414,7 @@ async fn can_sync_small_non_best_forks() { // poll until the two nodes connect, otherwise announcing the block will not work futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).num_peers() == 0 { + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { Poll::Pending } else { Poll::Ready(()) diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index 381dd654b600b..f57556d3986b0 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -473,7 +473,7 @@ where let (hashes, to_send): (Vec<_>, Vec<_>) = transactions .iter() - .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) + .filter(|(hash, _)| peer.known_transactions.insert(hash.clone())) .cloned() .unzip(); diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index cc66e9254d003..ba18a1d4fc70d 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2" } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" thiserror = "1.0" @@ -23,6 +23,5 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/a sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-rpc = { version = 
"6.0.0", path = "../../primitives/rpc" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } sp-version = { version = "5.0.0", path = "../../primitives/version" } jsonrpsee = { version = "0.16.2", features = ["server", "client-core", "macros"] } diff --git a/client/rpc-api/src/author/error.rs b/client/rpc-api/src/author/error.rs index 7ca96bbab7f19..8149a1f8d1afe 100644 --- a/client/rpc-api/src/author/error.rs +++ b/client/rpc-api/src/author/error.rs @@ -47,7 +47,7 @@ pub enum Error { BadKeyType, /// Some random issue with the key store. Shouldn't happen. #[error("The key store is unavailable")] - KeyStoreUnavailable, + KeystoreUnavailable, /// Invalid session keys encoding. #[error("Session keys are not encoded correctly")] InvalidSessionKeys, diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index 2ed767c370a65..f215cd978f03a 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -29,7 +29,7 @@ pub trait ChainApi { #[method(name = "chain_getHeader", blocking)] fn header(&self, hash: Option) -> RpcResult>; - /// Get header and body of a relay chain block. + /// Get header and body of a block. #[method(name = "chain_getBlock", blocking)] fn block(&self, hash: Option) -> RpcResult>; diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index ebc1eb9b4ff52..dbc2a505456a5 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -33,7 +33,7 @@ pub use self::helpers::ReadProof; /// Substrate state API #[rpc(client, server)] pub trait StateApi { - /// Call a contract at a block's state. + /// Call a method from the runtime API at a block's state. #[method(name = "state_call", aliases = ["state_callAt"], blocking)] fn call(&self, name: String, bytes: Bytes, hash: Option) -> RpcResult; @@ -85,8 +85,10 @@ pub trait StateApi { /// Query historical storage entries (by key) starting from a block given as the second /// parameter. /// - /// NOTE This first returned result contains the initial state of storage for all keys. + /// NOTE: The first returned result contains the initial state of storage for all keys. /// Subsequent values in the vector represent changes to the previous state (diffs). + /// WARNING: The time complexity of this query is O(|keys|*dist(block, hash)), and the + /// memory complexity is O(dist(block, hash)) -- use with caution. #[method(name = "state_queryStorage", blocking)] fn query_storage( &self, @@ -95,7 +97,9 @@ pub trait StateApi { hash: Option, ) -> RpcResult>>; - /// Query storage entries (by key) starting at block hash given as the second parameter. + /// Query storage entries (by key) at a block hash given as the second parameter. + /// NOTE: Each StorageChangeSet in the result corresponds to exactly one element -- + /// the storage value under an input key at the input block hash. 
#[method(name = "state_queryStorageAt", blocking)] fn query_storage_at( &self, diff --git a/client/rpc-spec-v2/Cargo.toml b/client/rpc-spec-v2/Cargo.toml index 43fb189081bae..23b96877f3b17 100644 --- a/client/rpc-spec-v2/Cargo.toml +++ b/client/rpc-spec-v2/Cargo.toml @@ -43,4 +43,5 @@ substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" } sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } assert_matches = "1.3.0" diff --git a/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index 9173b7340b7e5..a0d19654e7959 100644 --- a/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -40,10 +40,7 @@ use sp_api::CallApiAt; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info, }; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT, One}, - Saturating, -}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{collections::HashSet, sync::Arc}; /// Generates the events of the `chainHead_follow` method. @@ -116,7 +113,7 @@ struct StartupPoint { /// The head of the finalized chain. pub finalized_hash: Block::Hash, /// Last finalized block number. - pub finalized_number: <::Header as HeaderT>::Number, + pub finalized_number: NumberFor, } impl From> for StartupPoint { @@ -318,10 +315,7 @@ where } // Ensure we are only reporting blocks after the starting point. - let Some(block_number) = self.client.number(notification.hash)? else { - return Err(SubscriptionManagementError::BlockNumberAbsent) - }; - if block_number < startup_point.finalized_number { + if *notification.header.number() < startup_point.finalized_number { return Ok(Default::default()) } @@ -349,59 +343,48 @@ where return Ok(Default::default()) }; - // Find the parent hash. - let Some(first_number) = self.client.number(*first_hash)? else { - return Err(SubscriptionManagementError::BlockNumberAbsent) - }; - let Some(parent) = self.client.hash(first_number.saturating_sub(One::one()))? else { - return Err(SubscriptionManagementError::BlockHashAbsent) + // Find the parent header. + let Some(first_header) = self.client.header(*first_hash)? else { + return Err(SubscriptionManagementError::BlockHeaderAbsent) }; - let last_finalized = finalized_block_hashes - .last() - .expect("At least one finalized hash inserted; qed"); - let parents = std::iter::once(&parent).chain(finalized_block_hashes.iter()); - for (hash, parent) in finalized_block_hashes.iter().zip(parents) { - // This block is already reported by the import notification. + let parents = + std::iter::once(first_header.parent_hash()).chain(finalized_block_hashes.iter()); + for (i, (hash, parent)) in finalized_block_hashes.iter().zip(parents).enumerate() { + // Check if the block was already reported and thus, is already pinned. if !self.sub_handle.pin_block(*hash)? { continue } - // Generate only the `NewBlock` event for this block. - if hash != last_finalized { + // Generate `NewBlock` events for all blocks beside the last block in the list + if i + 1 != finalized_block_hashes.len() { + // Generate only the `NewBlock` event for this block. 
events.extend(self.generate_import_events(*hash, *parent, false)); - continue - } - - match self.best_block_cache { - Some(best_block_hash) => { - // If the best reported block is a children of the last finalized, - // then we had a gap in notification. + } else { + // If we end up here and the `best_block` is a descendent of the finalized block + // (last block in the list), it means that there were skipped notifications. + // Otherwise `pin_block` would have returned `true`. + // + // When the node falls out of sync and then syncs up to the tip of the chain, it can + // happen that we skip notifications. Then it is better to terminate the connection + // instead of trying to send notifications for all missed blocks. + if let Some(best_block_hash) = self.best_block_cache { let ancestor = sp_blockchain::lowest_common_ancestor( &*self.client, - *last_finalized, + *hash, best_block_hash, )?; - // A descendent of the finalized block was already reported - // before the `NewBlock` event containing the finalized block - // is reported. - if ancestor.hash == *last_finalized { + if ancestor.hash == *hash { return Err(SubscriptionManagementError::Custom( "A descendent of the finalized block was already reported".into(), )) } - self.best_block_cache = Some(*hash); - }, - // This is the first best block event that we generate. - None => { - self.best_block_cache = Some(*hash); - }, - }; + } - // This is the first time we see this block. Generate the `NewBlock` event; if this is - // the last block, also generate the `BestBlock` event. - events.extend(self.generate_import_events(*hash, *parent, true)) + // Let's generate the `NewBlock` and `NewBestBlock` events for the block. + events.extend(self.generate_import_events(*hash, *parent, true)) + } } Ok(events) @@ -448,17 +431,13 @@ where let last_finalized = notification.hash; // Ensure we are only reporting blocks after the starting point. - let Some(block_number) = self.client.number(last_finalized)? else { - return Err(SubscriptionManagementError::BlockNumberAbsent) - }; - if block_number < startup_point.finalized_number { + if *notification.header.number() < startup_point.finalized_number { return Ok(Default::default()) } // The tree route contains the exclusive path from the last finalized block to the block // reported by the notification. Ensure the finalized block is also reported. - let mut finalized_block_hashes = - notification.tree_route.iter().cloned().collect::>(); + let mut finalized_block_hashes = notification.tree_route.to_vec(); finalized_block_hashes.push(last_finalized); // If the finalized hashes were not reported yet, generate the `NewBlock` events. @@ -476,9 +455,8 @@ where match self.best_block_cache { Some(block_cache) => { - // Check if the current best block is also reported as pruned. - let reported_pruned = pruned_block_hashes.iter().find(|&&hash| hash == block_cache); - if reported_pruned.is_none() { + // If the best block wasn't pruned, we are done here. + if !pruned_block_hashes.iter().any(|hash| *hash == block_cache) { events.push(finalized_event); return Ok(events) } @@ -499,20 +477,6 @@ where events.push(finalized_event); Ok(events) } else { - let ancestor = sp_blockchain::lowest_common_ancestor( - &*self.client, - last_finalized, - best_block_hash, - )?; - - // The client's best block must be a descendent of the last finalized block. - // In other words, the lowest common ancestor must be the last finalized block.
- if ancestor.hash != last_finalized { - return Err(SubscriptionManagementError::Custom( - "The finalized block is not an ancestor of the best block".into(), - )) - } - // The RPC needs to also submit a new best block changed before the // finalized event. self.best_block_cache = Some(best_block_hash); diff --git a/client/rpc-spec-v2/src/chain_head/mod.rs b/client/rpc-spec-v2/src/chain_head/mod.rs index afa8d3b2189ae..1c489d323f195 100644 --- a/client/rpc-spec-v2/src/chain_head/mod.rs +++ b/client/rpc-spec-v2/src/chain_head/mod.rs @@ -22,6 +22,8 @@ //! //! Methods are prefixed by `chainHead`. +#[cfg(test)] +mod test_utils; #[cfg(test)] mod tests; diff --git a/client/rpc-spec-v2/src/chain_head/subscription.rs b/client/rpc-spec-v2/src/chain_head/subscription.rs index 77d57e747ebc1..687374bba5e00 100644 --- a/client/rpc-spec-v2/src/chain_head/subscription.rs +++ b/client/rpc-spec-v2/src/chain_head/subscription.rs @@ -36,10 +36,8 @@ pub enum SubscriptionManagementError { ExceededLimits, /// Error originated from the blockchain (client or backend). Blockchain(Error), - /// The database does not contain a block number. - BlockNumberAbsent, - /// The database does not contain a block hash. - BlockHashAbsent, + /// The database does not contain a block header. + BlockHeaderAbsent, /// Custom error. Custom(String), } diff --git a/client/rpc-spec-v2/src/chain_head/test_utils.rs b/client/rpc-spec-v2/src/chain_head/test_utils.rs new file mode 100644 index 0000000000000..ee563debb4502 --- /dev/null +++ b/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -0,0 +1,320 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use parking_lot::Mutex; +use sc_client_api::{ + execution_extensions::ExecutionExtensions, BlockBackend, BlockImportNotification, + BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, FinalityNotification, + FinalityNotifications, FinalizeSummary, ImportNotifications, KeysIter, PairsIter, StorageData, + StorageEventStream, StorageKey, StorageProvider, +}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; +use sp_api::{CallApiAt, CallApiAtParams, NumberFor, RuntimeVersion}; +use sp_blockchain::{BlockStatus, CachedHeaderMetadata, HeaderBackend, HeaderMetadata, Info}; +use sp_consensus::BlockOrigin; +use sp_runtime::{ + generic::SignedBlock, + traits::{Block as BlockT, Header as HeaderT}, + Justifications, +}; +use std::sync::Arc; +use substrate_test_runtime::{Block, Hash, Header}; + +pub struct ChainHeadMockClient { + client: Arc, + import_sinks: Mutex>>>, + finality_sinks: Mutex>>>, +} + +impl ChainHeadMockClient { + pub fn new(client: Arc) -> Self { + ChainHeadMockClient { + client, + import_sinks: Default::default(), + finality_sinks: Default::default(), + } + } + + pub async fn trigger_import_stream(&self, header: Header) { + // Ensure the client called the `import_notification_stream`. + while self.import_sinks.lock().is_empty() { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } + + // Build the notification. + let (sink, _stream) = tracing_unbounded("test_sink", 100_000); + let notification = + BlockImportNotification::new(header.hash(), BlockOrigin::Own, header, true, None, sink); + + for sink in self.import_sinks.lock().iter_mut() { + sink.unbounded_send(notification.clone()).unwrap(); + } + } + + pub async fn trigger_finality_stream(&self, header: Header) { + // Ensure the client called the `finality_notification_stream`. + while self.finality_sinks.lock().is_empty() { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } + + // Build the notification. + let (sink, _stream) = tracing_unbounded("test_sink", 100_000); + let summary = FinalizeSummary { + header: header.clone(), + finalized: vec![header.hash()], + stale_heads: vec![], + }; + let notification = FinalityNotification::from_summary(summary, sink); + + for sink in self.finality_sinks.lock().iter_mut() { + sink.unbounded_send(notification.clone()).unwrap(); + } + } +} + +// ChainHead calls `import_notification_stream` and `finality_notification_stream` in order to +// subscribe to block events. +impl BlockchainEvents for ChainHeadMockClient { + fn import_notification_stream(&self) -> ImportNotifications { + let (sink, stream) = tracing_unbounded("import_notification_stream", 1024); + self.import_sinks.lock().push(sink); + stream + } + + fn every_import_notification_stream(&self) -> ImportNotifications { + unimplemented!() + } + + fn finality_notification_stream(&self) -> FinalityNotifications { + let (sink, stream) = tracing_unbounded("finality_notification_stream", 1024); + self.finality_sinks.lock().push(sink); + stream + } + + fn storage_changes_notification_stream( + &self, + _filter_keys: Option<&[StorageKey]>, + _child_filter_keys: Option<&[(StorageKey, Option>)]>, + ) -> sp_blockchain::Result> { + unimplemented!() + } +} + +// The following implementations are imposed by the `chainHead` trait bounds. 
+ +impl, Client: ExecutorProvider> + ExecutorProvider for ChainHeadMockClient +{ + type Executor = >::Executor; + + fn executor(&self) -> &Self::Executor { + self.client.executor() + } + + fn execution_extensions(&self) -> &ExecutionExtensions { + self.client.execution_extensions() + } +} + +impl< + BE: sc_client_api::backend::Backend + Send + Sync + 'static, + Block: BlockT, + Client: StorageProvider, + > StorageProvider for ChainHeadMockClient +{ + fn storage( + &self, + hash: Block::Hash, + key: &StorageKey, + ) -> sp_blockchain::Result> { + self.client.storage(hash, key) + } + + fn storage_hash( + &self, + hash: Block::Hash, + key: &StorageKey, + ) -> sp_blockchain::Result> { + self.client.storage_hash(hash, key) + } + + fn storage_keys( + &self, + hash: Block::Hash, + prefix: Option<&StorageKey>, + start_key: Option<&StorageKey>, + ) -> sp_blockchain::Result> { + self.client.storage_keys(hash, prefix, start_key) + } + + fn storage_pairs( + &self, + hash: ::Hash, + prefix: Option<&StorageKey>, + start_key: Option<&StorageKey>, + ) -> sp_blockchain::Result> { + self.client.storage_pairs(hash, prefix, start_key) + } + + fn child_storage( + &self, + hash: Block::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result> { + self.client.child_storage(hash, child_info, key) + } + + fn child_storage_keys( + &self, + hash: Block::Hash, + child_info: ChildInfo, + prefix: Option<&StorageKey>, + start_key: Option<&StorageKey>, + ) -> sp_blockchain::Result> { + self.client.child_storage_keys(hash, child_info, prefix, start_key) + } + + fn child_storage_hash( + &self, + hash: Block::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result> { + self.client.child_storage_hash(hash, child_info, key) + } +} + +impl> CallApiAt for ChainHeadMockClient { + type StateBackend = >::StateBackend; + + fn call_api_at( + &self, + params: CallApiAtParams>::StateBackend>, + ) -> Result, sp_api::ApiError> { + self.client.call_api_at(params) + } + + fn runtime_version_at(&self, hash: Block::Hash) -> Result { + self.client.runtime_version_at(hash) + } + + fn state_at(&self, at: Block::Hash) -> Result { + self.client.state_at(at) + } +} + +impl> BlockBackend + for ChainHeadMockClient +{ + fn block_body( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Extrinsic>>> { + self.client.block_body(hash) + } + + fn block(&self, hash: Block::Hash) -> sp_blockchain::Result>> { + self.client.block(hash) + } + + fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result { + self.client.block_status(hash) + } + + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { + self.client.justifications(hash) + } + + fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { + self.client.block_hash(number) + } + + fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result>> { + self.client.indexed_transaction(hash) + } + + fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result { + self.client.has_indexed_transaction(hash) + } + + fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result>>> { + self.client.block_indexed_body(hash) + } + fn requires_full_sync(&self) -> bool { + self.client.requires_full_sync() + } +} + +impl + Send + Sync> HeaderMetadata + for ChainHeadMockClient +{ + type Error = >::Error; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.client.header_metadata(hash) + } + + fn insert_header_metadata( + &self, + hash: Block::Hash, + 
header_metadata: CachedHeaderMetadata, + ) { + self.client.insert_header_metadata(hash, header_metadata) + } + + fn remove_header_metadata(&self, hash: Block::Hash) { + self.client.remove_header_metadata(hash) + } +} + +impl + Send + Sync> HeaderBackend + for ChainHeadMockClient +{ + fn header( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header>> { + self.client.header(hash) + } + + fn info(&self) -> Info { + self.client.info() + } + + fn status(&self, hash: Block::Hash) -> sc_client_api::blockchain::Result { + self.client.status(hash) + } + + fn number( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + self.client.number(hash) + } + + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> sp_blockchain::Result> { + self.client.hash(number) + } +} diff --git a/client/rpc-spec-v2/src/chain_head/tests.rs b/client/rpc-spec-v2/src/chain_head/tests.rs index 0886efa94a756..1d5cb8da26305 100644 --- a/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1,6 +1,9 @@ +use crate::chain_head::test_utils::ChainHeadMockClient; + use super::*; use assert_matches::assert_matches; use codec::{Decode, Encode}; +use futures::Future; use jsonrpsee::{ core::{error::Error, server::rpc_module::Subscription as RpcSubscription}, types::{error::CallError, EmptyServerParams as EmptyParams}, @@ -33,7 +36,7 @@ const CHILD_STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"child value"; async fn get_next_event(sub: &mut RpcSubscription) -> T { - let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(1), sub.next()) + let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) .await .unwrap() .unwrap() @@ -41,6 +44,12 @@ async fn get_next_event(sub: &mut RpcSubscriptio event } +async fn run_with_timeout(future: F) -> ::Output { + tokio::time::timeout(std::time::Duration::from_secs(60 * 10), future) + .await + .unwrap() +} + async fn setup_api() -> ( Arc>, RpcModule>>, @@ -170,7 +179,7 @@ async fn follow_with_runtime() { let runtime_str = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ - [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ + [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",2],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ \"transactionVersion\":1,\"stateVersion\":1}"; @@ -1317,3 +1326,95 @@ async fn follow_report_multiple_pruned_block() { }); assert_eq!(event, expected); } + +#[tokio::test] +async fn follow_finalized_before_new_block() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); + + let api = ChainHead::new( + client_mock.clone(), + backend, + Arc::new(TaskExecutor::default()), + CHAIN_GENESIS, + MAX_PINNED_BLOCKS, + ) + .into_rpc(); + + // Make sure the block is imported for it to be pinned. 
+ let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_1_hash = block_1.header.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); + + // Trigger the `FinalizedNotification` for block 1 before the `BlockImportNotification`, and + // expect for the `chainHead` to generate `NewBlock`, `BestBlock` and `Finalized` events. + + // Trigger the Finalized notification before the NewBlock one. + run_with_timeout(client_mock.trigger_finality_stream(block_1.header.clone())).await; + + // Initialized must always be reported first. + let finalized_hash = client.info().finalized_hash; + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Initialized(Initialized { + finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_runtime: None, + runtime_updates: false, + }); + assert_eq!(event, expected); + + // Block 1 must be reported because we triggered the finalized notification. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::NewBlock(NewBlock { + block_hash: format!("{:?}", block_1_hash), + parent_block_hash: format!("{:?}", finalized_hash), + new_runtime: None, + runtime_updates: false, + }); + assert_eq!(event, expected); + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: format!("{:?}", block_1_hash), + }); + assert_eq!(event, expected); + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Finalized(Finalized { + finalized_block_hashes: vec![format!("{:?}", block_1_hash)], + pruned_block_hashes: vec![], + }); + assert_eq!(event, expected); + + let block_2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_2_hash = block_2.header.hash(); + client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); + + // Triggering the `BlockImportNotification` notification for block 1 should have no effect + // on the notification because the events were handled by the `FinalizedNotification`. + // Also trigger the `BlockImportNotification` notification for block 2 to ensure + // `NewBlock and `BestBlock` events are generated. + + // Trigger NewBlock notification for block 1 and block 2. 
+ run_with_timeout(client_mock.trigger_import_stream(block_1.header)).await; + run_with_timeout(client_mock.trigger_import_stream(block_2.header)).await; + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::NewBlock(NewBlock { + block_hash: format!("{:?}", block_2_hash), + parent_block_hash: format!("{:?}", block_1_hash), + new_runtime: None, + runtime_updates: false, + }); + assert_eq!(event, expected); + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: format!("{:?}", block_2_hash), + }); + assert_eq!(event, expected); +} diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 251a4a715c51b..930b2c15160a8 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2" } futures = "0.3.21" hash-db = { version = "0.15.2", default-features = false } -jsonrpsee = { version = "0.16.2", features = ["server"] } lazy_static = { version = "1.4.0", optional = true } +jsonrpsee = { version = "0.16.2", features = ["server"] } log = "0.4.17" parking_lot = "0.12.1" serde_json = "1.0.85" diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 2bb88352eb2e5..00a126500e26d 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -40,7 +40,7 @@ use sc_transaction_pool_api::{ use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; use sp_runtime::{generic, traits::Block as BlockT}; use sp_session::SessionKeys; @@ -55,7 +55,7 @@ pub struct Author { /// Transactions pool pool: Arc

, /// The key store. - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, /// Whether to deny unsafe calls deny_unsafe: DenyUnsafe, /// Executor to spawn subscriptions. @@ -67,7 +67,7 @@ impl Author { pub fn new( client: Arc, pool: Arc

, - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, deny_unsafe: DenyUnsafe, executor: SubscriptionTaskExecutor, ) -> Self { @@ -112,8 +112,9 @@ where self.deny_unsafe.check_if_safe()?; let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; - SyncCryptoStore::insert_unknown(&*self.keystore, key_type, &suri, &public[..]) - .map_err(|_| Error::KeyStoreUnavailable)?; + self.keystore + .insert(key_type, &suri, &public[..]) + .map_err(|_| Error::KeystoreUnavailable)?; Ok(()) } @@ -139,14 +140,14 @@ where .map_err(|e| Error::Client(Box::new(e)))? .ok_or(Error::InvalidSessionKeys)?; - Ok(SyncCryptoStore::has_keys(&*self.keystore, &keys)) + Ok(self.keystore.has_keys(&keys)) } fn has_key(&self, public_key: Bytes, key_type: String) -> RpcResult { self.deny_unsafe.check_if_safe()?; let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; - Ok(SyncCryptoStore::has_keys(&*self.keystore, &[(public_key.to_vec(), key_type)])) + Ok(self.keystore.has_keys(&[(public_key.to_vec(), key_type)])) } fn pending_extrinsics(&self) -> RpcResult> { diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index bf880cfbcfe9b..fbbd1a92fdaa7 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -31,12 +31,12 @@ use sc_transaction_pool_api::TransactionStatus; use sp_core::{ blake2_256, bytes::to_hex, - crypto::{ByteArray, CryptoTypePublicPair, Pair}, - ed25519, sr25519, + crypto::{ByteArray, Pair}, + ed25519, testing::{ED25519, SR25519}, H256, }; -use sp_keystore::testing::KeyStore; +use sp_keystore::{testing::MemoryKeystore, Keystore}; use std::sync::Arc; use substrate_test_runtime_client::{ self, @@ -58,13 +58,13 @@ type FullTransactionPool = BasicPool, Block>, Block struct TestSetup { pub client: Arc>, - pub keystore: Arc, + pub keystore: Arc, pub pool: Arc, } impl Default for TestSetup { fn default() -> Self { - let keystore = Arc::new(KeyStore::new()); + let keystore = Arc::new(MemoryKeystore::new()); let client_builder = substrate_test_runtime_client::TestClientBuilder::new(); let client = Arc::new(client_builder.set_keystore(keystore.clone()).build()); @@ -225,11 +225,9 @@ async fn author_should_insert_key() { keypair.public().0.to_vec().into(), ); api.call::<_, ()>("author_insertKey", params).await.unwrap(); - let pubkeys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); + let pubkeys = setup.keystore.keys(ED25519).unwrap(); - assert!( - pubkeys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, keypair.public().to_raw_vec())) - ); + assert!(pubkeys.contains(&keypair.public().to_raw_vec())); } #[tokio::test] @@ -240,12 +238,10 @@ async fn author_should_rotate_keys() { let new_pubkeys: Bytes = api.call("author_rotateKeys", EmptyParams::new()).await.unwrap(); let session_keys = SessionKeys::decode(&mut &new_pubkeys[..]).expect("SessionKeys decode successfully"); - let ed25519_pubkeys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); - let sr25519_pubkeys = SyncCryptoStore::keys(&*setup.keystore, SR25519).unwrap(); - assert!(ed25519_pubkeys - .contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); - assert!(sr25519_pubkeys - .contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); + let ed25519_pubkeys = setup.keystore.keys(ED25519).unwrap(); + let sr25519_pubkeys = setup.keystore.keys(SR25519).unwrap(); + assert!(ed25519_pubkeys.contains(&session_keys.ed25519.to_raw_vec())); + 
assert!(sr25519_pubkeys.contains(&session_keys.sr25519.to_raw_vec())); } #[tokio::test] diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index d54811320e749..2b5994f217df3 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -59,10 +59,10 @@ where } } - /// Get header of a relay chain block. + /// Get header of a block. fn header(&self, hash: Option) -> Result, Error>; - /// Get header and body of a relay chain block. + /// Get header and body of a block. fn block(&self, hash: Option) -> Result>, Error>; /// Get hash of the n-th block in the canon chain. diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 12d59ad3b382d..f81ec991db133 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -169,7 +169,6 @@ pub fn new_full( client: Arc, executor: SubscriptionTaskExecutor, deny_unsafe: DenyUnsafe, - rpc_max_payload: Option, ) -> (State, ChildState) where Block: BlockT + 'static, @@ -189,12 +188,9 @@ where + 'static, Client::Api: Metadata, { - let child_backend = Box::new(self::state_full::FullState::new( - client.clone(), - executor.clone(), - rpc_max_payload, - )); - let backend = Box::new(self::state_full::FullState::new(client, executor, rpc_max_payload)); + let child_backend = + Box::new(self::state_full::FullState::new(client.clone(), executor.clone())); + let backend = Box::new(self::state_full::FullState::new(client, executor)); (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index f26d42484f24f..20ca5f7131e71 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -66,7 +66,6 @@ pub struct FullState { client: Arc, executor: SubscriptionTaskExecutor, _phantom: PhantomData<(BE, Block)>, - rpc_max_payload: Option, } impl FullState @@ -79,12 +78,8 @@ where Block: BlockT + 'static, { /// Create new state API backend for full nodes. - pub fn new( - client: Arc, - executor: SubscriptionTaskExecutor, - rpc_max_payload: Option, - ) -> Self { - Self { client, executor, _phantom: PhantomData, rpc_max_payload } + pub fn new(client: Arc, executor: SubscriptionTaskExecutor) -> Self { + Self { client, executor, _phantom: PhantomData } } /// Returns given block hash or best block hash if None is passed. 
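Aside on the keystore migration above: the `author` RPC and its tests now go through the object-safe `Keystore` trait behind a `KeystorePtr` instead of the free-standing `SyncCryptoStore::*` calls. A minimal sketch of the new call style, restricted to the methods this patch actually exercises (`insert`, `has_keys`, `keys`) against the in-memory test keystore; the key type and seed below are illustrative only:

use sp_core::{
	crypto::{ByteArray, Pair},
	ed25519,
	testing::ED25519,
};
use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr};
use std::sync::Arc;

fn keystore_example() {
	let keystore: KeystorePtr = Arc::new(MemoryKeystore::new());

	// `insert` replaces the old `SyncCryptoStore::insert_unknown` static call.
	let pair = ed25519::Pair::from_string("//Alice", None).expect("static seed is valid");
	keystore
		.insert(ED25519, "//Alice", &pair.public().to_raw_vec())
		.expect("inserting into an in-memory keystore does not fail");

	// Key queries now take and return raw public-key bytes instead of `CryptoTypePublicPair`.
	assert!(keystore.has_keys(&[(pair.public().to_raw_vec(), ED25519)]));
	assert!(keystore.keys(ED25519).unwrap().contains(&pair.public().to_raw_vec()));
}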
@@ -481,7 +476,6 @@ where targets, storage_keys, methods, - self.rpc_max_payload, ) .trace_block() .map_err(|e| invalid_block::(block, None, e.to_string())) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 53f8f1d48bc95..ae193e662b0e7 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -55,7 +55,7 @@ async fn should_return_storage() { .add_extra_storage(b":map:acc2".to_vec(), vec![1, 2, 3]) .build(); let genesis_hash = client.genesis_hash(); - let (client, child) = new_full(Arc::new(client), test_executor(), DenyUnsafe::No, None); + let (client, child) = new_full(Arc::new(client), test_executor(), DenyUnsafe::No); let key = StorageKey(KEY.to_vec()); assert_eq!( @@ -103,7 +103,7 @@ async fn should_return_storage_entries() { .add_extra_child_storage(&child_info, KEY2.to_vec(), CHILD_VALUE2.to_vec()) .build(); let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full(Arc::new(client), test_executor(), DenyUnsafe::No, None); + let (_client, child) = new_full(Arc::new(client), test_executor(), DenyUnsafe::No); let keys = &[StorageKey(KEY1.to_vec()), StorageKey(KEY2.to_vec())]; assert_eq!( @@ -134,7 +134,7 @@ async fn should_return_child_storage() { .build(), ); let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full(client, test_executor(), DenyUnsafe::No, None); + let (_client, child) = new_full(client, test_executor(), DenyUnsafe::No); let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); @@ -165,7 +165,7 @@ async fn should_return_child_storage_entries() { .build(), ); let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full(client, test_executor(), DenyUnsafe::No, None); + let (_client, child) = new_full(client, test_executor(), DenyUnsafe::No); let child_key = prefixed_storage_key(); let keys = vec![StorageKey(b"key1".to_vec()), StorageKey(b"key2".to_vec())]; @@ -196,7 +196,7 @@ async fn should_return_child_storage_entries() { async fn should_call_contract() { let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); - let (client, _child) = new_full(client, test_executor(), DenyUnsafe::No, None); + let (client, _child) = new_full(client, test_executor(), DenyUnsafe::No); use jsonrpsee::{core::Error, types::error::CallError}; @@ -210,7 +210,7 @@ async fn should_call_contract() { async fn should_notify_about_storage_changes() { let mut sub = { let mut client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No, None); + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); let api_rpc = api.into_rpc(); let sub = api_rpc.subscribe("state_subscribeStorage", EmptyParams::new()).await.unwrap(); @@ -242,7 +242,7 @@ async fn should_notify_about_storage_changes() { async fn should_send_initial_storage_changes_and_notifications() { let mut sub = { let mut client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No, None); + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); @@ -278,7 +278,7 @@ async fn should_send_initial_storage_changes_and_notifications() { #[tokio::test] async fn should_query_storage() { async fn run_tests(mut client: Arc) { - let (api, _child) = new_full(client.clone(), test_executor(), 
DenyUnsafe::No, None); + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); let mut add_block = |nonce| { let mut builder = client.new_block(Default::default()).unwrap(); @@ -480,11 +480,11 @@ async fn should_query_storage() { #[tokio::test] async fn should_return_runtime_version() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No, None); + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ - [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ + [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",2],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ \"transactionVersion\":1,\"stateVersion\":1}"; @@ -501,7 +501,7 @@ async fn should_return_runtime_version() { async fn should_notify_on_runtime_version_initially() { let mut sub = { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client, test_executor(), DenyUnsafe::No, None); + let (api, _child) = new_full(client, test_executor(), DenyUnsafe::No); let api_rpc = api.into_rpc(); let sub = api_rpc @@ -530,7 +530,7 @@ fn should_deserialize_storage_key() { #[tokio::test] async fn wildcard_storage_subscriptions_are_rpc_unsafe() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client, test_executor(), DenyUnsafe::Yes, None); + let (api, _child) = new_full(client, test_executor(), DenyUnsafe::Yes); let api_rpc = api.into_rpc(); let err = api_rpc.subscribe("state_subscribeStorage", EmptyParams::new()).await; @@ -540,7 +540,7 @@ async fn wildcard_storage_subscriptions_are_rpc_unsafe() { #[tokio::test] async fn concrete_storage_subscriptions_are_rpc_safe() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client, test_executor(), DenyUnsafe::Yes, None); + let (api, _child) = new_full(client, test_executor(), DenyUnsafe::Yes); let api_rpc = api.into_rpc(); let key = StorageKey(STORAGE_KEY.to_vec()); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 5f49032934ebc..c67732f478e4c 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -77,7 +77,6 @@ sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-offchain = { version = "4.0.0-dev", path = "../offchain" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" } sc-storage-monitor = { version = "0.1.0", path = "../storage-monitor" } tracing = "0.1.29" diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 91ef65cf134e4..2208c55cf4090 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -36,11 +36,12 @@ use sc_client_api::{ }; use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; -use sc_executor::RuntimeVersionOf; -use sc_keystore::LocalKeystore; -use sc_network::{ - config::SyncMode, 
NetworkEventStream, NetworkService, NetworkStateInfo, NetworkStatusProvider, +use sc_executor::{ + sp_wasm_interface::HostFunctions, HeapAllocStrategy, NativeElseWasmExecutor, + NativeExecutionDispatch, RuntimeVersionOf, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, }; +use sc_keystore::LocalKeystore; +use sc_network::{config::SyncMode, NetworkService, NetworkStateInfo, NetworkStatusProvider}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{role::Roles, sync::warp::WarpSyncParams}; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -59,7 +60,7 @@ use sc_rpc::{ }; use sc_rpc_spec_v2::{chain_head::ChainHeadApiServer, transaction::TransactionApiServer}; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; -use sc_transaction_pool_api::MaintainedTransactionPool; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -67,7 +68,7 @@ use sp_consensus::block_validation::{ BlockAnnounceValidator, Chain, DefaultBlockAnnounceValidator, }; use sp_core::traits::{CodeExecutor, SpawnNamed}; -use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; use std::{str::FromStr, sync::Arc, time::SystemTime}; @@ -76,37 +77,16 @@ pub type TFullClient = Client, TFullCallExecutor, TBl, TRtApi>; /// Full client backend type. -pub type TFullBackend = sc_client_db::Backend; +pub type TFullBackend = Backend; /// Full client call executor type. -pub type TFullCallExecutor = - crate::client::LocalCallExecutor, TExec>; +pub type TFullCallExecutor = crate::client::LocalCallExecutor, TExec>; type TFullParts = (TFullClient, Arc>, KeystoreContainer, TaskManager); -trait AsCryptoStoreRef { - fn keystore_ref(&self) -> Arc; - fn sync_keystore_ref(&self) -> Arc; -} - -impl AsCryptoStoreRef for Arc -where - T: CryptoStore + SyncCryptoStore + 'static, -{ - fn keystore_ref(&self) -> Arc { - self.clone() - } - fn sync_keystore_ref(&self) -> Arc { - self.clone() - } -} - -/// Construct and hold different layers of Keystore wrappers -pub struct KeystoreContainer { - remote: Option>, - local: Arc, -} +/// Construct a local keystore shareable container +pub struct KeystoreContainer(Arc); impl KeystoreContainer { /// Construct KeystoreContainer @@ -117,50 +97,17 @@ impl KeystoreContainer { KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); - Ok(Self { remote: Default::default(), local: keystore }) - } - - /// Set the remote keystore. - /// Should be called right away at startup and not at runtime: - /// even though this overrides any previously set remote store, it - /// does not reset any references previously handed out - they will - /// stick around. - pub fn set_remote_keystore(&mut self, remote: Arc) - where - T: CryptoStore + SyncCryptoStore + 'static, - { - self.remote = Some(Box::new(remote)) + Ok(Self(keystore)) } - /// Returns an adapter to the asynchronous keystore that implements `CryptoStore` - pub fn keystore(&self) -> Arc { - if let Some(c) = self.remote.as_ref() { - c.keystore_ref() - } else { - self.local.clone() - } + /// Returns a shared reference to a dynamic `Keystore` trait implementation. 
+ pub fn keystore(&self) -> KeystorePtr { + self.0.clone() } - /// Returns the synchronous keystore wrapper - pub fn sync_keystore(&self) -> SyncCryptoStorePtr { - if let Some(c) = self.remote.as_ref() { - c.sync_keystore_ref() - } else { - self.local.clone() as SyncCryptoStorePtr - } - } - - /// Returns the local keystore if available - /// - /// The function will return None if the available keystore is not a local keystore. - /// - /// # Note - /// - /// Using the [`LocalKeystore`] will result in loosing the ability to use any other keystore - /// implementation, like a remote keystore for example. Only use this if you a certain that you - /// require it! - pub fn local_keystore(&self) -> Option> { - Some(self.local.clone()) + /// Returns a shared reference to the local keystore . + pub fn local_keystore(&self) -> Arc { + self.0.clone() } } @@ -234,8 +181,9 @@ where let client = { let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( config.execution_strategies.clone(), - Some(keystore_container.sync_keystore()), + Some(keystore_container.keystore()), sc_offchain::OffchainDb::factory_from_backend(&*backend), + Arc::new(executor.clone()), ); let wasm_runtime_substitutes = config @@ -283,6 +231,27 @@ where Ok((client, backend, keystore_container, task_manager)) } +/// Creates a [`NativeElseWasmExecutor`] according to [`Configuration`]. +pub fn new_native_or_wasm_executor( + config: &Configuration, +) -> NativeElseWasmExecutor { + NativeElseWasmExecutor::new_with_wasm_executor(new_wasm_executor(config)) +} + +/// Creates a [`WasmExecutor`] according to [`Configuration`]. +pub fn new_wasm_executor(config: &Configuration) -> WasmExecutor { + let strategy = config + .default_heap_pages + .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static { extra_pages: p as _ }); + WasmExecutor::::builder() + .with_execution_method(config.wasm_method) + .with_onchain_heap_alloc_strategy(strategy) + .with_offchain_heap_alloc_strategy(strategy) + .with_max_runtime_instances(config.max_runtime_instances) + .with_runtime_cache_size(config.runtime_cache_size) + .build() +} + /// Create an instance of default DB-backend backend. pub fn new_db_backend( settings: DatabaseSettings, @@ -308,7 +277,7 @@ pub fn new_client( telemetry: Option, config: ClientConfig, ) -> Result< - crate::client::Client< + Client< Backend, crate::client::LocalCallExecutor, E>, Block, @@ -327,12 +296,11 @@ where let executor = crate::client::LocalCallExecutor::new( backend.clone(), executor, - spawn_handle.clone(), config.clone(), execution_extensions, )?; - crate::client::Client::new( + Client::new( backend, executor, spawn_handle, @@ -374,7 +342,7 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// A task manager returned by `new_full_parts`. pub task_manager: &'a mut TaskManager, /// A shared keystore returned by `new_full_parts`. - pub keystore: SyncCryptoStorePtr, + pub keystore: KeystorePtr, /// A shared transaction pool. 
pub transaction_pool: Arc, /// Builds additional [`RpcModule`]s that should be added to the server @@ -645,7 +613,7 @@ fn gen_rpc_module( spawn_handle: SpawnTaskHandle, client: Arc, transaction_pool: Arc, - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, system_rpc_tx: TracingUnboundedSender>, config: &Configuration, backend: Arc, @@ -684,12 +652,8 @@ where let (chain, state, child_state) = { let chain = sc_rpc::chain::new_full(client.clone(), task_executor.clone()).into_rpc(); - let (state, child_state) = sc_rpc::state::new_full( - client.clone(), - task_executor.clone(), - deny_unsafe, - config.rpc_max_payload, - ); + let (state, child_state) = + sc_rpc::state::new_full(client.clone(), task_executor.clone(), deny_unsafe); let state = state.into_rpc(); let child_state = child_state.into_rpc(); @@ -792,7 +756,7 @@ where + HeaderBackend + BlockchainEvents + 'static, - TExPool: MaintainedTransactionPool::Hash> + 'static, + TExPool: TransactionPool::Hash> + 'static, TImpQu: ImportQueue + 'static, { let BuildNetworkParams { @@ -882,6 +846,7 @@ where protocol_config }; + let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (engine, sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config.role), @@ -897,6 +862,7 @@ where block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), warp_sync_protocol_config.as_ref().map(|config| config.name.clone()), + rx, )?; let sync_service_import_queue = sync_service.clone(); let sync_service = Arc::new(sync_service); @@ -907,7 +873,8 @@ where protocol_config })); - let mut network_params = sc_network::config::Params { + let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); + let mut network_params = sc_network::config::Params:: { role: config.role.clone(), executor: { let spawn_handle = Clone::clone(&spawn_handle); @@ -916,11 +883,12 @@ where }) }, network_config: config.network.clone(), - chain: client.clone(), + genesis_hash, protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, + tx, request_response_protocol_configs: request_response_protocol_configs .into_iter() .chain([ @@ -960,15 +928,13 @@ where )?; spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); - spawn_handle.spawn( + spawn_handle.spawn_blocking( "chain-sync-network-service-provider", Some("networking"), chain_sync_network_provider.run(network.clone()), ); spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service_import_queue))); - - let event_stream = network.event_stream("syncing"); - spawn_handle.spawn("syncing", None, engine.run(event_stream)); + spawn_handle.spawn_blocking("syncing", None, engine.run()); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc", 10_000); spawn_handle.spawn( diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 82d3e36ac80ea..7f83d62874c8e 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -23,7 +23,7 @@ use sc_client_api::{ use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_api::{ProofRecorder, StorageTransactionCache}; use sp_core::{ - traits::{CallContext, CodeExecutor, 
RuntimeCode, SpawnNamed}, + traits::{CallContext, CodeExecutor, RuntimeCode}, ExecutionContext, }; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; @@ -39,7 +39,6 @@ pub struct LocalCallExecutor { executor: E, wasm_override: Arc>, wasm_substitutes: WasmSubstitutes, - spawn_handle: Box, execution_extensions: Arc>, } @@ -52,7 +51,6 @@ where pub fn new( backend: Arc, executor: E, - spawn_handle: Box, client_config: ClientConfig, execution_extensions: ExecutionExtensions, ) -> sp_blockchain::Result { @@ -72,7 +70,6 @@ where backend, executor, wasm_override: Arc::new(wasm_override), - spawn_handle, wasm_substitutes, execution_extensions: Arc::new(execution_extensions), }) @@ -84,13 +81,14 @@ where fn check_override<'a>( &'a self, onchain_code: RuntimeCode<'a>, - hash: ::Hash, + state: &B::State, + hash: Block::Hash, ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> where Block: BlockT, B: backend::Backend, { - let on_chain_version = self.on_chain_runtime_version(hash)?; + let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { o.get( &on_chain_version.spec_version, @@ -118,17 +116,18 @@ where } /// Returns the on chain runtime version. - fn on_chain_runtime_version(&self, hash: Block::Hash) -> sp_blockchain::Result { + fn on_chain_runtime_version( + &self, + code: &RuntimeCode, + state: &B::State, + ) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let state = self.backend.state_at(hash)?; let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = - state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + let mut ext = Ext::new(&mut overlay, &mut cache, state, None); + self.executor - .runtime_version(&mut ext, &runtime_code) + .runtime_version(&mut ext, code) .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) } } @@ -142,7 +141,6 @@ where backend: self.backend.clone(), executor: self.executor.clone(), wasm_override: self.wasm_override.clone(), - spawn_handle: self.spawn_handle.clone(), wasm_substitutes: self.wasm_substitutes.clone(), execution_extensions: self.execution_extensions.clone(), } @@ -180,7 +178,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, at_hash)?.0; + let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; let extensions = self.execution_extensions.extensions( at_hash, @@ -196,7 +194,6 @@ where call_data, extensions, &runtime_code, - self.spawn_handle.clone(), context, ) .set_parent_hash(at_hash); @@ -238,7 +235,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, at_hash)?.0; + let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; match recorder { Some(recorder) => { @@ -256,7 +253,6 @@ where call_data, extensions, &runtime_code, - self.spawn_handle.clone(), call_context, ) .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) @@ -272,7 +268,6 @@ where call_data, extensions, &runtime_code, - self.spawn_handle.clone(), call_context, ) .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) @@ -289,7 +284,7 @@ 
where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - self.check_override(runtime_code, at_hash).map(|(_, v)| v) + self.check_override(runtime_code, &state, at_hash).map(|(_, v)| v) } fn prove_execution( @@ -307,13 +302,12 @@ where let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, at_hash)?.0; + let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; sp_state_machine::prove_execution_on_trie_backend( trie_backend, &mut Default::default(), &self.executor, - self.spawn_handle.clone(), method, call_data, &runtime_code, @@ -368,7 +362,7 @@ mod tests { use super::*; use backend::Backend; use sc_client_api::in_mem; - use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; + use sc_executor::{NativeElseWasmExecutor, WasmExecutor}; use sp_core::{ testing::TaskExecutor, traits::{FetchRuntimeCode, WrappedRuntimeCode}, @@ -376,14 +370,18 @@ mod tests { use std::collections::HashMap; use substrate_test_runtime_client::{runtime, GenesisInit, LocalExecutorDispatch}; + fn executor() -> NativeElseWasmExecutor { + NativeElseWasmExecutor::new_with_wasm_executor( + WasmExecutor::builder() + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .build(), + ) + } + #[test] fn should_get_override_if_exists() { - let executor = NativeElseWasmExecutor::::new( - WasmExecutionMethod::Interpreted, - Some(128), - 1, - 2, - ); + let executor = executor(); let overrides = crate::client::wasm_override::dummy_overrides(); let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); @@ -425,7 +423,6 @@ mod tests { backend: backend.clone(), executor: executor.clone(), wasm_override: Arc::new(Some(overrides)), - spawn_handle: Box::new(TaskExecutor::new()), wasm_substitutes: WasmSubstitutes::new( Default::default(), executor.clone(), @@ -436,11 +433,16 @@ mod tests { Default::default(), None, None, + Arc::new(executor.clone()), )), }; let check = call_executor - .check_override(onchain_code, backend.blockchain().info().genesis_hash) + .check_override( + onchain_code, + &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), + backend.blockchain().info().genesis_hash, + ) .expect("RuntimeCode override") .0; @@ -451,12 +453,7 @@ mod tests { fn returns_runtime_version_from_substitute() { const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; - let executor = NativeElseWasmExecutor::::new( - WasmExecutionMethod::Interpreted, - Some(128), - 1, - 2, - ); + let executor = executor(); let backend = Arc::new(in_mem::Backend::::new()); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index e532a0e7b7218..757966c8af94a 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -70,7 +70,7 @@ use sp_core::{ traits::SpawnNamed, }; #[cfg(feature = "test-helpers")] -use sp_keystore::SyncCryptoStorePtr; +use sp_keystore::KeystorePtr; use sp_runtime::{ generic::{BlockId, SignedBlock}, traits::{ @@ -166,7 +166,7 @@ pub fn new_in_mem( backend: Arc>, executor: E, genesis_block_builder: G, - keystore: Option, + keystore: Option, prometheus_registry: Option, telemetry: Option, spawn_handle: Box, @@ -229,7 +229,7 @@ pub fn new_with_backend( backend: Arc, executor: E, genesis_block_builder: G, - keystore: Option, + keystore: Option, spawn_handle: 
Box, prometheus_registry: Option, telemetry: Option, @@ -248,15 +248,11 @@ where Default::default(), keystore, sc_offchain::OffchainDb::factory_from_backend(&*backend), + Arc::new(executor.clone()), ); - let call_executor = LocalCallExecutor::new( - backend.clone(), - executor, - spawn_handle.clone(), - config.clone(), - extensions, - )?; + let call_executor = + LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?; Client::new( backend, @@ -945,19 +941,24 @@ where return Err(sp_blockchain::Error::NotInFinalizedChain) } - let route_from_best = - sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; - - // if the block is not a direct ancestor of the current best chain, - // then some other block is the common ancestor. - if route_from_best.common_block().hash != block { - // NOTE: we're setting the finalized block as best block, this might - // be slightly inaccurate since we might have a "better" block - // further along this chain, but since best chain selection logic is - // plugable we cannot make a better choice here. usages that need - // an accurate "best" block need to go through `SelectChain` - // instead. - operation.op.mark_head(block)?; + // If there is only one leaf, best block is guaranteed to be + // a descendant of the new finalized block. If not, + // we need to check. + if self.backend.blockchain().leaves()?.len() > 1 { + let route_from_best = + sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; + + // if the block is not a direct ancestor of the current best chain, + // then some other block is the common ancestor. + if route_from_best.common_block().hash != block { + // NOTE: we're setting the finalized block as best block, this might + // be slightly inaccurate since we might have a "better" block + // further along this chain, but since best chain selection logic is + // plugable we cannot make a better choice here. usages that need + // an accurate "best" block need to go through `SelectChain` + // instead. 
+ operation.op.mark_head(block)?; + } } let enacted = route_from_finalized.enacted(); diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 3a56e5c595829..725c8ab9429ac 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -264,21 +264,26 @@ pub fn dummy_overrides() -> WasmOverride { #[cfg(test)] mod tests { use super::*; - use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; + use sc_executor::{HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor}; use std::fs::{self, File}; use substrate_test_runtime_client::LocalExecutorDispatch; + fn executor() -> NativeElseWasmExecutor { + NativeElseWasmExecutor::::new_with_wasm_executor( + WasmExecutor::builder() + .with_onchain_heap_alloc_strategy(HeapAllocStrategy::Static {extra_pages: 128}) + .with_offchain_heap_alloc_strategy(HeapAllocStrategy::Static {extra_pages: 128}) + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .build() + ) + } + fn wasm_test(fun: F) where F: Fn(&Path, &[u8], &NativeElseWasmExecutor), { - let exec = - NativeElseWasmExecutor::::new( - WasmExecutionMethod::Interpreted, - Some(128), - 1, - 2, - ); + let exec = executor(); let bytes = substrate_test_runtime::wasm_binary_unwrap(); let dir = tempfile::tempdir().expect("Create a temporary directory"); fun(dir.path(), bytes, &exec); @@ -287,12 +292,7 @@ mod tests { #[test] fn should_get_runtime_version() { - let executor = NativeElseWasmExecutor::::new( - WasmExecutionMethod::Interpreted, - Some(128), - 1, - 2, - ); + let executor = executor(); let version = WasmOverride::runtime_version( &executor, diff --git a/client/service/src/config.rs b/client/service/src/config.rs index c7d98a4533436..6550fcdd8ef4f 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -61,8 +61,6 @@ pub struct Configuration { pub network: NetworkConfiguration, /// Configuration for the keystore. pub keystore: KeystoreConfig, - /// Remote URI to connect to for async keystore support - pub keystore_remote: Option, /// Configuration for the database. pub database: DatabaseSource, /// Maximum size of internal trie cache in bytes. 
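Aside on the executor changes above: downstream node builders no longer construct `NativeElseWasmExecutor::new(wasm_method, heap_pages, instances, cache_size)` by hand; the new `new_native_or_wasm_executor`/`new_wasm_executor` helpers derive those settings from the `Configuration`. A hypothetical `new_partial`-style sketch of the intended wiring, where `Block`, `RuntimeApi` and `ExecutorDispatch` are placeholders for a node's own runtime types and are not part of this patch:

use sc_service::{new_full_parts, new_native_or_wasm_executor, Configuration, Error, TaskManager};

// `Block`, `RuntimeApi` and `ExecutorDispatch` stand in for the node's own definitions.
fn new_partial(config: &Configuration) -> Result<TaskManager, Error> {
	// Wasm execution method, heap pages, runtime instances and cache size are all
	// taken from `config` instead of being passed positionally.
	let executor = new_native_or_wasm_executor::<ExecutorDispatch>(config);

	let (client, backend, keystore_container, task_manager) =
		new_full_parts::<Block, RuntimeApi, _>(config, None, executor)?;

	// `KeystoreContainer::keystore()` now hands out a `KeystorePtr` directly; the
	// separate sync/async keystore accessors are gone.
	let _keystore = keystore_container.keystore();

	let _ = (client, backend);
	Ok(task_manager)
}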
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 54f11ec25a02a..c0c7c537c64dc 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -56,9 +56,9 @@ use sp_runtime::{ pub use self::{ builder::{ build_network, build_offchain_workers, new_client, new_db_backend, new_full_client, - new_full_parts, new_full_parts_with_genesis_builder, spawn_tasks, BuildNetworkParams, - KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor, - TFullClient, + new_full_parts, new_full_parts_with_genesis_builder, new_native_or_wasm_executor, + spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams, + TFullBackend, TFullCallExecutor, TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index b583e2136f0b2..b11aaa79a4a1d 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -45,6 +45,7 @@ use sp_trie::{LayoutV0, TrieConfiguration}; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime::TestAPI; use substrate_test_runtime_client::{ + new_native_or_wasm_executor, prelude::*, runtime::{ genesismap::{insert_genesis_block, GenesisConfig}, @@ -58,29 +59,6 @@ mod db; const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST"; -pub struct ExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - type ExtendHostFunctions = (); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - substrate_test_runtime_client::runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - substrate_test_runtime_client::runtime::native_version() - } -} - -fn executor() -> sc_executor::NativeElseWasmExecutor { - sc_executor::NativeElseWasmExecutor::new( - sc_executor::WasmExecutionMethod::Interpreted, - None, - 8, - 2, - ) -} - fn construct_block( backend: &InMemoryBackend, number: BlockNumber, @@ -106,17 +84,15 @@ fn construct_block( let mut overlay = OverlayedChanges::default(); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); - let task_executor = Box::new(TaskExecutor::new()); StateMachine::new( backend, &mut overlay, - &executor(), + &new_native_or_wasm_executor(), "Core_initialize_block", &header.encode(), Default::default(), &runtime_code, - task_executor.clone() as Box<_>, CallContext::Onchain, ) .execute(ExecutionStrategy::NativeElseWasm) @@ -126,12 +102,11 @@ fn construct_block( StateMachine::new( backend, &mut overlay, - &executor(), + &new_native_or_wasm_executor(), "BlockBuilder_apply_extrinsic", &tx.encode(), Default::default(), &runtime_code, - task_executor.clone() as Box<_>, CallContext::Onchain, ) .execute(ExecutionStrategy::NativeElseWasm) @@ -141,12 +116,11 @@ fn construct_block( let ret_data = StateMachine::new( backend, &mut overlay, - &executor(), + &new_native_or_wasm_executor(), "BlockBuilder_finalize_block", &[], Default::default(), &runtime_code, - task_executor.clone() as Box<_>, CallContext::Onchain, ) .execute(ExecutionStrategy::NativeElseWasm) @@ -186,8 +160,9 @@ fn finality_notification_check( assert_eq!(notif.hash, *finalized.last().unwrap()); assert_eq!(stale_heads, stale_heads_expected); }, - Err(TryRecvError::Closed) => - panic!("unexpected notification result, client send channel was closed"), + Err(TryRecvError::Closed) => { + panic!("unexpected 
notification result, client send channel was closed") + }, Err(TryRecvError::Empty) => assert!(finalized.is_empty()), } } @@ -214,12 +189,11 @@ fn construct_genesis_should_work_with_native() { let _ = StateMachine::new( &backend, &mut overlay, - &executor(), + &new_native_or_wasm_executor(), "Core_execute_block", &b1data, Default::default(), &runtime_code, - TaskExecutor::new(), CallContext::Onchain, ) .execute(ExecutionStrategy::NativeElseWasm) @@ -248,12 +222,11 @@ fn construct_genesis_should_work_with_wasm() { let _ = StateMachine::new( &backend, &mut overlay, - &executor(), + &new_native_or_wasm_executor(), "Core_execute_block", &b1data, Default::default(), &runtime_code, - TaskExecutor::new(), CallContext::Onchain, ) .execute(ExecutionStrategy::AlwaysWasm) @@ -282,12 +255,11 @@ fn construct_genesis_with_bad_transaction_should_panic() { let r = StateMachine::new( &backend, &mut overlay, - &executor(), + &new_native_or_wasm_executor(), "Core_execute_block", &b1data, Default::default(), &runtime_code, - TaskExecutor::new(), CallContext::Onchain, ) .execute(ExecutionStrategy::NativeElseWasm); @@ -1569,7 +1541,11 @@ fn respects_block_rules() { } #[test] +#[cfg(disable_flaky)] +#[allow(dead_code)] +// FIXME: https://github.com/paritytech/substrate/issues/11321 fn returns_status_for_pruned_blocks() { + use sc_consensus::BlockStatus; sp_tracing::try_init_simple(); let tmp = tempfile::tempdir().unwrap(); @@ -1915,7 +1891,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { use substrate_test_runtime_client::GenesisInit; let backend = Arc::new(sc_client_api::in_mem::Backend::new()); - let executor = substrate_test_runtime_client::new_native_executor(); + let executor = new_native_or_wasm_executor(); let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index f80446a4d43eb..29e68261a1025 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -65,9 +65,7 @@ impl Drop for TestNet { } } -pub trait TestNetNode: - Clone + Future> + Send + 'static -{ +pub trait TestNetNode: Clone + Future> + Send + 'static { type Block: BlockT; type Backend: Backend; type Executor: CallExecutor + Send + Sync; @@ -128,7 +126,7 @@ impl Clone impl Future for TestNetComponents { - type Output = Result<(), sc_service::Error>; + type Output = Result<(), Error>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { Pin::new(&mut self.task_manager.lock().future()).poll(cx) @@ -184,10 +182,7 @@ where loop { interval.tick().await; - if full_nodes - .iter() - .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)) - { + if full_nodes.iter().all(|(id, service, _, _)| full_predicate(*id, service)) { break } } @@ -242,14 +237,13 @@ fn node_config< tokio_handle, transaction_pool: Default::default(), network: network_config, - keystore_remote: Default::default(), keystore: KeystoreConfig::Path { path: root.join("key"), password: None }, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Default::default(), blocks_pruning: BlocksPruning::KeepFinalized, chain_spec: Box::new((*spec).clone()), - wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, + wasm_method: Default::default(), wasm_runtime_overrides: Default::default(), execution_strategies: Default::default(), rpc_http: None, diff --git a/client/state-db/src/lib.rs 
b/client/state-db/src/lib.rs index 14b4622ac0670..c656f126ae6eb 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -187,12 +187,14 @@ impl fmt::Debug for StateDbError { "Incompatible pruning modes [stored: {:?}; requested: {:?}]", stored, requested ), - Self::TooManySiblingBlocks { number } => - write!(f, "Too many sibling blocks at #{number} inserted"), + Self::TooManySiblingBlocks { number } => { + write!(f, "Too many sibling blocks at #{number} inserted") + }, Self::BlockAlreadyExists => write!(f, "Block already exists"), Self::Metadata(message) => write!(f, "Invalid metadata: {}", message), - Self::BlockUnavailable => - write!(f, "Trying to get a block record from db while it is not commit to db yet"), + Self::BlockUnavailable => { + write!(f, "Trying to get a block record from db while it is not commit to db yet") + }, Self::BlockMissing => write!(f, "Block record is missing from the pruning window"), } } @@ -406,7 +408,7 @@ impl StateDbSync { } fn prune(&mut self, commit: &mut CommitSet) -> Result<(), Error> { - if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = + if let (&mut Some(ref mut pruning), PruningMode::Constrained(constraints)) = (&mut self.pruning, &self.mode) { loop { @@ -594,7 +596,7 @@ impl StateDb { } /// Prevents pruning of specified block and its descendants. - /// `hint` used for futher checking if the given block exists + /// `hint` used for further checking if the given block exists pub fn pin(&self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError> where F: Fn() -> bool, @@ -665,7 +667,7 @@ pub enum IsPruned { Pruned, /// Definitely not pruned NotPruned, - /// May or may not pruned, need futher checking + /// May or may not pruned, need further checking MaybePruned, } diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index 78d5cafa31e26..dda8a7edfa9bb 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -41,20 +41,21 @@ #![deny(unused_crate_dependencies)] +use std::sync::Arc; + use jsonrpsee::{ - core::{Error as JsonRpseeError, RpcResult}, + core::{async_trait, Error as JsonRpseeError, RpcResult}, proc_macros::rpc, types::{error::CallError, ErrorObject}, }; + use sc_client_api::StorageData; +use sc_consensus_babe::{BabeWorkerHandle, Error as BabeError}; use sp_blockchain::HeaderBackend; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::sync::Arc; type SharedAuthoritySet = sc_consensus_grandpa::SharedAuthoritySet<::Hash, NumberFor>; -type SharedEpochChanges = - sc_consensus_epochs::SharedEpochChanges; /// Error type used by this crate. #[derive(Debug, thiserror::Error)] @@ -66,6 +67,9 @@ pub enum Error { #[error("Failed to load the block weight for block {0:?}")] LoadingBlockWeightFailed(Block::Hash), + #[error("Failed to load the BABE epoch data: {0}")] + LoadingEpochDataFailed(BabeError), + #[error("JsonRpc error: {0}")] JsonRpc(String), @@ -125,7 +129,7 @@ pub struct LightSyncState { pub trait SyncStateApi { /// Returns the JSON serialized chainspec running the node, with a sync state. #[method(name = "sync_state_genSyncSpec")] - fn system_gen_sync_spec(&self, raw: bool) -> RpcResult; + async fn system_gen_sync_spec(&self, raw: bool) -> RpcResult; } /// An api for sync state RPC calls. 
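Aside on the sync-state change that follows: `SyncState::new` now receives the BABE worker handle and fetches the epoch data asynchronously, so the RPC method became `async`. A hypothetical registration sketch for a node's RPC builder; `chain_spec`, `client`, `shared_authority_set`, `babe_worker_handle` and `module` are assumed to come from the surrounding service setup and are not defined here:

use sc_sync_state_rpc::{SyncState, SyncStateApiServer};

// `babe_worker_handle` is the `BabeWorkerHandle` obtained from the node's BABE
// setup; it replaces the `SharedEpochChanges` argument used previously.
let sync_state = SyncState::new(
	chain_spec,
	client.clone(),
	shared_authority_set.clone(),
	babe_worker_handle.clone(),
)?;
module.merge(sync_state.into_rpc())?;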
@@ -133,7 +137,7 @@ pub struct SyncState { chain_spec: Box, client: Arc, shared_authority_set: SharedAuthoritySet, - shared_epoch_changes: SharedEpochChanges, + babe_worker_handle: BabeWorkerHandle, } impl SyncState @@ -146,18 +150,24 @@ where chain_spec: Box, client: Arc, shared_authority_set: SharedAuthoritySet, - shared_epoch_changes: SharedEpochChanges, + babe_worker_handle: BabeWorkerHandle, ) -> Result> { if sc_chain_spec::get_extension::(chain_spec.extensions()) .is_some() { - Ok(Self { chain_spec, client, shared_authority_set, shared_epoch_changes }) + Ok(Self { chain_spec, client, shared_authority_set, babe_worker_handle }) } else { Err(Error::::LightSyncStateExtensionNotFound) } } - fn build_sync_state(&self) -> Result, Error> { + async fn build_sync_state(&self) -> Result, Error> { + let epoch_changes = self + .babe_worker_handle + .epoch_data() + .await + .map_err(Error::LoadingEpochDataFailed)?; + let finalized_hash = self.client.info().finalized_hash; let finalized_header = self .client @@ -170,20 +180,21 @@ where Ok(LightSyncState { finalized_block_header: finalized_header, - babe_epoch_changes: self.shared_epoch_changes.shared_data().clone(), + babe_epoch_changes: epoch_changes, babe_finalized_block_weight: finalized_block_weight, grandpa_authority_set: self.shared_authority_set.clone_inner(), }) } } +#[async_trait] impl SyncStateApiServer for SyncState where Block: BlockT, Backend: HeaderBackend + sc_client_api::AuxStore + 'static, { - fn system_gen_sync_spec(&self, raw: bool) -> RpcResult { - let current_sync_state = self.build_sync_state()?; + async fn system_gen_sync_spec(&self, raw: bool) -> RpcResult { + let current_sync_state = self.build_sync_state().await?; let mut chain_spec = self.chain_spec.cloned_box(); let extension = sc_chain_spec::get_extension_mut::( diff --git a/client/tracing/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml index 031f1fe49d970..54078fe1f2cae 100644 --- a/client/tracing/proc-macro/Cargo.toml +++ b/client/tracing/proc-macro/Cargo.toml @@ -16,6 +16,6 @@ proc-macro = true [dependencies] proc-macro-crate = "1.1.3" -proc-macro2 = "1.0.37" -quote = { version = "1.0.10", features = ["proc-macro"] } -syn = { version = "1.0.98", features = ["proc-macro", "full", "extra-traits", "parsing"] } +proc-macro2 = "1.0.56" +quote = { version = "1.0.26", features = ["proc-macro"] } +syn = { version = "2.0.14", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index f5efadc34fb92..c0442abc125b9 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -34,38 +34,21 @@ use tracing::{ use crate::{SpanDatum, TraceEvent, Values}; use sc_client_api::BlockBackend; -use sc_rpc_server::RPC_MAX_PAYLOAD_DEFAULT; use sp_api::{Core, Encode, Metadata, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; -use sp_rpc::tracing::{BlockTrace, Span, TraceBlockResponse, TraceError}; +use sp_rpc::tracing::{BlockTrace, Span, TraceBlockResponse}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header}, }; use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; -// Heuristic for average event size in bytes. -const AVG_EVENT: usize = 600 * 8; -// Heuristic for average span size in bytes. -const AVG_SPAN: usize = 100 * 8; -// Estimate of the max base RPC payload size when the Id is bound as a u64. If strings -// are used for the RPC Id this may need to be adjusted. 
Note: The base payload -// does not include the RPC result. -// -// The estimate is based on the JSON-RPC response message which has the following format: -// `{"jsonrpc":"2.0","result":[],"id":18446744073709551615}`. -// -// We care about the total size of the payload because jsonrpc-server will simply ignore -// messages larger than `sc_rpc_server::MAX_PAYLOAD` and the caller will not get any -// response. -const BASE_PAYLOAD: usize = 100; // Default to only pallet, frame support and state related traces const DEFAULT_TARGETS: &str = "pallet,frame,state"; const TRACE_TARGET: &str = "block_trace"; // The name of a field required for all events. const REQUIRED_EVENT_FIELD: &str = "method"; -const MEGABYTE: usize = 1024 * 1024; /// Tracing Block Result type alias pub type TraceBlockResult = Result; @@ -182,7 +165,6 @@ pub struct BlockExecutor { targets: Option, storage_keys: Option, methods: Option, - rpc_max_payload: usize, } impl BlockExecutor @@ -203,12 +185,8 @@ where targets: Option, storage_keys: Option, methods: Option, - rpc_max_payload: Option, ) -> Self { - let rpc_max_payload = rpc_max_payload - .map(|mb| mb.saturating_mul(MEGABYTE)) - .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); - Self { client, block, targets, storage_keys, methods, rpc_max_payload } + Self { client, block, targets, storage_keys, methods } } /// Execute block, record all spans and events belonging to `Self::targets` @@ -289,24 +267,15 @@ where .collect(); tracing::debug!(target: "state_tracing", "Captured {} spans and {} events", spans.len(), events.len()); - let approx_payload_size = BASE_PAYLOAD + events.len() * AVG_EVENT + spans.len() * AVG_SPAN; - let response = if approx_payload_size > self.rpc_max_payload { - TraceBlockResponse::TraceError(TraceError { - error: "Payload likely exceeds max payload size of RPC server.".to_string(), - }) - } else { - TraceBlockResponse::BlockTrace(BlockTrace { - block_hash: block_id_as_string(BlockId::::Hash(self.block)), - parent_hash: block_id_as_string(BlockId::::Hash(parent_hash)), - tracing_targets: targets.to_string(), - storage_keys: self.storage_keys.clone().unwrap_or_default(), - methods: self.methods.clone().unwrap_or_default(), - spans, - events, - }) - }; - - Ok(response) + Ok(TraceBlockResponse::BlockTrace(BlockTrace { + block_hash: block_id_as_string(BlockId::::Hash(self.block)), + parent_hash: block_id_as_string(BlockId::::Hash(parent_hash)), + tracing_targets: targets.to_string(), + storage_keys: self.storage_keys.clone().unwrap_or_default(), + methods: self.methods.clone().unwrap_or_default(), + spans, + events, + })) } } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index ae1e9dc0c1fba..3d1784ecf71c3 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -589,7 +589,6 @@ async fn prune_known_txs_for_block TracingUnboundedReceiver { impl Drop for TracingUnboundedReceiver { fn drop(&mut self) { + // Close the channel to prevent any further messages to be sent into the channel self.close(); // the number of messages about to be dropped let count = self.inner.len(); @@ -150,6 +151,10 @@ impl Drop for TracingUnboundedReceiver { .with_label_values(&[self.name, "dropped"]) .inc_by(count.saturated_into()); } + // Drain all the pending messages in the channel since they can never be accessed, + // this can be removed once https://github.com/smol-rs/async-channel/issues/23 is + // resolved + while let Ok(_) = self.inner.try_recv() {} } } @@ -177,3 +182,22 @@ impl FusedStream for 
TracingUnboundedReceiver { self.inner.is_terminated() } } + +#[cfg(test)] +mod tests { + use super::tracing_unbounded; + use async_channel::{self, RecvError, TryRecvError}; + + #[test] + fn test_tracing_unbounded_receiver_drop() { + let (tracing_unbounded_sender, tracing_unbounded_receiver) = + tracing_unbounded("test-receiver-drop", 10); + let (tx, rx) = async_channel::unbounded::(); + + tracing_unbounded_sender.unbounded_send(tx).unwrap(); + drop(tracing_unbounded_receiver); + + assert_eq!(rx.try_recv(), Err(TryRecvError::Closed)); + assert_eq!(rx.recv_blocking(), Err(RecvError)); + } +} diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index d39b8a6d729ff..05a9434ac81ab 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -30,26 +30,23 @@ A Pull Request (PR) needs to be reviewed and approved by project maintainers unl - `A-*` Pull request status. ONE REQUIRED. - `B-*` Changelog and/or Runtime-upgrade post composition markers. ONE REQUIRED. (used by automation) -- `C-*` Release notes release-priority markers. EXACTLY ONE REQUIRED. (used by automation) -- `D-*` More general tags on the PR denoting various implications and requirements. +- `C-*` Release notes release-criticality markers. EXACTLY ONE REQUIRED. (used by automation) +- `D-*` Audit tags denoting auditing requirements on the PR. *Process:* . Please tag each PR with exactly one `A`, `B`, `C` and `D` label at the minimum. -. Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-pleasereview[`A0-pleasereview`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. -. If the first review is not an approval, swap `A0-pleasereview` to any label `[A3, A7]` to indicate that the PR has received some feedback, but needs further work. For example. https://github.com/paritytech/substrate/labels/A3-inprogress[`A3-inprogress`] is a general indicator that the PR is work in progress and https://github.com/paritytech/substrate/labels/A4-gotissues[`A4-gotissues`] means that it has significant problems that need fixing. Once the work is done, change the label back to `A0-pleasereview`. You might end up swapping a few times back and forth to climb up the A label group. Once a PR is https://github.com/paritytech/substrate/labels/A8-mergeoncegreen[`A8-mergeoncegreen`], it is ready to merge. -. PRs must be tagged with their release notes requirements via the `B1-B9` labels. -. PRs must be tagged with their release importance via the `C1-C9` labels. +. Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-please_review+[`A0-please_review`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. +. If the first review is not an approval, swap `A0-please_review` to any label `[A3, A5]` to indicate that the PR has received some feedback, but needs further work. For example, https://github.com/paritytech/substrate/labels/A3-in_progress[`A3-in_progress`] is a general indicator that the PR is work in progress. +. PRs must be tagged with their release notes requirements via the `B*` labels. If a PR should be mentioned in the release notes, use `B1` and add either: `T0-node`, `T1-runtime` or `T2-API` - this defines in which section of the release notes the PR will be shown. +.
PRs must be tagged with their release importance via the `C1-C7` labels. . PRs must be tagged with their audit requirements via the `D1-D9` labels. -. PRs that must be backported to a stable branch must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E0-patchthis`]. -. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E1-runtimemigration`]. See the https://github.com/paritytech/substrate/blob/master/utils/frame/try-runtime/cli/src/lib.rs#L18[Migration Best Practices here] for more info about how to test runtime migrations. -. PRs that introduce irreversible database migrations must be tagged with https://github.com/paritytech/substrate/labels/E2-databasemigration[`E2-databasemigration`]. -. PRs that add host functions must be tagged with with https://github.com/paritytech/substrate/labels/E4-newhostfunctions[`E4-newhostfunctions`]. -. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/E5-breaksapi[`E5-breaksapi`]. -. PRs that materially change the FRAME/runtime semantics must be tagged with https://github.com/paritytech/substrate/labels/E6-transactionversion[`E6-transactionversion`]. -. PRs that change the mechanism for block authoring in a backwards-incompatible way must be tagged with https://github.com/paritytech/substrate/labels/E7-breaksauthoring[`E7-breaksauthoring`]. -. PRs that "break everything" must be tagged with https://github.com/paritytech/substrate/labels/E8-breakseverything[`E8-breakseverything`]. -. PRs that block a new release must be tagged with https://github.com/paritytech/substrate/labels/E9-blocker%20%E2%9B%94%EF%B8%8F[`E9-blocker`]. +. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E0-runtime_migration[`E0-runtime_migration`]. See the https://github.com/paritytech/substrate/blob/master/utils/frame/try-runtime/cli/src/lib.rs#L18[Migration Best Practices here] for more info about how to test runtime migrations. +. PRs that introduce irreversible database migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-database_migration[`E1-database_migration`]. +. PRs that add host functions must be tagged with https://github.com/paritytech/substrate/labels/E3-host_functions[`E3-host_functions`]. +. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/F3-breaks_API[`F3-breaks_API`]. +. PRs that change the mechanism for block authoring in a backwards-incompatible way must be tagged with https://github.com/paritytech/substrate/labels/F1-breaks_authoring[`F1-breaks_authoring`]. +. PRs that "break everything" must be tagged with https://github.com/paritytech/substrate/labels/F0-breaks_everything[`F0-breaks_everything`]. . PRs should be categorized into projects. . No PR should be merged until all reviews' comments are addressed and CI is successful. @@ -91,21 +88,20 @@ To create a Polkadot companion PR: Note: The merge-bot currently doesn't work with forks on org accounts, only individual accounts. (Hint: it's recommended to use `bot merge` to merge all substrate PRs, not just ones with a polkadot companion.) -If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/A7-needspolkadotpr[`A7-needspolkadotpr`] to prevent it from getting automatically merged.
+If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/E6-needs_polkadot_pr[`E6-needs_polkadot_pr`] to prevent it from getting automatically merged. In most cases the CI will add this label automatically. As there might be multiple pending PRs that might conflict with one another, a) you should not merge the substrate PR until the Polkadot PR has also been reviewed and b) both should be merged pretty quickly after another to not block others. == Helping out -We use https://github.com/paritytech/substrate/labels[labels] to manage PRs and issues and communicate state of a PR. Please familiarize yourself with them. Furthermore we are organizing issues in https://github.com/paritytech/substrate/milestones[milestones]. Best way to get started is to a pick a ticket from the current milestone tagged https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AQ2-easy[`easy`] or https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AQ3-medium[`medium`] and get going or https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AX1-mentor[`mentor`] and get in contact with the mentor offering their support on that larger task. +We use https://paritytech.github.io/labels/doc_substrate.html[labels] to manage PRs and issues and communicate state of a PR. Please familiarize yourself with them. The best way to get started is to pick a ticket tagged https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ1-easy[`easy`] or https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ2-medium[`medium`] and get going, or https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ6-mentor[`mentor`] and get in contact with the mentor offering their support on that larger task. == Issues Please label issues with the following labels: -. `I-*` Issue severity and type. EXACTLY ONE REQUIRED. -. `P-*` Issue priority. AT MOST ONE ALLOWED. -. `Q-*` Issue difficulty. AT MOST ONE ALLOWED. -. `Z-*` More general tags on the issue, denoting context and resolution. +. `I-*` or `J-*` Issue severity and type. EXACTLY ONE REQUIRED. +. `U-*` Issue urgency, suggesting how soon the issue needs to be resolved. AT MOST ONE ALLOWED. +. `Z-*` Issue difficulty. AT MOST ONE ALLOWED. == Releases diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 6cc162f7d2f3d..86a64caaf8e75 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -114,7 +114,7 @@ use frame_support::{ ChangeMembers, Currency, Get, InitializeMembers, IsSubType, OnUnbalanced, ReservableCurrency, }, - weights::{OldWeight, Weight}, + weights::Weight, }; use pallet_identity::IdentityField; @@ -497,7 +497,7 @@ pub mod pallet { pub type UnscrupulousWebsites, I: 'static = ()> = StorageValue<_, BoundedVec, T::MaxUnscrupulousItems>, ValueQuery>; - #[pallet::call] + #[pallet::call(weight(>::WeightInfo))] impl, I: 'static> Pallet { /// Add a new proposal to be voted on. /// @@ -539,36 +539,7 @@ pub mod pallet { Ok(()) } - /// Close a vote that is either approved, disapproved, or whose voting period has ended. - /// - /// Must be called by a Fellow.
- #[pallet::call_index(2)] - #[pallet::weight({ - let b = *length_bound; - let m = T::MaxFellows::get(); - let p1 = *proposal_weight_bound; - let p2 = T::MaxProposals::get(); - T::WeightInfo::close_early_approved(b, m, p2) - .max(T::WeightInfo::close_early_disapproved(m, p2)) - .max(T::WeightInfo::close_approved(b, m, p2)) - .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1.into()) - })] - #[allow(deprecated)] - #[deprecated(note = "1D weight is used in this extrinsic, please migrate to use `close`")] - pub fn close_old_weight( - origin: OriginFor, - proposal_hash: T::Hash, - #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: OldWeight, - #[pallet::compact] length_bound: u32, - ) -> DispatchResultWithPostInfo { - let proposal_weight_bound: Weight = proposal_weight_bound.into(); - let who = ensure_signed(origin)?; - ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); - - Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) - } + // Index 2 was `close_old_weight`; it was removed due to weights v1 deprecation. /// Initialize the Alliance, onboard fellows and allies. /// @@ -678,7 +649,6 @@ pub mod pallet { /// Set a new IPFS CID to the alliance rule. #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::set_rule())] pub fn set_rule(origin: OriginFor, rule: Cid) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; @@ -690,7 +660,6 @@ pub mod pallet { /// Make an announcement of a new IPFS CID about alliance issues. #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::announce())] pub fn announce(origin: OriginFor, announcement: Cid) -> DispatchResult { T::AnnouncementOrigin::ensure_origin(origin)?; @@ -706,7 +675,6 @@ pub mod pallet { /// Remove an announcement. #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::remove_announcement())] pub fn remove_announcement(origin: OriginFor, announcement: Cid) -> DispatchResult { T::AnnouncementOrigin::ensure_origin(origin)?; @@ -724,7 +692,6 @@ pub mod pallet { /// Submit oneself for candidacy. A fixed deposit is reserved. #[pallet::call_index(8)] - #[pallet::weight(T::WeightInfo::join_alliance())] pub fn join_alliance(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -761,7 +728,6 @@ pub mod pallet { /// A Fellow can nominate someone to join the alliance as an Ally. There is no deposit /// required from the nominator or nominee. #[pallet::call_index(9)] - #[pallet::weight(T::WeightInfo::nominate_ally())] pub fn nominate_ally(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let nominator = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&nominator), Error::::NoVotingRights); @@ -786,7 +752,6 @@ pub mod pallet { /// Elevate an Ally to Fellow. #[pallet::call_index(10)] - #[pallet::weight(T::WeightInfo::elevate_ally())] pub fn elevate_ally(origin: OriginFor, ally: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let ally = T::Lookup::lookup(ally)?; @@ -803,7 +768,6 @@ pub mod pallet { /// As a member, give a retirement notice and start a retirement period required to pass in /// order to retire. 
#[pallet::call_index(11)] - #[pallet::weight(T::WeightInfo::give_retirement_notice())] pub fn give_retirement_notice(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; @@ -826,7 +790,6 @@ pub mod pallet { /// This can only be done once you have called `give_retirement_notice` and the /// `RetirementPeriod` has passed. #[pallet::call_index(12)] - #[pallet::weight(T::WeightInfo::retire())] pub fn retire(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let retirement_period_end = RetiringMembers::::get(&who) @@ -849,7 +812,6 @@ pub mod pallet { /// Kick a member from the Alliance and slash its deposit. #[pallet::call_index(13)] - #[pallet::weight(T::WeightInfo::kick_member())] pub fn kick_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let member = T::Lookup::lookup(who)?; @@ -951,7 +913,6 @@ pub mod pallet { /// who do not want to leave the Alliance but do not have the capacity to participate /// operationally for some time. #[pallet::call_index(17)] - #[pallet::weight(T::WeightInfo::abdicate_fellow_status())] pub fn abdicate_fellow_status(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index b8fd998eb20a0..c334a3943b025 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -43,10 +43,12 @@ type AccountId = u64; parameter_types! { pub const BlockHashCount: BlockNumber = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::MAX); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); + type BlockWeights = BlockWeights; type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -85,6 +87,10 @@ impl pallet_balances::Config for Test { type MaxLocks = MaxLocks; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; @@ -93,6 +99,7 @@ parameter_types! { pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; pub const MaxProposals: u32 = 100; pub const MaxMembers: u32 = 100; + pub MaxProposalWeight: Weight = sp_runtime::Perbill::from_percent(50) * BlockWeights::get().max_block; } type AllianceCollective = pallet_collective::Instance1; impl pallet_collective::Config for Test { @@ -105,6 +112,7 @@ impl pallet_collective::Config for Test { type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = (); type SetMembersOrigin = EnsureRoot; + type MaxProposalWeight = MaxProposalWeight; } parameter_types! 
{ diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 5942595469d5f..de7cda4710fc7 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -629,3 +629,12 @@ fn remove_unscrupulous_items_works() { assert_eq!(Alliance::unscrupulous_accounts(), Vec::::new()); }); } + +#[test] +fn weights_sane() { + let info = crate::Call::::join_alliance {}.get_dispatch_info(); + assert_eq!(<() as crate::WeightInfo>::join_alliance(), info.weight); + + let info = crate::Call::::nominate_ally { who: 10 }.get_dispatch_info(); + assert_eq!(<() as crate::WeightInfo>::nominate_ally(), info.weight); +} diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 8ff4bad31313c..0276a9844a4a9 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -88,20 +88,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `692 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `11659 + m * (132 ±0) + p * (144 ±0)` - // Minimum execution time: 28_849 nanoseconds. - Weight::from_parts(29_823_933, 11659) - // Standard Error: 74 - .saturating_add(Weight::from_parts(830, 0).saturating_mul(b.into())) - // Standard Error: 775 - .saturating_add(Weight::from_parts(22_980, 0).saturating_mul(m.into())) - // Standard Error: 765 - .saturating_add(Weight::from_parts(90_520, 0).saturating_mul(p.into())) + // Measured: `618 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (31 ±0) + p * (37 ±0)` + // Minimum execution time: 32_316_000 picoseconds. + Weight::from_parts(35_185_484, 6676) + // Standard Error: 83 + .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(b.into())) + // Standard Error: 871 + .saturating_add(Weight::from_parts(21_235, 0).saturating_mul(m.into())) + // Standard Error: 860 + .saturating_add(Weight::from_parts(120_353, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 132).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 31).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -110,12 +110,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1177 + m * (64 ±0)` - // Estimated: `9337 + m * (64 ±0)` - // Minimum execution time: 23_570 nanoseconds. 
- Weight::from_parts(25_473_196, 9337) - // Standard Error: 824 - .saturating_add(Weight::from_parts(54_603, 0).saturating_mul(m.into())) + // Measured: `1042 + m * (64 ±0)` + // Estimated: `6676 + m * (64 ±0)` + // Minimum execution time: 25_982_000 picoseconds. + Weight::from_parts(28_118_657, 6676) + // Standard Error: 855 + .saturating_add(Weight::from_parts(61_309, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -134,18 +134,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `730 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `11979 + m * (388 ±0) + p * (148 ±0)` - // Minimum execution time: 35_373 nanoseconds. - Weight::from_parts(32_763_656, 11979) - // Standard Error: 2_041 - .saturating_add(Weight::from_parts(52_915, 0).saturating_mul(m.into())) - // Standard Error: 1_991 - .saturating_add(Weight::from_parts(78_594, 0).saturating_mul(p.into())) + // Measured: `622 + m * (96 ±0) + p * (37 ±0)` + // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` + // Minimum execution time: 40_922_000 picoseconds. + Weight::from_parts(39_098_903, 6676) + // Standard Error: 714 + .saturating_add(Weight::from_parts(44_125, 0).saturating_mul(m.into())) + // Standard Error: 696 + .saturating_add(Weight::from_parts(111_263, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 388).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 148).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -162,20 +162,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1139 + m * (96 ±0) + p * (41 ±0)` - // Estimated: `15730 + m * (388 ±0) + p * (164 ±0)` - // Minimum execution time: 44_590 nanoseconds. - Weight::from_parts(43_367_913, 15730) - // Standard Error: 71 - .saturating_add(Weight::from_parts(819, 0).saturating_mul(b.into())) - // Standard Error: 751 - .saturating_add(Weight::from_parts(39_124, 0).saturating_mul(m.into())) - // Standard Error: 732 - .saturating_add(Weight::from_parts(90_469, 0).saturating_mul(p.into())) + // Measured: `903 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (98 ±0) + p * (40 ±0)` + // Minimum execution time: 51_890_000 picoseconds. 
+ Weight::from_parts(49_880_817, 6676) + // Standard Error: 81 + .saturating_add(Weight::from_parts(688, 0).saturating_mul(b.into())) + // Standard Error: 862 + .saturating_add(Weight::from_parts(54_419, 0).saturating_mul(m.into())) + // Standard Error: 840 + .saturating_add(Weight::from_parts(122_253, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 388).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 164).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 98).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -193,18 +193,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `730 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `13201 + m * (485 ±0) + p * (185 ±0)` - // Minimum execution time: 36_796 nanoseconds. - Weight::from_parts(34_578_765, 13201) - // Standard Error: 1_037 - .saturating_add(Weight::from_parts(43_508, 0).saturating_mul(m.into())) - // Standard Error: 1_024 - .saturating_add(Weight::from_parts(77_084, 0).saturating_mul(p.into())) + // Measured: `622 + m * (96 ±0) + p * (37 ±0)` + // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` + // Minimum execution time: 42_391_000 picoseconds. + Weight::from_parts(40_156_254, 6676) + // Standard Error: 728 + .saturating_add(Weight::from_parts(47_889, 0).saturating_mul(m.into())) + // Standard Error: 719 + .saturating_add(Weight::from_parts(112_596, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 485).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 185).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -223,20 +223,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `757 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `13146 + m * (480 ±0) + p * (185 ±0)` - // Minimum execution time: 36_897 nanoseconds. - Weight::from_parts(34_169_666, 13146) - // Standard Error: 47 - .saturating_add(Weight::from_parts(972, 0).saturating_mul(b.into())) - // Standard Error: 510 - .saturating_add(Weight::from_parts(38_084, 0).saturating_mul(m.into())) - // Standard Error: 492 - .saturating_add(Weight::from_parts(78_576, 0).saturating_mul(p.into())) + // Measured: `591 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 42_320_000 picoseconds. 
+ Weight::from_parts(40_205_526, 6676) + // Standard Error: 64 + .saturating_add(Weight::from_parts(49, 0).saturating_mul(b.into())) + // Standard Error: 690 + .saturating_add(Weight::from_parts(46_508, 0).saturating_mul(m.into())) + // Standard Error: 665 + .saturating_add(Weight::from_parts(112_222, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 480).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 185).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:2 w:2) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -247,13 +247,13 @@ impl WeightInfo for SubstrateWeight { fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `217` - // Estimated: `12084` - // Minimum execution time: 29_313 nanoseconds. - Weight::from_parts(20_502_244, 12084) - // Standard Error: 304 - .saturating_add(Weight::from_parts(107_994, 0).saturating_mul(m.into())) - // Standard Error: 300 - .saturating_add(Weight::from_parts(92_645, 0).saturating_mul(z.into())) + // Estimated: `12362` + // Minimum execution time: 32_509_000 picoseconds. + Weight::from_parts(23_584_337, 12362) + // Standard Error: 377 + .saturating_add(Weight::from_parts(114_917, 0).saturating_mul(m.into())) + // Standard Error: 373 + .saturating_add(Weight::from_parts(97_593, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -274,25 +274,25 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 50]`. fn disband(x: u32, y: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (283 ±0)` - // Estimated: `32201 + x * (2587 ±0) + y * (2590 ±0) + z * (3209 ±1)` - // Minimum execution time: 232_895 nanoseconds. - Weight::from_parts(233_860_000, 32201) - // Standard Error: 19_092 - .saturating_add(Weight::from_parts(445_664, 0).saturating_mul(x.into())) - // Standard Error: 19_000 - .saturating_add(Weight::from_parts(432_571, 0).saturating_mul(y.into())) - // Standard Error: 37_965 - .saturating_add(Weight::from_parts(9_386_151, 0).saturating_mul(z.into())) + // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` + // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` + // Minimum execution time: 275_061_000 picoseconds. 
+ Weight::from_parts(565_248, 12362) + // Standard Error: 15_948 + .saturating_add(Weight::from_parts(1_636_348, 0).saturating_mul(x.into())) + // Standard Error: 15_761 + .saturating_add(Weight::from_parts(1_580_146, 0).saturating_mul(y.into())) + // Standard Error: 31_496 + .saturating_add(Weight::from_parts(17_217_382, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(z.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(z.into()))) - .saturating_add(Weight::from_parts(0, 2587).saturating_mul(x.into())) - .saturating_add(Weight::from_parts(0, 2590).saturating_mul(y.into())) - .saturating_add(Weight::from_parts(0, 3209).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) } /// Storage: Alliance Rule (r:0 w:1) /// Proof: Alliance Rule (max_values: Some(1), max_size: Some(87), added: 582, mode: MaxEncodedLen) @@ -300,8 +300,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_156 nanoseconds. - Weight::from_parts(9_376_000, 0) + // Minimum execution time: 10_261_000 picoseconds. + Weight::from_parts(10_389_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Alliance Announcements (r:1 w:1) @@ -309,9 +309,9 @@ impl WeightInfo for SubstrateWeight { fn announce() -> Weight { // Proof Size summary in bytes: // Measured: `246` - // Estimated: `9197` - // Minimum execution time: 12_163 nanoseconds. - Weight::from_parts(12_397_000, 9197) + // Estimated: `10187` + // Minimum execution time: 13_483_000 picoseconds. + Weight::from_parts(13_805_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -319,10 +319,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `9197` - // Minimum execution time: 12_778 nanoseconds. - Weight::from_parts(12_991_000, 9197) + // Measured: `319` + // Estimated: `10187` + // Minimum execution time: 14_816_000 picoseconds. + Weight::from_parts(15_163_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -336,10 +336,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `562` - // Estimated: `23358` - // Minimum execution time: 40_216 nanoseconds. - Weight::from_parts(40_535_000, 23358) + // Measured: `468` + // Estimated: `18048` + // Minimum execution time: 46_149_000 picoseconds. 
+ Weight::from_parts(46_827_000, 18048) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -349,10 +349,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `429` - // Estimated: `20755` - // Minimum execution time: 28_027 nanoseconds. - Weight::from_parts(28_392_000, 20755) + // Measured: `367` + // Estimated: `18048` + // Minimum execution time: 28_463_000 picoseconds. + Weight::from_parts(28_730_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -366,10 +366,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `13382` - // Minimum execution time: 24_427 nanoseconds. - Weight::from_parts(24_952_000, 13382) + // Measured: `443` + // Estimated: `12362` + // Minimum execution time: 28_401_000 picoseconds. + Weight::from_parts(28_717_000, 12362) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -385,10 +385,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `24754` - // Minimum execution time: 33_163 nanoseconds. - Weight::from_parts(33_556_000, 24754) + // Measured: `443` + // Estimated: `23734` + // Minimum execution time: 36_538_000 picoseconds. + Weight::from_parts(37_197_000, 23734) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -402,10 +402,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `750` - // Estimated: `13355` - // Minimum execution time: 32_980 nanoseconds. - Weight::from_parts(33_452_000, 13355) + // Measured: `687` + // Estimated: `6676` + // Minimum execution time: 42_324_000 picoseconds. + Weight::from_parts(42_890_000, 6676) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -423,10 +423,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `801` - // Estimated: `25098` - // Minimum execution time: 54_660 nanoseconds. - Weight::from_parts(55_186_000, 25098) + // Measured: `707` + // Estimated: `18048` + // Minimum execution time: 68_003_000 picoseconds. + Weight::from_parts(68_657_000, 18048) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -439,13 +439,13 @@ impl WeightInfo for SubstrateWeight { fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `246` - // Estimated: `29894` - // Minimum execution time: 7_709 nanoseconds. 
- Weight::from_parts(7_773_000, 29894) - // Standard Error: 2_645 - .saturating_add(Weight::from_parts(1_266_755, 0).saturating_mul(n.into())) - // Standard Error: 1_036 - .saturating_add(Weight::from_parts(67_359, 0).saturating_mul(l.into())) + // Estimated: `27187` + // Minimum execution time: 8_304_000 picoseconds. + Weight::from_parts(8_424_000, 27187) + // Standard Error: 2_765 + .saturating_add(Weight::from_parts(1_529_793, 0).saturating_mul(n.into())) + // Standard Error: 1_082 + .saturating_add(Weight::from_parts(71_352, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -457,14 +457,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + n * (289 ±0) + l * (100 ±0)` - // Estimated: `29894` - // Minimum execution time: 7_438 nanoseconds. - Weight::from_parts(7_570_000, 29894) - // Standard Error: 165_072 - .saturating_add(Weight::from_parts(13_026_975, 0).saturating_mul(n.into())) - // Standard Error: 64_649 - .saturating_add(Weight::from_parts(485_565, 0).saturating_mul(l.into())) + // Measured: `0 + l * (100 ±0) + n * (289 ±0)` + // Estimated: `27187` + // Minimum execution time: 8_348_000 picoseconds. + Weight::from_parts(8_505_000, 27187) + // Standard Error: 187_398 + .saturating_add(Weight::from_parts(16_545_597, 0).saturating_mul(n.into())) + // Standard Error: 73_393 + .saturating_add(Weight::from_parts(350_415, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -478,10 +478,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `19068` - // Minimum execution time: 31_326 nanoseconds. - Weight::from_parts(31_768_000, 19068) + // Measured: `443` + // Estimated: `18048` + // Minimum execution time: 34_461_000 picoseconds. + Weight::from_parts(34_992_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -504,20 +504,20 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `692 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `11659 + m * (132 ±0) + p * (144 ±0)` - // Minimum execution time: 28_849 nanoseconds. - Weight::from_parts(29_823_933, 11659) - // Standard Error: 74 - .saturating_add(Weight::from_parts(830, 0).saturating_mul(b.into())) - // Standard Error: 775 - .saturating_add(Weight::from_parts(22_980, 0).saturating_mul(m.into())) - // Standard Error: 765 - .saturating_add(Weight::from_parts(90_520, 0).saturating_mul(p.into())) + // Measured: `618 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (31 ±0) + p * (37 ±0)` + // Minimum execution time: 32_316_000 picoseconds. 
+ Weight::from_parts(35_185_484, 6676) + // Standard Error: 83 + .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(b.into())) + // Standard Error: 871 + .saturating_add(Weight::from_parts(21_235, 0).saturating_mul(m.into())) + // Standard Error: 860 + .saturating_add(Weight::from_parts(120_353, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 132).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 31).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -526,12 +526,12 @@ impl WeightInfo for () { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1177 + m * (64 ±0)` - // Estimated: `9337 + m * (64 ±0)` - // Minimum execution time: 23_570 nanoseconds. - Weight::from_parts(25_473_196, 9337) - // Standard Error: 824 - .saturating_add(Weight::from_parts(54_603, 0).saturating_mul(m.into())) + // Measured: `1042 + m * (64 ±0)` + // Estimated: `6676 + m * (64 ±0)` + // Minimum execution time: 25_982_000 picoseconds. + Weight::from_parts(28_118_657, 6676) + // Standard Error: 855 + .saturating_add(Weight::from_parts(61_309, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -550,18 +550,18 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `730 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `11979 + m * (388 ±0) + p * (148 ±0)` - // Minimum execution time: 35_373 nanoseconds. - Weight::from_parts(32_763_656, 11979) - // Standard Error: 2_041 - .saturating_add(Weight::from_parts(52_915, 0).saturating_mul(m.into())) - // Standard Error: 1_991 - .saturating_add(Weight::from_parts(78_594, 0).saturating_mul(p.into())) + // Measured: `622 + m * (96 ±0) + p * (37 ±0)` + // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` + // Minimum execution time: 40_922_000 picoseconds. + Weight::from_parts(39_098_903, 6676) + // Standard Error: 714 + .saturating_add(Weight::from_parts(44_125, 0).saturating_mul(m.into())) + // Standard Error: 696 + .saturating_add(Weight::from_parts(111_263, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 388).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 148).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -578,20 +578,20 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1139 + m * (96 ±0) + p * (41 ±0)` - // Estimated: `15730 + m * (388 ±0) + p * (164 ±0)` - // Minimum execution time: 44_590 nanoseconds. 
- Weight::from_parts(43_367_913, 15730) - // Standard Error: 71 - .saturating_add(Weight::from_parts(819, 0).saturating_mul(b.into())) - // Standard Error: 751 - .saturating_add(Weight::from_parts(39_124, 0).saturating_mul(m.into())) - // Standard Error: 732 - .saturating_add(Weight::from_parts(90_469, 0).saturating_mul(p.into())) + // Measured: `903 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (98 ±0) + p * (40 ±0)` + // Minimum execution time: 51_890_000 picoseconds. + Weight::from_parts(49_880_817, 6676) + // Standard Error: 81 + .saturating_add(Weight::from_parts(688, 0).saturating_mul(b.into())) + // Standard Error: 862 + .saturating_add(Weight::from_parts(54_419, 0).saturating_mul(m.into())) + // Standard Error: 840 + .saturating_add(Weight::from_parts(122_253, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 388).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 164).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 98).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -609,18 +609,18 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `730 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `13201 + m * (485 ±0) + p * (185 ±0)` - // Minimum execution time: 36_796 nanoseconds. - Weight::from_parts(34_578_765, 13201) - // Standard Error: 1_037 - .saturating_add(Weight::from_parts(43_508, 0).saturating_mul(m.into())) - // Standard Error: 1_024 - .saturating_add(Weight::from_parts(77_084, 0).saturating_mul(p.into())) + // Measured: `622 + m * (96 ±0) + p * (37 ±0)` + // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` + // Minimum execution time: 42_391_000 picoseconds. + Weight::from_parts(40_156_254, 6676) + // Standard Error: 728 + .saturating_add(Weight::from_parts(47_889, 0).saturating_mul(m.into())) + // Standard Error: 719 + .saturating_add(Weight::from_parts(112_596, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 485).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 185).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -639,20 +639,20 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `757 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `13146 + m * (480 ±0) + p * (185 ±0)` - // Minimum execution time: 36_897 nanoseconds. 
- Weight::from_parts(34_169_666, 13146) - // Standard Error: 47 - .saturating_add(Weight::from_parts(972, 0).saturating_mul(b.into())) - // Standard Error: 510 - .saturating_add(Weight::from_parts(38_084, 0).saturating_mul(m.into())) - // Standard Error: 492 - .saturating_add(Weight::from_parts(78_576, 0).saturating_mul(p.into())) + // Measured: `591 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 42_320_000 picoseconds. + Weight::from_parts(40_205_526, 6676) + // Standard Error: 64 + .saturating_add(Weight::from_parts(49, 0).saturating_mul(b.into())) + // Standard Error: 690 + .saturating_add(Weight::from_parts(46_508, 0).saturating_mul(m.into())) + // Standard Error: 665 + .saturating_add(Weight::from_parts(112_222, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 480).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 185).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:2 w:2) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -663,13 +663,13 @@ impl WeightInfo for () { fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `217` - // Estimated: `12084` - // Minimum execution time: 29_313 nanoseconds. - Weight::from_parts(20_502_244, 12084) - // Standard Error: 304 - .saturating_add(Weight::from_parts(107_994, 0).saturating_mul(m.into())) - // Standard Error: 300 - .saturating_add(Weight::from_parts(92_645, 0).saturating_mul(z.into())) + // Estimated: `12362` + // Minimum execution time: 32_509_000 picoseconds. + Weight::from_parts(23_584_337, 12362) + // Standard Error: 377 + .saturating_add(Weight::from_parts(114_917, 0).saturating_mul(m.into())) + // Standard Error: 373 + .saturating_add(Weight::from_parts(97_593, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -690,25 +690,25 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 50]`. fn disband(x: u32, y: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (283 ±0)` - // Estimated: `32201 + x * (2587 ±0) + y * (2590 ±0) + z * (3209 ±1)` - // Minimum execution time: 232_895 nanoseconds. - Weight::from_parts(233_860_000, 32201) - // Standard Error: 19_092 - .saturating_add(Weight::from_parts(445_664, 0).saturating_mul(x.into())) - // Standard Error: 19_000 - .saturating_add(Weight::from_parts(432_571, 0).saturating_mul(y.into())) - // Standard Error: 37_965 - .saturating_add(Weight::from_parts(9_386_151, 0).saturating_mul(z.into())) + // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` + // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` + // Minimum execution time: 275_061_000 picoseconds. 
+ Weight::from_parts(565_248, 12362) + // Standard Error: 15_948 + .saturating_add(Weight::from_parts(1_636_348, 0).saturating_mul(x.into())) + // Standard Error: 15_761 + .saturating_add(Weight::from_parts(1_580_146, 0).saturating_mul(y.into())) + // Standard Error: 31_496 + .saturating_add(Weight::from_parts(17_217_382, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(z.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(z.into()))) - .saturating_add(Weight::from_parts(0, 2587).saturating_mul(x.into())) - .saturating_add(Weight::from_parts(0, 2590).saturating_mul(y.into())) - .saturating_add(Weight::from_parts(0, 3209).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) } /// Storage: Alliance Rule (r:0 w:1) /// Proof: Alliance Rule (max_values: Some(1), max_size: Some(87), added: 582, mode: MaxEncodedLen) @@ -716,8 +716,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_156 nanoseconds. - Weight::from_parts(9_376_000, 0) + // Minimum execution time: 10_261_000 picoseconds. + Weight::from_parts(10_389_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Alliance Announcements (r:1 w:1) @@ -725,9 +725,9 @@ impl WeightInfo for () { fn announce() -> Weight { // Proof Size summary in bytes: // Measured: `246` - // Estimated: `9197` - // Minimum execution time: 12_163 nanoseconds. - Weight::from_parts(12_397_000, 9197) + // Estimated: `10187` + // Minimum execution time: 13_483_000 picoseconds. + Weight::from_parts(13_805_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -735,10 +735,10 @@ impl WeightInfo for () { /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `9197` - // Minimum execution time: 12_778 nanoseconds. - Weight::from_parts(12_991_000, 9197) + // Measured: `319` + // Estimated: `10187` + // Minimum execution time: 14_816_000 picoseconds. + Weight::from_parts(15_163_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -752,10 +752,10 @@ impl WeightInfo for () { /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `562` - // Estimated: `23358` - // Minimum execution time: 40_216 nanoseconds. - Weight::from_parts(40_535_000, 23358) + // Measured: `468` + // Estimated: `18048` + // Minimum execution time: 46_149_000 picoseconds. 
+ Weight::from_parts(46_827_000, 18048) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -765,10 +765,10 @@ impl WeightInfo for () { /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `429` - // Estimated: `20755` - // Minimum execution time: 28_027 nanoseconds. - Weight::from_parts(28_392_000, 20755) + // Measured: `367` + // Estimated: `18048` + // Minimum execution time: 28_463_000 picoseconds. + Weight::from_parts(28_730_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -782,10 +782,10 @@ impl WeightInfo for () { /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `13382` - // Minimum execution time: 24_427 nanoseconds. - Weight::from_parts(24_952_000, 13382) + // Measured: `443` + // Estimated: `12362` + // Minimum execution time: 28_401_000 picoseconds. + Weight::from_parts(28_717_000, 12362) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -801,10 +801,10 @@ impl WeightInfo for () { /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `24754` - // Minimum execution time: 33_163 nanoseconds. - Weight::from_parts(33_556_000, 24754) + // Measured: `443` + // Estimated: `23734` + // Minimum execution time: 36_538_000 picoseconds. + Weight::from_parts(37_197_000, 23734) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -818,10 +818,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `750` - // Estimated: `13355` - // Minimum execution time: 32_980 nanoseconds. - Weight::from_parts(33_452_000, 13355) + // Measured: `687` + // Estimated: `6676` + // Minimum execution time: 42_324_000 picoseconds. + Weight::from_parts(42_890_000, 6676) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -839,10 +839,10 @@ impl WeightInfo for () { /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `801` - // Estimated: `25098` - // Minimum execution time: 54_660 nanoseconds. - Weight::from_parts(55_186_000, 25098) + // Measured: `707` + // Estimated: `18048` + // Minimum execution time: 68_003_000 picoseconds. + Weight::from_parts(68_657_000, 18048) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -855,13 +855,13 @@ impl WeightInfo for () { fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `246` - // Estimated: `29894` - // Minimum execution time: 7_709 nanoseconds. 
- Weight::from_parts(7_773_000, 29894) - // Standard Error: 2_645 - .saturating_add(Weight::from_parts(1_266_755, 0).saturating_mul(n.into())) - // Standard Error: 1_036 - .saturating_add(Weight::from_parts(67_359, 0).saturating_mul(l.into())) + // Estimated: `27187` + // Minimum execution time: 8_304_000 picoseconds. + Weight::from_parts(8_424_000, 27187) + // Standard Error: 2_765 + .saturating_add(Weight::from_parts(1_529_793, 0).saturating_mul(n.into())) + // Standard Error: 1_082 + .saturating_add(Weight::from_parts(71_352, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -873,14 +873,14 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + n * (289 ±0) + l * (100 ±0)` - // Estimated: `29894` - // Minimum execution time: 7_438 nanoseconds. - Weight::from_parts(7_570_000, 29894) - // Standard Error: 165_072 - .saturating_add(Weight::from_parts(13_026_975, 0).saturating_mul(n.into())) - // Standard Error: 64_649 - .saturating_add(Weight::from_parts(485_565, 0).saturating_mul(l.into())) + // Measured: `0 + l * (100 ±0) + n * (289 ±0)` + // Estimated: `27187` + // Minimum execution time: 8_348_000 picoseconds. + Weight::from_parts(8_505_000, 27187) + // Standard Error: 187_398 + .saturating_add(Weight::from_parts(16_545_597, 0).saturating_mul(n.into())) + // Standard Error: 73_393 + .saturating_add(Weight::from_parts(350_415, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -894,10 +894,10 @@ impl WeightInfo for () { /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `19068` - // Minimum execution time: 31_326 nanoseconds. - Weight::from_parts(31_768_000, 19068) + // Measured: `443` + // Estimated: `18048` + // Minimum execution time: 34_461_000 picoseconds. 
+ Weight::from_parts(34_992_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/frame/asset-rate/Cargo.toml b/frame/asset-rate/Cargo.toml new file mode 100644 index 0000000000000..36aabb12bcbbd --- /dev/null +++ b/frame/asset-rate/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "pallet-asset-rate" +version = "4.0.0-dev" +description = "Whitelist non-native assets for treasury spending and provide conversion to native balance" +authors = ["William Freudenberger "] +homepage = "https://substrate.io" +edition = "2021" +license = "Apache-2.0" +repository = "https://github.com/paritytech/substrate/" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-io = { version = "7.0.0", path = "../../primitives/io" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/frame/asset-rate/src/benchmarking.rs b/frame/asset-rate/src/benchmarking.rs new file mode 100644 index 0000000000000..dde0d764affb2 --- /dev/null +++ b/frame/asset-rate/src/benchmarking.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The crate's benchmarks. 
+ +use super::*; +use crate::{pallet as pallet_asset_rate, Pallet as AssetRate}; + +use frame_benchmarking::v2::*; +use frame_support::assert_ok; +use frame_system::RawOrigin; + +const ASSET_ID: u32 = 1; + +fn default_conversion_rate() -> FixedU128 { + FixedU128::from_u32(1u32) +} + +#[benchmarks(where <T as Config>::AssetId: From<u32>)] +mod benchmarks { + use super::*; + + #[benchmark] + fn create() -> Result<(), BenchmarkError> { + let asset_id: T::AssetId = ASSET_ID.into(); + #[extrinsic_call] + _(RawOrigin::Root, asset_id, default_conversion_rate()); + + assert_eq!( + pallet_asset_rate::ConversionRateToNative::<T>::get(asset_id), + Some(default_conversion_rate()) + ); + Ok(()) + } + + #[benchmark] + fn update() -> Result<(), BenchmarkError> { + let asset_id: T::AssetId = ASSET_ID.into(); + assert_ok!(AssetRate::<T>::create( + RawOrigin::Root.into(), + asset_id, + default_conversion_rate() + )); + + #[extrinsic_call] + _(RawOrigin::Root, asset_id, FixedU128::from_u32(2)); + + assert_eq!( + pallet_asset_rate::ConversionRateToNative::<T>::get(asset_id), + Some(FixedU128::from_u32(2)) + ); + Ok(()) + } + + #[benchmark] + fn remove() -> Result<(), BenchmarkError> { + let asset_id: T::AssetId = ASSET_ID.into(); + assert_ok!(AssetRate::<T>::create( + RawOrigin::Root.into(), + ASSET_ID.into(), + default_conversion_rate() + )); + + #[extrinsic_call] + _(RawOrigin::Root, asset_id); + + assert!(pallet_asset_rate::ConversionRateToNative::<T>::get(asset_id).is_none()); + Ok(()) + } + + impl_benchmark_test_suite! { AssetRate, crate::mock::new_test_ext(), crate::mock::Test } +} diff --git a/frame/asset-rate/src/lib.rs b/frame/asset-rate/src/lib.rs new file mode 100644 index 0000000000000..8c6597a389833 --- /dev/null +++ b/frame/asset-rate/src/lib.rs @@ -0,0 +1,239 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Asset Rate Pallet +//! +//! - [`Config`] +//! - [`Call`] +//! +//! ## Overview +//! +//! The AssetRate pallet provides a means of setting conversion rates for some asset to native +//! balance. +//! +//! The supported dispatchable functions are documented in the [`Call`] enum. +//! +//! ### Terminology +//! +//! * **Asset balance**: The balance type of an arbitrary asset. The network might only know about +//! the identifier of the asset and nothing more. +//! * **Native balance**: The balance type of the network's native currency. +//! +//! ### Goals +//! +//! The asset-rate system in Substrate is designed to make the following possible: +//! +//! * Providing a soft conversion for the balance of supported assets to a default asset class. +//! * Updating existing conversion rates. +//! +//! ## Interface +//! +//! ### Permissioned Functions +//! +//! * `create`: Creates a new asset conversion rate. +//! * `remove`: Removes an existing asset conversion rate. +//! * `update`: Overwrites an existing asset conversion rate. +//! +//!
Please refer to the [`Call`] enum and its associated variants for documentation on each +//! function. +//! +//! ### Assumptions +//! +//! * Conversion rates are only used as estimates, and are not designed to be precise or closely +//! tracking real world values. +//! * All conversion rates reflect the ratio of some asset to native, e.g. native = asset * rate. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::traits::{ + fungible::Inspect, + tokens::{Balance, ConversionFromAssetBalance}, +}; +use sp_runtime::{traits::Zero, FixedPointNumber, FixedPointOperand, FixedU128}; + +pub use pallet::*; +pub use weights::WeightInfo; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; + +// Type alias for `frame_system`'s account id. +type AccountIdOf<T> = <T as frame_system::Config>::AccountId; +// This pallet's asset id and balance type. +type AssetIdOf<T> = <T as Config>::AssetId; +// Generic fungible balance type. +type BalanceOf<T> = <<T as Config>::Currency as Inspect<AccountIdOf<T>>>::Balance; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// The runtime event type. + type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; + + /// The origin permissioned to create a conversion rate for an asset. + type CreateOrigin: EnsureOrigin<Self::RuntimeOrigin>; + + /// The origin permissioned to remove an existing conversion rate for an asset. + type RemoveOrigin: EnsureOrigin<Self::RuntimeOrigin>; + + /// The origin permissioned to update an existing conversion rate for an asset. + type UpdateOrigin: EnsureOrigin<Self::RuntimeOrigin>; + + /// The units in which we record balances. + type Balance: Balance + FixedPointOperand; + + /// The currency mechanism for this pallet. + type Currency: Inspect<Self::AccountId, Balance = Self::Balance>; + + /// The identifier for the class of asset. + type AssetId: frame_support::traits::tokens::AssetId; + } + + /// Maps an asset to its fixed point representation in the native balance. + /// + /// E.g. `native_amount = asset_amount * ConversionRateToNative::<T>::get(asset_id)` + #[pallet::storage] + pub type ConversionRateToNative<T: Config> = + StorageMap<_, Blake2_128Concat, T::AssetId, FixedU128, OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event<T: Config> { + // Some `asset_id` conversion rate was created. + AssetRateCreated { asset_id: T::AssetId, rate: FixedU128 }, + // Some `asset_id` conversion rate was removed. + AssetRateRemoved { asset_id: T::AssetId }, + // Some existing `asset_id` conversion rate was updated from `old` to `new`. + AssetRateUpdated { asset_id: T::AssetId, old: FixedU128, new: FixedU128 }, + } + + #[pallet::error] + pub enum Error<T> { + /// The given asset ID is unknown. + UnknownAssetId, + /// The given asset ID already has an assigned conversion rate and cannot be re-created. + AlreadyExists, + } + + #[pallet::call] + impl<T: Config> Pallet<T> { + /// Initialize a conversion rate to native balance for the given asset.
+ /// + /// ## Complexity + /// - O(1) + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::create())] + pub fn create( + origin: OriginFor, + asset_id: T::AssetId, + rate: FixedU128, + ) -> DispatchResult { + T::CreateOrigin::ensure_origin(origin)?; + + ensure!( + !ConversionRateToNative::::contains_key(asset_id), + Error::::AlreadyExists + ); + ConversionRateToNative::::set(asset_id, Some(rate)); + + Self::deposit_event(Event::AssetRateCreated { asset_id, rate }); + Ok(()) + } + + /// Update the conversion rate to native balance for the given asset. + /// + /// ## Complexity + /// - O(1) + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::update())] + pub fn update( + origin: OriginFor, + asset_id: T::AssetId, + rate: FixedU128, + ) -> DispatchResult { + T::UpdateOrigin::ensure_origin(origin)?; + + let mut old = FixedU128::zero(); + ConversionRateToNative::::mutate(asset_id, |maybe_rate| { + if let Some(r) = maybe_rate { + old = *r; + *r = rate; + + Ok(()) + } else { + Err(Error::::UnknownAssetId) + } + })?; + + Self::deposit_event(Event::AssetRateUpdated { asset_id, old, new: rate }); + Ok(()) + } + + /// Remove an existing conversion rate to native balance for the given asset. + /// + /// ## Complexity + /// - O(1) + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::remove())] + pub fn remove(origin: OriginFor, asset_id: T::AssetId) -> DispatchResult { + T::RemoveOrigin::ensure_origin(origin)?; + + ensure!( + ConversionRateToNative::::contains_key(asset_id), + Error::::UnknownAssetId + ); + ConversionRateToNative::::remove(asset_id); + + Self::deposit_event(Event::AssetRateRemoved { asset_id }); + Ok(()) + } + } +} + +/// Exposes conversion of an arbitrary balance of an asset to native balance. +impl ConversionFromAssetBalance, AssetIdOf, BalanceOf> for Pallet +where + T: Config, + BalanceOf: FixedPointOperand + Zero, +{ + type Error = pallet::Error; + + fn from_asset_balance( + balance: BalanceOf, + asset_id: AssetIdOf, + ) -> Result, pallet::Error> { + let rate = pallet::ConversionRateToNative::::get(asset_id) + .ok_or(pallet::Error::::UnknownAssetId.into())?; + Ok(rate.saturating_mul_int(balance)) + } +} diff --git a/frame/asset-rate/src/mock.rs b/frame/asset-rate/src/mock.rs new file mode 100644 index 0000000000000..9775b7a747926 --- /dev/null +++ b/frame/asset-rate/src/mock.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The crate's mock. 
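A minimal, illustrative sketch (not part of the diff) of the arithmetic performed by the `ConversionFromAssetBalance` implementation above, assuming a hypothetical asset whose stored rate is 0.5 and an asset balance of 10:

use sp_runtime::{FixedPointNumber, FixedU128};

fn example_conversion() {
    // Hypothetical rate, as `create` would store it for some asset.
    let rate = FixedU128::from_float(0.5);
    // Mirrors `from_asset_balance`: native = asset * rate, truncated toward zero.
    let native: u64 = rate.saturating_mul_int(10u64);
    assert_eq!(native, 5);
}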
+ +use crate as pallet_asset_rate; +use frame_support::traits::{ConstU16, ConstU64}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + AssetRate: pallet_asset_rate, + Balances: pallet_balances, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = ConstU16<42>; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type HoldIdentifier = (); + type FreezeIdentifier = (); + type MaxHolds = (); + type MaxFreezes = (); +} + +impl pallet_asset_rate::Config for Test { + type WeightInfo = (); + type RuntimeEvent = RuntimeEvent; + type CreateOrigin = frame_system::EnsureRoot; + type RemoveOrigin = frame_system::EnsureRoot; + type UpdateOrigin = frame_system::EnsureRoot; + type Balance = u64; + type Currency = Balances; + type AssetId = u32; +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default().build_storage::().unwrap().into() +} diff --git a/frame/asset-rate/src/tests.rs b/frame/asset-rate/src/tests.rs new file mode 100644 index 0000000000000..4e5a3167bef21 --- /dev/null +++ b/frame/asset-rate/src/tests.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The crate's tests. 
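Illustrative note (not part of the diff): because the pallet implements `ConversionFromAssetBalance`, a runtime can hand it to any component that expects that trait, for example when valuing spends denominated in arbitrary assets; the `Runtime` and `BalanceConverter` names below are placeholders, not APIs introduced by this change.

// Hypothetical runtime wiring:
// type BalanceConverter = pallet_asset_rate::Pallet<Runtime>;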
+ +use super::*; +use crate::pallet as pallet_asset_rate; +use frame_support::{assert_noop, assert_ok}; +use mock::{new_test_ext, AssetRate, RuntimeOrigin, Test}; +use sp_runtime::FixedU128; + +const ASSET_ID: u32 = 42; + +#[test] +fn create_works() { + new_test_ext().execute_with(|| { + assert!(pallet_asset_rate::ConversionRateToNative::::get(ASSET_ID).is_none()); + assert_ok!(AssetRate::create(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.1))); + + assert_eq!( + pallet_asset_rate::ConversionRateToNative::::get(ASSET_ID), + Some(FixedU128::from_float(0.1)) + ); + }); +} + +#[test] +fn create_existing_throws() { + new_test_ext().execute_with(|| { + assert!(pallet_asset_rate::ConversionRateToNative::::get(ASSET_ID).is_none()); + assert_ok!(AssetRate::create(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.1))); + + assert_noop!( + AssetRate::create(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.1)), + Error::::AlreadyExists + ); + }); +} + +#[test] +fn remove_works() { + new_test_ext().execute_with(|| { + assert_ok!(AssetRate::create(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.1))); + + assert_ok!(AssetRate::remove(RuntimeOrigin::root(), ASSET_ID,)); + assert!(pallet_asset_rate::ConversionRateToNative::::get(ASSET_ID).is_none()); + }); +} + +#[test] +fn remove_unknown_throws() { + new_test_ext().execute_with(|| { + assert_noop!( + AssetRate::remove(RuntimeOrigin::root(), ASSET_ID,), + Error::::UnknownAssetId + ); + }); +} + +#[test] +fn update_works() { + new_test_ext().execute_with(|| { + assert_ok!(AssetRate::create(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.1))); + assert_ok!(AssetRate::update(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.5))); + + assert_eq!( + pallet_asset_rate::ConversionRateToNative::::get(ASSET_ID), + Some(FixedU128::from_float(0.5)) + ); + }); +} + +#[test] +fn update_unknown_throws() { + new_test_ext().execute_with(|| { + assert_noop!( + AssetRate::update(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.5)), + Error::::UnknownAssetId + ); + }); +} + +#[test] +fn convert_works() { + new_test_ext().execute_with(|| { + assert_ok!(AssetRate::create(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(2.51))); + + let conversion = , + ::AssetId, + BalanceOf, + >>::from_asset_balance(10, ASSET_ID); + assert_eq!(conversion.expect("Conversion rate exists for asset"), 25); + }); +} + +#[test] +fn convert_unknown_throws() { + new_test_ext().execute_with(|| { + let conversion = , + ::AssetId, + BalanceOf, + >>::from_asset_balance(10, ASSET_ID); + assert!(conversion.is_err()); + }); +} diff --git a/frame/asset-rate/src/weights.rs b/frame/asset-rate/src/weights.rs new file mode 100644 index 0000000000000..4fae62634ef13 --- /dev/null +++ b/frame/asset-rate/src/weights.rs @@ -0,0 +1,129 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Autogenerated weights for pallet_asset_rate +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `local`, CPU: `` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/substrate +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_rate +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/asset-rate/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_asset_rate. +pub trait WeightInfo { + fn create() -> Weight; + fn update() -> Weight; + fn remove() -> Weight; +} + +/// Weights for pallet_asset_rate using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3501` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(7_000_000, 3501) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + fn update() -> Weight { + // Proof Size summary in bytes: + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(8_000_000, 3501) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + fn remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(8_000_000, 3501) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3501` + // Minimum execution time: 6_000_000 picoseconds. 
+ Weight::from_parts(7_000_000, 3501) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + fn update() -> Weight { + // Proof Size summary in bytes: + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(8_000_000, 3501) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + fn remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(8_000_000, 3501) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 6fe0e3e6c3042..d3adc47f5f803 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } # Needed for various traits. In our case, `OnFinalize`. sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 5b4f1489df5db..047ede2ce6964 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -105,14 +105,18 @@ fn add_sufficients, I: 'static>(minter: T::AccountId, n: u32) { fn add_approvals, I: 'static>(minter: T::AccountId, n: u32) { let asset_id = default_asset_id::(); - T::Currency::deposit_creating(&minter, T::ApprovalDeposit::get() * n.into()); + T::Currency::deposit_creating( + &minter, + T::ApprovalDeposit::get() * n.into() + T::Currency::minimum_balance(), + ); let minter_lookup = T::Lookup::unlookup(minter.clone()); let origin = SystemOrigin::Signed(minter); Assets::::mint(origin.clone().into(), asset_id, minter_lookup, (100 * (n + 1)).into()) .unwrap(); + let enough = T::Currency::minimum_balance(); for i in 0..n { let target = account("approval", i, SEED); - T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + T::Currency::make_free_balance_be(&target, enough); let target_lookup = T::Lookup::unlookup(target); Assets::::approve_transfer( origin.clone().into(), diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index 47657ff267494..1e10e0066e851 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -76,7 +76,14 @@ impl, I: 'static> Pallet { d.sufficients += 1; ExistenceReason::Sufficient } else { - frame_system::Pallet::::inc_consumers(who).map_err(|_| Error::::NoProvider)?; + frame_system::Pallet::::inc_consumers(who) + .map_err(|_| Error::::UnavailableConsumer)?; + // We ensure that we can still increment consumers once more because we could otherwise + // allow accidental 
usage of all consumer references which could cause grief. + if !frame_system::Pallet::::can_inc_consumer(who) { + frame_system::Pallet::::dec_consumers(who); + return Err(Error::::UnavailableConsumer.into()) + } ExistenceReason::Consumer }; d.accounts = accounts; @@ -131,7 +138,7 @@ impl, I: 'static> Pallet { if amount < details.min_balance { return DepositConsequence::BelowMinimum } - if !details.is_sufficient && !frame_system::Pallet::::can_inc_consumer(who) { + if !details.is_sufficient && !frame_system::Pallet::::can_accrue_consumers(who, 2) { return DepositConsequence::CannotCreate } if details.is_sufficient && details.sufficients.checked_add(1).is_none() { @@ -165,7 +172,7 @@ impl, I: 'static> Pallet { } let account = match Account::::get(id, who) { Some(a) => a, - None => return NoFunds, + None => return BalanceLow, }; if account.is_frozen { return Frozen @@ -193,7 +200,7 @@ impl, I: 'static> Pallet { Success } } else { - NoFunds + BalanceLow } } @@ -254,7 +261,7 @@ impl, I: 'static> Pallet { ensure!(f.best_effort || actual >= amount, Error::::BalanceLow); let conseq = Self::can_decrease(id, target, actual, f.keep_alive); - let actual = match conseq.into_result() { + let actual = match conseq.into_result(f.keep_alive) { Ok(dust) => actual.saturating_add(dust), //< guaranteed by reducible_balance Err(e) => { debug_assert!(false, "passed from reducible_balance; qed"); diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index f420ea02c31e9..893d74b6aa306 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -17,6 +17,16 @@ //! Implementations for fungibles trait. +use frame_support::{ + defensive, + traits::tokens::{ + Fortitude, + Precision::{self, BestEffort}, + Preservation::{self, Expendable}, + Provenance::{self, Minted}, + }, +}; + use super::*; impl, I: 'static> fungibles::Inspect<::AccountId> for Pallet { @@ -35,21 +45,27 @@ impl, I: 'static> fungibles::Inspect<::AccountId Pallet::::balance(asset, who) } + fn total_balance(asset: Self::AssetId, who: &::AccountId) -> Self::Balance { + Pallet::::balance(asset, who) + } + fn reducible_balance( asset: Self::AssetId, who: &::AccountId, - keep_alive: bool, + preservation: Preservation, + _: Fortitude, ) -> Self::Balance { - Pallet::::reducible_balance(asset, who, keep_alive).unwrap_or(Zero::zero()) + Pallet::::reducible_balance(asset, who, !matches!(preservation, Expendable)) + .unwrap_or(Zero::zero()) } fn can_deposit( asset: Self::AssetId, who: &::AccountId, amount: Self::Balance, - mint: bool, + provenance: Provenance, ) -> DepositConsequence { - Pallet::::can_increase(asset, who, amount, mint) + Pallet::::can_increase(asset, who, amount, provenance == Minted) } fn can_withdraw( @@ -65,69 +81,57 @@ impl, I: 'static> fungibles::Inspect<::AccountId } } -impl, I: 'static> fungibles::InspectMetadata<::AccountId> - for Pallet -{ - /// Return the name of an asset. - fn name(asset: &Self::AssetId) -> Vec { - Metadata::::get(asset).name.to_vec() - } - - /// Return the symbol of an asset. - fn symbol(asset: &Self::AssetId) -> Vec { - Metadata::::get(asset).symbol.to_vec() - } - - /// Return the decimals of an asset. 
- fn decimals(asset: &Self::AssetId) -> u8 { - Metadata::::get(asset).decimals - } -} - impl, I: 'static> fungibles::Mutate<::AccountId> for Pallet { - fn mint_into( - asset: Self::AssetId, - who: &::AccountId, + fn done_mint_into( + asset_id: Self::AssetId, + beneficiary: &::AccountId, amount: Self::Balance, - ) -> DispatchResult { - Self::do_mint(asset, who, amount, None) + ) { + Self::deposit_event(Event::Issued { asset_id, owner: beneficiary.clone(), amount }) } - fn burn_from( - asset: Self::AssetId, - who: &::AccountId, - amount: Self::Balance, - ) -> Result { - let f = DebitFlags { keep_alive: false, best_effort: false }; - Self::do_burn(asset, who, amount, None, f) + fn done_burn_from( + asset_id: Self::AssetId, + target: &::AccountId, + balance: Self::Balance, + ) { + Self::deposit_event(Event::Burned { asset_id, owner: target.clone(), balance }); } - fn slash( - asset: Self::AssetId, - who: &::AccountId, + fn done_transfer( + asset_id: Self::AssetId, + source: &::AccountId, + dest: &::AccountId, amount: Self::Balance, - ) -> Result { - let f = DebitFlags { keep_alive: false, best_effort: true }; - Self::do_burn(asset, who, amount, None, f) + ) { + Self::deposit_event(Event::Transferred { + asset_id, + from: source.clone(), + to: dest.clone(), + amount, + }); } } -impl, I: 'static> fungibles::Transfer for Pallet { - fn transfer( - asset: Self::AssetId, - source: &T::AccountId, - dest: &T::AccountId, - amount: T::Balance, - keep_alive: bool, - ) -> Result { - let f = TransferFlags { keep_alive, best_effort: false, burn_dust: false }; - Self::do_transfer(asset, source, dest, amount, None, f) - } +impl, I: 'static> fungibles::Balanced<::AccountId> + for Pallet +{ + type OnDropCredit = fungibles::DecreaseIssuance; + type OnDropDebt = fungibles::IncreaseIssuance; } impl, I: 'static> fungibles::Unbalanced for Pallet { - fn set_balance(_: Self::AssetId, _: &T::AccountId, _: Self::Balance) -> DispatchResult { - unreachable!("set_balance is not used if other functions are impl'd"); + fn handle_raw_dust(_: Self::AssetId, _: Self::Balance) {} + fn handle_dust(_: fungibles::Dust) { + defensive!("`decrease_balance` and `increase_balance` have non-default impls; nothing else calls this; qed"); + } + fn write_balance( + _: Self::AssetId, + _: &T::AccountId, + _: Self::Balance, + ) -> Result, DispatchError> { + defensive!("write_balance is not used if other functions are impl'd"); + Err(DispatchError::Unavailable) } fn set_total_issuance(id: T::AssetId, amount: Self::Balance) { Asset::::mutate_exists(id, |maybe_asset| { @@ -140,36 +144,27 @@ impl, I: 'static> fungibles::Unbalanced for Pallet Result { - let f = DebitFlags { keep_alive: false, best_effort: false }; + let f = DebitFlags { + keep_alive: preservation != Expendable, + best_effort: precision == BestEffort, + }; Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) } - fn decrease_balance_at_most( - asset: T::AssetId, - who: &T::AccountId, - amount: Self::Balance, - ) -> Self::Balance { - let f = DebitFlags { keep_alive: false, best_effort: true }; - Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())).unwrap_or(Zero::zero()) - } fn increase_balance( asset: T::AssetId, who: &T::AccountId, amount: Self::Balance, + _: Precision, ) -> Result { Self::increase_balance(asset, who, amount, |_| Ok(()))?; Ok(amount) } - fn increase_balance_at_most( - asset: T::AssetId, - who: &T::AccountId, - amount: Self::Balance, - ) -> Self::Balance { - match Self::increase_balance(asset, who, amount, |_| Ok(())) { - Ok(()) => amount, - Err(_) 
=> Zero::zero(), - } - } + + // TODO: #13196 implement deactivate/reactivate once we have inactive balance tracking. } impl, I: 'static> fungibles::Create for Pallet { diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index b36a5cabd377a..d32b407e67f6e 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -540,9 +540,9 @@ pub mod pallet { /// Minimum balance should be non-zero. MinBalanceZero, /// Unable to increment the consumer reference counters on the account. Either no provider - /// reference exists to allow a non-zero balance of a non-self-sufficient asset, or the - /// maximum number of consumers has been reached. - NoProvider, + /// reference exists to allow a non-zero balance of a non-self-sufficient asset, or one + /// fewer then the maximum number of consumers has been reached. + UnavailableConsumer, /// Invalid metadata given. BadMetadata, /// No approval exists that would allow the transfer. @@ -568,7 +568,7 @@ pub mod pallet { CallbackFailed, } - #[pallet::call] + #[pallet::call(weight(>::WeightInfo))] impl, I: 'static> Pallet { /// Issue a new class of fungible assets from a public origin. /// @@ -590,7 +590,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, id: T::AssetIdParameter, @@ -654,7 +653,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, id: T::AssetIdParameter, @@ -680,7 +678,6 @@ pub mod pallet { /// /// The asset class must be frozen before calling `start_destroy`. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::start_destroy())] pub fn start_destroy(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { Ok(_) => None, @@ -749,7 +746,6 @@ pub mod pallet { /// /// Each successful call emits the `Event::Destroyed` event. #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::finish_destroy())] pub fn finish_destroy(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let _ = ensure_signed(origin)?; let id: T::AssetId = id.into(); @@ -769,7 +765,6 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: Pre-existing balance of `beneficiary`; Account pre-existence of `beneficiary`. #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::mint())] pub fn mint( origin: OriginFor, id: T::AssetIdParameter, @@ -799,7 +794,6 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: Post-existence of `who`; Pre & post Zombie-status of `who`. #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::burn())] pub fn burn( origin: OriginFor, id: T::AssetIdParameter, @@ -834,7 +828,6 @@ pub mod pallet { /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of /// `target`. #[pallet::call_index(8)] - #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, id: T::AssetIdParameter, @@ -868,7 +861,6 @@ pub mod pallet { /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of /// `target`. #[pallet::call_index(9)] - #[pallet::weight(T::WeightInfo::transfer_keep_alive())] pub fn transfer_keep_alive( origin: OriginFor, id: T::AssetIdParameter, @@ -903,7 +895,6 @@ pub mod pallet { /// Modes: Pre-existence of `dest`; Post-existence of `source`; Account pre-existence of /// `dest`. 
#[pallet::call_index(10)] - #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, id: T::AssetIdParameter, @@ -931,7 +922,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(11)] - #[pallet::weight(T::WeightInfo::freeze())] pub fn freeze( origin: OriginFor, id: T::AssetIdParameter, @@ -968,7 +958,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(12)] - #[pallet::weight(T::WeightInfo::thaw())] pub fn thaw( origin: OriginFor, id: T::AssetIdParameter, @@ -1004,7 +993,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(13)] - #[pallet::weight(T::WeightInfo::freeze_asset())] pub fn freeze_asset(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); @@ -1031,7 +1019,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(14)] - #[pallet::weight(T::WeightInfo::thaw_asset())] pub fn thaw_asset(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); @@ -1059,7 +1046,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(15)] - #[pallet::weight(T::WeightInfo::transfer_ownership())] pub fn transfer_ownership( origin: OriginFor, id: T::AssetIdParameter, @@ -1103,7 +1089,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(16)] - #[pallet::weight(T::WeightInfo::set_team())] pub fn set_team( origin: OriginFor, id: T::AssetIdParameter, @@ -1173,7 +1158,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(18)] - #[pallet::weight(T::WeightInfo::clear_metadata())] pub fn clear_metadata(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); @@ -1257,7 +1241,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(20)] - #[pallet::weight(T::WeightInfo::force_clear_metadata())] pub fn force_clear_metadata( origin: OriginFor, id: T::AssetIdParameter, @@ -1297,7 +1280,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(21)] - #[pallet::weight(T::WeightInfo::force_asset_status())] pub fn force_asset_status( origin: OriginFor, id: T::AssetIdParameter, @@ -1354,7 +1336,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(22)] - #[pallet::weight(T::WeightInfo::approve_transfer())] pub fn approve_transfer( origin: OriginFor, id: T::AssetIdParameter, @@ -1381,7 +1362,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(23)] - #[pallet::weight(T::WeightInfo::cancel_approval())] pub fn cancel_approval( origin: OriginFor, id: T::AssetIdParameter, @@ -1418,7 +1398,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(24)] - #[pallet::weight(T::WeightInfo::force_cancel_approval())] pub fn force_cancel_approval( origin: OriginFor, id: T::AssetIdParameter, @@ -1468,7 +1447,6 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::call_index(25)] - #[pallet::weight(T::WeightInfo::transfer_approved())] pub fn transfer_approved( origin: OriginFor, id: T::AssetIdParameter, @@ -1531,7 +1509,6 @@ pub mod pallet { /// /// Emits `AssetMinBalanceChanged` event when successful. 
#[pallet::call_index(28)] - #[pallet::weight(T::WeightInfo::set_min_balance())] pub fn set_min_balance( origin: OriginFor, id: T::AssetIdParameter, diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 3f456a7de3eda..3926d2fa8b010 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -74,7 +74,7 @@ impl frame_system::Config for Test { type SystemWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = ConstU32<2>; + type MaxConsumers = ConstU32<3>; } impl pallet_balances::Config for Test { @@ -87,6 +87,10 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 8]; + type HoldIdentifier = (); + type FreezeIdentifier = (); + type MaxHolds = (); + type MaxFreezes = (); } pub struct AssetsCallbackHandle; diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index bc61810a76ac4..4dacfac41cb6b 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -21,7 +21,8 @@ use super::*; use crate::{mock::*, Error}; use frame_support::{ assert_noop, assert_ok, - traits::{fungibles::InspectEnumerable, Currency}, + dispatch::GetDispatchInfo, + traits::{fungibles::InspectEnumerable, tokens::Preservation::Protect, Currency}, }; use pallet_balances::Error as BalancesError; use sp_io::storage; @@ -33,17 +34,60 @@ fn asset_ids() -> Vec { s } +#[test] +fn transfer_should_never_burn() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + + while System::inc_consumers(&2).is_ok() {} + let _ = System::dec_consumers(&2); + let _ = System::dec_consumers(&2); + // Exactly one consumer ref remaining. 
+ assert_eq!(System::consumers(&2), 1); + + let _ = >::transfer(0, &1, &2, 50, Protect); + System::assert_has_event(RuntimeEvent::Assets(crate::Event::Transferred { + asset_id: 0, + from: 1, + to: 2, + amount: 50, + })); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 1) + Assets::balance(0, 2), 100); + }); +} + #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::force_create(RuntimeOrigin::root(), 1, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + System::assert_last_event(RuntimeEvent::Assets(crate::Event::Issued { + asset_id: 0, + owner: 1, + amount: 100, + })); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); + System::assert_last_event(RuntimeEvent::Assets(crate::Event::Issued { + asset_id: 0, + owner: 2, + amount: 100, + })); assert_eq!(Assets::balance(0, 2), 100); assert_eq!(asset_ids(), vec![0, 1, 999]); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 1, 1, 100)); + System::assert_last_event(RuntimeEvent::Assets(crate::Event::Issued { + asset_id: 1, + owner: 1, + amount: 100, + })); assert_eq!(Assets::account_balances(1), vec![(0, 100), (999, 100), (1, 100)]); }); } @@ -93,7 +137,7 @@ fn minting_insufficient_assets_with_deposit_without_consumer_should_work() { assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Balances::reserved_balance(&1), 10); - assert_eq!(System::consumers(&1), 0); + assert_eq!(System::consumers(&1), 1); }); } @@ -167,7 +211,7 @@ fn approval_lifecycle_works() { // so we create it :) assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_eq!(Balances::reserved_balance(&1), 1); @@ -193,7 +237,7 @@ fn transfer_approved_all_funds() { // so we create it :) assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_eq!(Balances::reserved_balance(&1), 1); @@ -215,7 +259,7 @@ fn approval_deposits_work() { let e = BalancesError::::InsufficientBalance; assert_noop!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), e); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Balances::reserved_balance(&1), 1); @@ -233,7 +277,7 @@ fn cannot_transfer_more_than_approved() { new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); let e = Error::::Unapproved; assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 51), e); @@ -245,7 +289,7 @@ fn cannot_transfer_more_than_exists() { 
new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 101)); let e = Error::::BalanceLow; assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 101), e); @@ -257,7 +301,7 @@ fn cancel_approval_works() { new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_noop!( @@ -287,7 +331,7 @@ fn force_cancel_approval_works() { new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); let e = Error::::NoPermission; @@ -516,7 +560,7 @@ fn min_balance_should_work() { // Death by `transfer_approved`. assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 100)); assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 91)); assert_eq!(take_hooks(), vec![Hook::Died(0, 1)]); @@ -766,6 +810,11 @@ fn burning_asset_balance_with_positive_balance_should_work() { assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 1, u64::MAX)); + System::assert_last_event(RuntimeEvent::Assets(crate::Event::Burned { + asset_id: 0, + owner: 1, + balance: 100, + })); assert_eq!(Assets::balance(0, 1), 0); }); } @@ -1156,7 +1205,7 @@ fn set_min_balance_should_work() { #[test] fn balance_conversion_should_work() { new_test_ext().execute_with(|| { - use frame_support::traits::tokens::BalanceConversion; + use frame_support::traits::tokens::ConversionToAssetBalance; let id = 42; assert_ok!(Assets::force_create(RuntimeOrigin::root(), id, 1, true, 10)); @@ -1217,7 +1266,7 @@ fn querying_allowance_should_work() { use frame_support::traits::tokens::fungibles::approvals::{Inspect, Mutate}; assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - Balances::make_free_balance_be(&1, 1); + Balances::make_free_balance_be(&1, 2); assert_ok!(Assets::approve(0, &1, &2, 50)); assert_eq!(Assets::allowance(0, &1, &2), 50); // Transfer asset 0, from owner 1 and delegate 2 to destination 3 @@ -1316,3 +1365,32 @@ fn asset_create_and_destroy_is_reverted_if_callback_fails() { ); }); } + +#[test] +fn multiple_transfer_alls_work_ok() { + new_test_ext().execute_with(|| { + // Only run PoC when the system pallet is enabled, since the underlying bug is in the + // system pallet it won't work with BalancesAccountStore + // Start with a balance of 100 + Balances::force_set_balance(RuntimeOrigin::root(), 1, 100).unwrap(); + // Emulate a sufficient, in reality this could be reached by transferring a 
sufficient + // asset to the account + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + // Spend the same balance multiple times + assert_ok!(Balances::transfer_all(RuntimeOrigin::signed(1), 1337, false)); + assert_ok!(Balances::transfer_all(RuntimeOrigin::signed(1), 1337, false)); + + assert_eq!(Balances::free_balance(&1), 0); + assert_eq!(Balances::free_balance(&1337), 100); + }); +} + +#[test] +fn weights_sane() { + let info = crate::Call::::create { id: 10, admin: 4, min_balance: 3 }.get_dispatch_info(); + assert_eq!(<() as crate::WeightInfo>::create(), info.weight); + + let info = crate::Call::::finish_destroy { id: 10 }.get_dispatch_info(); + assert_eq!(<() as crate::WeightInfo>::finish_destroy(), info.weight); +} diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs index d50461ba59898..c83e764f4a68d 100644 --- a/frame/assets/src/types.rs +++ b/frame/assets/src/types.rs @@ -20,7 +20,7 @@ use super::*; use frame_support::{ pallet_prelude::*, - traits::{fungible, tokens::BalanceConversion}, + traits::{fungible, tokens::ConversionToAssetBalance}, }; use sp_runtime::{traits::Convert, FixedPointNumber, FixedPointOperand, FixedU128}; @@ -228,7 +228,7 @@ type BalanceOf = >>::Balance; /// Converts a balance value into an asset balance based on the ratio between the fungible's /// minimum balance and the minimum asset balance. pub struct BalanceToAssetBalance(PhantomData<(F, T, CON, I)>); -impl BalanceConversion, AssetIdOf, AssetBalanceOf> +impl ConversionToAssetBalance, AssetIdOf, AssetBalanceOf> for BalanceToAssetBalance where F: fungible::Inspect>, diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index b9c3ab21d8a33..4cc90e25f43fa 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_assets // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_assets -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/assets/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -87,10 +86,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `325` - // Estimated: `7268` - // Minimum execution time: 28_265_000 picoseconds. - Weight::from_parts(28_764_000, 7268) + // Measured: `293` + // Estimated: `3675` + // Minimum execution time: 33_678_000 picoseconds. 
+ Weight::from_parts(34_320_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -100,8 +99,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 15_125_000 picoseconds. - Weight::from_parts(15_468_000, 3675) + // Minimum execution time: 15_521_000 picoseconds. + Weight::from_parts(16_035_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -109,10 +108,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn start_destroy() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_368_000 picoseconds. - Weight::from_parts(15_625_000, 3675) + // Minimum execution time: 15_921_000 picoseconds. + Weight::from_parts(16_153_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -125,17 +124,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `25 + c * (240 ±0)` - // Estimated: `8232 + c * (5180 ±0)` - // Minimum execution time: 20_816_000 picoseconds. - Weight::from_parts(21_045_000, 8232) - // Standard Error: 7_118 - .saturating_add(Weight::from_parts(12_723_454, 0).saturating_mul(c.into())) + // Measured: `0 + c * (208 ±0)` + // Estimated: `3675 + c * (2603 ±0)` + // Minimum execution time: 21_242_000 picoseconds. + Weight::from_parts(21_532_000, 3675) + // Standard Error: 6_449 + .saturating_add(Weight::from_parts(13_150_845, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 5180).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(c.into())) } /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) @@ -144,12 +143,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy_approvals(a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `554 + a * (86 ±0)` - // Estimated: `7288 + a * (2623 ±0)` - // Minimum execution time: 20_923_000 picoseconds. - Weight::from_parts(21_229_000, 7288) - // Standard Error: 7_215 - .saturating_add(Weight::from_parts(12_915_292, 0).saturating_mul(a.into())) + // Measured: `522 + a * (86 ±0)` + // Estimated: `3675 + a * (2623 ±0)` + // Minimum execution time: 21_604_000 picoseconds. 
+ Weight::from_parts(21_881_000, 3675) + // Standard Error: 4_225 + .saturating_add(Weight::from_parts(15_968_205, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -162,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn finish_destroy() -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7280` - // Minimum execution time: 15_764_000 picoseconds. - Weight::from_parts(16_245_000, 7280) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 16_465_000 picoseconds. + Weight::from_parts(16_775_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -175,10 +174,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7242` - // Minimum execution time: 28_814_000 picoseconds. - Weight::from_parts(29_407_000, 7242) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 30_709_000 picoseconds. + Weight::from_parts(31_159_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -188,10 +187,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `491` - // Estimated: `7242` - // Minimum execution time: 34_784_000 picoseconds. - Weight::from_parts(35_402_000, 7242) + // Measured: `459` + // Estimated: `3675` + // Minimum execution time: 36_944_000 picoseconds. + Weight::from_parts(38_166_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -203,10 +202,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `13412` - // Minimum execution time: 49_110_000 picoseconds. - Weight::from_parts(50_483_000, 13412) + // Measured: `498` + // Estimated: `6144` + // Minimum execution time: 52_497_000 picoseconds. + Weight::from_parts(53_147_000, 6144) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -218,10 +217,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `13412` - // Minimum execution time: 43_585_000 picoseconds. - Weight::from_parts(44_207_000, 13412) + // Measured: `498` + // Estimated: `6144` + // Minimum execution time: 46_203_000 picoseconds. 
+ Weight::from_parts(46_853_000, 6144) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -233,10 +232,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `13412` - // Minimum execution time: 49_238_000 picoseconds. - Weight::from_parts(77_664_000, 13412) + // Measured: `498` + // Estimated: `6144` + // Minimum execution time: 52_911_000 picoseconds. + Weight::from_parts(55_511_000, 6144) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -246,10 +245,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `491` - // Estimated: `7242` - // Minimum execution time: 19_198_000 picoseconds. - Weight::from_parts(19_585_000, 7242) + // Measured: `459` + // Estimated: `3675` + // Minimum execution time: 20_308_000 picoseconds. + Weight::from_parts(20_524_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -259,10 +258,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `491` - // Estimated: `7242` - // Minimum execution time: 19_659_000 picoseconds. - Weight::from_parts(20_079_000, 7242) + // Measured: `459` + // Estimated: `3675` + // Minimum execution time: 20_735_000 picoseconds. + Weight::from_parts(21_026_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -270,10 +269,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn freeze_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_035_000 picoseconds. - Weight::from_parts(15_594_000, 3675) + // Minimum execution time: 16_148_000 picoseconds. + Weight::from_parts(16_404_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -281,10 +280,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn thaw_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_073_000 picoseconds. - Weight::from_parts(15_862_000, 3675) + // Minimum execution time: 16_075_000 picoseconds. + Weight::from_parts(16_542_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,10 +293,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7280` - // Minimum execution time: 17_343_000 picoseconds. - Weight::from_parts(17_656_000, 7280) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 17_866_000 picoseconds. 
+ Weight::from_parts(18_129_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -305,10 +304,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_902_000 picoseconds. - Weight::from_parts(16_439_000, 3675) + // Minimum execution time: 16_637_000 picoseconds. + Weight::from_parts(16_918_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -318,12 +317,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(_n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7280` - // Minimum execution time: 27_222_000 picoseconds. - Weight::from_parts(29_151_657, 7280) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 33_304_000 picoseconds. + Weight::from_parts(34_764_544, 3675) + // Standard Error: 634 + .saturating_add(Weight::from_parts(4_733, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -333,10 +334,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `7280` - // Minimum execution time: 27_976_000 picoseconds. - Weight::from_parts(28_289_000, 7280) + // Measured: `515` + // Estimated: `3675` + // Minimum execution time: 33_640_000 picoseconds. + Weight::from_parts(34_100_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -346,14 +347,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + fn force_set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` - // Estimated: `7280` - // Minimum execution time: 16_162_000 picoseconds. - Weight::from_parts(17_137_043, 7280) - // Standard Error: 982 - .saturating_add(Weight::from_parts(4_180, 0).saturating_mul(s.into())) + // Estimated: `3675` + // Minimum execution time: 16_549_000 picoseconds. + Weight::from_parts(17_337_982, 3675) + // Standard Error: 586 + .saturating_add(Weight::from_parts(4_584, 0).saturating_mul(n.into())) + // Standard Error: 586 + .saturating_add(Weight::from_parts(4_288, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -363,10 +366,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn force_clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `7280` - // Minimum execution time: 27_219_000 picoseconds. 
- Weight::from_parts(27_931_000, 7280) + // Measured: `515` + // Estimated: `3675` + // Minimum execution time: 34_183_000 picoseconds. + Weight::from_parts(34_577_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -374,10 +377,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn force_asset_status() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_313_000 picoseconds. - Weight::from_parts(15_775_000, 3675) + // Minimum execution time: 15_505_000 picoseconds. + Weight::from_parts(15_784_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -387,10 +390,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `417` - // Estimated: `7288` - // Minimum execution time: 31_865_000 picoseconds. - Weight::from_parts(32_316_000, 7288) + // Measured: `385` + // Estimated: `3675` + // Minimum execution time: 38_762_000 picoseconds. + Weight::from_parts(39_138_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -404,10 +407,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `700` - // Estimated: `17025` - // Minimum execution time: 67_203_000 picoseconds. - Weight::from_parts(109_742_000, 17025) + // Measured: `668` + // Estimated: `6144` + // Minimum execution time: 73_396_000 picoseconds. + Weight::from_parts(73_986_000, 6144) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -417,10 +420,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `587` - // Estimated: `7288` - // Minimum execution time: 33_430_000 picoseconds. - Weight::from_parts(33_824_000, 7288) + // Measured: `555` + // Estimated: `3675` + // Minimum execution time: 39_707_000 picoseconds. + Weight::from_parts(40_222_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -430,10 +433,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) fn force_cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `587` - // Estimated: `7288` - // Minimum execution time: 33_596_000 picoseconds. - Weight::from_parts(34_226_000, 7288) + // Measured: `555` + // Estimated: `3675` + // Minimum execution time: 40_357_000 picoseconds. 
+ Weight::from_parts(40_731_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -441,10 +444,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn set_min_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_367_000 picoseconds. - Weight::from_parts(16_703_000, 3675) + // Minimum execution time: 16_937_000 picoseconds. + Weight::from_parts(17_219_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -458,10 +461,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `325` - // Estimated: `7268` - // Minimum execution time: 28_265_000 picoseconds. - Weight::from_parts(28_764_000, 7268) + // Measured: `293` + // Estimated: `3675` + // Minimum execution time: 33_678_000 picoseconds. + Weight::from_parts(34_320_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -471,8 +474,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 15_125_000 picoseconds. - Weight::from_parts(15_468_000, 3675) + // Minimum execution time: 15_521_000 picoseconds. + Weight::from_parts(16_035_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -480,10 +483,10 @@ impl WeightInfo for () { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn start_destroy() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_368_000 picoseconds. - Weight::from_parts(15_625_000, 3675) + // Minimum execution time: 15_921_000 picoseconds. + Weight::from_parts(16_153_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -496,17 +499,17 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `25 + c * (240 ±0)` - // Estimated: `8232 + c * (5180 ±0)` - // Minimum execution time: 20_816_000 picoseconds. - Weight::from_parts(21_045_000, 8232) - // Standard Error: 7_118 - .saturating_add(Weight::from_parts(12_723_454, 0).saturating_mul(c.into())) + // Measured: `0 + c * (208 ±0)` + // Estimated: `3675 + c * (2603 ±0)` + // Minimum execution time: 21_242_000 picoseconds. 
+ Weight::from_parts(21_532_000, 3675) + // Standard Error: 6_449 + .saturating_add(Weight::from_parts(13_150_845, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 5180).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(c.into())) } /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) @@ -515,12 +518,12 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. fn destroy_approvals(a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `554 + a * (86 ±0)` - // Estimated: `7288 + a * (2623 ±0)` - // Minimum execution time: 20_923_000 picoseconds. - Weight::from_parts(21_229_000, 7288) - // Standard Error: 7_215 - .saturating_add(Weight::from_parts(12_915_292, 0).saturating_mul(a.into())) + // Measured: `522 + a * (86 ±0)` + // Estimated: `3675 + a * (2623 ±0)` + // Minimum execution time: 21_604_000 picoseconds. + Weight::from_parts(21_881_000, 3675) + // Standard Error: 4_225 + .saturating_add(Weight::from_parts(15_968_205, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -533,10 +536,10 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn finish_destroy() -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7280` - // Minimum execution time: 15_764_000 picoseconds. - Weight::from_parts(16_245_000, 7280) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 16_465_000 picoseconds. + Weight::from_parts(16_775_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -546,10 +549,10 @@ impl WeightInfo for () { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7242` - // Minimum execution time: 28_814_000 picoseconds. - Weight::from_parts(29_407_000, 7242) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 30_709_000 picoseconds. + Weight::from_parts(31_159_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -559,10 +562,10 @@ impl WeightInfo for () { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `491` - // Estimated: `7242` - // Minimum execution time: 34_784_000 picoseconds. - Weight::from_parts(35_402_000, 7242) + // Measured: `459` + // Estimated: `3675` + // Minimum execution time: 36_944_000 picoseconds. 
+ Weight::from_parts(38_166_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -574,10 +577,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `13412` - // Minimum execution time: 49_110_000 picoseconds. - Weight::from_parts(50_483_000, 13412) + // Measured: `498` + // Estimated: `6144` + // Minimum execution time: 52_497_000 picoseconds. + Weight::from_parts(53_147_000, 6144) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,10 +592,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `13412` - // Minimum execution time: 43_585_000 picoseconds. - Weight::from_parts(44_207_000, 13412) + // Measured: `498` + // Estimated: `6144` + // Minimum execution time: 46_203_000 picoseconds. + Weight::from_parts(46_853_000, 6144) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -604,10 +607,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `13412` - // Minimum execution time: 49_238_000 picoseconds. - Weight::from_parts(77_664_000, 13412) + // Measured: `498` + // Estimated: `6144` + // Minimum execution time: 52_911_000 picoseconds. + Weight::from_parts(55_511_000, 6144) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -617,10 +620,10 @@ impl WeightInfo for () { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `491` - // Estimated: `7242` - // Minimum execution time: 19_198_000 picoseconds. - Weight::from_parts(19_585_000, 7242) + // Measured: `459` + // Estimated: `3675` + // Minimum execution time: 20_308_000 picoseconds. + Weight::from_parts(20_524_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -630,10 +633,10 @@ impl WeightInfo for () { /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `491` - // Estimated: `7242` - // Minimum execution time: 19_659_000 picoseconds. - Weight::from_parts(20_079_000, 7242) + // Measured: `459` + // Estimated: `3675` + // Minimum execution time: 20_735_000 picoseconds. + Weight::from_parts(21_026_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -641,10 +644,10 @@ impl WeightInfo for () { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn freeze_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_035_000 picoseconds. - Weight::from_parts(15_594_000, 3675) + // Minimum execution time: 16_148_000 picoseconds. 
+ Weight::from_parts(16_404_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -652,10 +655,10 @@ impl WeightInfo for () { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn thaw_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_073_000 picoseconds. - Weight::from_parts(15_862_000, 3675) + // Minimum execution time: 16_075_000 picoseconds. + Weight::from_parts(16_542_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -665,10 +668,10 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7280` - // Minimum execution time: 17_343_000 picoseconds. - Weight::from_parts(17_656_000, 7280) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 17_866_000 picoseconds. + Weight::from_parts(18_129_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -676,10 +679,10 @@ impl WeightInfo for () { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_902_000 picoseconds. - Weight::from_parts(16_439_000, 3675) + // Minimum execution time: 16_637_000 picoseconds. + Weight::from_parts(16_918_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -689,12 +692,14 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(_n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `383` - // Estimated: `7280` - // Minimum execution time: 27_222_000 picoseconds. - Weight::from_parts(29_151_657, 7280) + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 33_304_000 picoseconds. + Weight::from_parts(34_764_544, 3675) + // Standard Error: 634 + .saturating_add(Weight::from_parts(4_733, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -704,10 +709,10 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `7280` - // Minimum execution time: 27_976_000 picoseconds. - Weight::from_parts(28_289_000, 7280) + // Measured: `515` + // Estimated: `3675` + // Minimum execution time: 33_640_000 picoseconds. + Weight::from_parts(34_100_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -717,14 +722,16 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. 
- fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + fn force_set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` - // Estimated: `7280` - // Minimum execution time: 16_162_000 picoseconds. - Weight::from_parts(17_137_043, 7280) - // Standard Error: 982 - .saturating_add(Weight::from_parts(4_180, 0).saturating_mul(s.into())) + // Estimated: `3675` + // Minimum execution time: 16_549_000 picoseconds. + Weight::from_parts(17_337_982, 3675) + // Standard Error: 586 + .saturating_add(Weight::from_parts(4_584, 0).saturating_mul(n.into())) + // Standard Error: 586 + .saturating_add(Weight::from_parts(4_288, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,10 +741,10 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn force_clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `7280` - // Minimum execution time: 27_219_000 picoseconds. - Weight::from_parts(27_931_000, 7280) + // Measured: `515` + // Estimated: `3675` + // Minimum execution time: 34_183_000 picoseconds. + Weight::from_parts(34_577_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -745,10 +752,10 @@ impl WeightInfo for () { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn force_asset_status() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_313_000 picoseconds. - Weight::from_parts(15_775_000, 3675) + // Minimum execution time: 15_505_000 picoseconds. + Weight::from_parts(15_784_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -758,10 +765,10 @@ impl WeightInfo for () { /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `417` - // Estimated: `7288` - // Minimum execution time: 31_865_000 picoseconds. - Weight::from_parts(32_316_000, 7288) + // Measured: `385` + // Estimated: `3675` + // Minimum execution time: 38_762_000 picoseconds. + Weight::from_parts(39_138_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -775,10 +782,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `700` - // Estimated: `17025` - // Minimum execution time: 67_203_000 picoseconds. - Weight::from_parts(109_742_000, 17025) + // Measured: `668` + // Estimated: `6144` + // Minimum execution time: 73_396_000 picoseconds. + Weight::from_parts(73_986_000, 6144) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -788,10 +795,10 @@ impl WeightInfo for () { /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `587` - // Estimated: `7288` - // Minimum execution time: 33_430_000 picoseconds. 
- Weight::from_parts(33_824_000, 7288) + // Measured: `555` + // Estimated: `3675` + // Minimum execution time: 39_707_000 picoseconds. + Weight::from_parts(40_222_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -801,10 +808,10 @@ impl WeightInfo for () { /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) fn force_cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `587` - // Estimated: `7288` - // Minimum execution time: 33_596_000 picoseconds. - Weight::from_parts(34_226_000, 7288) + // Measured: `555` + // Estimated: `3675` + // Minimum execution time: 40_357_000 picoseconds. + Weight::from_parts(40_731_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -812,10 +819,10 @@ impl WeightInfo for () { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) fn set_min_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_367_000 picoseconds. - Weight::from_parts(16_703_000, 3675) + // Minimum execution time: 16_937_000 picoseconds. + Weight::from_parts(17_219_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index 9c3bcf73af985..fb0f9c0e47957 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 081a449ec9a2e..7437d62a99c95 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -62,6 +62,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl Config for Test { diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index d2fd5a5b9c372..ee6465ad3967f 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } diff --git a/frame/authority-discovery/Cargo.toml 
b/frame/authority-discovery/Cargo.toml index 50c6c411c5ae0..d8693bd85bffb 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-session = { version = "4.0.0-dev", default-features = false, features = [ diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index a205c51f9c62d..6d8cdd9371e94 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "derive", ] } impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index aa581392721ae..ff9e4b3aeac43 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } @@ -24,7 +24,7 @@ pallet-session = { version = "4.0.0-dev", default-features = false, path = "../s pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/vrf" } +sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } @@ -53,7 +53,7 @@ std = [ "scale-info/std", "sp-application-crypto/std", "sp-consensus-babe/std", - "sp-consensus-vrf/std", + "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-session/std", diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs 
index 61284267b07e7..e3ead424e7f6b 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -29,13 +29,13 @@ use frame_support::{ weights::Weight, BoundedVec, WeakBoundedVec, }; -use sp_application_crypto::ByteArray; use sp_consensus_babe::{ digests::{NextConfigDescriptor, NextEpochDescriptor, PreDigest}, AllowedSlots, BabeAuthorityWeight, BabeEpochConfiguration, ConsensusLog, Epoch, - EquivocationProof, Slot, BABE_ENGINE_ID, + EquivocationProof, Randomness as BabeRandomness, Slot, BABE_ENGINE_ID, RANDOMNESS_LENGTH, + RANDOMNESS_VRF_CONTEXT, }; -use sp_consensus_vrf::schnorrkel; +use sp_core::crypto::Wraps; use sp_runtime::{ generic::DigestItem, traits::{IsMember, One, SaturatedConversion, Saturating, Zero}, @@ -45,7 +45,7 @@ use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::{offence::OffenceReportSystem, SessionIndex}; use sp_std::prelude::*; -pub use sp_consensus_babe::{AuthorityId, PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH}; +pub use sp_consensus_babe::AuthorityId; const LOG_TARGET: &str = "runtime::babe"; @@ -218,7 +218,7 @@ pub mod pallet { // variable to its underlying value. #[pallet::storage] #[pallet::getter(fn randomness)] - pub type Randomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type Randomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Pending epoch configuration change that will be applied when the next epoch is enacted. #[pallet::storage] @@ -226,7 +226,7 @@ pub mod pallet { /// Next epoch randomness. #[pallet::storage] - pub(super) type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub(super) type NextRandomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Next epoch authorities. #[pallet::storage] @@ -254,7 +254,7 @@ pub mod pallet { _, Twox64Concat, u32, - BoundedVec>, + BoundedVec>, ValueQuery, >; @@ -270,8 +270,7 @@ pub mod pallet { /// It is set in `on_finalize`, before it will contain the value from the last block. #[pallet::storage] #[pallet::getter(fn author_vrf_randomness)] - pub(super) type AuthorVrfRandomness = - StorageValue<_, Option, ValueQuery>; + pub(super) type AuthorVrfRandomness = StorageValue<_, Option, ValueQuery>; /// The block numbers when the last and current epoch have started, respectively `N-1` and /// `N`. @@ -358,31 +357,33 @@ pub mod pallet { ); } - if let Some((vrf_output, vrf_proof)) = pre_digest.vrf() { - let randomness: Option = Authorities::::get() + if let Some(vrf_signature) = pre_digest.vrf_signature() { + let randomness: Option = Authorities::::get() .get(authority_index as usize) .and_then(|(authority, _)| { - schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() - }) - .and_then(|pubkey| { - let current_slot = CurrentSlot::::get(); - + let public = authority.as_inner_ref(); let transcript = sp_consensus_babe::make_transcript( &Self::randomness(), - current_slot, + CurrentSlot::::get(), EpochIndex::::get(), ); // NOTE: this is verified by the client when importing the block, before - // execution. we don't run the verification again here to avoid slowing + // execution. We don't run the verification again here to avoid slowing // down the runtime. 
- debug_assert!(pubkey - .vrf_verify(transcript.clone(), vrf_output, vrf_proof) - .is_ok()); - - vrf_output.0.attach_input_hash(&pubkey, transcript).ok() - }) - .map(|inout| inout.make_bytes(sp_consensus_babe::BABE_VRF_INOUT_CONTEXT)); + debug_assert!({ + use sp_core::crypto::VrfVerifier; + public.vrf_verify(&transcript, &vrf_signature) + }); + + public + .make_bytes( + RANDOMNESS_VRF_CONTEXT, + &transcript, + &vrf_signature.output, + ) + .ok() + }); if let Some(randomness) = pre_digest.is_primary().then(|| randomness).flatten() { @@ -484,9 +485,6 @@ pub mod pallet { } } -/// A BABE public key -pub type BabeKey = [u8; PUBLIC_KEY_LENGTH]; - impl FindAuthor for Pallet { fn find_author<'a, I>(digests: I) -> Option where @@ -737,7 +735,7 @@ impl Pallet { >::deposit_log(log) } - fn deposit_randomness(randomness: &schnorrkel::Randomness) { + fn deposit_randomness(randomness: &BabeRandomness) { let segment_idx = SegmentIndex::::get(); let mut segment = UnderConstruction::::get(&segment_idx); if segment.try_push(*randomness).is_ok() { @@ -831,7 +829,7 @@ impl Pallet { /// Call this function exactly once when an epoch changes, to update the /// randomness. Returns the new randomness. - fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { + fn randomness_change_epoch(next_epoch_index: u64) -> BabeRandomness { let this_randomness = NextRandomness::::get(); let segment_idx: u32 = SegmentIndex::::mutate(|s| sp_std::mem::replace(s, 0)); @@ -990,12 +988,12 @@ where // // an optional size hint as to how many VRF outputs there were may be provided. fn compute_randomness( - last_epoch_randomness: schnorrkel::Randomness, + last_epoch_randomness: BabeRandomness, epoch_index: u64, - rho: impl Iterator, + rho: impl Iterator, rho_size_hint: Option, -) -> schnorrkel::Randomness { - let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); +) -> BabeRandomness { + let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * RANDOMNESS_LENGTH); s.extend_from_slice(&last_epoch_randomness); s.extend_from_slice(&epoch_index.to_le_bytes()); diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 9b832bfffdb69..bccdb42c71ac3 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -25,10 +25,9 @@ use frame_support::{ traits::{ConstU128, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; -use sp_consensus_babe::{AuthorityId, AuthorityPair, Slot}; -use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ - crypto::{IsWrappedBy, KeyTypeId, Pair}, + crypto::{KeyTypeId, Pair, VrfSigner}, H256, U256, }; use sp_io; @@ -144,6 +143,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } pallet_staking_reward_curve::build! 
{ @@ -279,16 +282,10 @@ pub fn start_era(era_index: EraIndex) { pub fn make_primary_pre_digest( authority_index: sp_consensus_babe::AuthorityIndex, slot: sp_consensus_babe::Slot, - vrf_output: VRFOutput, - vrf_proof: VRFProof, + vrf_signature: VrfSignature, ) -> Digest { let digest_data = sp_consensus_babe::digests::PreDigest::Primary( - sp_consensus_babe::digests::PrimaryPreDigest { - authority_index, - slot, - vrf_output, - vrf_proof, - }, + sp_consensus_babe::digests::PrimaryPreDigest { authority_index, slot, vrf_signature }, ); let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); Digest { logs: vec![log] } @@ -308,16 +305,10 @@ pub fn make_secondary_plain_pre_digest( pub fn make_secondary_vrf_pre_digest( authority_index: sp_consensus_babe::AuthorityIndex, slot: sp_consensus_babe::Slot, - vrf_output: VRFOutput, - vrf_proof: VRFProof, + vrf_signature: VrfSignature, ) -> Digest { let digest_data = sp_consensus_babe::digests::PreDigest::SecondaryVRF( - sp_consensus_babe::digests::SecondaryVRFPreDigest { - authority_index, - slot, - vrf_output, - vrf_proof, - }, + sp_consensus_babe::digests::SecondaryVRFPreDigest { authority_index, slot, vrf_signature }, ); let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); Digest { logs: vec![log] } @@ -326,16 +317,16 @@ pub fn make_secondary_vrf_pre_digest( pub fn make_vrf_output( slot: Slot, pair: &sp_consensus_babe::AuthorityPair, -) -> (VRFOutput, VRFProof, [u8; 32]) { - let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); +) -> (VrfSignature, Randomness) { let transcript = sp_consensus_babe::make_transcript(&Babe::randomness(), slot, 0); - let vrf_inout = pair.vrf_sign(transcript); - let vrf_randomness: sp_consensus_vrf::schnorrkel::Randomness = - vrf_inout.0.make_bytes::<[u8; 32]>(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); - let vrf_output = VRFOutput(vrf_inout.0.to_output()); - let vrf_proof = VRFProof(vrf_inout.1); - (vrf_output, vrf_proof, vrf_randomness) + let signature = pair.as_ref().vrf_sign(&transcript); + + let randomness = pair + .as_ref() + .make_bytes::(sp_consensus_babe::RANDOMNESS_VRF_CONTEXT, &transcript); + + (signature, randomness) } pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { diff --git a/frame/babe/src/randomness.rs b/frame/babe/src/randomness.rs index b223df6804c0b..b9b24786b7a74 100644 --- a/frame/babe/src/randomness.rs +++ b/frame/babe/src/randomness.rs @@ -19,7 +19,7 @@ //! randomness collected from VRF outputs. 
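The changes to `randomness.rs` below only rename the reserved-capacity constant from `VRF_OUTPUT_LENGTH` to `RANDOMNESS_LENGTH`; each provider still hashes the caller's subject together with the stored epoch randomness. A minimal sketch of how downstream code consumes such a provider through `frame_support::traits::Randomness`; the helper name and generic bounds here are illustrative assumptions, not code from this PR:

use frame_support::traits::Randomness;

/// Illustrative consumer (hypothetical helper): derive a seed from any provider such as
/// `RandomnessFromOneEpochAgo<T>` defined in `randomness.rs` below, which hashes
/// `subject ++ stored_randomness` with the runtime's `Hashing`.
fn derive_seed<Output, BlockNumber, R: Randomness<Output, BlockNumber>>(
    subject: &[u8],
) -> (Output, BlockNumber) {
    // The returned block number tells the caller when this randomness was fixed, so it
    // can reject values that the current block's author could still have influenced.
    R::random(subject)
}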
use super::{ - AuthorVrfRandomness, Config, EpochStart, NextRandomness, Randomness, VRF_OUTPUT_LENGTH, + AuthorVrfRandomness, Config, EpochStart, NextRandomness, Randomness, RANDOMNESS_LENGTH, }; use frame_support::traits::Randomness as RandomnessT; use sp_runtime::traits::{Hash, One, Saturating}; @@ -132,7 +132,7 @@ pub struct CurrentBlockRandomness(sp_std::marker::PhantomData); impl RandomnessT for RandomnessFromTwoEpochsAgo { fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { let mut subject = subject.to_vec(); - subject.reserve(VRF_OUTPUT_LENGTH); + subject.reserve(RANDOMNESS_LENGTH); subject.extend_from_slice(&Randomness::::get()[..]); (T::Hashing::hash(&subject[..]), EpochStart::::get().0) @@ -142,7 +142,7 @@ impl RandomnessT for RandomnessFromTwoEpochs impl RandomnessT for RandomnessFromOneEpochAgo { fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { let mut subject = subject.to_vec(); - subject.reserve(VRF_OUTPUT_LENGTH); + subject.reserve(RANDOMNESS_LENGTH); subject.extend_from_slice(&NextRandomness::::get()[..]); (T::Hashing::hash(&subject[..]), EpochStart::::get().1) @@ -153,7 +153,7 @@ impl RandomnessT, T::BlockNumber> for ParentBlockRand fn random(subject: &[u8]) -> (Option, T::BlockNumber) { let random = AuthorVrfRandomness::::get().map(|random| { let mut subject = subject.to_vec(); - subject.reserve(VRF_OUTPUT_LENGTH); + subject.reserve(RANDOMNESS_LENGTH); subject.extend_from_slice(&random); T::Hashing::hash(&subject[..]) diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 8b63f41b37b74..8a9aa6fcc03d2 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -25,11 +25,12 @@ use frame_support::{ }; use mock::*; use pallet_session::ShouldEndSession; -use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot}; -use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_consensus_babe::{ + AllowedSlots, BabeEpochConfiguration, Slot, VrfSignature, RANDOMNESS_LENGTH, +}; use sp_core::crypto::Pair; -const EMPTY_RANDOMNESS: [u8; 32] = [ +const EMPTY_RANDOMNESS: [u8; RANDOMNESS_LENGTH] = [ 74, 25, 49, 128, 53, 97, 244, 49, 222, 202, 176, 2, 231, 66, 95, 10, 133, 49, 213, 228, 86, 161, 164, 127, 217, 153, 138, 37, 48, 192, 248, 0, ]; @@ -62,10 +63,9 @@ fn first_block_epoch_zero_start() { ext.execute_with(|| { let genesis_slot = Slot::from(100); - let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); + let (vrf_signature, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); - let first_vrf = vrf_output; - let pre_digest = make_primary_pre_digest(0, genesis_slot, first_vrf.clone(), vrf_proof); + let pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_signature); assert_eq!(Babe::genesis_slot(), Slot::from(0)); System::reset_events(); @@ -111,8 +111,8 @@ fn current_slot_is_processed_on_initialization() { ext.execute_with(|| { let genesis_slot = Slot::from(10); - let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); - let pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_output, vrf_proof); + let (vrf_signature, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); + let pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_signature); System::reset_events(); System::initialize(&1, &Default::default(), &pre_digest); @@ -134,14 +134,14 @@ fn current_slot_is_processed_on_initialization() { fn test_author_vrf_output(make_pre_digest: F) where - F: Fn(sp_consensus_babe::AuthorityIndex, Slot, VRFOutput, VRFProof) -> sp_runtime::Digest, + F: 
Fn(sp_consensus_babe::AuthorityIndex, Slot, VrfSignature) -> sp_runtime::Digest, { let (pairs, mut ext) = new_test_ext_with_pairs(1); ext.execute_with(|| { let genesis_slot = Slot::from(10); - let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); - let pre_digest = make_pre_digest(0, genesis_slot, vrf_output, vrf_proof); + let (vrf_signature, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); + let pre_digest = make_pre_digest(0, genesis_slot, vrf_signature); System::reset_events(); System::initialize(&1, &Default::default(), &pre_digest); diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index 379698b1d39e1..1678ce1ba2ac6 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } # primitives sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs index 65d27c5aea4bb..f2b65beba2c80 100644 --- a/frame/bags-list/src/weights.rs +++ b/frame/bags-list/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_bags_list //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -66,10 +66,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1916` - // Estimated: `19186` - // Minimum execution time: 54_348 nanoseconds. - Weight::from_parts(55_024_000, 19186) + // Measured: `1724` + // Estimated: `11506` + // Minimum execution time: 63_335_000 picoseconds. + Weight::from_parts(64_097_000, 11506) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -83,10 +83,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1810` - // Estimated: `19114` - // Minimum execution time: 53_306 nanoseconds. - Weight::from_parts(54_263_000, 19114) + // Measured: `1618` + // Estimated: `8877` + // Minimum execution time: 62_151_000 picoseconds. + Weight::from_parts(62_827_000, 8877) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -102,10 +102,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `2154` - // Estimated: `25798` - // Minimum execution time: 59_513 nanoseconds. 
- Weight::from_parts(60_717_000, 25798) + // Measured: `1930` + // Estimated: `11506` + // Minimum execution time: 69_179_000 picoseconds. + Weight::from_parts(69_898_000, 11506) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -123,10 +123,10 @@ impl WeightInfo for () { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1916` - // Estimated: `19186` - // Minimum execution time: 54_348 nanoseconds. - Weight::from_parts(55_024_000, 19186) + // Measured: `1724` + // Estimated: `11506` + // Minimum execution time: 63_335_000 picoseconds. + Weight::from_parts(64_097_000, 11506) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -140,10 +140,10 @@ impl WeightInfo for () { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1810` - // Estimated: `19114` - // Minimum execution time: 53_306 nanoseconds. - Weight::from_parts(54_263_000, 19114) + // Measured: `1618` + // Estimated: `8877` + // Minimum execution time: 62_151_000 picoseconds. + Weight::from_parts(62_827_000, 8877) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -159,10 +159,10 @@ impl WeightInfo for () { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `2154` - // Estimated: `25798` - // Minimum execution time: 59_513 nanoseconds. - Weight::from_parts(60_717_000, 25798) + // Measured: `1930` + // Estimated: `11506` + // Minimum execution time: 69_179_000 picoseconds. + Weight::from_parts(69_898_000, 11506) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 15ef136aa3800..b3457dd8d141c 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } @@ -39,5 +39,7 @@ std = [ "sp-runtime/std", "sp-std/std", ] +# Enable support for setting the existential deposit to zero. +insecure_zero_ed = [] runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/balances/README.md b/frame/balances/README.md index 93e424a89c721..dd56ab3fadfb5 100644 --- a/frame/balances/README.md +++ b/frame/balances/README.md @@ -70,7 +70,7 @@ given account is unused. ### Dispatchable Functions - `transfer` - Transfer some liquid free balance to another account. 
-- `set_balance` - Set the balances of a given account. The origin of this call must be root. +- `force_set_balance` - Set the balances of a given account. The origin of this call must be root. ## Usage diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index b36fe1e341de3..5641c68516c28 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -24,6 +24,8 @@ use crate::Pallet as Balances; use frame_benchmarking::v2::*; use frame_system::RawOrigin; +use sp_runtime::traits::Bounded; +use types::ExtraFlags; const SEED: u32 = 0; // existential deposit multiplier @@ -37,7 +39,7 @@ mod benchmarks { // * Transfer will kill the sender account. // * Transfer will create the recipient account. #[benchmark] - fn transfer() { + fn transfer_allow_death() { let existential_deposit = T::ExistentialDeposit::get(); let caller = whitelisted_caller(); @@ -79,7 +81,7 @@ mod benchmarks { let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); #[extrinsic_call] - transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); + transfer_allow_death(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); assert!(!Balances::::free_balance(&caller).is_zero()); assert!(!Balances::::free_balance(&recipient).is_zero()); @@ -106,9 +108,9 @@ mod benchmarks { assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } - // Benchmark `set_balance` coming from ROOT account. This always creates an account. + // Benchmark `force_set_balance` coming from ROOT account. This always creates an account. #[benchmark] - fn set_balance_creating() { + fn force_set_balance_creating() { let user: T::AccountId = account("user", 0, SEED); let user_lookup = T::Lookup::unlookup(user.clone()); @@ -118,15 +120,14 @@ mod benchmarks { let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); #[extrinsic_call] - set_balance(RawOrigin::Root, user_lookup, balance_amount, balance_amount); + force_set_balance(RawOrigin::Root, user_lookup, balance_amount); assert_eq!(Balances::::free_balance(&user), balance_amount); - assert_eq!(Balances::::reserved_balance(&user), balance_amount); } - // Benchmark `set_balance` coming from ROOT account. This always kills an account. + // Benchmark `force_set_balance` coming from ROOT account. This always kills an account. 
#[benchmark] - fn set_balance_killing() { + fn force_set_balance_killing() { let user: T::AccountId = account("user", 0, SEED); let user_lookup = T::Lookup::unlookup(user.clone()); @@ -136,7 +137,7 @@ mod benchmarks { let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); #[extrinsic_call] - set_balance(RawOrigin::Root, user_lookup, Zero::zero(), Zero::zero()); + force_set_balance(RawOrigin::Root, user_lookup, Zero::zero()); assert!(Balances::::free_balance(&user).is_zero()); } @@ -197,7 +198,7 @@ mod benchmarks { } #[extrinsic_call] - transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); + transfer_allow_death(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); assert_eq!(Balances::::free_balance(&caller), Zero::zero()); assert_eq!(Balances::::free_balance(&recipient), transfer_amount); @@ -230,27 +231,64 @@ mod benchmarks { let user_lookup = T::Lookup::unlookup(user.clone()); // Give some multiple of the existential deposit - let existential_deposit = T::ExistentialDeposit::get(); - let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + let ed = T::ExistentialDeposit::get(); + let balance = ed + ed; let _ = as Currency<_>>::make_free_balance_be(&user, balance); // Reserve the balance - as ReservableCurrency<_>>::reserve(&user, balance)?; - assert_eq!(Balances::::reserved_balance(&user), balance); - assert!(Balances::::free_balance(&user).is_zero()); + as ReservableCurrency<_>>::reserve(&user, ed)?; + assert_eq!(Balances::::reserved_balance(&user), ed); + assert_eq!(Balances::::free_balance(&user), ed); #[extrinsic_call] _(RawOrigin::Root, user_lookup, balance); assert!(Balances::::reserved_balance(&user).is_zero()); - assert_eq!(Balances::::free_balance(&user), balance); + assert_eq!(Balances::::free_balance(&user), ed + ed); Ok(()) } + #[benchmark] + fn upgrade_accounts(u: Linear<1, 1_000>) { + let caller: T::AccountId = whitelisted_caller(); + let who = (0..u) + .into_iter() + .map(|i| -> T::AccountId { + let user = account("old_user", i, SEED); + let account = AccountData { + free: T::ExistentialDeposit::get(), + reserved: T::ExistentialDeposit::get(), + frozen: Zero::zero(), + flags: ExtraFlags::old_logic(), + }; + frame_system::Pallet::::inc_providers(&user); + assert!(T::AccountStore::try_mutate_exists(&user, |a| -> DispatchResult { + *a = Some(account); + Ok(()) + }) + .is_ok()); + assert!(!Balances::::account(&user).flags.is_new_logic()); + assert_eq!(frame_system::Pallet::::providers(&user), 1); + assert_eq!(frame_system::Pallet::::consumers(&user), 0); + user + }) + .collect(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), who); + + for i in 0..u { + let user: T::AccountId = account("old_user", i, SEED); + assert!(Balances::::account(&user).flags.is_new_logic()); + assert_eq!(frame_system::Pallet::::providers(&user), 1); + assert_eq!(frame_system::Pallet::::consumers(&user), 1); + } + } + impl_benchmark_test_suite! { Balances, - crate::tests_composite::ExtBuilder::default().build(), - crate::tests_composite::Test, + crate::tests::ExtBuilder::default().build(), + crate::tests::Test, } } diff --git a/frame/balances/src/impl_currency.rs b/frame/balances/src/impl_currency.rs new file mode 100644 index 0000000000000..9f764a37b8b89 --- /dev/null +++ b/frame/balances/src/impl_currency.rs @@ -0,0 +1,908 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for the `Currency` family of traits. +//! +//! Note that `WithdrawReasons` are intentionally not used for anything in this implementation and +//! are expected to be removed in the near future, once migration to `fungible::*` traits is done. + +use super::*; +use frame_support::{ + ensure, + pallet_prelude::DispatchResult, + traits::{ + tokens::{fungible, BalanceStatus as Status, Fortitude::Polite, Precision::BestEffort}, + Currency, DefensiveSaturating, ExistenceRequirement, + ExistenceRequirement::AllowDeath, + Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency, + ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons, + }, +}; +pub use imbalances::{NegativeImbalance, PositiveImbalance}; + +// wrapping these imbalances in a private module is necessary to ensure absolute privacy +// of the inner member. +mod imbalances { + use super::{result, Config, Imbalance, RuntimeDebug, Saturating, TryDrop, Zero}; + use frame_support::traits::SameOrOther; + use sp_std::mem; + + /// Opaque, move-only struct with private fields that serves as a token denoting that + /// funds have been created without any equal and opposite accounting. + #[must_use] + #[derive(RuntimeDebug, PartialEq, Eq)] + pub struct PositiveImbalance, I: 'static = ()>(T::Balance); + + impl, I: 'static> PositiveImbalance { + /// Create a new positive imbalance from a balance. + pub fn new(amount: T::Balance) -> Self { + PositiveImbalance(amount) + } + } + + /// Opaque, move-only struct with private fields that serves as a token denoting that + /// funds have been destroyed without any equal and opposite accounting. + #[must_use] + #[derive(RuntimeDebug, PartialEq, Eq)] + pub struct NegativeImbalance, I: 'static = ()>(T::Balance); + + impl, I: 'static> NegativeImbalance { + /// Create a new negative imbalance from a balance. 
+ pub fn new(amount: T::Balance) -> Self { + NegativeImbalance(amount) + } + } + + impl, I: 'static> TryDrop for PositiveImbalance { + fn try_drop(self) -> result::Result<(), Self> { + self.drop_zero() + } + } + + impl, I: 'static> Default for PositiveImbalance { + fn default() -> Self { + Self::zero() + } + } + + impl, I: 'static> Imbalance for PositiveImbalance { + type Opposite = NegativeImbalance; + + fn zero() -> Self { + Self(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + + mem::forget(self); + (Self(first), Self(second)) + } + fn merge(mut self, other: Self) -> Self { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + + self + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + } + fn offset(self, other: Self::Opposite) -> SameOrOther { + let (a, b) = (self.0, other.0); + mem::forget((self, other)); + + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(NegativeImbalance::new(b - a)) + } else { + SameOrOther::None + } + } + fn peek(&self) -> T::Balance { + self.0 + } + } + + impl, I: 'static> TryDrop for NegativeImbalance { + fn try_drop(self) -> result::Result<(), Self> { + self.drop_zero() + } + } + + impl, I: 'static> Default for NegativeImbalance { + fn default() -> Self { + Self::zero() + } + } + + impl, I: 'static> Imbalance for NegativeImbalance { + type Opposite = PositiveImbalance; + + fn zero() -> Self { + Self(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + + mem::forget(self); + (Self(first), Self(second)) + } + fn merge(mut self, other: Self) -> Self { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + + self + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + } + fn offset(self, other: Self::Opposite) -> SameOrOther { + let (a, b) = (self.0, other.0); + mem::forget((self, other)); + + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(PositiveImbalance::new(b - a)) + } else { + SameOrOther::None + } + } + fn peek(&self) -> T::Balance { + self.0 + } + } + + impl, I: 'static> Drop for PositiveImbalance { + /// Basic drop handler will just square up the total issuance. + fn drop(&mut self) { + >::mutate(|v| *v = v.saturating_add(self.0)); + } + } + + impl, I: 'static> Drop for NegativeImbalance { + /// Basic drop handler will just square up the total issuance. + fn drop(&mut self) { + >::mutate(|v| *v = v.saturating_sub(self.0)); + } + } +} + +impl, I: 'static> Currency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, +{ + type Balance = T::Balance; + type PositiveImbalance = PositiveImbalance; + type NegativeImbalance = NegativeImbalance; + + fn total_balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).total() + } + + // Check if `value` amount of free balance can be slashed from `who`. 
+ fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { + if value.is_zero() { + return true + } + Self::free_balance(who) >= value + } + + fn total_issuance() -> Self::Balance { + TotalIssuance::::get() + } + + fn active_issuance() -> Self::Balance { + >::active_issuance() + } + + fn deactivate(amount: Self::Balance) { + >::deactivate(amount); + } + + fn reactivate(amount: Self::Balance) { + >::reactivate(amount); + } + + fn minimum_balance() -> Self::Balance { + T::ExistentialDeposit::get() + } + + // Burn funds from the total issuance, returning a positive imbalance for the amount burned. + // Is a no-op if amount to be burned is zero. + fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { + if amount.is_zero() { + return PositiveImbalance::zero() + } + >::mutate(|issued| { + *issued = issued.checked_sub(&amount).unwrap_or_else(|| { + amount = *issued; + Zero::zero() + }); + }); + PositiveImbalance::new(amount) + } + + // Create new funds into the total issuance, returning a negative imbalance + // for the amount issued. + // Is a no-op if amount to be issued it zero. + fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { + if amount.is_zero() { + return NegativeImbalance::zero() + } + >::mutate(|issued| { + *issued = issued.checked_add(&amount).unwrap_or_else(|| { + amount = Self::Balance::max_value() - *issued; + Self::Balance::max_value() + }) + }); + NegativeImbalance::new(amount) + } + + fn free_balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).free + } + + // Ensure that an account can withdraw from their free balance given any existing withdrawal + // restrictions like locks and vesting balance. + // Is a no-op if amount to be withdrawn is zero. + fn ensure_can_withdraw( + who: &T::AccountId, + amount: T::Balance, + _reasons: WithdrawReasons, + new_balance: T::Balance, + ) -> DispatchResult { + if amount.is_zero() { + return Ok(()) + } + ensure!(new_balance >= Self::account(who).frozen, Error::::LiquidityRestrictions); + Ok(()) + } + + // Transfer some free balance from `transactor` to `dest`, respecting existence requirements. + // Is a no-op if value to be transferred is zero or the `transactor` is the same as `dest`. + fn transfer( + transactor: &T::AccountId, + dest: &T::AccountId, + value: Self::Balance, + existence_requirement: ExistenceRequirement, + ) -> DispatchResult { + if value.is_zero() || transactor == dest { + return Ok(()) + } + let keep_alive = match existence_requirement { + ExistenceRequirement::KeepAlive => Preserve, + ExistenceRequirement::AllowDeath => Expendable, + }; + >::transfer(transactor, dest, value, keep_alive)?; + Ok(()) + } + + /// Slash a target account `who`, returning the negative imbalance created and any left over + /// amount that could not be slashed. + /// + /// Is a no-op if `value` to be slashed is zero or the account does not exist. + /// + /// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn + /// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid + /// having to draw from reserved funds, however we err on the side of punishment if things are + /// inconsistent or `can_slash` wasn't used appropriately. 
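A minimal sketch of the call pattern the note above describes, from a hypothetical consuming pallet whose `Config` binds `type Currency: Currency<Self::AccountId>` to this pallet (the names `BalanceOf`, `T::OnSlash` and `offender` are illustrative and not taken from this diff):

    // Hypothetical consuming-pallet code: check first so that, in the normal
    // case, the slash never has to draw on reserved funds.
    let penalty: BalanceOf<T> = 100u32.into();
    if T::Currency::can_slash(&offender, penalty) {
        // `_remaining` is whatever could not be slashed under the liveness rules.
        let (credit, _remaining) = T::Currency::slash(&offender, penalty);
        // Route the resulting negative imbalance to an `OnUnbalanced` handler.
        T::OnSlash::on_unbalanced(credit);
    }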
+ fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } + if Self::total_balance(who).is_zero() { + return (NegativeImbalance::zero(), value) + } + + let result = match Self::try_mutate_account_handling_dust( + who, + |account, _is_new| -> Result<(Self::NegativeImbalance, Self::Balance), DispatchError> { + // Best value is the most amount we can slash following liveness rules. + let ed = T::ExistentialDeposit::get(); + let actual = match system::Pallet::::can_dec_provider(who) { + true => value.min(account.free), + false => value.min(account.free.saturating_sub(ed)), + }; + account.free.saturating_reduce(actual); + let remaining = value.saturating_sub(actual); + Ok((NegativeImbalance::new(actual), remaining)) + }, + ) { + Ok((imbalance, remaining)) => { + Self::deposit_event(Event::Slashed { + who: who.clone(), + amount: value.saturating_sub(remaining), + }); + (imbalance, remaining) + }, + Err(_) => (Self::NegativeImbalance::zero(), value), + }; + result + } + + /// Deposit some `value` into the free balance of an existing target account `who`. + /// + /// Is a no-op if the `value` to be deposited is zero. + fn deposit_into_existing( + who: &T::AccountId, + value: Self::Balance, + ) -> Result { + if value.is_zero() { + return Ok(PositiveImbalance::zero()) + } + + Self::try_mutate_account_handling_dust( + who, + |account, is_new| -> Result { + ensure!(!is_new, Error::::DeadAccount); + account.free = account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + Self::deposit_event(Event::Deposit { who: who.clone(), amount: value }); + Ok(PositiveImbalance::new(value)) + }, + ) + } + + /// Deposit some `value` into the free balance of `who`, possibly creating a new account. + /// + /// This function is a no-op if: + /// - the `value` to be deposited is zero; or + /// - the `value` to be deposited is less than the required ED and the account does not yet + /// exist; or + /// - the deposit would necessitate the account to exist and there are no provider references; + /// or + /// - `value` is so large it would cause the balance of `who` to overflow. + fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { + if value.is_zero() { + return Self::PositiveImbalance::zero() + } + + Self::try_mutate_account_handling_dust( + who, + |account, is_new| -> Result { + let ed = T::ExistentialDeposit::get(); + ensure!(value >= ed || !is_new, Error::::ExistentialDeposit); + + // defensive only: overflow should never happen, however in case it does, then this + // operation is a no-op. + account.free = match account.free.checked_add(&value) { + Some(x) => x, + None => return Ok(Self::PositiveImbalance::zero()), + }; + + Self::deposit_event(Event::Deposit { who: who.clone(), amount: value }); + Ok(PositiveImbalance::new(value)) + }, + ) + .unwrap_or_else(|_| Self::PositiveImbalance::zero()) + } + + /// Withdraw some free balance from an account, respecting existence requirements. + /// + /// Is a no-op if value to be withdrawn is zero. 
+ fn withdraw( + who: &T::AccountId, + value: Self::Balance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> result::Result { + if value.is_zero() { + return Ok(NegativeImbalance::zero()) + } + + Self::try_mutate_account_handling_dust( + who, + |account, _| -> Result { + let new_free_account = + account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; + + // bail if we need to keep the account alive and this would kill it. + let ed = T::ExistentialDeposit::get(); + let would_be_dead = new_free_account < ed; + let would_kill = would_be_dead && account.free >= ed; + ensure!(liveness == AllowDeath || !would_kill, Error::::Expendability); + + Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; + + account.free = new_free_account; + + Self::deposit_event(Event::Withdraw { who: who.clone(), amount: value }); + Ok(NegativeImbalance::new(value)) + }, + ) + } + + /// Force the new free balance of a target account `who` to some new value `balance`. + fn make_free_balance_be( + who: &T::AccountId, + value: Self::Balance, + ) -> SignedImbalance { + Self::try_mutate_account_handling_dust( + who, + |account, + is_new| + -> Result, DispatchError> { + let ed = T::ExistentialDeposit::get(); + // If we're attempting to set an existing account to less than ED, then + // bypass the entire operation. It's a no-op if you follow it through, but + // since this is an instance where we might account for a negative imbalance + // (in the dust cleaner of set_account) before we account for its actual + // equal and opposite cause (returned as an Imbalance), then in the + // instance that there's no other accounts on the system at all, we might + // underflow the issuance and our arithmetic will be off. + ensure!(value >= ed || !is_new, Error::::ExistentialDeposit); + + let imbalance = if account.free <= value { + SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) + } else { + SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) + }; + account.free = value; + Self::deposit_event(Event::BalanceSet { who: who.clone(), free: account.free }); + Ok(imbalance) + }, + ) + .unwrap_or_else(|_| SignedImbalance::Positive(Self::PositiveImbalance::zero())) + } +} + +impl, I: 'static> ReservableCurrency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, +{ + /// Check if `who` can reserve `value` from their free balance. + /// + /// Always `true` if value to be reserved is zero. + fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { + if value.is_zero() { + return true + } + Self::account(who).free.checked_sub(&value).map_or(false, |new_balance| { + new_balance >= T::ExistentialDeposit::get() && + Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, new_balance) + .is_ok() + }) + } + + fn reserved_balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).reserved + } + + /// Move `value` from the free balance from `who` to their reserved balance. + /// + /// Is a no-op if value to be reserved is zero. 
+ fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { + if value.is_zero() { + return Ok(()) + } + + Self::try_mutate_account_handling_dust(who, |account, _| -> DispatchResult { + account.free = + account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; + account.reserved = + account.reserved.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + Self::ensure_can_withdraw(&who, value, WithdrawReasons::RESERVE, account.free) + })?; + + Self::deposit_event(Event::Reserved { who: who.clone(), amount: value }); + Ok(()) + } + + /// Unreserve some funds, returning any amount that was unable to be unreserved. + /// + /// Is a no-op if the value to be unreserved is zero or the account does not exist. + /// + /// NOTE: returns amount value which wasn't successfully unreserved. + fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { + if value.is_zero() { + return Zero::zero() + } + if Self::total_balance(who).is_zero() { + return value + } + + let actual = match Self::mutate_account_handling_dust(who, |account| { + let actual = cmp::min(account.reserved, value); + account.reserved -= actual; + // defensive only: this can never fail since total issuance which is at least + // free+reserved fits into the same data type. + account.free = account.free.defensive_saturating_add(actual); + actual + }) { + Ok(x) => x, + Err(_) => { + // This should never happen since we don't alter the total amount in the account. + // If it ever does, then we should fail gracefully though, indicating that nothing + // could be done. + return value + }, + }; + + Self::deposit_event(Event::Unreserved { who: who.clone(), amount: actual }); + value - actual + } + + /// Slash from reserved balance, returning the negative imbalance created, + /// and any amount that was unable to be slashed. + /// + /// Is a no-op if the value to be slashed is zero or the account does not exist. + fn slash_reserved( + who: &T::AccountId, + value: Self::Balance, + ) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } + if Self::total_balance(who).is_zero() { + return (NegativeImbalance::zero(), value) + } + + // NOTE: `mutate_account` may fail if it attempts to reduce the balance to the point that an + // account is attempted to be illegally destroyed. + + match Self::mutate_account_handling_dust(who, |account| { + let actual = value.min(account.reserved); + account.reserved.saturating_reduce(actual); + + // underflow should never happen, but it if does, there's nothing to be done here. + (NegativeImbalance::new(actual), value.saturating_sub(actual)) + }) { + Ok((imbalance, not_slashed)) => { + Self::deposit_event(Event::Slashed { + who: who.clone(), + amount: value.saturating_sub(not_slashed), + }); + (imbalance, not_slashed) + }, + Err(_) => (Self::NegativeImbalance::zero(), value), + } + } + + /// Move the reserved balance of one account into the balance of another, according to `status`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. + /// + /// This is `Polite` and thus will not repatriate any funds which would lead the total balance + /// to be less than the frozen amount. Returns `Ok` with the actual amount of funds moved, + /// which may be less than `value` since the operation is done an a `BestEffort` basis. 
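A minimal sketch of the reserve-then-repatriate flow this supports, again from a hypothetical consuming pallet, here binding `type Currency: ReservableCurrency<Self::AccountId>` to this pallet (`proposer`, `treasury_account` and `deposit` are illustrative names):

    // Take a deposit out of the proposer's free balance into reserve.
    T::Currency::reserve(&proposer, deposit)?;
    // ... later, move it into the treasury's free balance. Per the implementation
    // below, the `Ok` value is whatever portion could not be moved (best effort).
    let _unmoved = T::Currency::repatriate_reserved(
        &proposer,
        &treasury_account,
        deposit,
        BalanceStatus::Free,
    )?;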
+ fn repatriate_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: Self::Balance, + status: Status, + ) -> Result { + let actual = + Self::do_transfer_reserved(slashed, beneficiary, value, BestEffort, Polite, status)?; + Ok(value.saturating_sub(actual)) + } +} + +impl, I: 'static> NamedReservableCurrency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, +{ + type ReserveIdentifier = T::ReserveIdentifier; + + fn reserved_balance_named(id: &Self::ReserveIdentifier, who: &T::AccountId) -> Self::Balance { + let reserves = Self::reserves(who); + reserves + .binary_search_by_key(id, |data| data.id) + .map(|index| reserves[index].amount) + .unwrap_or_default() + } + + /// Move `value` from the free balance from `who` to a named reserve balance. + /// + /// Is a no-op if value to be reserved is zero. + fn reserve_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> DispatchResult { + if value.is_zero() { + return Ok(()) + } + + Reserves::::try_mutate(who, |reserves| -> DispatchResult { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + // this add can't overflow but just to be defensive. + reserves[index].amount = reserves[index].amount.defensive_saturating_add(value); + }, + Err(index) => { + reserves + .try_insert(index, ReserveData { id: *id, amount: value }) + .map_err(|_| Error::::TooManyReserves)?; + }, + }; + >::reserve(who, value)?; + Ok(()) + }) + } + + /// Unreserve some funds, returning any amount that was unable to be unreserved. + /// + /// Is a no-op if the value to be unreserved is zero. + fn unreserve_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> Self::Balance { + if value.is_zero() { + return Zero::zero() + } + + Reserves::::mutate_exists(who, |maybe_reserves| -> Self::Balance { + if let Some(reserves) = maybe_reserves.as_mut() { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let remain = >::unreserve(who, to_change); + + // remain should always be zero but just to be defensive here. + let actual = to_change.defensive_saturating_sub(remain); + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + if reserves[index].amount.is_zero() { + if reserves.len() == 1 { + // no more named reserves + *maybe_reserves = None; + } else { + // remove this named reserve + reserves.remove(index); + } + } + + value - actual + }, + Err(_) => value, + } + } else { + value + } + }) + } + + /// Slash from reserved balance, returning the negative imbalance created, + /// and any amount that was unable to be slashed. + /// + /// Is a no-op if the value to be slashed is zero. + fn slash_reserved_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } + + Reserves::::mutate(who, |reserves| -> (Self::NegativeImbalance, Self::Balance) { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let (imb, remain) = + >::slash_reserved(who, to_change); + + // remain should always be zero but just to be defensive here. 
+ let actual = to_change.defensive_saturating_sub(remain); + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + Self::deposit_event(Event::Slashed { who: who.clone(), amount: actual }); + (imb, value - actual) + }, + Err(_) => (NegativeImbalance::zero(), value), + } + }) + } + + /// Move the reserved balance of one account into the balance of another, according to `status`. + /// If `status` is `Reserved`, the balance will be reserved with given `id`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. + fn repatriate_reserved_named( + id: &Self::ReserveIdentifier, + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: Self::Balance, + status: Status, + ) -> Result { + if value.is_zero() { + return Ok(Zero::zero()) + } + + if slashed == beneficiary { + return match status { + Status::Free => Ok(Self::unreserve_named(id, slashed, value)), + Status::Reserved => + Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), + } + } + + Reserves::::try_mutate(slashed, |reserves| -> Result { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let actual = if status == Status::Reserved { + // make it the reserved under same identifier + Reserves::::try_mutate( + beneficiary, + |reserves| -> Result { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let remain = + >::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive + // here. + let actual = to_change.defensive_saturating_sub(remain); + + // this add can't overflow but just to be defensive. + reserves[index].amount = + reserves[index].amount.defensive_saturating_add(actual); + + Ok(actual) + }, + Err(index) => { + let remain = + >::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive + // here + let actual = to_change.defensive_saturating_sub(remain); + + reserves + .try_insert( + index, + ReserveData { id: *id, amount: actual }, + ) + .map_err(|_| Error::::TooManyReserves)?; + + Ok(actual) + }, + } + }, + )? + } else { + let remain = >::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive here + to_change.defensive_saturating_sub(remain) + }; + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + Ok(value - actual) + }, + Err(_) => Ok(value), + } + }) + } +} + +impl, I: 'static> LockableCurrency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, +{ + type Moment = T::BlockNumber; + + type MaxLocks = T::MaxLocks; + + // Set a lock on the balance of `who`. + // Is a no-op if lock amount is zero or `reasons` `is_none()`. + fn set_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + if amount.is_zero() || reasons.is_empty() { + return + } + let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + Self::update_locks(who, &locks[..]); + } + + // Extend a lock on the balance of `who`. 
+ // Is a no-op if lock amount is zero or `reasons` `is_none()`. + fn extend_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + if amount.is_zero() || reasons.is_empty() { + return + } + let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| { + if l.id == id { + new_lock.take().map(|nl| BalanceLock { + id: l.id, + amount: l.amount.max(nl.amount), + reasons: l.reasons | nl.reasons, + }) + } else { + Some(l) + } + }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + Self::update_locks(who, &locks[..]); + } + + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { + let mut locks = Self::locks(who); + locks.retain(|l| l.id != id); + Self::update_locks(who, &locks[..]); + } +} diff --git a/frame/balances/src/impl_fungible.rs b/frame/balances/src/impl_fungible.rs new file mode 100644 index 0000000000000..f8f8fe17ae0ef --- /dev/null +++ b/frame/balances/src/impl_fungible.rs @@ -0,0 +1,358 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of `fungible` traits for Balances pallet. +use super::*; +use frame_support::traits::tokens::{ + Fortitude, + Preservation::{self, Preserve, Protect}, + Provenance::{self, Minted}, +}; + +impl, I: 'static> fungible::Inspect for Pallet { + type Balance = T::Balance; + + fn total_issuance() -> Self::Balance { + TotalIssuance::::get() + } + fn active_issuance() -> Self::Balance { + TotalIssuance::::get().saturating_sub(InactiveIssuance::::get()) + } + fn minimum_balance() -> Self::Balance { + T::ExistentialDeposit::get() + } + fn total_balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).total() + } + fn balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).free + } + fn reducible_balance( + who: &T::AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance { + let a = Self::account(who); + let mut untouchable = Zero::zero(); + if force == Polite { + // Frozen balance applies to total. Anything on hold therefore gets discounted from the + // limit given by the freezes. + untouchable = a.frozen.saturating_sub(a.reserved); + } + // If we want to keep our provider ref.. + if preservation == Preserve + // ..or we don't want the account to die and our provider ref is needed for it to live.. + || preservation == Protect && !a.free.is_zero() && + frame_system::Pallet::::providers(who) == 1 + // ..or we don't care about the account dying but our provider ref is required.. + || preservation == Expendable && !a.free.is_zero() && + !frame_system::Pallet::::can_dec_provider(who) + { + // ..then the ED needed.. + untouchable = untouchable.max(T::ExistentialDeposit::get()); + } + // Liquid balance is what is neither on hold nor frozen/required for provider. 
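		// Illustrative numbers (not from this diff): with ED = 1, free = 10,
		// reserved = 5 and frozen = 8, a `Polite` query that must `Preserve` the
		// account gives untouchable = max(frozen - reserved, ED) = max(3, 1) = 3,
		// so the liquid balance returned below is free - untouchable = 7.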
+ a.free.saturating_sub(untouchable) + } + fn can_deposit( + who: &T::AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence { + if amount.is_zero() { + return DepositConsequence::Success + } + + if provenance == Minted && TotalIssuance::::get().checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + + let account = Self::account(who); + let new_free = match account.free.checked_add(&amount) { + None => return DepositConsequence::Overflow, + Some(x) if x < T::ExistentialDeposit::get() => return DepositConsequence::BelowMinimum, + Some(x) => x, + }; + + match account.reserved.checked_add(&new_free) { + Some(_) => {}, + None => return DepositConsequence::Overflow, + }; + + // NOTE: We assume that we are a provider, so don't need to do any checks in the + // case of account creation. + + DepositConsequence::Success + } + fn can_withdraw( + who: &T::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + if amount.is_zero() { + return WithdrawConsequence::Success + } + + if TotalIssuance::::get().checked_sub(&amount).is_none() { + return WithdrawConsequence::Underflow + } + + let account = Self::account(who); + let new_free_balance = match account.free.checked_sub(&amount) { + Some(x) => x, + None => return WithdrawConsequence::BalanceLow, + }; + + let liquid = Self::reducible_balance(who, Expendable, Polite); + if amount > liquid { + return WithdrawConsequence::Frozen + } + + // Provider restriction - total account balance cannot be reduced to zero if it cannot + // sustain the loss of a provider reference. + // NOTE: This assumes that the pallet is a provider (which is true). Is this ever changes, + // then this will need to adapt accordingly. + let ed = T::ExistentialDeposit::get(); + let success = if new_free_balance < ed { + if frame_system::Pallet::::can_dec_provider(who) { + WithdrawConsequence::ReducedToZero(new_free_balance) + } else { + return WithdrawConsequence::WouldDie + } + } else { + WithdrawConsequence::Success + }; + + let new_total_balance = new_free_balance.saturating_add(account.reserved); + + // Eventual free funds must be no less than the frozen balance. + if new_total_balance < account.frozen { + return WithdrawConsequence::Frozen + } + + success + } +} + +impl, I: 'static> fungible::Unbalanced for Pallet { + fn handle_dust(dust: fungible::Dust) { + T::DustRemoval::on_unbalanced(dust.into_credit()); + } + fn write_balance( + who: &T::AccountId, + amount: Self::Balance, + ) -> Result, DispatchError> { + let max_reduction = + >::reducible_balance(who, Expendable, Force); + let (result, maybe_dust) = Self::mutate_account(who, |account| -> DispatchResult { + // Make sure the reduction (if there is one) is no more than the maximum allowed. 
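		// Illustrative numbers (not from this diff): writing amount = 70 over an
		// existing free = 100 is a reduction of 30, which is only allowed if at
		// least 30 is reducible under `Expendable` + `Force`; otherwise the write
		// fails with `InsufficientBalance`.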
+ let reduction = account.free.saturating_sub(amount); + ensure!(reduction <= max_reduction, Error::::InsufficientBalance); + + account.free = amount; + Ok(()) + })?; + result?; + Ok(maybe_dust) + } + + fn set_total_issuance(amount: Self::Balance) { + TotalIssuance::::mutate(|t| *t = amount); + } + + fn deactivate(amount: Self::Balance) { + InactiveIssuance::::mutate(|b| b.saturating_accrue(amount)); + } + + fn reactivate(amount: Self::Balance) { + InactiveIssuance::::mutate(|b| b.saturating_reduce(amount)); + } +} + +impl, I: 'static> fungible::Mutate for Pallet { + fn done_mint_into(who: &T::AccountId, amount: Self::Balance) { + Self::deposit_event(Event::::Minted { who: who.clone(), amount }); + } + fn done_burn_from(who: &T::AccountId, amount: Self::Balance) { + Self::deposit_event(Event::::Burned { who: who.clone(), amount }); + } + fn done_shelve(who: &T::AccountId, amount: Self::Balance) { + Self::deposit_event(Event::::Suspended { who: who.clone(), amount }); + } + fn done_restore(who: &T::AccountId, amount: Self::Balance) { + Self::deposit_event(Event::::Restored { who: who.clone(), amount }); + } + fn done_transfer(source: &T::AccountId, dest: &T::AccountId, amount: Self::Balance) { + Self::deposit_event(Event::::Transfer { + from: source.clone(), + to: dest.clone(), + amount, + }); + } +} + +impl, I: 'static> fungible::MutateHold for Pallet {} + +impl, I: 'static> fungible::InspectHold for Pallet { + type Reason = T::HoldIdentifier; + + fn total_balance_on_hold(who: &T::AccountId) -> T::Balance { + Self::account(who).reserved + } + fn reducible_total_balance_on_hold(who: &T::AccountId, force: Fortitude) -> Self::Balance { + // The total balance must never drop below the freeze requirements if we're not forcing: + let a = Self::account(who); + let unavailable = if force == Force { + Self::Balance::zero() + } else { + // The freeze lock applies to the total balance, so we can discount the free balance + // from the amount which the total reserved balance must provide to satisfy it. + a.frozen.saturating_sub(a.free) + }; + a.reserved.saturating_sub(unavailable) + } + fn balance_on_hold(reason: &Self::Reason, who: &T::AccountId) -> T::Balance { + Holds::::get(who) + .iter() + .find(|x| &x.id == reason) + .map_or_else(Zero::zero, |x| x.amount) + } + fn hold_available(reason: &Self::Reason, who: &T::AccountId) -> bool { + if frame_system::Pallet::::providers(who) == 0 { + return false + } + let holds = Holds::::get(who); + if holds.is_full() && !holds.iter().any(|x| &x.id == reason) { + return false + } + true + } +} + +impl, I: 'static> fungible::UnbalancedHold for Pallet { + fn set_balance_on_hold( + reason: &Self::Reason, + who: &T::AccountId, + amount: Self::Balance, + ) -> DispatchResult { + let mut new_account = Self::account(who); + let mut holds = Holds::::get(who); + let mut increase = true; + let mut delta = amount; + + if let Some(item) = holds.iter_mut().find(|x| &x.id == reason) { + delta = item.amount.max(amount) - item.amount.min(amount); + increase = amount > item.amount; + item.amount = amount; + holds.retain(|x| !x.amount.is_zero()); + } else { + if !amount.is_zero() { + holds + .try_push(IdAmount { id: *reason, amount }) + .map_err(|_| Error::::TooManyHolds)?; + } + } + + new_account.reserved = if increase { + new_account.reserved.checked_add(&delta).ok_or(ArithmeticError::Overflow)? + } else { + new_account.reserved.checked_sub(&delta).ok_or(ArithmeticError::Underflow)? 
+ }; + + let (result, maybe_dust) = Self::try_mutate_account(who, |a, _| -> DispatchResult { + *a = new_account; + Ok(()) + })?; + debug_assert!( + maybe_dust.is_none(), + "Does not alter main balance; dust only happens when it is altered; qed" + ); + Holds::::insert(who, holds); + Ok(result) + } +} + +impl, I: 'static> fungible::InspectFreeze for Pallet { + type Id = T::FreezeIdentifier; + + fn balance_frozen(id: &Self::Id, who: &T::AccountId) -> Self::Balance { + let locks = Freezes::::get(who); + locks.into_iter().find(|l| &l.id == id).map_or(Zero::zero(), |l| l.amount) + } + + fn can_freeze(id: &Self::Id, who: &T::AccountId) -> bool { + let l = Freezes::::get(who); + !l.is_full() || l.iter().any(|x| &x.id == id) + } +} + +impl, I: 'static> fungible::MutateFreeze for Pallet { + fn set_freeze(id: &Self::Id, who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { + return Self::thaw(id, who) + } + let mut locks = Freezes::::get(who); + if let Some(i) = locks.iter_mut().find(|x| &x.id == id) { + i.amount = amount; + } else { + locks + .try_push(IdAmount { id: *id, amount }) + .map_err(|_| Error::::TooManyFreezes)?; + } + Self::update_freezes(who, locks.as_bounded_slice()) + } + + fn extend_freeze(id: &Self::Id, who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { + return Ok(()) + } + let mut locks = Freezes::::get(who); + if let Some(i) = locks.iter_mut().find(|x| &x.id == id) { + i.amount = i.amount.max(amount); + } else { + locks + .try_push(IdAmount { id: *id, amount }) + .map_err(|_| Error::::TooManyFreezes)?; + } + Self::update_freezes(who, locks.as_bounded_slice()) + } + + fn thaw(id: &Self::Id, who: &T::AccountId) -> DispatchResult { + let mut locks = Freezes::::get(who); + locks.retain(|l| &l.id != id); + Self::update_freezes(who, locks.as_bounded_slice()) + } +} + +impl, I: 'static> fungible::Balanced for Pallet { + type OnDropCredit = fungible::DecreaseIssuance; + type OnDropDebt = fungible::IncreaseIssuance; + + fn done_deposit(who: &T::AccountId, amount: Self::Balance) { + Self::deposit_event(Event::::Deposit { who: who.clone(), amount }); + } + fn done_withdraw(who: &T::AccountId, amount: Self::Balance) { + Self::deposit_event(Event::::Withdraw { who: who.clone(), amount }); + } + fn done_issue(amount: Self::Balance) { + Self::deposit_event(Event::::Issued { amount }); + } + fn done_rescind(amount: Self::Balance) { + Self::deposit_event(Event::::Rescinded { amount }); + } +} + +impl, I: 'static> fungible::BalancedHold for Pallet {} diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index ecd7e171e39ec..b3dc77a9b879d 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -89,8 +89,9 @@ //! //! ### Dispatchable Functions //! -//! - `transfer` - Transfer some liquid free balance to another account. -//! - `set_balance` - Set the balances of a given account. The origin of this call must be root. +//! - `transfer_allow_death` - Transfer some liquid free balance to another account. +//! - `force_set_balance` - Set the balances of a given account. The origin of this call must be +//! root. //! //! ## Usage //! @@ -150,45 +151,49 @@ //! ## Assumptions //! //! * Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. +//! * Existential Deposit is set to a value greater than zero. +//! +//! Note, you may find the Balances pallet still functions with an ED of zero in some circumstances, +//! 
however this is not a configuration which is generally supported, nor will it be. #![cfg_attr(not(feature = "std"), no_std)] - -#[macro_use] -mod tests; mod benchmarking; +mod impl_currency; +mod impl_fungible; pub mod migration; -mod tests_composite; -mod tests_local; -#[cfg(test)] -mod tests_reentrancy; +mod tests; +mod types; pub mod weights; -pub use self::imbalances::{NegativeImbalance, PositiveImbalance}; -use codec::{Codec, Decode, Encode, MaxEncodedLen}; +use codec::{Codec, MaxEncodedLen}; #[cfg(feature = "std")] use frame_support::traits::GenesisBuild; use frame_support::{ ensure, pallet_prelude::DispatchResult, traits::{ - tokens::{fungible, BalanceStatus as Status, DepositConsequence, WithdrawConsequence}, - Currency, DefensiveSaturating, ExistenceRequirement, - ExistenceRequirement::{AllowDeath, KeepAlive}, - Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency, OnUnbalanced, - ReservableCurrency, SignedImbalance, StoredMap, TryDrop, WithdrawReasons, + tokens::{ + fungible, BalanceStatus as Status, DepositConsequence, + Fortitude::{self, Force, Polite}, + Preservation::{Expendable, Preserve, Protect}, + WithdrawConsequence, + }, + Currency, Defensive, Get, OnUnbalanced, ReservableCurrency, StoredMap, }, - WeakBoundedVec, + BoundedSlice, WeakBoundedVec, }; use frame_system as system; +pub use impl_currency::{NegativeImbalance, PositiveImbalance}; use scale_info::TypeInfo; use sp_runtime::{ traits::{ AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Saturating, StaticLookup, Zero, }, - ArithmeticError, DispatchError, FixedPointOperand, RuntimeDebug, + ArithmeticError, DispatchError, FixedPointOperand, Perbill, RuntimeDebug, TokenError, }; -use sp_std::{cmp, fmt::Debug, mem, ops::BitOr, prelude::*, result}; +use sp_std::{cmp, fmt::Debug, mem, prelude::*, result}; +pub use types::{AccountData, BalanceLock, DustCleaner, IdAmount, Reasons, ReserveData}; pub use weights::WeightInfo; pub use pallet::*; @@ -200,11 +205,23 @@ type AccountIdLookupOf = <::Lookup as StaticLookup #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{ + pallet_prelude::*, + traits::{fungible::Credit, tokens::Precision}, + }; use frame_system::pallet_prelude::*; + pub type CreditOf = Credit<::AccountId, Pallet>; + #[pallet::config] pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + /// The balance of an account. type Balance: Parameter + Member @@ -219,21 +236,32 @@ pub mod pallet { + FixedPointOperand; /// Handler for the unbalanced reduction when removing a dust account. - type DustRemoval: OnUnbalanced>; - - /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type DustRemoval: OnUnbalanced>; - /// The minimum amount required to keep an account open. + /// The minimum amount required to keep an account open. MUST BE GREATER THAN ZERO! + /// + /// If you *really* need it to be zero, you can enable the feature `insecure_zero_ed` for + /// this pallet. However, you do so at your own risk: this will open up a major DoS vector. + /// In case you have multiple sources of provider references, you may also get unexpected + /// behaviour if you set this to zero. + /// + /// Bottom line: Do yourself a favour and make it at least one! 
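A minimal sketch of how a runtime typically satisfies this requirement (the concrete value and the `Balance`/`Runtime` names are illustrative, and the remaining `Config` items are elided):

    parameter_types! {
        // Any non-zero value will do; one unit is the smallest safe choice.
        pub const ExistentialDeposit: Balance = 1;
    }

    impl pallet_balances::Config for Runtime {
        type ExistentialDeposit = ExistentialDeposit;
        // ... other associated types elided ...
    }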
#[pallet::constant] type ExistentialDeposit: Get; /// The means of storing the balances of an account. type AccountStore: StoredMap>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; + /// The ID type for reserves. + /// + /// Use of reserves is deprecated in favour of holds. See `https://github.com/paritytech/substrate/pull/12951/` + type ReserveIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; + + /// The ID type for holds. + type HoldIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; + + /// The ID type for freezes. + type FreezeIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; /// The maximum number of locks that should exist on an account. /// Not strictly enforced, but used for weight estimation. @@ -244,8 +272,13 @@ pub mod pallet { #[pallet::constant] type MaxReserves: Get; - /// The id type for named reserves. - type ReserveIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; + /// The maximum number of holds that can exist on an account at any time. + #[pallet::constant] + type MaxHolds: Get; + + /// The maximum number of individual freeze locks that can exist on an account at any time. + #[pallet::constant] + type MaxFreezes: Get; } /// The current storage version. @@ -256,197 +289,6 @@ pub mod pallet { #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData<(T, I)>); - #[pallet::call] - impl, I: 'static> Pallet { - /// Transfer some liquid free balance to another account. - /// - /// `transfer` will set the `FreeBalance` of the sender and receiver. - /// If the sender's account is below the existential deposit as a result - /// of the transfer, the account will be reaped. - /// - /// The dispatch origin for this call must be `Signed` by the transactor. - /// - /// ## Complexity - /// - Dependent on arguments but not critical, given proper implementations for input config - /// types. See related functions below. - /// - It contains a limited number of reads and writes internally and no complex - /// computation. - /// - /// Related functions: - /// - /// - `ensure_can_withdraw` is always called internally but has a bounded complexity. - /// - Transferring balances to accounts that did not exist before will cause - /// `T::OnNewAccount::on_new_account` to be called. - /// - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`. - /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional check - /// that the transfer will not kill the origin account. - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::transfer())] - pub fn transfer( - origin: OriginFor, - dest: AccountIdLookupOf, - #[pallet::compact] value: T::Balance, - ) -> DispatchResultWithPostInfo { - let transactor = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer( - &transactor, - &dest, - value, - ExistenceRequirement::AllowDeath, - )?; - Ok(().into()) - } - - /// Set the balances of a given account. - /// - /// This will alter `FreeBalance` and `ReservedBalance` in storage. it will - /// also alter the total issuance of the system (`TotalIssuance`) appropriately. - /// If the new free or reserved balance is below the existential deposit, - /// it will reset the account nonce (`frame_system::AccountNonce`). - /// - /// The dispatch origin for this call is `root`. - #[pallet::call_index(1)] - #[pallet::weight( - T::WeightInfo::set_balance_creating() // Creates a new account. 
- .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. - )] - pub fn set_balance( - origin: OriginFor, - who: AccountIdLookupOf, - #[pallet::compact] new_free: T::Balance, - #[pallet::compact] new_reserved: T::Balance, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - let existential_deposit = T::ExistentialDeposit::get(); - - let wipeout = new_free + new_reserved < existential_deposit; - let new_free = if wipeout { Zero::zero() } else { new_free }; - let new_reserved = if wipeout { Zero::zero() } else { new_reserved }; - - // First we try to modify the account's balance to the forced balance. - let (old_free, old_reserved) = Self::mutate_account(&who, |account| { - let old_free = account.free; - let old_reserved = account.reserved; - - account.free = new_free; - account.reserved = new_reserved; - - (old_free, old_reserved) - })?; - - // This will adjust the total issuance, which was not done by the `mutate_account` - // above. - if new_free > old_free { - mem::drop(PositiveImbalance::::new(new_free - old_free)); - } else if new_free < old_free { - mem::drop(NegativeImbalance::::new(old_free - new_free)); - } - - if new_reserved > old_reserved { - mem::drop(PositiveImbalance::::new(new_reserved - old_reserved)); - } else if new_reserved < old_reserved { - mem::drop(NegativeImbalance::::new(old_reserved - new_reserved)); - } - - Self::deposit_event(Event::BalanceSet { who, free: new_free, reserved: new_reserved }); - Ok(().into()) - } - - /// Exactly as `transfer`, except the origin must be root and the source account may be - /// specified. - /// ## Complexity - /// - Same as transfer, but additional read and write because the source account is not - /// assumed to be in the overlay. - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::force_transfer())] - pub fn force_transfer( - origin: OriginFor, - source: AccountIdLookupOf, - dest: AccountIdLookupOf, - #[pallet::compact] value: T::Balance, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - let source = T::Lookup::lookup(source)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer( - &source, - &dest, - value, - ExistenceRequirement::AllowDeath, - )?; - Ok(().into()) - } - - /// Same as the [`transfer`] call, but with a check that the transfer will not kill the - /// origin account. - /// - /// 99% of the time you want [`transfer`] instead. - /// - /// [`transfer`]: struct.Pallet.html#method.transfer - #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::transfer_keep_alive())] - pub fn transfer_keep_alive( - origin: OriginFor, - dest: AccountIdLookupOf, - #[pallet::compact] value: T::Balance, - ) -> DispatchResultWithPostInfo { - let transactor = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&transactor, &dest, value, KeepAlive)?; - Ok(().into()) - } - - /// Transfer the entire transferable balance from the caller account. - /// - /// NOTE: This function only attempts to transfer _transferable_ balances. This means that - /// any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be - /// transferred by this function. To ensure that this function results in a killed account, - /// you might need to prepare the account by removing any reference counters, storage - /// deposits, etc... - /// - /// The dispatch origin of this call must be Signed. - /// - /// - `dest`: The recipient of the transfer. 
- /// - `keep_alive`: A boolean to determine if the `transfer_all` operation should send all - /// of the funds the account has, causing the sender account to be killed (false), or - /// transfer everything except at least the existential deposit, which will guarantee to - /// keep the sender account alive (true). ## Complexity - /// - O(1). Just like transfer, but reading the user's transferable balance first. - #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::transfer_all())] - pub fn transfer_all( - origin: OriginFor, - dest: AccountIdLookupOf, - keep_alive: bool, - ) -> DispatchResult { - use fungible::Inspect; - let transactor = ensure_signed(origin)?; - let reducible_balance = Self::reducible_balance(&transactor, keep_alive); - let dest = T::Lookup::lookup(dest)?; - let keep_alive = if keep_alive { KeepAlive } else { AllowDeath }; - >::transfer(&transactor, &dest, reducible_balance, keep_alive)?; - Ok(()) - } - - /// Unreserve some balance from a user by force. - /// - /// Can only be called by ROOT. - #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::force_unreserve())] - pub fn force_unreserve( - origin: OriginFor, - who: AccountIdLookupOf, - amount: T::Balance, - ) -> DispatchResult { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - let _leftover = >::unreserve(&who, amount); - Ok(()) - } - } - #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -458,7 +300,7 @@ pub mod pallet { /// Transfer succeeded. Transfer { from: T::AccountId, to: T::AccountId, amount: T::Balance }, /// A balance was set by root. - BalanceSet { who: T::AccountId, free: T::Balance, reserved: T::Balance }, + BalanceSet { who: T::AccountId, free: T::Balance }, /// Some balance was reserved (moved from free to reserved). Reserved { who: T::AccountId, amount: T::Balance }, /// Some balance was unreserved (moved from reserved to free). @@ -477,26 +319,52 @@ pub mod pallet { Withdraw { who: T::AccountId, amount: T::Balance }, /// Some amount was removed from the account (e.g. for misbehavior). Slashed { who: T::AccountId, amount: T::Balance }, + /// Some amount was minted into an account. + Minted { who: T::AccountId, amount: T::Balance }, + /// Some amount was burned from an account. + Burned { who: T::AccountId, amount: T::Balance }, + /// Some amount was suspended from an account (it can be restored later). + Suspended { who: T::AccountId, amount: T::Balance }, + /// Some amount was restored into an account. + Restored { who: T::AccountId, amount: T::Balance }, + /// An account was upgraded. + Upgraded { who: T::AccountId }, + /// Total issuance was increased by `amount`, creating a credit to be balanced. + Issued { amount: T::Balance }, + /// Total issuance was decreased by `amount`, creating a debt to be balanced. + Rescinded { amount: T::Balance }, + /// Some balance was locked. + Locked { who: T::AccountId, amount: T::Balance }, + /// Some balance was unlocked. + Unlocked { who: T::AccountId, amount: T::Balance }, + /// Some balance was frozen. + Frozen { who: T::AccountId, amount: T::Balance }, + /// Some balance was thawed. + Thawed { who: T::AccountId, amount: T::Balance }, } #[pallet::error] pub enum Error { - /// Vesting balance too high to send value + /// Vesting balance too high to send value. VestingBalance, - /// Account liquidity restrictions prevent withdrawal + /// Account liquidity restrictions prevent withdrawal. LiquidityRestrictions, /// Balance too low to send value. 
InsufficientBalance, - /// Value too low to create account due to existential deposit + /// Value too low to create account due to existential deposit. ExistentialDeposit, - /// Transfer/payment would kill account - KeepAlive, - /// A vesting schedule already exists for this account + /// Transfer/payment would kill account. + Expendability, + /// A vesting schedule already exists for this account. ExistingVestingSchedule, - /// Beneficiary account must pre-exist + /// Beneficiary account must pre-exist. DeadAccount, - /// Number of named reserves exceed MaxReserves + /// Number of named reserves exceed `MaxReserves`. TooManyReserves, + /// Number of holds exceed `MaxHolds`. + TooManyHolds, + /// Number of freezes exceed `MaxFreezes`. + TooManyFreezes, } /// The total units issued in the system. @@ -563,6 +431,26 @@ pub mod pallet { ValueQuery, >; + /// Holds on account balances. + #[pallet::storage] + pub type Holds, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + BoundedVec, T::MaxHolds>, + ValueQuery, + >; + + /// Freeze locks on account balances. + #[pallet::storage] + pub type Freezes, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + BoundedVec, T::MaxFreezes>, + ValueQuery, + >; + #[pallet::genesis_config] pub struct GenesisConfig, I: 'static = ()> { pub balances: Vec<(T::AccountId, T::Balance)>, @@ -579,6 +467,7 @@ pub mod pallet { impl, I: 'static> GenesisBuild for GenesisConfig { fn build(&self) { let total = self.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n); + >::put(total); for (_, balance) in &self.balances { @@ -602,1601 +491,697 @@ pub mod pallet { ); for &(ref who, free) in self.balances.iter() { + frame_system::Pallet::::inc_providers(who); assert!(T::AccountStore::insert(who, AccountData { free, ..Default::default() }) .is_ok()); } } } -} - -#[cfg(feature = "std")] -impl, I: 'static> GenesisConfig { - /// Direct implementation of `GenesisBuild::build_storage`. - /// - /// Kept in order not to break dependency. - pub fn build_storage(&self) -> Result { - >::build_storage(self) - } - - /// Direct implementation of `GenesisBuild::assimilate_storage`. - /// - /// Kept in order not to break dependency. - pub fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { - >::assimilate_storage(self, storage) - } -} - -/// Simplified reasons for withdrawing balance. -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub enum Reasons { - /// Paying system transaction fees. - Fee = 0, - /// Any reason other than paying system transaction fees. - Misc = 1, - /// Any reason at all. - All = 2, -} -impl From for Reasons { - fn from(r: WithdrawReasons) -> Reasons { - if r == WithdrawReasons::TRANSACTION_PAYMENT { - Reasons::Fee - } else if r.contains(WithdrawReasons::TRANSACTION_PAYMENT) { - Reasons::All - } else { - Reasons::Misc + #[cfg(feature = "std")] + impl, I: 'static> GenesisConfig { + /// Direct implementation of `GenesisBuild::build_storage`. + /// + /// Kept in order not to break dependency. + pub fn build_storage(&self) -> Result { + >::build_storage(self) } - } -} -impl BitOr for Reasons { - type Output = Reasons; - fn bitor(self, other: Reasons) -> Reasons { - if self == other { - return self + /// Direct implementation of `GenesisBuild::assimilate_storage`. + /// + /// Kept in order not to break dependency. 
+ pub fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { + >::assimilate_storage(self, storage) } - Reasons::All } -} - -/// A single lock on a balance. There can be many of these on an account and they "overlap", so the -/// same balance is frozen by multiple locks. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct BalanceLock { - /// An identifier for this lock. Only one lock may be in existence for each identifier. - pub id: LockIdentifier, - /// The amount which the free balance may not drop below when this lock is in effect. - pub amount: Balance, - /// If true, then the lock remains in effect even for payment of transaction fees. - pub reasons: Reasons, -} - -/// Store named reserved balance. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct ReserveData { - /// The identifier for the named reserve. - pub id: ReserveIdentifier, - /// The amount of the named reserve. - pub amount: Balance, -} - -/// All balance information for an account. -#[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct AccountData { - /// Non-reserved part of the balance. There may still be restrictions on this, but it is the - /// total pool what may in principle be transferred, reserved and used for tipping. - /// - /// This is the only balance that matters in terms of most operations on tokens. It - /// alone is used to determine the balance when in the contract execution environment. - pub free: Balance, - /// Balance which is reserved and may not be used at all. - /// - /// This can still get slashed, but gets slashed last of all. - /// - /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens - /// that are still 'owned' by the account holder, but which are suspendable. - /// This includes named reserve and unnamed reserve. - pub reserved: Balance, - /// The amount that `free` may not drop below when withdrawing for *anything except transaction - /// fee payment*. - pub misc_frozen: Balance, - /// The amount that `free` may not drop below when withdrawing specifically for transaction - /// fee payment. - pub fee_frozen: Balance, -} -impl AccountData { - /// How much this account's balance can be reduced for the given `reasons`. - fn usable(&self, reasons: Reasons) -> Balance { - self.free.saturating_sub(self.frozen(reasons)) - } - /// The amount that this account's free balance may not be reduced beyond for the given - /// `reasons`. - fn frozen(&self, reasons: Reasons) -> Balance { - match reasons { - Reasons::All => self.misc_frozen.max(self.fee_frozen), - Reasons::Misc => self.misc_frozen, - Reasons::Fee => self.fee_frozen, + #[pallet::hooks] + impl, I: 'static> Hooks for Pallet { + #[cfg(not(feature = "insecure_zero_ed"))] + fn integrity_test() { + assert!( + !>::ExistentialDeposit::get().is_zero(), + "The existential deposit must be greater than zero!" + ); } } - /// The total balance in this account including any that is reserved and ignoring any frozen. 
- fn total(&self) -> Balance { - self.free.saturating_add(self.reserved) - } -} - -pub struct DustCleaner, I: 'static = ()>( - Option<(T::AccountId, NegativeImbalance)>, -); -impl, I: 'static> Drop for DustCleaner { - fn drop(&mut self) { - if let Some((who, dust)) = self.0.take() { - Pallet::::deposit_event(Event::DustLost { account: who, amount: dust.peek() }); - T::DustRemoval::on_unbalanced(dust); + #[pallet::call(weight(>::WeightInfo))] + impl, I: 'static> Pallet { + /// Transfer some liquid free balance to another account. + /// + /// `transfer_allow_death` will set the `FreeBalance` of the sender and receiver. + /// If the sender's account is below the existential deposit as a result + /// of the transfer, the account will be reaped. + /// + /// The dispatch origin for this call must be `Signed` by the transactor. + #[pallet::call_index(0)] + pub fn transfer_allow_death( + origin: OriginFor, + dest: AccountIdLookupOf, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + let source = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&source, &dest, value, Expendable)?; + Ok(().into()) } - } -} - -impl, I: 'static> Pallet { - /// Get the free balance of an account. - pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).free - } - - /// Get the balance of an account that can be used for transfers, reservations, or any other - /// non-locking, non-transaction-fee activity. Will be at most `free_balance`. - pub fn usable_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).usable(Reasons::Misc) - } - /// Get the balance of an account that can be used for paying transaction fees (not tipping, - /// or any other kind of fees, though). Will be at most `free_balance`. - pub fn usable_balance_for_fees(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).usable(Reasons::Fee) - } + /// Set the regular balance of a given account; it also takes a reserved balance but this + /// must be the same as the account's current reserved balance. + /// + /// The dispatch origin for this call is `root`. + /// + /// WARNING: This call is DEPRECATED! Use `force_set_balance` instead. + #[pallet::call_index(1)] + #[pallet::weight( + T::WeightInfo::force_set_balance_creating() // Creates a new account. + .max(T::WeightInfo::force_set_balance_killing()) // Kills an existing account. + )] + pub fn set_balance_deprecated( + origin: OriginFor, + who: AccountIdLookupOf, + #[pallet::compact] new_free: T::Balance, + #[pallet::compact] old_reserved: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let existential_deposit = Self::ed(); - /// Get the reserved balance of an account. - pub fn reserved_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).reserved - } + let wipeout = new_free < existential_deposit; + let new_free = if wipeout { Zero::zero() } else { new_free }; - /// Get both the free and reserved balances of an account. - fn account(who: &T::AccountId) -> AccountData { - T::AccountStore::get(who) - } + // First we try to modify the account's balance to the forced balance. 
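// [Illustrative addition, not part of the diff] `transfer_allow_death` above passes
// `Expendable`, while `transfer_keep_alive` (further below) passes `Preserve`. A simplified,
// stand-alone two-variant model of what that `Preservation` choice means for the sender
// (the real enum also has a `Protect` variant, and the real check also accounts for frozen
// funds and provider references):
#[derive(Clone, Copy, PartialEq)]
enum PreservationSketch {
    // The transfer may reap the sender if it drops below the existential deposit.
    Expendable,
    // The sender must keep at least the existential deposit afterwards.
    Preserve,
}

fn sender_may_send(free: u128, amount: u128, ed: u128, preservation: PreservationSketch) -> bool {
    match free.checked_sub(amount) {
        None => false, // not enough free funds at all
        Some(remaining) => match preservation {
            PreservationSketch::Expendable => true, // any sub-ED remainder becomes dust
            PreservationSketch::Preserve => remaining >= ed,
        },
    }
}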
+ let old_free = Self::try_mutate_account_handling_dust( + &who, + |account, _is_new| -> Result { + let old_free = account.free; + ensure!(account.reserved == old_reserved, TokenError::Unsupported); + account.free = new_free; + Ok(old_free) + }, + )?; - /// Handles any steps needed after mutating an account. - /// - /// This includes DustRemoval unbalancing, in the case than the `new` account's total balance - /// is non-zero but below ED. - /// - /// Returns two values: - /// - `Some` containing the the `new` account, iff the account has sufficient balance. - /// - `Some` containing the dust to be dropped, iff some dust should be dropped. - fn post_mutation( - _who: &T::AccountId, - new: AccountData, - ) -> (Option>, Option>) { - let total = new.total(); - if total < T::ExistentialDeposit::get() { - if total.is_zero() { - (None, None) - } else { - (None, Some(NegativeImbalance::new(total))) + // This will adjust the total issuance, which was not done by the `mutate_account` + // above. + if new_free > old_free { + mem::drop(PositiveImbalance::::new(new_free - old_free)); + } else if new_free < old_free { + mem::drop(NegativeImbalance::::new(old_free - new_free)); } - } else { - (Some(new), None) - } - } - fn deposit_consequence( - _who: &T::AccountId, - amount: T::Balance, - account: &AccountData, - mint: bool, - ) -> DepositConsequence { - if amount.is_zero() { - return DepositConsequence::Success + Self::deposit_event(Event::BalanceSet { who, free: new_free }); + Ok(().into()) } - if mint && TotalIssuance::::get().checked_add(&amount).is_none() { - return DepositConsequence::Overflow - } - - let new_total_balance = match account.total().checked_add(&amount) { - Some(x) => x, - None => return DepositConsequence::Overflow, - }; - - if new_total_balance < T::ExistentialDeposit::get() { - return DepositConsequence::BelowMinimum + /// Exactly as `transfer_allow_death`, except the origin must be root and the source account + /// may be specified. + #[pallet::call_index(2)] + pub fn force_transfer( + origin: OriginFor, + source: AccountIdLookupOf, + dest: AccountIdLookupOf, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let source = T::Lookup::lookup(source)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&source, &dest, value, Expendable)?; + Ok(().into()) } - // NOTE: We assume that we are a provider, so don't need to do any checks in the - // case of account creation. - - DepositConsequence::Success - } - - fn withdraw_consequence( - who: &T::AccountId, - amount: T::Balance, - account: &AccountData, - ) -> WithdrawConsequence { - if amount.is_zero() { - return WithdrawConsequence::Success + /// Same as the [`transfer_allow_death`] call, but with a check that the transfer will not + /// kill the origin account. + /// + /// 99% of the time you want [`transfer_allow_death`] instead. + /// + /// [`transfer_allow_death`]: struct.Pallet.html#method.transfer + #[pallet::call_index(3)] + pub fn transfer_keep_alive( + origin: OriginFor, + dest: AccountIdLookupOf, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + let source = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&source, &dest, value, Preserve)?; + Ok(().into()) } - if TotalIssuance::::get().checked_sub(&amount).is_none() { - return WithdrawConsequence::Underflow + /// Transfer the entire transferable balance from the caller account. + /// + /// NOTE: This function only attempts to transfer _transferable_ balances. 
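// [Illustrative addition, not part of the diff] The `mem::drop(PositiveImbalance::new(..))` /
// `mem::drop(NegativeImbalance::new(..))` calls above rely on the imbalance `Drop` impls to
// square up total issuance after the forced balance change. A minimal stand-alone model of
// that RAII pattern (the real drop handler saturates rather than wraps):
use std::sync::atomic::{AtomicU64, Ordering};

static TOTAL_ISSUANCE: AtomicU64 = AtomicU64::new(1_000);

struct PositiveImbalanceSketch(u64);

impl Drop for PositiveImbalanceSketch {
    // Dropping the token is what actually credits the issuance counter.
    fn drop(&mut self) {
        TOTAL_ISSUANCE.fetch_add(self.0, Ordering::Relaxed);
    }
}

fn force_raise_balance_by(delta: u64) {
    // Account storage was already written by the caller; dropping the imbalance brings the
    // issuance counter back in line, mirroring the `mem::drop` above.
    std::mem::drop(PositiveImbalanceSketch(delta));
}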
This means that + /// any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be + /// transferred by this function. To ensure that this function results in a killed account, + /// you might need to prepare the account by removing any reference counters, storage + /// deposits, etc... + /// + /// The dispatch origin of this call must be Signed. + /// + /// - `dest`: The recipient of the transfer. + /// - `keep_alive`: A boolean to determine if the `transfer_all` operation should send all + /// of the funds the account has, causing the sender account to be killed (false), or + /// transfer everything except at least the existential deposit, which will guarantee to + /// keep the sender account alive (true). + #[pallet::call_index(4)] + pub fn transfer_all( + origin: OriginFor, + dest: AccountIdLookupOf, + keep_alive: bool, + ) -> DispatchResult { + let transactor = ensure_signed(origin)?; + let keep_alive = if keep_alive { Preserve } else { Expendable }; + let reducible_balance = >::reducible_balance( + &transactor, + keep_alive, + Fortitude::Polite, + ); + let dest = T::Lookup::lookup(dest)?; + >::transfer( + &transactor, + &dest, + reducible_balance, + keep_alive, + )?; + Ok(()) } - let new_total_balance = match account.total().checked_sub(&amount) { - Some(x) => x, - None => return WithdrawConsequence::NoFunds, - }; - - // Provider restriction - total account balance cannot be reduced to zero if it cannot - // sustain the loss of a provider reference. - // NOTE: This assumes that the pallet is a provider (which is true). Is this ever changes, - // then this will need to adapt accordingly. - let ed = T::ExistentialDeposit::get(); - let success = if new_total_balance < ed { - if frame_system::Pallet::::can_dec_provider(who) { - WithdrawConsequence::ReducedToZero(new_total_balance) - } else { - return WithdrawConsequence::WouldDie - } - } else { - WithdrawConsequence::Success - }; - - // Enough free funds to have them be reduced. - let new_free_balance = match account.free.checked_sub(&amount) { - Some(b) => b, - None => return WithdrawConsequence::NoFunds, - }; - - // Eventual free funds must be no less than the frozen balance. - let min_balance = account.frozen(Reasons::All); - if new_free_balance < min_balance { - return WithdrawConsequence::Frozen + /// Unreserve some balance from a user by force. + /// + /// Can only be called by ROOT. + #[pallet::call_index(5)] + pub fn force_unreserve( + origin: OriginFor, + who: AccountIdLookupOf, + amount: T::Balance, + ) -> DispatchResult { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let _leftover = >::unreserve(&who, amount); + Ok(()) } - success - } - - /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce - /// `ExistentialDeposit` law, annulling the account as needed. - /// - /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used - /// when it is known that the account already exists. - /// - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - pub fn mutate_account( - who: &T::AccountId, - f: impl FnOnce(&mut AccountData) -> R, - ) -> Result { - Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) - } - - /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce - /// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the - /// result of `f` is an `Err`. 
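// [Illustrative addition, not part of the diff] `transfer_all` above first asks
// `fungible::Inspect::reducible_balance` how much can actually leave the account under the
// chosen `Preservation`. A simplified stand-alone approximation of that computation using the
// new single `frozen` field; it deliberately ignores the provider-reference subtleties of the
// real implementation:
fn reducible_balance_sketch(
    free: u128,
    reserved: u128,
    frozen: u128,
    ed: u128,
    keep_alive: bool,
) -> u128 {
    // Freezes apply to the whole balance, but funds on hold already count towards them, so
    // only `frozen - reserved` of the free balance is untouchable on that account.
    let mut untouchable = frozen.saturating_sub(reserved);
    if keep_alive {
        // `Preserve`: additionally keep the existential deposit in free balance.
        untouchable = untouchable.max(ed);
    }
    free.saturating_sub(untouchable)
}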
- /// - /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used - /// when it is known that the account already exists. - /// - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - fn try_mutate_account>( - who: &T::AccountId, - f: impl FnOnce(&mut AccountData, bool) -> Result, - ) -> Result { - Self::try_mutate_account_with_dust(who, f).map(|(result, dust_cleaner)| { - drop(dust_cleaner); - result - }) - } - - /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce - /// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the - /// result of `f` is an `Err`. - /// - /// It returns both the result from the closure, and an optional `DustCleaner` instance which - /// should be dropped once it is known that all nested mutates that could affect storage items - /// what the dust handler touches have completed. - /// - /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used - /// when it is known that the account already exists. - /// - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - fn try_mutate_account_with_dust>( - who: &T::AccountId, - f: impl FnOnce(&mut AccountData, bool) -> Result, - ) -> Result<(R, DustCleaner), E> { - let result = T::AccountStore::try_mutate_exists(who, |maybe_account| { - let is_new = maybe_account.is_none(); - let mut account = maybe_account.take().unwrap_or_default(); - f(&mut account, is_new).map(move |result| { - let maybe_endowed = if is_new { Some(account.free) } else { None }; - let maybe_account_maybe_dust = Self::post_mutation(who, account); - *maybe_account = maybe_account_maybe_dust.0; - (maybe_endowed, maybe_account_maybe_dust.1, result) - }) - }); - result.map(|(maybe_endowed, maybe_dust, result)| { - if let Some(endowed) = maybe_endowed { - Self::deposit_event(Event::Endowed { account: who.clone(), free_balance: endowed }); + /// Upgrade a specified account. + /// + /// - `origin`: Must be `Signed`. + /// - `who`: The account to be upgraded. + /// + /// This will waive the transaction fee if at least all but 10% of the accounts needed to + /// be upgraded. (We let some not have to be upgraded just in order to allow for the + /// possibililty of churn). + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::upgrade_accounts(who.len() as u32))] + pub fn upgrade_accounts( + origin: OriginFor, + who: Vec, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + if who.is_empty() { + return Ok(Pays::Yes.into()) } - let dust_cleaner = DustCleaner(maybe_dust.map(|dust| (who.clone(), dust))); - (result, dust_cleaner) - }) - } - - /// Update the account entry for `who`, given the locks. - fn update_locks(who: &T::AccountId, locks: &[BalanceLock]) { - let bounded_locks = WeakBoundedVec::<_, T::MaxLocks>::force_from( - locks.to_vec(), - Some("Balances Update Locks"), - ); - - if locks.len() as u32 > T::MaxLocks::get() { - log::warn!( - target: LOG_TARGET, - "Warning: A user has more currency locks than expected. \ - A runtime configuration adjustment may be needed." - ); - } - // No way this can fail since we do not alter the existential balances. 
- let res = Self::mutate_account(who, |b| { - b.misc_frozen = Zero::zero(); - b.fee_frozen = Zero::zero(); - for l in locks.iter() { - if l.reasons == Reasons::All || l.reasons == Reasons::Misc { - b.misc_frozen = b.misc_frozen.max(l.amount); + let mut upgrade_count = 0; + for i in &who { + let upgraded = Self::ensure_upgraded(i); + if upgraded { + upgrade_count.saturating_inc(); } - if l.reasons == Reasons::All || l.reasons == Reasons::Fee { - b.fee_frozen = b.fee_frozen.max(l.amount); - } - } - }); - debug_assert!(res.is_ok()); - - let existed = Locks::::contains_key(who); - if locks.is_empty() { - Locks::::remove(who); - if existed { - // TODO: use Locks::::hashed_key - // https://github.com/paritytech/substrate/issues/4969 - system::Pallet::::dec_consumers(who); } - } else { - Locks::::insert(who, bounded_locks); - if !existed && system::Pallet::::inc_consumers_without_limit(who).is_err() { - // No providers for the locks. This is impossible under normal circumstances - // since the funds that are under the lock will themselves be stored in the - // account and therefore will need a reference. - log::warn!( - target: LOG_TARGET, - "Warning: Attempt to introduce lock consumer reference, yet no providers. \ - This is unexpected but should be safe." - ); + let proportion_upgraded = Perbill::from_rational(upgrade_count, who.len() as u32); + if proportion_upgraded >= Perbill::from_percent(90) { + Ok(Pays::No.into()) + } else { + Ok(Pays::Yes.into()) } } - } - /// Move the reserved balance of one account into the balance of another, according to `status`. - /// - /// Is a no-op if: - /// - the value to be moved is zero; or - /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. - /// - /// NOTE: returns actual amount of transferred value in `Ok` case. - fn do_transfer_reserved( - slashed: &T::AccountId, - beneficiary: &T::AccountId, - value: T::Balance, - best_effort: bool, - status: Status, - ) -> Result { - if value.is_zero() { - return Ok(Zero::zero()) + /// Alias for `transfer_allow_death`, provided only for name-wise compatibility. + /// + /// WARNING: DEPRECATED! Will be released in approximately 3 months. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::transfer_allow_death())] + pub fn transfer( + origin: OriginFor, + dest: AccountIdLookupOf, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + let source = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&source, &dest, value, Expendable)?; + Ok(().into()) } - if slashed == beneficiary { - return match status { - Status::Free => Ok(value.saturating_sub(Self::unreserve(slashed, value))), - Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), - } - } + /// Set the regular balance of a given account. + /// + /// The dispatch origin for this call is `root`. + #[pallet::call_index(8)] + #[pallet::weight( + T::WeightInfo::force_set_balance_creating() // Creates a new account. + .max(T::WeightInfo::force_set_balance_killing()) // Kills an existing account. 
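// [Illustrative addition, not part of the diff] `upgrade_accounts` above refunds the caller's
// fee when at least 90% of the listed accounts genuinely needed upgrading. A stand-alone
// model of that `Perbill::from_rational(..) >= Perbill::from_percent(90)` check (integer
// arithmetic only; the real `Perbill` rounding differs at the margin):
fn caller_pays_fee(upgrade_count: u32, total_listed: u32) -> bool {
    if total_listed == 0 {
        return true; // an empty list is always charged (`Pays::Yes` in the call above)
    }
    // upgraded / total >= 90%  <=>  upgraded * 10 >= total * 9
    let waived = u64::from(upgrade_count) * 10 >= u64::from(total_listed) * 9;
    !waived
}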
+ )] + pub fn force_set_balance( + origin: OriginFor, + who: AccountIdLookupOf, + #[pallet::compact] new_free: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let existential_deposit = Self::ed(); - let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( - beneficiary, - |to_account, is_new| -> Result<(T::Balance, DustCleaner), DispatchError> { - ensure!(!is_new, Error::::DeadAccount); - Self::try_mutate_account_with_dust( - slashed, - |from_account, _| -> Result { - let actual = cmp::min(from_account.reserved, value); - ensure!(best_effort || actual == value, Error::::InsufficientBalance); - match status { - Status::Free => - to_account.free = to_account - .free - .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)?, - Status::Reserved => - to_account.reserved = to_account - .reserved - .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)?, - } - from_account.reserved -= actual; - Ok(actual) - }, - ) - }, - )?; - - Self::deposit_event(Event::ReserveRepatriated { - from: slashed.clone(), - to: beneficiary.clone(), - amount: actual, - destination_status: status, - }); - Ok(actual) - } -} + let wipeout = new_free < existential_deposit; + let new_free = if wipeout { Zero::zero() } else { new_free }; -impl, I: 'static> fungible::Inspect for Pallet { - type Balance = T::Balance; + // First we try to modify the account's balance to the forced balance. + let old_free = Self::mutate_account_handling_dust(&who, |account| { + let old_free = account.free; + account.free = new_free; + old_free + })?; - fn total_issuance() -> Self::Balance { - TotalIssuance::::get() - } - fn active_issuance() -> Self::Balance { - TotalIssuance::::get().saturating_sub(InactiveIssuance::::get()) - } - fn minimum_balance() -> Self::Balance { - T::ExistentialDeposit::get() - } - fn balance(who: &T::AccountId) -> Self::Balance { - Self::account(who).total() - } - fn reducible_balance(who: &T::AccountId, keep_alive: bool) -> Self::Balance { - let a = Self::account(who); - // Liquid balance is what is neither reserved nor locked/frozen. - let liquid = a.free.saturating_sub(a.fee_frozen.max(a.misc_frozen)); - if frame_system::Pallet::::can_dec_provider(who) && !keep_alive { - liquid - } else { - // `must_remain_to_exist` is the part of liquid balance which must remain to keep total - // over ED. - let must_remain_to_exist = - T::ExistentialDeposit::get().saturating_sub(a.total() - liquid); - liquid.saturating_sub(must_remain_to_exist) - } - } - fn can_deposit(who: &T::AccountId, amount: Self::Balance, mint: bool) -> DepositConsequence { - Self::deposit_consequence(who, amount, &Self::account(who), mint) - } - fn can_withdraw( - who: &T::AccountId, - amount: Self::Balance, - ) -> WithdrawConsequence { - Self::withdraw_consequence(who, amount, &Self::account(who)) - } -} + // This will adjust the total issuance, which was not done by the `mutate_account` + // above. 
+ if new_free > old_free { + mem::drop(PositiveImbalance::::new(new_free - old_free)); + } else if new_free < old_free { + mem::drop(NegativeImbalance::::new(old_free - new_free)); + } -impl, I: 'static> fungible::Mutate for Pallet { - fn mint_into(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - if amount.is_zero() { - return Ok(()) + Self::deposit_event(Event::BalanceSet { who, free: new_free }); + Ok(().into()) } - Self::try_mutate_account(who, |account, _is_new| -> DispatchResult { - Self::deposit_consequence(who, amount, account, true).into_result()?; - account.free += amount; - Ok(()) - })?; - TotalIssuance::::mutate(|t| *t += amount); - Self::deposit_event(Event::Deposit { who: who.clone(), amount }); - Ok(()) } - fn burn_from( - who: &T::AccountId, - amount: Self::Balance, - ) -> Result { - if amount.is_zero() { - return Ok(Self::Balance::zero()) + impl, I: 'static> Pallet { + fn ed() -> T::Balance { + T::ExistentialDeposit::get() } - let actual = Self::try_mutate_account( - who, - |account, _is_new| -> Result { - let extra = Self::withdraw_consequence(who, amount, account).into_result()?; - let actual = amount + extra; - account.free -= actual; - Ok(actual) - }, - )?; - TotalIssuance::::mutate(|t| *t -= actual); - Self::deposit_event(Event::Withdraw { who: who.clone(), amount }); - Ok(actual) - } -} - -impl, I: 'static> fungible::Transfer for Pallet { - fn transfer( - source: &T::AccountId, - dest: &T::AccountId, - amount: T::Balance, - keep_alive: bool, - ) -> Result { - let er = if keep_alive { KeepAlive } else { AllowDeath }; - >::transfer(source, dest, amount, er).map(|_| amount) - } - - fn deactivate(amount: Self::Balance) { - InactiveIssuance::::mutate(|b| b.saturating_accrue(amount)); - } - - fn reactivate(amount: Self::Balance) { - InactiveIssuance::::mutate(|b| b.saturating_reduce(amount)); - } -} - -impl, I: 'static> fungible::Unbalanced for Pallet { - fn set_balance(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - Self::mutate_account(who, |account| -> DispatchResult { - // fungibles::Unbalanced::decrease_balance didn't check account.reserved - // free = new_balance - reserved - account.free = - amount.checked_sub(&account.reserved).ok_or(ArithmeticError::Underflow)?; - Self::deposit_event(Event::BalanceSet { - who: who.clone(), - free: account.free, - reserved: account.reserved, + /// Ensure the account `who` is using the new logic. + /// + /// Returns `true` if the account did get upgraded, `false` if it didn't need upgrading. + pub fn ensure_upgraded(who: &T::AccountId) -> bool { + let mut a = T::AccountStore::get(who); + if a.flags.is_new_logic() { + return false + } + a.flags.set_new_logic(); + if !a.reserved.is_zero() && a.frozen.is_zero() { + if system::Pallet::::providers(who) == 0 { + // Gah!! We have no provider refs :( + // This shouldn't practically happen, but we need a failsafe anyway: let's give + // them enough for an ED. + log::warn!( + target: LOG_TARGET, + "account with a non-zero reserve balance has no provider refs, account_id: '{:?}'.", + who + ); + a.free = a.free.max(Self::ed()); + system::Pallet::::inc_providers(who); + } + let _ = system::Pallet::::inc_consumers_without_limit(who).defensive(); + } + // Should never fail - we're only setting a bit. + let _ = T::AccountStore::try_mutate_exists(who, |account| -> DispatchResult { + *account = Some(a); + Ok(()) }); - - Ok(()) - })? 
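// [Illustrative addition, not part of the diff] `ensure_upgraded` above is keyed off a single
// bit in the account's extra-flags word (`is_new_logic` / `set_new_logic`). A stand-alone
// sketch of that flag handling; the bit position and the type name are assumptions of the
// sketch, not taken from this file:
const IS_NEW_LOGIC: u128 = 1u128 << 127;

#[derive(Clone, Copy, Default)]
struct ExtraFlagsSketch(u128);

impl ExtraFlagsSketch {
    fn is_new_logic(&self) -> bool {
        self.0 & IS_NEW_LOGIC != 0
    }
    fn set_new_logic(&mut self) {
        self.0 |= IS_NEW_LOGIC;
    }
}

fn ensure_upgraded_sketch(flags: &mut ExtraFlagsSketch) -> bool {
    if flags.is_new_logic() {
        return false; // nothing to do, mirroring the early return above
    }
    flags.set_new_logic();
    true // the account was upgraded
}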
- } - - fn set_total_issuance(amount: Self::Balance) { - TotalIssuance::::mutate(|t| *t = amount); - } -} - -impl, I: 'static> fungible::InspectHold for Pallet { - fn balance_on_hold(who: &T::AccountId) -> T::Balance { - Self::account(who).reserved - } - fn can_hold(who: &T::AccountId, amount: T::Balance) -> bool { - let a = Self::account(who); - let min_balance = T::ExistentialDeposit::get().max(a.frozen(Reasons::All)); - if a.reserved.checked_add(&amount).is_none() { - return false - } - // We require it to be min_balance + amount to ensure that the full reserved funds may be - // slashed without compromising locked funds or destroying the account. - let required_free = match min_balance.checked_add(&amount) { - Some(x) => x, - None => return false, - }; - a.free >= required_free - } -} -impl, I: 'static> fungible::MutateHold for Pallet { - fn hold(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - if amount.is_zero() { - return Ok(()) - } - ensure!(Self::can_reserve(who, amount), Error::::InsufficientBalance); - Self::mutate_account(who, |a| { - a.free -= amount; - a.reserved += amount; - })?; - Ok(()) - } - fn release( - who: &T::AccountId, - amount: Self::Balance, - best_effort: bool, - ) -> Result { - if amount.is_zero() { - return Ok(amount) - } - // Done on a best-effort basis. - Self::try_mutate_account(who, |a, _| { - let new_free = a.free.saturating_add(amount.min(a.reserved)); - let actual = new_free - a.free; - ensure!(best_effort || actual == amount, Error::::InsufficientBalance); - // ^^^ Guaranteed to be <= amount and <= a.reserved - a.free = new_free; - a.reserved = a.reserved.saturating_sub(actual); - Ok(actual) - }) - } - fn transfer_held( - source: &T::AccountId, - dest: &T::AccountId, - amount: Self::Balance, - best_effort: bool, - on_hold: bool, - ) -> Result { - let status = if on_hold { Status::Reserved } else { Status::Free }; - Self::do_transfer_reserved(source, dest, amount, best_effort, status) - } -} - -// wrapping these imbalances in a private module is necessary to ensure absolute privacy -// of the inner member. -mod imbalances { - use super::{result, Config, Imbalance, RuntimeDebug, Saturating, TryDrop, Zero}; - use frame_support::traits::SameOrOther; - use sp_std::mem; - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been created without any equal and opposite accounting. - #[must_use] - #[derive(RuntimeDebug, PartialEq, Eq)] - pub struct PositiveImbalance, I: 'static = ()>(T::Balance); - - impl, I: 'static> PositiveImbalance { - /// Create a new positive imbalance from a balance. - pub fn new(amount: T::Balance) -> Self { - PositiveImbalance(amount) - } - } - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been destroyed without any equal and opposite accounting. - #[must_use] - #[derive(RuntimeDebug, PartialEq, Eq)] - pub struct NegativeImbalance, I: 'static = ()>(T::Balance); - - impl, I: 'static> NegativeImbalance { - /// Create a new negative imbalance from a balance. 
- pub fn new(amount: T::Balance) -> Self { - NegativeImbalance(amount) - } - } - - impl, I: 'static> TryDrop for PositiveImbalance { - fn try_drop(self) -> result::Result<(), Self> { - self.drop_zero() - } - } - - impl, I: 'static> Default for PositiveImbalance { - fn default() -> Self { - Self::zero() + Self::deposit_event(Event::Upgraded { who: who.clone() }); + return true } - } - impl, I: 'static> Imbalance for PositiveImbalance { - type Opposite = NegativeImbalance; - - fn zero() -> Self { - Self(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } + /// Get the free balance of an account. + pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + Self::account(who.borrow()).free } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - mem::forget(self); - (Self(first), Self(second)) + /// Get the balance of an account that can be used for transfers, reservations, or any other + /// non-locking, non-transaction-fee activity. Will be at most `free_balance`. + pub fn usable_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + >::reducible_balance(who.borrow(), Expendable, Polite) } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> SameOrOther { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a > b { - SameOrOther::Same(Self(a - b)) - } else if b > a { - SameOrOther::Other(NegativeImbalance::new(b - a)) - } else { - SameOrOther::None - } - } - fn peek(&self) -> T::Balance { - self.0 + /// Get the balance of an account that can be used for paying transaction fees (not tipping, + /// or any other kind of fees, though). Will be at most `free_balance`. + /// + /// This requires that the account stays alive. + pub fn usable_balance_for_fees( + who: impl sp_std::borrow::Borrow, + ) -> T::Balance { + >::reducible_balance(who.borrow(), Protect, Polite) } - } - impl, I: 'static> TryDrop for NegativeImbalance { - fn try_drop(self) -> result::Result<(), Self> { - self.drop_zero() + /// Get the reserved balance of an account. + pub fn reserved_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + Self::account(who.borrow()).reserved } - } - impl, I: 'static> Default for NegativeImbalance { - fn default() -> Self { - Self::zero() + /// Get both the free and reserved balances of an account. + pub(crate) fn account(who: &T::AccountId) -> AccountData { + T::AccountStore::get(who) } - } - - impl, I: 'static> Imbalance for NegativeImbalance { - type Opposite = PositiveImbalance; - fn zero() -> Self { - Self(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce + /// `ExistentialDeposit` law, annulling the account as needed. + /// + /// It returns the result from the closure. Any dust is handled through the low-level + /// `fungible::Unbalanced` trap-door for legacy dust management. + /// + /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used + /// when it is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. 
It is expected that + /// the caller will do this. + pub(crate) fn mutate_account_handling_dust( + who: &T::AccountId, + f: impl FnOnce(&mut AccountData) -> R, + ) -> Result { + let (r, maybe_dust) = Self::mutate_account(who, f)?; + if let Some(dust) = maybe_dust { + >::handle_raw_dust(dust); } + Ok(r) } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - - mem::forget(self); - (Self(first), Self(second)) - } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> SameOrOther { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a > b { - SameOrOther::Same(Self(a - b)) - } else if b > a { - SameOrOther::Other(PositiveImbalance::new(b - a)) - } else { - SameOrOther::None + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce + /// `ExistentialDeposit` law, annulling the account as needed. + /// + /// It returns the result from the closure. Any dust is handled through the low-level + /// `fungible::Unbalanced` trap-door for legacy dust management. + /// + /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used + /// when it is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + pub(crate) fn try_mutate_account_handling_dust>( + who: &T::AccountId, + f: impl FnOnce(&mut AccountData, bool) -> Result, + ) -> Result { + let (r, maybe_dust) = Self::try_mutate_account(who, f)?; + if let Some(dust) = maybe_dust { + >::handle_raw_dust(dust); } + Ok(r) } - fn peek(&self) -> T::Balance { - self.0 - } - } - - impl, I: 'static> Drop for PositiveImbalance { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - >::mutate(|v| *v = v.saturating_add(self.0)); - } - } - - impl, I: 'static> Drop for NegativeImbalance { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - >::mutate(|v| *v = v.saturating_sub(self.0)); - } - } -} - -impl, I: 'static> Currency for Pallet -where - T::Balance: MaybeSerializeDeserialize + Debug, -{ - type Balance = T::Balance; - type PositiveImbalance = PositiveImbalance; - type NegativeImbalance = NegativeImbalance; - - fn total_balance(who: &T::AccountId) -> Self::Balance { - Self::account(who).total() - } - - // Check if `value` amount of free balance can be slashed from `who`. - fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { - return true - } - Self::free_balance(who) >= value - } - - fn total_issuance() -> Self::Balance { - TotalIssuance::::get() - } - fn active_issuance() -> Self::Balance { - >::active_issuance() - } - - fn deactivate(amount: Self::Balance) { - >::deactivate(amount); - } - - fn reactivate(amount: Self::Balance) { - >::reactivate(amount); - } - - fn minimum_balance() -> Self::Balance { - T::ExistentialDeposit::get() - } - - // Burn funds from the total issuance, returning a positive imbalance for the amount burned. - // Is a no-op if amount to be burned is zero. 
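// [Illustrative addition, not part of the diff] Both `*_handling_dust` wrappers above follow
// the same shape: run the low-level mutation, then forward any reported dust so callers don't
// have to remember to. A generic stand-alone sketch of that pattern:
fn mutate_handling_dust<R, E>(
    mutate: impl FnOnce() -> Result<(R, Option<u128>), E>,
    handle_raw_dust: impl FnOnce(u128),
) -> Result<R, E> {
    let (result, maybe_dust) = mutate()?;
    if let Some(dust) = maybe_dust {
        // In the pallet this is `fungible::Unbalanced::handle_raw_dust`, which ultimately
        // hands the remainder to the configured dust handling (e.g. `T::DustRemoval`).
        handle_raw_dust(dust);
    }
    Ok(result)
}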
- fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { - if amount.is_zero() { - return PositiveImbalance::zero() - } - >::mutate(|issued| { - *issued = issued.checked_sub(&amount).unwrap_or_else(|| { - amount = *issued; - Zero::zero() - }); - }); - PositiveImbalance::new(amount) - } - - // Create new funds into the total issuance, returning a negative imbalance - // for the amount issued. - // Is a no-op if amount to be issued it zero. - fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { - if amount.is_zero() { - return NegativeImbalance::zero() + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce + /// `ExistentialDeposit` law, annulling the account as needed. + /// + /// It returns both the result from the closure, and an optional amount of dust + /// which should be handled once it is known that all nested mutates that could affect + /// storage items what the dust handler touches have completed. + /// + /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used + /// when it is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + pub(crate) fn mutate_account( + who: &T::AccountId, + f: impl FnOnce(&mut AccountData) -> R, + ) -> Result<(R, Option), DispatchError> { + Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) } - >::mutate(|issued| { - *issued = issued.checked_add(&amount).unwrap_or_else(|| { - amount = Self::Balance::max_value() - *issued; - Self::Balance::max_value() - }) - }); - NegativeImbalance::new(amount) - } - fn free_balance(who: &T::AccountId) -> Self::Balance { - Self::account(who).free - } - - // Ensure that an account can withdraw from their free balance given any existing withdrawal - // restrictions like locks and vesting balance. - // Is a no-op if amount to be withdrawn is zero. - // - // ## Complexity - // Despite iterating over a list of locks, they are limited by the number of - // lock IDs, which means the number of runtime pallets that intend to use and create locks. - fn ensure_can_withdraw( - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - new_balance: T::Balance, - ) -> DispatchResult { - if amount.is_zero() { - return Ok(()) + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `false` otherwise. + #[cfg(not(feature = "insecure_zero_ed"))] + fn have_providers_or_no_zero_ed(_: &T::AccountId) -> bool { + true } - let min_balance = Self::account(who).frozen(reasons.into()); - ensure!(new_balance >= min_balance, Error::::LiquidityRestrictions); - Ok(()) - } - // Transfer some free balance from `transactor` to `dest`, respecting existence requirements. - // Is a no-op if value to be transferred is zero or the `transactor` is the same as `dest`. - fn transfer( - transactor: &T::AccountId, - dest: &T::AccountId, - value: Self::Balance, - existence_requirement: ExistenceRequirement, - ) -> DispatchResult { - if value.is_zero() || transactor == dest { - return Ok(()) + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `false` otherwise. 
+ #[cfg(feature = "insecure_zero_ed")] + fn have_providers_or_no_zero_ed(who: &T::AccountId) -> bool { + frame_system::Pallet::::providers(who) > 0 } - Self::try_mutate_account_with_dust( - dest, - |to_account, _| -> Result, DispatchError> { - Self::try_mutate_account_with_dust( - transactor, - |from_account, _| -> DispatchResult { - from_account.free = from_account - .free - .checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; - - // NOTE: total stake being stored in the same type means that this could - // never overflow but better to be safe than sorry. - to_account.free = - to_account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; - - let ed = T::ExistentialDeposit::get(); - ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); - - Self::ensure_can_withdraw( - transactor, - value, - WithdrawReasons::TRANSFER, - from_account.free, - ) - .map_err(|_| Error::::LiquidityRestrictions)?; - - // TODO: This is over-conservative. There may now be other providers, and - // this pallet may not even be a provider. - let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; - let allow_death = - allow_death && system::Pallet::::can_dec_provider(transactor); - ensure!( - allow_death || from_account.total() >= ed, - Error::::KeepAlive - ); - - Ok(()) - }, - ) - .map(|(_, maybe_dust_cleaner)| maybe_dust_cleaner) - }, - )?; - - // Emit transfer event. - Self::deposit_event(Event::Transfer { - from: transactor.clone(), - to: dest.clone(), - amount: value, - }); - - Ok(()) - } + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce + /// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the + /// result of `f` is an `Err`. + /// + /// It returns both the result from the closure, and an optional amount of dust + /// which should be handled once it is known that all nested mutates that could affect + /// storage items what the dust handler touches have completed. + /// + /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used + /// when it is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + pub(crate) fn try_mutate_account>( + who: &T::AccountId, + f: impl FnOnce(&mut AccountData, bool) -> Result, + ) -> Result<(R, Option), E> { + Self::ensure_upgraded(who); + let result = T::AccountStore::try_mutate_exists(who, |maybe_account| { + let is_new = maybe_account.is_none(); + let mut account = maybe_account.take().unwrap_or_default(); + let did_provide = + account.free >= Self::ed() && Self::have_providers_or_no_zero_ed(who); + let did_consume = + !is_new && (!account.reserved.is_zero() || !account.frozen.is_zero()); + + let result = f(&mut account, is_new)?; + + let does_provide = account.free >= Self::ed(); + let does_consume = !account.reserved.is_zero() || !account.frozen.is_zero(); + + if !did_provide && does_provide { + frame_system::Pallet::::inc_providers(who); + } + if did_consume && !does_consume { + frame_system::Pallet::::dec_consumers(who); + } + if !did_consume && does_consume { + frame_system::Pallet::::inc_consumers(who)?; + } + if did_provide && !does_provide { + // This could reap the account so must go last. + frame_system::Pallet::::dec_providers(who).map_err(|r| { + if did_consume && !does_consume { + // best-effort revert consumer change. 
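// [Illustrative addition, not part of the diff] The block above translates the account's
// before/after state into provider/consumer reference-count adjustments on frame_system. A
// stand-alone decision table for those four transitions:
struct RefChanges {
    inc_providers: bool,
    dec_providers: bool,
    inc_consumers: bool,
    dec_consumers: bool,
}

fn ref_changes(
    did_provide: bool,
    does_provide: bool,
    did_consume: bool,
    does_consume: bool,
) -> RefChanges {
    RefChanges {
        // Free balance crossed the ED threshold upwards: the account now provides for itself.
        inc_providers: !did_provide && does_provide,
        // Free balance fell below ED: releasing the provider ref may reap the account, which
        // is why the real code performs this step last and reverts the consumer change if it
        // fails (as shown just below).
        dec_providers: did_provide && !does_provide,
        // Reserved or frozen funds appeared: a consumer ref is needed to keep the account.
        inc_consumers: !did_consume && does_consume,
        // Reserved and frozen funds are gone: the consumer ref can be released.
        dec_consumers: did_consume && !does_consume,
    }
}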
+ let _ = frame_system::Pallet::::inc_consumers(who).defensive(); + } + if !did_consume && does_consume { + let _ = frame_system::Pallet::::dec_consumers(who); + } + r + })?; + } - /// Slash a target account `who`, returning the negative imbalance created and any left over - /// amount that could not be slashed. - /// - /// Is a no-op if `value` to be slashed is zero or the account does not exist. - /// - /// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn - /// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid - /// having to draw from reserved funds, however we err on the side of punishment if things are - /// inconsistent or `can_slash` wasn't used appropriately. - fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()) - } - if Self::total_balance(who).is_zero() { - return (NegativeImbalance::zero(), value) - } + let maybe_endowed = if is_new { Some(account.free) } else { None }; - for attempt in 0..2 { - match Self::try_mutate_account( - who, - |account, - _is_new| - -> Result<(Self::NegativeImbalance, Self::Balance), DispatchError> { - // Best value is the most amount we can slash following liveness rules. - let best_value = match attempt { - // First attempt we try to slash the full amount, and see if liveness issues - // happen. - 0 => value, - // If acting as a critical provider (i.e. first attempt failed), then slash - // as much as possible while leaving at least at ED. - _ => value.min( - (account.free + account.reserved) - .saturating_sub(T::ExistentialDeposit::get()), - ), - }; - - let free_slash = cmp::min(account.free, best_value); - account.free -= free_slash; // Safe because of above check - let remaining_slash = best_value - free_slash; // Safe because of above check - - if !remaining_slash.is_zero() { - // If we have remaining slash, take it from reserved balance. - let reserved_slash = cmp::min(account.reserved, remaining_slash); - account.reserved -= reserved_slash; // Safe because of above check - Ok(( - NegativeImbalance::new(free_slash + reserved_slash), - value - free_slash - reserved_slash, /* Safe because value is gt or - * eq total slashed */ - )) + // Handle any steps needed after mutating an account. + // + // This includes DustRemoval unbalancing, in the case than the `new` account's total + // balance is non-zero but below ED. + // + // Updates `maybe_account` to `Some` iff the account has sufficient balance. + // Evaluates `maybe_dust`, which is `Some` containing the dust to be dropped, iff + // some dust should be dropped. + // + // We should never be dropping if reserved is non-zero. Reserved being non-zero + // should imply that we have a consumer ref, so this is economically safe. + let ed = Self::ed(); + let maybe_dust = if account.free < ed && account.reserved.is_zero() { + if account.free.is_zero() { + None } else { - // Else we are done! - Ok(( - NegativeImbalance::new(free_slash), - value - free_slash, // Safe because value is gt or eq to total slashed - )) + Some(account.free) } - }, - ) { - Ok((imbalance, not_slashed)) => { - Self::deposit_event(Event::Slashed { - who: who.clone(), - amount: value.saturating_sub(not_slashed), - }); - return (imbalance, not_slashed) - }, - Err(_) => (), - } - } - - // Should never get here. But we'll be defensive anyway. 
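// [Illustrative addition, not part of the diff] Once the closure has run, the account either
// survives or is reduced to dust. A minimal stand-alone model of the `maybe_dust` decision
// above (reserved funds keep the account alive because they imply a consumer reference):
fn maybe_dust_sketch(free: u128, reserved: u128, ed: u128) -> Option<u128> {
    if free < ed && reserved == 0 {
        // Below the existential deposit with nothing on hold: the account entry is dropped,
        // and any non-zero remainder is reported as dust for the dust handler.
        if free == 0 {
            None
        } else {
            Some(free)
        }
    } else {
        // The account is kept in storage; nothing to hand to the dust handler.
        None
    }
}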
- (Self::NegativeImbalance::zero(), value) - } - - /// Deposit some `value` into the free balance of an existing target account `who`. - /// - /// Is a no-op if the `value` to be deposited is zero. - fn deposit_into_existing( - who: &T::AccountId, - value: Self::Balance, - ) -> Result { - if value.is_zero() { - return Ok(PositiveImbalance::zero()) - } - - Self::try_mutate_account( - who, - |account, is_new| -> Result { - ensure!(!is_new, Error::::DeadAccount); - account.free = account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; - Self::deposit_event(Event::Deposit { who: who.clone(), amount: value }); - Ok(PositiveImbalance::new(value)) - }, - ) - } - - /// Deposit some `value` into the free balance of `who`, possibly creating a new account. - /// - /// This function is a no-op if: - /// - the `value` to be deposited is zero; or - /// - the `value` to be deposited is less than the required ED and the account does not yet - /// exist; or - /// - the deposit would necessitate the account to exist and there are no provider references; - /// or - /// - `value` is so large it would cause the balance of `who` to overflow. - fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { - if value.is_zero() { - return Self::PositiveImbalance::zero() - } - - Self::try_mutate_account( - who, - |account, is_new| -> Result { - let ed = T::ExistentialDeposit::get(); - ensure!(value >= ed || !is_new, Error::::ExistentialDeposit); - - // defensive only: overflow should never happen, however in case it does, then this - // operation is a no-op. - account.free = match account.free.checked_add(&value) { - Some(x) => x, - None => return Ok(Self::PositiveImbalance::zero()), - }; - - Self::deposit_event(Event::Deposit { who: who.clone(), amount: value }); - Ok(PositiveImbalance::new(value)) - }, - ) - .unwrap_or_else(|_| Self::PositiveImbalance::zero()) - } - - /// Withdraw some free balance from an account, respecting existence requirements. - /// - /// Is a no-op if value to be withdrawn is zero. - fn withdraw( - who: &T::AccountId, - value: Self::Balance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result { - if value.is_zero() { - return Ok(NegativeImbalance::zero()) - } - - Self::try_mutate_account( - who, - |account, _| -> Result { - let new_free_account = - account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; - - // bail if we need to keep the account alive and this would kill it. - let ed = T::ExistentialDeposit::get(); - let would_be_dead = new_free_account + account.reserved < ed; - let would_kill = would_be_dead && account.free + account.reserved >= ed; - ensure!(liveness == AllowDeath || !would_kill, Error::::KeepAlive); - - Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; - - account.free = new_free_account; - - Self::deposit_event(Event::Withdraw { who: who.clone(), amount: value }); - Ok(NegativeImbalance::new(value)) - }, - ) - } - - /// Force the new free balance of a target account `who` to some new value `balance`. - fn make_free_balance_be( - who: &T::AccountId, - value: Self::Balance, - ) -> SignedImbalance { - Self::try_mutate_account( - who, - |account, - is_new| - -> Result, DispatchError> { - let ed = T::ExistentialDeposit::get(); - let total = value.saturating_add(account.reserved); - // If we're attempting to set an existing account to less than ED, then - // bypass the entire operation. 
It's a no-op if you follow it through, but - // since this is an instance where we might account for a negative imbalance - // (in the dust cleaner of set_account) before we account for its actual - // equal and opposite cause (returned as an Imbalance), then in the - // instance that there's no other accounts on the system at all, we might - // underflow the issuance and our arithmetic will be off. - ensure!(total >= ed || !is_new, Error::::ExistentialDeposit); - - let imbalance = if account.free <= value { - SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) } else { - SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) + assert!( + account.free.is_zero() || account.free >= ed || !account.reserved.is_zero() + ); + *maybe_account = Some(account); + None }; - account.free = value; - Self::deposit_event(Event::BalanceSet { - who: who.clone(), - free: account.free, - reserved: account.reserved, - }); - Ok(imbalance) - }, - ) - .unwrap_or_else(|_| SignedImbalance::Positive(Self::PositiveImbalance::zero())) - } -} - -impl, I: 'static> ReservableCurrency for Pallet -where - T::Balance: MaybeSerializeDeserialize + Debug, -{ - /// Check if `who` can reserve `value` from their free balance. - /// - /// Always `true` if value to be reserved is zero. - fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { - return true - } - Self::account(who).free.checked_sub(&value).map_or(false, |new_balance| { - Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, new_balance).is_ok() - }) - } - - fn reserved_balance(who: &T::AccountId) -> Self::Balance { - Self::account(who).reserved - } - - /// Move `value` from the free balance from `who` to their reserved balance. - /// - /// Is a no-op if value to be reserved is zero. - fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { - if value.is_zero() { - return Ok(()) - } - - Self::try_mutate_account(who, |account, _| -> DispatchResult { - account.free = - account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; - account.reserved = - account.reserved.checked_add(&value).ok_or(ArithmeticError::Overflow)?; - Self::ensure_can_withdraw(&who, value, WithdrawReasons::RESERVE, account.free) - })?; - - Self::deposit_event(Event::Reserved { who: who.clone(), amount: value }); - Ok(()) - } - - /// Unreserve some funds, returning any amount that was unable to be unreserved. - /// - /// Is a no-op if the value to be unreserved is zero or the account does not exist. - /// - /// NOTE: returns amount value which wasn't successfully unreserved. - fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { - if value.is_zero() { - return Zero::zero() - } - if Self::total_balance(who).is_zero() { - return value - } - - let actual = match Self::mutate_account(who, |account| { - let actual = cmp::min(account.reserved, value); - account.reserved -= actual; - // defensive only: this can never fail since total issuance which is at least - // free+reserved fits into the same data type. - account.free = account.free.defensive_saturating_add(actual); - actual - }) { - Ok(x) => x, - Err(_) => { - // This should never happen since we don't alter the total amount in the account. - // If it ever does, then we should fail gracefully though, indicating that nothing - // could be done. 
- return value - }, - }; - - Self::deposit_event(Event::Unreserved { who: who.clone(), amount: actual }); - value - actual - } - - /// Slash from reserved balance, returning the negative imbalance created, - /// and any amount that was unable to be slashed. - /// - /// Is a no-op if the value to be slashed is zero or the account does not exist. - fn slash_reserved( - who: &T::AccountId, - value: Self::Balance, - ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()) - } - if Self::total_balance(who).is_zero() { - return (NegativeImbalance::zero(), value) - } - - // NOTE: `mutate_account` may fail if it attempts to reduce the balance to the point that an - // account is attempted to be illegally destroyed. - - for attempt in 0..2 { - match Self::mutate_account(who, |account| { - let best_value = match attempt { - 0 => value, - // If acting as a critical provider (i.e. first attempt failed), then ensure - // slash leaves at least the ED. - _ => value.min( - (account.free + account.reserved) - .saturating_sub(T::ExistentialDeposit::get()), - ), - }; - - let actual = cmp::min(account.reserved, best_value); - account.reserved -= actual; - - // underflow should never happen, but it if does, there's nothing to be done here. - (NegativeImbalance::new(actual), value - actual) - }) { - Ok((imbalance, not_slashed)) => { - Self::deposit_event(Event::Slashed { - who: who.clone(), - amount: value.saturating_sub(not_slashed), + Ok((maybe_endowed, maybe_dust, result)) + }); + result.map(|(maybe_endowed, maybe_dust, result)| { + if let Some(endowed) = maybe_endowed { + Self::deposit_event(Event::Endowed { + account: who.clone(), + free_balance: endowed, }); - return (imbalance, not_slashed) - }, - Err(_) => (), - } - } - // Should never get here as we ensure that ED is left in the second attempt. - // In case we do, though, then we fail gracefully. - (Self::NegativeImbalance::zero(), value) - } - - /// Move the reserved balance of one account into the balance of another, according to `status`. - /// - /// Is a no-op if: - /// - the value to be moved is zero; or - /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. - fn repatriate_reserved( - slashed: &T::AccountId, - beneficiary: &T::AccountId, - value: Self::Balance, - status: Status, - ) -> Result { - let actual = Self::do_transfer_reserved(slashed, beneficiary, value, true, status)?; - Ok(value.saturating_sub(actual)) - } -} - -impl, I: 'static> NamedReservableCurrency for Pallet -where - T::Balance: MaybeSerializeDeserialize + Debug, -{ - type ReserveIdentifier = T::ReserveIdentifier; - - fn reserved_balance_named(id: &Self::ReserveIdentifier, who: &T::AccountId) -> Self::Balance { - let reserves = Self::reserves(who); - reserves - .binary_search_by_key(id, |data| data.id) - .map(|index| reserves[index].amount) - .unwrap_or_default() - } - - /// Move `value` from the free balance from `who` to a named reserve balance. - /// - /// Is a no-op if value to be reserved is zero. - fn reserve_named( - id: &Self::ReserveIdentifier, - who: &T::AccountId, - value: Self::Balance, - ) -> DispatchResult { - if value.is_zero() { - return Ok(()) - } - - Reserves::::try_mutate(who, |reserves| -> DispatchResult { - match reserves.binary_search_by_key(id, |data| data.id) { - Ok(index) => { - // this add can't overflow but just to be defensive. 
- reserves[index].amount = reserves[index].amount.defensive_saturating_add(value); - }, - Err(index) => { - reserves - .try_insert(index, ReserveData { id: *id, amount: value }) - .map_err(|_| Error::::TooManyReserves)?; - }, - }; - >::reserve(who, value)?; - Ok(()) - }) - } - - /// Unreserve some funds, returning any amount that was unable to be unreserved. - /// - /// Is a no-op if the value to be unreserved is zero. - fn unreserve_named( - id: &Self::ReserveIdentifier, - who: &T::AccountId, - value: Self::Balance, - ) -> Self::Balance { - if value.is_zero() { - return Zero::zero() + } + if let Some(amount) = maybe_dust { + Pallet::::deposit_event(Event::DustLost { account: who.clone(), amount }); + } + (result, maybe_dust) + }) } - Reserves::::mutate_exists(who, |maybe_reserves| -> Self::Balance { - if let Some(reserves) = maybe_reserves.as_mut() { - match reserves.binary_search_by_key(id, |data| data.id) { - Ok(index) => { - let to_change = cmp::min(reserves[index].amount, value); - - let remain = >::unreserve(who, to_change); - - // remain should always be zero but just to be defensive here. - let actual = to_change.defensive_saturating_sub(remain); - - // `actual <= to_change` and `to_change <= amount`; qed; - reserves[index].amount -= actual; + /// Update the account entry for `who`, given the locks. + pub(crate) fn update_locks(who: &T::AccountId, locks: &[BalanceLock]) { + let bounded_locks = WeakBoundedVec::<_, T::MaxLocks>::force_from( + locks.to_vec(), + Some("Balances Update Locks"), + ); - if reserves[index].amount.is_zero() { - if reserves.len() == 1 { - // no more named reserves - *maybe_reserves = None; - } else { - // remove this named reserve - reserves.remove(index); - } - } + if locks.len() as u32 > T::MaxLocks::get() { + log::warn!( + target: LOG_TARGET, + "Warning: A user has more currency locks than expected. \ + A runtime configuration adjustment may be needed." + ); + } + let freezes = Freezes::::get(who); + let mut prev_frozen = Zero::zero(); + let mut after_frozen = Zero::zero(); + // TODO: Revisit this assumption. We no manipulate consumer/provider refs. + // No way this can fail since we do not alter the existential balances. + let res = Self::mutate_account(who, |b| { + prev_frozen = b.frozen; + b.frozen = Zero::zero(); + for l in locks.iter() { + b.frozen = b.frozen.max(l.amount); + } + for l in freezes.iter() { + b.frozen = b.frozen.max(l.amount); + } + after_frozen = b.frozen; + }); + debug_assert!(res.is_ok()); + if let Ok((_, maybe_dust)) = res { + debug_assert!(maybe_dust.is_none(), "Not altering main balance; qed"); + } - value - actual - }, - Err(_) => value, + let existed = Locks::::contains_key(who); + if locks.is_empty() { + Locks::::remove(who); + if existed { + // TODO: use Locks::::hashed_key + // https://github.com/paritytech/substrate/issues/4969 + system::Pallet::::dec_consumers(who); } } else { - value + Locks::::insert(who, bounded_locks); + if !existed && system::Pallet::::inc_consumers_without_limit(who).is_err() { + // No providers for the locks. This is impossible under normal circumstances + // since the funds that are under the lock will themselves be stored in the + // account and therefore will need a reference. + log::warn!( + target: LOG_TARGET, + "Warning: Attempt to introduce lock consumer reference, yet no providers. \ + This is unexpected but should be safe." + ); + } } - }) - } - - /// Slash from reserved balance, returning the negative imbalance created, - /// and any amount that was unable to be slashed. 
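// [Illustrative addition, not part of the diff] With the old `misc_frozen`/`fee_frozen` pair
// gone, `update_locks` (above) and `update_freezes` (below) both reduce everything to a single
// `frozen` amount: the maximum over all lock amounts and all freeze amounts. The
// `Unlocked`/`Locked` (and `Thawed`/`Frozen`) events emitted just below are driven purely by
// how that aggregate changed. A stand-alone sketch of the aggregation:
fn compute_frozen(lock_amounts: &[u128], freeze_amounts: &[u128]) -> u128 {
    lock_amounts
        .iter()
        .chain(freeze_amounts.iter())
        .copied()
        .fold(0u128, u128::max)
}

// Example: two locks of 30 and 50 plus a freeze of 40 freeze 50 units in total, not 120,
// because locks and freezes overlap rather than add up.
fn compute_frozen_example() {
    assert_eq!(compute_frozen(&[30, 50], &[40]), 50);
}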
- /// - /// Is a no-op if the value to be slashed is zero. - fn slash_reserved_named( - id: &Self::ReserveIdentifier, - who: &T::AccountId, - value: Self::Balance, - ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()) - } - Reserves::::mutate(who, |reserves| -> (Self::NegativeImbalance, Self::Balance) { - match reserves.binary_search_by_key(id, |data| data.id) { - Ok(index) => { - let to_change = cmp::min(reserves[index].amount, value); - - let (imb, remain) = - >::slash_reserved(who, to_change); - - // remain should always be zero but just to be defensive here. - let actual = to_change.defensive_saturating_sub(remain); - - // `actual <= to_change` and `to_change <= amount`; qed; - reserves[index].amount -= actual; - - Self::deposit_event(Event::Slashed { who: who.clone(), amount: actual }); - (imb, value - actual) - }, - Err(_) => (NegativeImbalance::zero(), value), + if prev_frozen > after_frozen { + let amount = prev_frozen.saturating_sub(after_frozen); + Self::deposit_event(Event::Unlocked { who: who.clone(), amount }); + } else if after_frozen > prev_frozen { + let amount = after_frozen.saturating_sub(prev_frozen); + Self::deposit_event(Event::Locked { who: who.clone(), amount }); } - }) - } - - /// Move the reserved balance of one account into the balance of another, according to `status`. - /// If `status` is `Reserved`, the balance will be reserved with given `id`. - /// - /// Is a no-op if: - /// - the value to be moved is zero; or - /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. - fn repatriate_reserved_named( - id: &Self::ReserveIdentifier, - slashed: &T::AccountId, - beneficiary: &T::AccountId, - value: Self::Balance, - status: Status, - ) -> Result { - if value.is_zero() { - return Ok(Zero::zero()) } - if slashed == beneficiary { - return match status { - Status::Free => Ok(Self::unreserve_named(id, slashed, value)), - Status::Reserved => - Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), + /// Update the account entry for `who`, given the locks. 
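+ ///
+ /// The frozen amount is recomputed as the maximum across all of the account's remaining
+ /// locks and the given `freezes`; a `Thawed` or `Frozen` event is emitted when the frozen
+ /// amount decreases or increases as a result.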
+ pub(crate) fn update_freezes( + who: &T::AccountId, + freezes: BoundedSlice, T::MaxFreezes>, + ) -> DispatchResult { + let mut prev_frozen = Zero::zero(); + let mut after_frozen = Zero::zero(); + let (_, maybe_dust) = Self::mutate_account(who, |b| { + prev_frozen = b.frozen; + b.frozen = Zero::zero(); + for l in Locks::::get(who).iter() { + b.frozen = b.frozen.max(l.amount); + } + for l in freezes.iter() { + b.frozen = b.frozen.max(l.amount); + } + after_frozen = b.frozen; + })?; + debug_assert!(maybe_dust.is_none(), "Not altering main balance; qed"); + if freezes.is_empty() { + Freezes::::remove(who); + } else { + Freezes::::insert(who, freezes); + } + if prev_frozen > after_frozen { + let amount = prev_frozen.saturating_sub(after_frozen); + Self::deposit_event(Event::Thawed { who: who.clone(), amount }); + } else if after_frozen > prev_frozen { + let amount = after_frozen.saturating_sub(prev_frozen); + Self::deposit_event(Event::Frozen { who: who.clone(), amount }); } + Ok(()) } - Reserves::::try_mutate(slashed, |reserves| -> Result { - match reserves.binary_search_by_key(id, |data| data.id) { - Ok(index) => { - let to_change = cmp::min(reserves[index].amount, value); - - let actual = if status == Status::Reserved { - // make it the reserved under same identifier - Reserves::::try_mutate( - beneficiary, - |reserves| -> Result { - match reserves.binary_search_by_key(id, |data| data.id) { - Ok(index) => { - let remain = - >::repatriate_reserved( - slashed, - beneficiary, - to_change, - status, - )?; - - // remain should always be zero but just to be defensive - // here. - let actual = to_change.defensive_saturating_sub(remain); - - // this add can't overflow but just to be defensive. - reserves[index].amount = - reserves[index].amount.defensive_saturating_add(actual); - - Ok(actual) - }, - Err(index) => { - let remain = - >::repatriate_reserved( - slashed, - beneficiary, - to_change, - status, - )?; - - // remain should always be zero but just to be defensive - // here - let actual = to_change.defensive_saturating_sub(remain); - - reserves - .try_insert( - index, - ReserveData { id: *id, amount: actual }, - ) - .map_err(|_| Error::::TooManyReserves)?; - - Ok(actual) - }, - } - }, - )? - } else { - let remain = >::repatriate_reserved( - slashed, - beneficiary, - to_change, - status, - )?; - - // remain should always be zero but just to be defensive here - to_change.defensive_saturating_sub(remain) - }; + /// Move the reserved balance of one account into the balance of another, according to + /// `status`. This will respect freezes/locks only if `fortitude` is `Polite`. + /// + /// Is a no-op if the value to be moved is zero. + /// + /// NOTE: returns actual amount of transferred value in `Ok` case. 
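+ ///
+ /// With `Precision::BestEffort`, the moved amount is capped at the reducible balance on
+ /// hold of `slashed`; with `Precision::Exact`, the call fails with
+ /// `TokenError::FundsUnavailable` if the full `value` cannot be moved.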
+ pub(crate) fn do_transfer_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: T::Balance, + precision: Precision, + fortitude: Fortitude, + status: Status, + ) -> Result { + if value.is_zero() { + return Ok(Zero::zero()) + } - // `actual <= to_change` and `to_change <= amount`; qed; - reserves[index].amount -= actual; + let max = >::reducible_total_balance_on_hold( + slashed, fortitude, + ); + let actual = match precision { + Precision::BestEffort => value.min(max), + Precision::Exact => value, + }; + ensure!(actual <= max, TokenError::FundsUnavailable); + if slashed == beneficiary { + return match status { + Status::Free => Ok(actual.saturating_sub(Self::unreserve(slashed, actual))), + Status::Reserved => Ok(actual), + } + } - Ok(value - actual) + let ((_, maybe_dust_1), maybe_dust_2) = Self::try_mutate_account( + beneficiary, + |to_account, is_new| -> Result<((), Option), DispatchError> { + ensure!(!is_new, Error::::DeadAccount); + Self::try_mutate_account(slashed, |from_account, _| -> DispatchResult { + match status { + Status::Free => + to_account.free = to_account + .free + .checked_add(&actual) + .ok_or(ArithmeticError::Overflow)?, + Status::Reserved => + to_account.reserved = to_account + .reserved + .checked_add(&actual) + .ok_or(ArithmeticError::Overflow)?, + } + from_account.reserved.saturating_reduce(actual); + Ok(()) + }) }, - Err(_) => Ok(value), - } - }) - } -} + )?; -impl, I: 'static> LockableCurrency for Pallet -where - T::Balance: MaybeSerializeDeserialize + Debug, -{ - type Moment = T::BlockNumber; - - type MaxLocks = T::MaxLocks; - - // Set a lock on the balance of `who`. - // Is a no-op if lock amount is zero or `reasons` `is_none()`. - fn set_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - if amount.is_zero() || reasons.is_empty() { - return - } - let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who) - .into_iter() - .filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) }) - .collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - Self::update_locks(who, &locks[..]); - } + if let Some(dust) = maybe_dust_1 { + >::handle_raw_dust(dust); + } + if let Some(dust) = maybe_dust_2 { + >::handle_raw_dust(dust); + } - // Extend a lock on the balance of `who`. - // Is a no-op if lock amount is zero or `reasons` `is_none()`. 
- fn extend_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - if amount.is_zero() || reasons.is_empty() { - return - } - let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who) - .into_iter() - .filter_map(|l| { - if l.id == id { - new_lock.take().map(|nl| BalanceLock { - id: l.id, - amount: l.amount.max(nl.amount), - reasons: l.reasons | nl.reasons, - }) - } else { - Some(l) - } - }) - .collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) + Self::deposit_event(Event::ReserveRepatriated { + from: slashed.clone(), + to: beneficiary.clone(), + amount: actual, + destination_status: status, + }); + Ok(actual) } - Self::update_locks(who, &locks[..]); - } - - fn remove_lock(id: LockIdentifier, who: &T::AccountId) { - let mut locks = Self::locks(who); - locks.retain(|l| l.id != id); - Self::update_locks(who, &locks[..]); } } diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs deleted file mode 100644 index b4233a6c3a85c..0000000000000 --- a/frame/balances/src/tests.rs +++ /dev/null @@ -1,1460 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Macro for creating the tests for the module. - -#![cfg(test)] - -#[macro_export] -macro_rules! decl_tests { - ($test:ty, $ext_builder:ty, $existential_deposit:expr) => { - - use crate::*; - use sp_runtime::{ArithmeticError, TokenError, FixedPointNumber, traits::{SignedExtension, BadOrigin}}; - use frame_support::{ - assert_noop, assert_storage_noop, assert_ok, assert_err, - traits::{ - LockableCurrency, LockIdentifier, WithdrawReasons, - Currency, ReservableCurrency, ExistenceRequirement::AllowDeath - } - }; - use pallet_transaction_payment::{ChargeTransactionPayment, Multiplier}; - use frame_system::RawOrigin; - - const ID_1: LockIdentifier = *b"1 "; - const ID_2: LockIdentifier = *b"2 "; - - pub const CALL: &<$test as frame_system::Config>::RuntimeCall = - &RuntimeCall::Balances(pallet_balances::Call::transfer { dest: 0, value: 0 }); - - /// create a transaction info struct from weight. Handy to avoid building the whole struct. 
- pub fn info_from_weight(w: Weight) -> DispatchInfo { - DispatchInfo { weight: w, ..Default::default() } - } - - fn events() -> Vec { - let evt = System::events().into_iter().map(|evt| evt.event).collect::>(); - - System::reset_events(); - - evt - } - - #[test] - fn basic_locking_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - Balances::set_lock(ID_1, &1, 9, WithdrawReasons::all()); - assert_noop!( - >::transfer(&1, &2, 5, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - }); - } - - #[test] - fn account_should_be_reaped() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); - // Check that the account is dead. - assert!(!frame_system::Account::::contains_key(&1)); - }); - } - - #[test] - fn reap_failed_due_to_provider_and_consumer() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - // SCENARIO: only one provider and there are remaining consumers. - assert_ok!(System::inc_consumers(&1)); - assert!(!System::can_dec_provider(&1)); - assert_noop!( - >::transfer(&1, &2, 10, AllowDeath), - Error::<$test, _>::KeepAlive - ); - assert!(System::account_exists(&1)); - assert_eq!(Balances::free_balance(1), 10); - - // SCENARIO: more than one provider, but will not kill account due to other provider. - assert_eq!(System::inc_providers(&1), frame_system::IncRefStatus::Existed); - assert_eq!(System::providers(&1), 2); - assert!(System::can_dec_provider(&1)); - assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); - assert_eq!(System::providers(&1), 1); - assert!(System::account_exists(&1)); - assert_eq!(Balances::free_balance(1), 0); - }); - } - - #[test] - fn partial_locking_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); - }); - } - - #[test] - fn lock_removal_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); - Balances::remove_lock(ID_1, &1); - assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); - }); - } - - #[test] - fn lock_replacement_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); - Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); - }); - } - - #[test] - fn double_locking_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); - Balances::set_lock(ID_2, &1, 5, WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); - }); - } - - #[test] - fn combination_locking_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::empty()); - Balances::set_lock(ID_2, &1, 0, WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); - }); - } - - #[test] - fn lock_value_extension_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, 5, 
WithdrawReasons::all()); - assert_noop!( - >::transfer(&1, &2, 6, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - Balances::extend_lock(ID_1, &1, 2, WithdrawReasons::all()); - assert_noop!( - >::transfer(&1, &2, 6, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - Balances::extend_lock(ID_1, &1, 8, WithdrawReasons::all()); - assert_noop!( - >::transfer(&1, &2, 3, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - }); - } - - #[test] - fn lock_reasons_should_work() { - <$ext_builder>::default() - .existential_deposit(1) - .monied(true) - .build() - .execute_with(|| { - pallet_transaction_payment::NextFeeMultiplier::<$test>::put( - Multiplier::saturating_from_integer(1) - ); - Balances::set_lock(ID_1, &1, 10, WithdrawReasons::RESERVE); - assert_noop!( - >::transfer(&1, &2, 1, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - assert_noop!( - >::reserve(&1, 1), - Error::<$test, _>::LiquidityRestrictions, - ); - assert!( as SignedExtension>::pre_dispatch( - ChargeTransactionPayment::from(1), - &1, - CALL, - &info_from_weight(Weight::from_parts(1, 0)), - 1, - ).is_err()); - assert_ok!( as SignedExtension>::pre_dispatch( - ChargeTransactionPayment::from(0), - &1, - CALL, - &info_from_weight(Weight::from_parts(1, 0)), - 1, - )); - - Balances::set_lock(ID_1, &1, 10, WithdrawReasons::TRANSACTION_PAYMENT); - assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); - assert_ok!(>::reserve(&1, 1)); - assert!( as SignedExtension>::pre_dispatch( - ChargeTransactionPayment::from(1), - &1, - CALL, - &info_from_weight(Weight::from_parts(1, 0)), - 1, - ).is_err()); - assert!( as SignedExtension>::pre_dispatch( - ChargeTransactionPayment::from(0), - &1, - CALL, - &info_from_weight(Weight::from_parts(1, 0)), - 1, - ).is_err()); - }); - } - - #[test] - fn lock_block_number_extension_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, 10, WithdrawReasons::all()); - assert_noop!( - >::transfer(&1, &2, 6, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::all()); - assert_noop!( - >::transfer(&1, &2, 6, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - System::set_block_number(2); - Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::all()); - assert_noop!( - >::transfer(&1, &2, 3, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - }); - } - - #[test] - fn lock_reasons_extension_should_work() { - <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, 10, WithdrawReasons::TRANSFER); - assert_noop!( - >::transfer(&1, &2, 6, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::empty()); - assert_noop!( - >::transfer(&1, &2, 6, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::RESERVE); - assert_noop!( - >::transfer(&1, &2, 6, AllowDeath), - Error::<$test, _>::LiquidityRestrictions - ); - }); - } - - #[test] - fn default_indexing_on_new_accounts_should_not_work2() { - <$ext_builder>::default() - .existential_deposit(10) - .monied(true) - .build() - .execute_with(|| { - // account 5 should not exist - // ext_deposit is 10, value is 9, not satisfies for ext_deposit - assert_noop!( - Balances::transfer(Some(1).into(), 5, 9), - Error::<$test, _>::ExistentialDeposit, - ); - assert_eq!(Balances::free_balance(1), 100); 
- }); - } - - #[test] - fn reserved_balance_should_prevent_reclaim_count() { - <$ext_builder>::default() - .existential_deposit(256 * 1) - .monied(true) - .build() - .execute_with(|| { - System::inc_account_nonce(&2); - assert_eq!(Balances::total_balance(&2), 256 * 20); - - assert_ok!(Balances::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved - assert_eq!(Balances::free_balance(2), 255); // "free" account deleted." - assert_eq!(Balances::total_balance(&2), 256 * 20); // reserve still exists. - assert_eq!(System::account_nonce(&2), 1); - - // account 4 tries to take index 1 for account 5. - assert_ok!(Balances::transfer(Some(4).into(), 5, 256 * 1 + 0x69)); - assert_eq!(Balances::total_balance(&5), 256 * 1 + 0x69); - - assert!(Balances::slash(&2, 256 * 19 + 2).1.is_zero()); // account 2 gets slashed - // "reserve" account reduced to 255 (below ED) so account deleted - assert_eq!(Balances::total_balance(&2), 0); - assert_eq!(System::account_nonce(&2), 0); // nonce zero - - // account 4 tries to take index 1 again for account 6. - assert_ok!(Balances::transfer(Some(4).into(), 6, 256 * 1 + 0x69)); - assert_eq!(Balances::total_balance(&6), 256 * 1 + 0x69); - }); - } - - #[test] - fn reward_should_work() { - <$ext_builder>::default().monied(true).build().execute_with(|| { - assert_eq!(Balances::total_balance(&1), 10); - assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Deposit { who: 1, amount: 10 })); - assert_eq!(Balances::total_balance(&1), 20); - assert_eq!(>::get(), 120); - }); - } - - #[test] - fn dust_account_removal_should_work() { - <$ext_builder>::default() - .existential_deposit(100) - .monied(true) - .build() - .execute_with(|| { - System::inc_account_nonce(&2); - assert_eq!(System::account_nonce(&2), 1); - assert_eq!(Balances::total_balance(&2), 2000); - // index 1 (account 2) becomes zombie - assert_ok!(Balances::transfer(Some(2).into(), 5, 1901)); - assert_eq!(Balances::total_balance(&2), 0); - assert_eq!(Balances::total_balance(&5), 1901); - assert_eq!(System::account_nonce(&2), 0); - }); - } - - #[test] - fn balance_works() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 42); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { who: 1, amount: 42 })); - assert_eq!(Balances::free_balance(1), 42); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::total_balance(&1), 42); - assert_eq!(Balances::free_balance(2), 0); - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(Balances::total_balance(&2), 0); - }); - } - - #[test] - fn balance_transfer_works() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::transfer(Some(1).into(), 2, 69)); - assert_eq!(Balances::total_balance(&1), 42); - assert_eq!(Balances::total_balance(&2), 69); - }); - } - - #[test] - fn force_transfer_works() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_noop!( - Balances::force_transfer(Some(2).into(), 1, 2, 69), - BadOrigin, - ); - assert_ok!(Balances::force_transfer(RawOrigin::Root.into(), 1, 2, 69)); - assert_eq!(Balances::total_balance(&1), 42); - assert_eq!(Balances::total_balance(&2), 69); - }); - } - - #[test] - fn reserving_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - - 
assert_eq!(Balances::total_balance(&1), 111); - assert_eq!(Balances::free_balance(1), 111); - assert_eq!(Balances::reserved_balance(1), 0); - - assert_ok!(Balances::reserve(&1, 69)); - - assert_eq!(Balances::total_balance(&1), 111); - assert_eq!(Balances::free_balance(1), 42); - assert_eq!(Balances::reserved_balance(1), 69); - }); - } - - #[test] - fn balance_transfer_when_reserved_should_not_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 69)); - assert_noop!( - Balances::transfer(Some(1).into(), 2, 69), - Error::<$test, _>::InsufficientBalance, - ); - }); - } - - #[test] - fn deducting_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 69)); - assert_eq!(Balances::free_balance(1), 42); - }); - } - - #[test] - fn refunding_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 42); - assert_ok!(Balances::mutate_account(&1, |a| a.reserved = 69)); - Balances::unreserve(&1, 69); - assert_eq!(Balances::free_balance(1), 111); - assert_eq!(Balances::reserved_balance(1), 0); - }); - } - - #[test] - fn slashing_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 69)); - assert!(Balances::slash(&1, 69).1.is_zero()); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 42); - assert_eq!(>::get(), 42); - }); - } - - #[test] - fn withdrawing_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&2, 111); - let _ = Balances::withdraw( - &2, 11, WithdrawReasons::TRANSFER, ExistenceRequirement::KeepAlive - ); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Withdraw { who: 2, amount: 11 })); - assert_eq!(Balances::free_balance(2), 100); - assert_eq!(>::get(), 100); - }); - } - - #[test] - fn slashing_incomplete_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 42); - assert_ok!(Balances::reserve(&1, 21)); - assert_eq!(Balances::slash(&1, 69).1, 27); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(>::get(), 0); - }); - } - - #[test] - fn unreserving_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 111)); - Balances::unreserve(&1, 42); - assert_eq!(Balances::reserved_balance(1), 69); - assert_eq!(Balances::free_balance(1), 42); - }); - } - - #[test] - fn slashing_reserved_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 111)); - assert_eq!(Balances::slash_reserved(&1, 42).1, 0); - assert_eq!(Balances::reserved_balance(1), 69); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(>::get(), 69); - }); - } - - #[test] - fn slashing_incomplete_reserved_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 42)); - assert_eq!(Balances::slash_reserved(&1, 69).1, 27); - assert_eq!(Balances::free_balance(1), 69); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(>::get(), 
69); - }); - } - - #[test] - fn repatriating_reserved_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 110); - let _ = Balances::deposit_creating(&2, 1); - assert_ok!(Balances::reserve(&1, 110)); - assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Free), 0); - System::assert_last_event( - RuntimeEvent::Balances(crate::Event::ReserveRepatriated { from: 1, to: 2, amount: 41, destination_status: Status::Free }) - ); - assert_eq!(Balances::reserved_balance(1), 69); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(Balances::free_balance(2), 42); - }); - } - - #[test] - fn transferring_reserved_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 110); - let _ = Balances::deposit_creating(&2, 1); - assert_ok!(Balances::reserve(&1, 110)); - assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Reserved), 0); - assert_eq!(Balances::reserved_balance(1), 69); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(2), 41); - assert_eq!(Balances::free_balance(2), 1); - }); - } - - #[test] - fn transferring_reserved_balance_to_yourself_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 110); - assert_ok!(Balances::reserve(&1, 50)); - assert_ok!(Balances::repatriate_reserved(&1, &1, 50, Status::Free), 0); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance(1), 0); - - assert_ok!(Balances::reserve(&1, 50)); - assert_ok!(Balances::repatriate_reserved(&1, &1, 60, Status::Free), 10); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance(1), 0); - }); - } - - #[test] - fn transferring_reserved_balance_to_nonexistent_should_fail() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 111)); - assert_noop!(Balances::repatriate_reserved(&1, &2, 42, Status::Free), Error::<$test, _>::DeadAccount); - }); - } - - #[test] - fn transferring_incomplete_reserved_balance_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 110); - let _ = Balances::deposit_creating(&2, 1); - assert_ok!(Balances::reserve(&1, 41)); - assert_ok!(Balances::repatriate_reserved(&1, &2, 69, Status::Free), 28); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(1), 69); - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(Balances::free_balance(2), 42); - }); - } - - #[test] - fn transferring_too_high_value_should_not_panic() { - <$ext_builder>::default().build().execute_with(|| { - Balances::make_free_balance_be(&1, u64::MAX); - Balances::make_free_balance_be(&2, 1); - - assert_err!( - Balances::transfer(Some(1).into(), 2, u64::MAX), - ArithmeticError::Overflow, - ); - - assert_eq!(Balances::free_balance(1), u64::MAX); - assert_eq!(Balances::free_balance(2), 1); - }); - } - - #[test] - fn account_create_on_free_too_low_with_other() { - <$ext_builder>::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 100); - assert_eq!(>::get(), 100); - - // No-op. 
- let _ = Balances::deposit_creating(&2, 50); - assert_eq!(Balances::free_balance(2), 0); - assert_eq!(>::get(), 100); - }) - } - - #[test] - fn account_create_on_free_too_low() { - <$ext_builder>::default().existential_deposit(100).build().execute_with(|| { - // No-op. - let _ = Balances::deposit_creating(&2, 50); - assert_eq!(Balances::free_balance(2), 0); - assert_eq!(>::get(), 0); - }) - } - - #[test] - fn account_removal_on_free_too_low() { - <$ext_builder>::default().existential_deposit(100).build().execute_with(|| { - assert_eq!(>::get(), 0); - - // Setup two accounts with free balance above the existential threshold. - let _ = Balances::deposit_creating(&1, 110); - let _ = Balances::deposit_creating(&2, 110); - - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::free_balance(2), 110); - assert_eq!(>::get(), 220); - - // Transfer funds from account 1 of such amount that after this transfer - // the balance of account 1 will be below the existential threshold. - // This should lead to the removal of all balance of this account. - assert_ok!(Balances::transfer(Some(1).into(), 2, 20)); - - // Verify free balance removal of account 1. - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::free_balance(2), 130); - - // Verify that TotalIssuance tracks balance removal when free balance is too low. - assert_eq!(>::get(), 130); - }); - } - - #[test] - fn burn_must_work() { - <$ext_builder>::default().monied(true).build().execute_with(|| { - let init_total_issuance = Balances::total_issuance(); - let imbalance = Balances::burn(10); - assert_eq!(Balances::total_issuance(), init_total_issuance - 10); - drop(imbalance); - assert_eq!(Balances::total_issuance(), init_total_issuance); - }); - } - - #[test] - fn transfer_keep_alive_works() { - <$ext_builder>::default().existential_deposit(1).build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 100); - assert_noop!( - Balances::transfer_keep_alive(Some(1).into(), 2, 100), - Error::<$test, _>::KeepAlive - ); - assert_eq!(Balances::total_balance(&1), 100); - assert_eq!(Balances::total_balance(&2), 0); - }); - } - - #[test] - #[should_panic = "the balance of any account should always be at least the existential deposit."] - fn cannot_set_genesis_value_below_ed() { - ($existential_deposit).with(|v| *v.borrow_mut() = 11); - let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); - let _ = pallet_balances::GenesisConfig::<$test> { - balances: vec![(1, 10)], - }.assimilate_storage(&mut t).unwrap(); - } - - #[test] - #[should_panic = "duplicate balances in genesis."] - fn cannot_set_genesis_value_twice() { - let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); - let _ = pallet_balances::GenesisConfig::<$test> { - balances: vec![(1, 10), (2, 20), (1, 15)], - }.assimilate_storage(&mut t).unwrap(); - } - - #[test] - fn dust_moves_between_free_and_reserved() { - <$ext_builder>::default() - .existential_deposit(100) - .build() - .execute_with(|| { - // Set balance to free and reserved at the existential deposit - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); - // Check balance - assert_eq!(Balances::free_balance(1), 100); - assert_eq!(Balances::reserved_balance(1), 0); - - // Reserve some free balance - assert_ok!(Balances::reserve(&1, 50)); - // Check balance, the account should be ok. 
- assert_eq!(Balances::free_balance(1), 50); - assert_eq!(Balances::reserved_balance(1), 50); - - // Reserve the rest of the free balance - assert_ok!(Balances::reserve(&1, 50)); - // Check balance, the account should be ok. - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 100); - - // Unreserve everything - Balances::unreserve(&1, 100); - // Check balance, all 100 should move to free_balance - assert_eq!(Balances::free_balance(1), 100); - assert_eq!(Balances::reserved_balance(1), 0); - }); - } - - #[test] - fn account_deleted_when_just_dust() { - <$ext_builder>::default() - .existential_deposit(100) - .build() - .execute_with(|| { - // Set balance to free and reserved at the existential deposit - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 50, 50)); - // Check balance - assert_eq!(Balances::free_balance(1), 50); - assert_eq!(Balances::reserved_balance(1), 50); - - // Reserve some free balance - let res = Balances::slash(&1, 1); - assert_eq!(res, (NegativeImbalance::new(1), 0)); - - // The account should be dead. - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 0); - }); - } - - #[test] - fn emit_events_with_reserve_and_unreserve() { - <$ext_builder>::default() - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&1, 100); - - System::set_block_number(2); - assert_ok!(Balances::reserve(&1, 10)); - - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Reserved { who: 1, amount: 10 })); - - System::set_block_number(3); - assert!(Balances::unreserve(&1, 5).is_zero()); - - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); - - System::set_block_number(4); - assert_eq!(Balances::unreserve(&1, 6), 1); - - // should only unreserve 5 - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); - }); - } - - #[test] - fn emit_events_with_existential_deposit() { - <$ext_builder>::default() - .existential_deposit(100) - .build() - .execute_with(|| { - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); - - assert_eq!( - events(), - [ - RuntimeEvent::System(system::Event::NewAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), - ] - ); - - let res = Balances::slash(&1, 1); - assert_eq!(res, (NegativeImbalance::new(1), 0)); - - assert_eq!( - events(), - [ - RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 99 }), - RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 1 }), - ] - ); - }); - } - - #[test] - fn emit_events_with_no_existential_deposit_suicide() { - <$ext_builder>::default() - .existential_deposit(1) - .build() - .execute_with(|| { - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); - - assert_eq!( - events(), - [ - RuntimeEvent::System(system::Event::NewAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), - ] - ); - - let res = Balances::slash(&1, 100); - assert_eq!(res, (NegativeImbalance::new(100), 0)); - - assert_eq!( - events(), - [ - RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 100 
}), - ] - ); - }); - } - - #[test] - fn slash_loop_works() { - <$ext_builder>::default() - .existential_deposit(100) - .build() - .execute_with(|| { - /* User has no reference counter, so they can die in these scenarios */ - - // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); - // Slashed completed in full - assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); - // Account is still alive - assert!(System::account_exists(&1)); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 900 })); - - // SCENARIO: Slash will kill account because not enough balance left. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); - // Slashed completed in full - assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(950), 0)); - // Account is killed - assert!(!System::account_exists(&1)); - - // SCENARIO: Over-slash will kill account, and report missing slash amount. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); - // Slashed full free_balance, and reports 300 not slashed - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1000), 300)); - // Account is dead - assert!(!System::account_exists(&1)); - - // SCENARIO: Over-slash can take from reserved, but keep alive. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 400)); - // Slashed full free_balance and 300 of reserved balance - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Over-slash can take from reserved, and kill. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 350)); - // Slashed full free_balance and 300 of reserved balance - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); - // Account is dead because 50 reserved balance is not enough to keep alive - assert!(!System::account_exists(&1)); - - // SCENARIO: Over-slash can take as much as possible from reserved, kill, and report missing amount. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 250)); - // Slashed full free_balance and 300 of reserved balance - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); - // Account is super dead - assert!(!System::account_exists(&1)); - - /* User will now have a reference counter on them, keeping them alive in these scenarios */ - - // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); - assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full - assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Slash will take as much as possible without killing account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); - // Slashed completed in full - assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(900), 50)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Over-slash will not kill account, and report missing slash amount. 
- assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); - // Slashed full free_balance minus ED, and reports 400 not slashed - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(900), 400)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Over-slash can take from reserved, but keep alive. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 400)); - // Slashed full free_balance and 300 of reserved balance - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Over-slash can take from reserved, but keep alive. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 350)); - // Slashed full free_balance and 250 of reserved balance to leave ED - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Over-slash can take as much as possible from reserved and report missing amount. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 250)); - // Slashed full free_balance and 300 of reserved balance - assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1150), 150)); - // Account is still alive - assert!(System::account_exists(&1)); - - // Slash on non-existent account is okay. - assert_eq!(Balances::slash(&12345, 1_300), (NegativeImbalance::new(0), 1300)); - }); - } - - #[test] - fn slash_reserved_loop_works() { - <$ext_builder>::default() - .existential_deposit(100) - .build() - .execute_with(|| { - /* User has no reference counter, so they can die in these scenarios */ - - // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); - // Slashed completed in full - assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Slash would kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); - // Slashed completed in full - assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(1_000), 0)); - // Account is dead - assert!(!System::account_exists(&1)); - - // SCENARIO: Over-slash would kill account, and reports left over slash. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); - // Slashed completed in full - assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); - // Account is dead - assert!(!System::account_exists(&1)); - - // SCENARIO: Over-slash does not take from free balance. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 300, 1_000)); - // Slashed completed in full - assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); - // Account is alive because of free balance - assert!(System::account_exists(&1)); - - /* User has a reference counter, so they cannot die */ - - // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); - assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full - assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Slash as much as possible without killing. 
- assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); - // Slashed as much as possible - assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(950), 50)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Over-slash reports correctly, where reserved is needed to keep alive. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); - // Slashed as much as possible - assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(950), 350)); - // Account is still alive - assert!(System::account_exists(&1)); - - // SCENARIO: Over-slash reports correctly, where full reserved is removed. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 1_000)); - // Slashed as much as possible - assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); - // Account is still alive - assert!(System::account_exists(&1)); - - // Slash on non-existent account is okay. - assert_eq!(Balances::slash_reserved(&12345, 1_300), (NegativeImbalance::new(0), 1300)); - }); - } - - #[test] - fn operations_on_dead_account_should_not_change_state() { - // These functions all use `mutate_account` which may introduce a storage change when - // the account never existed to begin with, and shouldn't exist in the end. - <$ext_builder>::default() - .existential_deposit(0) - .build() - .execute_with(|| { - assert!(!frame_system::Account::::contains_key(&1337)); - - // Unreserve - assert_storage_noop!(assert_eq!(Balances::unreserve(&1337, 42), 42)); - // Reserve - assert_noop!(Balances::reserve(&1337, 42), Error::::InsufficientBalance); - // Slash Reserve - assert_storage_noop!(assert_eq!(Balances::slash_reserved(&1337, 42).1, 42)); - // Repatriate Reserve - assert_noop!(Balances::repatriate_reserved(&1337, &1338, 42, Status::Free), Error::::DeadAccount); - // Slash - assert_storage_noop!(assert_eq!(Balances::slash(&1337, 42).1, 42)); - }); - } - - #[test] - fn transfer_keep_alive_all_free_succeed() { - <$ext_builder>::default() - .existential_deposit(100) - .build() - .execute_with(|| { - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 100, 100)); - assert_ok!(Balances::transfer_keep_alive(Some(1).into(), 2, 100)); - assert_eq!(Balances::total_balance(&1), 100); - assert_eq!(Balances::total_balance(&2), 100); - }); - } - - #[test] - fn transfer_all_works() { - <$ext_builder>::default() - .existential_deposit(100) - .build() - .execute_with(|| { - // setup - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 0)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); - // transfer all and allow death - assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); - assert_eq!(Balances::total_balance(&1), 0); - assert_eq!(Balances::total_balance(&2), 200); - - // setup - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 0)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); - // transfer all and keep alive - assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); - assert_eq!(Balances::total_balance(&1), 100); - assert_eq!(Balances::total_balance(&2), 100); - - // setup - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 10)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); - // transfer all and allow death w/ reserved - assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); - assert_eq!(Balances::total_balance(&1), 0); - assert_eq!(Balances::total_balance(&2), 200); - - // setup - 
assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 10)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); - // transfer all and keep alive w/ reserved - assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); - assert_eq!(Balances::total_balance(&1), 100); - assert_eq!(Balances::total_balance(&2), 110); - }); - } - - #[test] - fn named_reserve_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - - let id_1 = [1u8; 8]; - let id_2 = [2u8; 8]; - let id_3 = [3u8; 8]; - - // reserve - - assert_noop!(Balances::reserve_named(&id_1, &1, 112), Error::::InsufficientBalance); - - assert_ok!(Balances::reserve_named(&id_1, &1, 12)); - - assert_eq!(Balances::reserved_balance(1), 12); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 12); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); - - assert_ok!(Balances::reserve_named(&id_1, &1, 2)); - - assert_eq!(Balances::reserved_balance(1), 14); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); - - assert_ok!(Balances::reserve_named(&id_2, &1, 23)); - - assert_eq!(Balances::reserved_balance(1), 37); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); - - assert_ok!(Balances::reserve(&1, 34)); - - assert_eq!(Balances::reserved_balance(1), 71); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); - - assert_eq!(Balances::total_balance(&1), 111); - assert_eq!(Balances::free_balance(1), 40); - - assert_noop!(Balances::reserve_named(&id_3, &1, 2), Error::::TooManyReserves); - - // unreserve - - assert_eq!(Balances::unreserve_named(&id_1, &1, 10), 0); - - assert_eq!(Balances::reserved_balance(1), 61); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 4); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); - - assert_eq!(Balances::unreserve_named(&id_1, &1, 5), 1); - - assert_eq!(Balances::reserved_balance(1), 57); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); - - assert_eq!(Balances::unreserve_named(&id_2, &1, 3), 0); - - assert_eq!(Balances::reserved_balance(1), 54); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); - - assert_eq!(Balances::total_balance(&1), 111); - assert_eq!(Balances::free_balance(1), 57); - - // slash_reserved_named - - assert_ok!(Balances::reserve_named(&id_1, &1, 10)); - - assert_eq!(Balances::slash_reserved_named(&id_1, &1, 25).1, 15); - - assert_eq!(Balances::reserved_balance(1), 54); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); - assert_eq!(Balances::total_balance(&1), 101); - - assert_eq!(Balances::slash_reserved_named(&id_2, &1, 5).1, 0); - - assert_eq!(Balances::reserved_balance(1), 49); - assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); - assert_eq!(Balances::total_balance(&1), 96); - - // repatriate_reserved_named - - let _ = Balances::deposit_creating(&2, 100); - - assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Status::Reserved).unwrap(), 0); - - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); - 
assert_eq!(Balances::reserved_balance_named(&id_2, &2), 10); - assert_eq!(Balances::reserved_balance(&2), 10); - - assert_eq!(Balances::repatriate_reserved_named(&id_2, &2, &1, 11, Status::Reserved).unwrap(), 1); - - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); - assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); - assert_eq!(Balances::reserved_balance(&2), 0); - - assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Status::Free).unwrap(), 0); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); - assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); - assert_eq!(Balances::free_balance(&2), 110); - - // repatriate_reserved_named to self - - assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 10, Status::Reserved).unwrap(), 5); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); - - assert_eq!(Balances::free_balance(&1), 47); - - assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 15, Status::Free).unwrap(), 10); - assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); - - assert_eq!(Balances::free_balance(&1), 52); - }); - } - - #[test] - fn reserved_named_to_yourself_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 110); - - let id = [1u8; 8]; - - assert_ok!(Balances::reserve_named(&id, &1, 50)); - assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 50, Status::Free), 0); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance_named(&id, &1), 0); - - assert_ok!(Balances::reserve_named(&id, &1, 50)); - assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 60, Status::Free), 10); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance_named(&id, &1), 0); - }); - } - - #[test] - fn ensure_reserved_named_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - - let id = [1u8; 8]; - - assert_ok!(Balances::ensure_reserved_named(&id, &1, 15)); - assert_eq!(Balances::reserved_balance_named(&id, &1), 15); - - assert_ok!(Balances::ensure_reserved_named(&id, &1, 10)); - assert_eq!(Balances::reserved_balance_named(&id, &1), 10); - - assert_ok!(Balances::ensure_reserved_named(&id, &1, 20)); - assert_eq!(Balances::reserved_balance_named(&id, &1), 20); - }); - } - - #[test] - fn unreserve_all_named_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - - let id = [1u8; 8]; - - assert_ok!(Balances::reserve_named(&id, &1, 15)); - - assert_eq!(Balances::unreserve_all_named(&id, &1), 15); - assert_eq!(Balances::reserved_balance_named(&id, &1), 0); - assert_eq!(Balances::free_balance(&1), 111); - - assert_eq!(Balances::unreserve_all_named(&id, &1), 0); - }); - } - - #[test] - fn slash_all_reserved_named_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - - let id = [1u8; 8]; - - assert_ok!(Balances::reserve_named(&id, &1, 15)); - - assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 15); - assert_eq!(Balances::reserved_balance_named(&id, &1), 0); - assert_eq!(Balances::free_balance(&1), 96); - - assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 0); - }); - } - - #[test] - fn repatriate_all_reserved_named_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - let _ = Balances::deposit_creating(&2, 10); - let _ = 
Balances::deposit_creating(&3, 10); - - let id = [1u8; 8]; - - assert_ok!(Balances::reserve_named(&id, &1, 15)); - - assert_ok!(Balances::repatriate_all_reserved_named(&id, &1, &2, Status::Reserved)); - assert_eq!(Balances::reserved_balance_named(&id, &1), 0); - assert_eq!(Balances::reserved_balance_named(&id, &2), 15); - - assert_ok!(Balances::repatriate_all_reserved_named(&id, &2, &3, Status::Free)); - assert_eq!(Balances::reserved_balance_named(&id, &2), 0); - assert_eq!(Balances::free_balance(&3), 25); - }); - } - - #[test] - fn set_balance_handles_killing_account() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(frame_system::Pallet::::inc_consumers(&1)); - assert_noop!( - Balances::set_balance(RuntimeOrigin::root(), 1, 0, 0), - DispatchError::ConsumerRemaining, - ); - }); - } - - #[test] - fn set_balance_handles_total_issuance() { - <$ext_builder>::default().build().execute_with(|| { - let old_total_issuance = Balances::total_issuance(); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1337, 69, 42)); - assert_eq!(Balances::total_issuance(), old_total_issuance + 69 + 42); - assert_eq!(Balances::total_balance(&1337), 69 + 42); - assert_eq!(Balances::free_balance(&1337), 69); - assert_eq!(Balances::reserved_balance(&1337), 42); - }); - } - - #[test] - fn fungible_unbalanced_trait_set_balance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_eq!(>::balance(&1337), 0); - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!(>::balance(&1337), 100); - - assert_ok!(Balances::reserve(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - - assert_noop!(>::set_balance(&1337, 0), ArithmeticError::Underflow); - - assert_ok!(>::set_balance(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 0); - assert_eq!(Balances::reserved_balance(1337), 60); - }); - } - - #[test] - fn fungible_unbalanced_trait_set_total_issuance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_eq!(>::total_issuance(), 0); - >::set_total_issuance(100); - assert_eq!(>::total_issuance(), 100); - }); - } - - #[test] - fn fungible_unbalanced_trait_decrease_balance_simple_works() { - <$ext_builder>::default().build().execute_with(|| { - // An Account that starts at 100 - assert_ok!(>::set_balance(&1337, 100)); - // and reserves 50 - assert_ok!(Balances::reserve(&1337, 50)); - // and is decreased by 20 - assert_ok!(>::decrease_balance(&1337, 20)); - // should end up at 80. 
- assert_eq!(>::balance(&1337), 80); - }); - } - - #[test] - fn fungible_unbalanced_trait_decrease_balance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!(>::balance(&1337), 100); - - assert_noop!( - >::decrease_balance(&1337, 101), - TokenError::NoFunds - ); - assert_eq!( - >::decrease_balance(&1337, 100), - Ok(100) - ); - assert_eq!(>::balance(&1337), 0); - - // free: 40, reserved: 60 - assert_ok!(>::set_balance(&1337, 100)); - assert_ok!(Balances::reserve(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - assert_noop!( - >::decrease_balance(&1337, 41), - TokenError::NoFunds - ); - assert_eq!( - >::decrease_balance(&1337, 40), - Ok(40) - ); - assert_eq!(>::balance(&1337), 60); - assert_eq!(Balances::free_balance(1337), 0); - assert_eq!(Balances::reserved_balance(1337), 60); - }); - } - - #[test] - fn fungible_unbalanced_trait_decrease_balance_at_most_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!(>::balance(&1337), 100); - - assert_eq!( - >::decrease_balance_at_most(&1337, 101), - 100 - ); - assert_eq!(>::balance(&1337), 0); - - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!( - >::decrease_balance_at_most(&1337, 100), - 100 - ); - assert_eq!(>::balance(&1337), 0); - - // free: 40, reserved: 60 - assert_ok!(>::set_balance(&1337, 100)); - assert_ok!(Balances::reserve(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - assert_eq!( - >::decrease_balance_at_most(&1337, 0), - 0 - ); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - assert_eq!( - >::decrease_balance_at_most(&1337, 10), - 10 - ); - assert_eq!(Balances::free_balance(1337), 30); - assert_eq!( - >::decrease_balance_at_most(&1337, 200), - 30 - ); - assert_eq!(>::balance(&1337), 60); - assert_eq!(Balances::free_balance(1337), 0); - assert_eq!(Balances::reserved_balance(1337), 60); - }); - } - - #[test] - fn fungible_unbalanced_trait_increase_balance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_noop!( - >::increase_balance(&1337, 0), - TokenError::BelowMinimum - ); - assert_eq!( - >::increase_balance(&1337, 1), - Ok(1) - ); - assert_noop!( - >::increase_balance(&1337, u64::MAX), - ArithmeticError::Overflow - ); - }); - } - - #[test] - fn fungible_unbalanced_trait_increase_balance_at_most_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_eq!( - >::increase_balance_at_most(&1337, 0), - 0 - ); - assert_eq!( - >::increase_balance_at_most(&1337, 1), - 1 - ); - assert_eq!( - >::increase_balance_at_most(&1337, u64::MAX), - u64::MAX - 1 - ); - }); - } - } -} diff --git a/frame/balances/src/tests/currency_tests.rs b/frame/balances/src/tests/currency_tests.rs new file mode 100644 index 0000000000000..e25b122c1fcf0 --- /dev/null +++ b/frame/balances/src/tests/currency_tests.rs @@ -0,0 +1,1307 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests regarding the functionality of the `Currency` trait set implementations. + +use super::*; +use crate::NegativeImbalance; +use frame_support::traits::{ + BalanceStatus::{Free, Reserved}, + Currency, + ExistenceRequirement::{self, AllowDeath}, + Hooks, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, + WithdrawReasons, +}; + +const ID_1: LockIdentifier = *b"1 "; +const ID_2: LockIdentifier = *b"2 "; + +pub const CALL: &::RuntimeCall = + &RuntimeCall::Balances(crate::Call::transfer_allow_death { dest: 0, value: 0 }); + +#[test] +fn basic_locking_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + Balances::set_lock(ID_1, &1, 9, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 5, AllowDeath), + TokenError::Frozen + ); + }); +} + +#[test] +fn account_should_be_reaped() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); + assert_eq!(System::providers(&1), 0); + assert_eq!(System::consumers(&1), 0); + // Check that the account is dead. + assert!(!frame_system::Account::::contains_key(&1)); + }); +} + +#[test] +fn reap_failed_due_to_provider_and_consumer() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + // SCENARIO: only one provider and there are remaining consumers. + assert_ok!(System::inc_consumers(&1)); + assert!(!System::can_dec_provider(&1)); + assert_noop!( + >::transfer(&1, &2, 10, AllowDeath), + TokenError::Frozen + ); + assert!(System::account_exists(&1)); + assert_eq!(Balances::free_balance(1), 10); + + // SCENARIO: more than one provider, but will not kill account due to other provider. 
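+ // A second provider reference lets the account's free balance drop to zero without the account being reaped.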
+ assert_eq!(System::inc_providers(&1), frame_system::IncRefStatus::Existed); + assert_eq!(System::providers(&1), 2); + assert!(System::can_dec_provider(&1)); + assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); + assert_eq!(System::providers(&1), 1); + assert!(System::account_exists(&1)); + assert_eq!(Balances::free_balance(1), 0); + }); +} + +#[test] +fn partial_locking_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); + }); +} + +#[test] +fn lock_removal_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + Balances::remove_lock(ID_1, &1); + assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); + }); +} + +#[test] +fn lock_replacement_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); + }); +} + +#[test] +fn double_locking_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + Balances::set_lock(ID_2, &1, 5, WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); + }); +} + +#[test] +fn combination_locking_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::empty()); + Balances::set_lock(ID_2, &1, 0, WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); + }); +} + +#[test] +fn lock_value_extension_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6, AllowDeath), + TokenError::Frozen + ); + Balances::extend_lock(ID_1, &1, 2, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6, AllowDeath), + TokenError::Frozen + ); + Balances::extend_lock(ID_1, &1, 8, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 3, AllowDeath), + TokenError::Frozen + ); + }); +} + +#[test] +fn lock_should_work_reserve() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + pallet_transaction_payment::NextFeeMultiplier::::put( + Multiplier::saturating_from_integer(1), + ); + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::RESERVE); + assert_noop!( + >::transfer(&1, &2, 1, AllowDeath), + TokenError::Frozen + ); + assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); + assert!( as SignedExtension>::pre_dispatch( + ChargeTransactionPayment::from(1), + &1, + CALL, + &info_from_weight(Weight::from_parts(1, 0)), + 1, + ) + .is_err()); + assert!( as SignedExtension>::pre_dispatch( + ChargeTransactionPayment::from(0), + &1, + CALL, + &info_from_weight(Weight::from_parts(1, 0)), + 1, + ) + .is_err()); + }); +} + +#[test] +fn lock_should_work_tx_fee() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::TRANSACTION_PAYMENT); + assert_noop!( + >::transfer(&1, &2, 1, AllowDeath), + 
TokenError::Frozen + ); + assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); + assert!( as SignedExtension>::pre_dispatch( + ChargeTransactionPayment::from(1), + &1, + CALL, + &info_from_weight(Weight::from_parts(1, 0)), + 1, + ) + .is_err()); + assert!( as SignedExtension>::pre_dispatch( + ChargeTransactionPayment::from(0), + &1, + CALL, + &info_from_weight(Weight::from_parts(1, 0)), + 1, + ) + .is_err()); + }); +} + +#[test] +fn lock_block_number_extension_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6, AllowDeath), + TokenError::Frozen + ); + Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6, AllowDeath), + TokenError::Frozen + ); + System::set_block_number(2); + Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 3, AllowDeath), + TokenError::Frozen + ); + }); +} + +#[test] +fn lock_reasons_extension_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::TRANSFER); + assert_noop!( + >::transfer(&1, &2, 6, AllowDeath), + TokenError::Frozen + ); + Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::empty()); + assert_noop!( + >::transfer(&1, &2, 6, AllowDeath), + TokenError::Frozen + ); + Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::RESERVE); + assert_noop!( + >::transfer(&1, &2, 6, AllowDeath), + TokenError::Frozen + ); + }); +} + +#[test] +fn reserved_balance_should_prevent_reclaim_count() { + ExtBuilder::default() + .existential_deposit(256 * 1) + .monied(true) + .build_and_execute_with(|| { + System::inc_account_nonce(&2); + assert_eq!(Balances::total_balance(&2), 256 * 20); + assert_eq!(System::providers(&2), 1); + System::inc_providers(&2); + assert_eq!(System::providers(&2), 2); + + assert_ok!(Balances::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved + assert_eq!(System::providers(&2), 1); + assert_eq!(Balances::free_balance(2), 255); // "free" account would be deleted. + assert_eq!(Balances::total_balance(&2), 256 * 20); // reserve still exists. + assert_eq!(System::account_nonce(&2), 1); + + // account 4 tries to take index 1 for account 5. + assert_ok!(Balances::transfer_allow_death(Some(4).into(), 5, 256 * 1 + 0x69)); + assert_eq!(Balances::total_balance(&5), 256 * 1 + 0x69); + + assert!(Balances::slash_reserved(&2, 256 * 19 + 1).1.is_zero()); // account 2 gets slashed + + // "reserve" account reduced to 255 (below ED) so account no longer consuming + assert_ok!(System::dec_providers(&2)); + assert_eq!(System::providers(&2), 0); + // account deleted + assert_eq!(System::account_nonce(&2), 0); // nonce zero + assert_eq!(Balances::total_balance(&2), 0); + + // account 4 tries to take index 1 again for account 6. 
+ assert_ok!(Balances::transfer_allow_death(Some(4).into(), 6, 256 * 1 + 0x69)); + assert_eq!(Balances::total_balance(&6), 256 * 1 + 0x69); + }); +} + +#[test] +fn reward_should_work() { + ExtBuilder::default().monied(true).build_and_execute_with(|| { + assert_eq!(Balances::total_balance(&1), 10); + assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 10, + })); + assert_eq!(Balances::total_balance(&1), 20); + assert_eq!(Balances::total_issuance(), 120); + }); +} + +#[test] +fn balance_works() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 42); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 42, + })); + assert_eq!(Balances::free_balance(1), 42); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::total_balance(&1), 42); + assert_eq!(Balances::free_balance(2), 0); + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(Balances::total_balance(&2), 0); + }); +} + +#[test] +fn reserving_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 111); + assert_eq!(Balances::reserved_balance(1), 0); + + assert_ok!(Balances::reserve(&1, 69)); + + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 42); + assert_eq!(Balances::reserved_balance(1), 69); + }); +} + +#[test] +fn deducting_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 69)); + assert_eq!(Balances::free_balance(1), 42); + }); +} + +#[test] +fn refunding_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 42); + assert_ok!(Balances::mutate_account(&1, |a| a.reserved = 69)); + Balances::unreserve(&1, 69); + assert_eq!(Balances::free_balance(1), 111); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn slashing_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 112); + assert_ok!(Balances::reserve(&1, 69)); + assert!(Balances::slash(&1, 42).1.is_zero()); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(1), 69); + assert_eq!(Balances::total_issuance(), 70); + }); +} + +#[test] +fn withdrawing_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&2, 111); + let _ = + Balances::withdraw(&2, 11, WithdrawReasons::TRANSFER, ExistenceRequirement::KeepAlive); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Withdraw { + who: 2, + amount: 11, + })); + assert_eq!(Balances::free_balance(2), 100); + assert_eq!(Balances::total_issuance(), 100); + }); +} + +#[test] +fn withdrawing_balance_should_fail_when_not_expendable() { + ExtBuilder::default().build_and_execute_with(|| { + ExistentialDeposit::set(10); + let _ = Balances::deposit_creating(&2, 20); + assert_ok!(Balances::reserve(&2, 5)); + assert_noop!( + Balances::withdraw(&2, 6, WithdrawReasons::TRANSFER, ExistenceRequirement::KeepAlive), + Error::::Expendability, + ); + assert_ok!(Balances::withdraw( + &2, + 5, + WithdrawReasons::TRANSFER, + ExistenceRequirement::KeepAlive + ),); + }); +} + +#[test] +fn 
slashing_incomplete_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 42); + assert_ok!(Balances::reserve(&1, 21)); + assert_eq!(Balances::slash(&1, 69).1, 49); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(1), 21); + assert_eq!(Balances::total_issuance(), 22); + }); +} + +#[test] +fn unreserving_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 110)); + Balances::unreserve(&1, 41); + assert_eq!(Balances::reserved_balance(1), 69); + assert_eq!(Balances::free_balance(1), 42); + }); +} + +#[test] +fn slashing_reserved_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 112); + assert_ok!(Balances::reserve(&1, 111)); + assert_eq!(Balances::slash_reserved(&1, 42).1, 0); + assert_eq!(Balances::reserved_balance(1), 69); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::total_issuance(), 70); + }); +} + +#[test] +fn slashing_incomplete_reserved_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 42)); + assert_eq!(Balances::slash_reserved(&1, 69).1, 27); + assert_eq!(Balances::free_balance(1), 69); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::total_issuance(), 69); + }); +} + +#[test] +fn repatriating_reserved_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + let _ = Balances::deposit_creating(&2, 1); + assert_ok!(Balances::reserve(&1, 110)); + assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Free), 0); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::ReserveRepatriated { + from: 1, + to: 2, + amount: 41, + destination_status: Free, + })); + assert_eq!(Balances::reserved_balance(1), 69); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(Balances::free_balance(2), 42); + }); +} + +#[test] +fn transferring_reserved_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + let _ = Balances::deposit_creating(&2, 1); + assert_ok!(Balances::reserve(&1, 110)); + assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Reserved), 0); + assert_eq!(Balances::reserved_balance(1), 69); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(2), 41); + assert_eq!(Balances::free_balance(2), 1); + }); +} + +#[test] +fn transferring_reserved_balance_to_yourself_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 110); + assert_ok!(Balances::reserve(&1, 50)); + assert_ok!(Balances::repatriate_reserved(&1, &1, 50, Free), 0); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance(1), 0); + + assert_ok!(Balances::reserve(&1, 50)); + assert_ok!(Balances::repatriate_reserved(&1, &1, 60, Free), 10); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn transferring_reserved_balance_to_nonexistent_should_fail() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 110)); + assert_noop!( + Balances::repatriate_reserved(&1, &2, 42, 
Free), + Error::::DeadAccount + ); + }); +} + +#[test] +fn transferring_incomplete_reserved_balance_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 110); + let _ = Balances::deposit_creating(&2, 1); + assert_ok!(Balances::reserve(&1, 41)); + assert_ok!(Balances::repatriate_reserved(&1, &2, 69, Free), 28); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(1), 69); + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(Balances::free_balance(2), 42); + }); +} + +#[test] +fn transferring_too_high_value_should_not_panic() { + ExtBuilder::default().build_and_execute_with(|| { + Balances::make_free_balance_be(&1, u64::MAX); + Balances::make_free_balance_be(&2, 1); + + assert_err!( + >::transfer(&1, &2, u64::MAX, AllowDeath), + ArithmeticError::Overflow, + ); + + assert_eq!(Balances::free_balance(1), u64::MAX); + assert_eq!(Balances::free_balance(2), 1); + }); +} + +#[test] +fn account_create_on_free_too_low_with_other() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 100); + assert_eq!(Balances::total_issuance(), 100); + + // No-op. + let _ = Balances::deposit_creating(&2, 50); + assert_eq!(Balances::free_balance(2), 0); + assert_eq!(Balances::total_issuance(), 100); + }) +} + +#[test] +fn account_create_on_free_too_low() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + // No-op. + let _ = Balances::deposit_creating(&2, 50); + assert_eq!(Balances::free_balance(2), 0); + assert_eq!(Balances::total_issuance(), 0); + }) +} + +#[test] +fn account_removal_on_free_too_low() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + assert_eq!(Balances::total_issuance(), 0); + + // Setup two accounts with free balance above the existential threshold. + let _ = Balances::deposit_creating(&1, 110); + let _ = Balances::deposit_creating(&2, 110); + + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::free_balance(2), 110); + assert_eq!(Balances::total_issuance(), 220); + + // Transfer funds from account 1 of such amount that after this transfer + // the balance of account 1 will be below the existential threshold. + // This should lead to the removal of all balance of this account. + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 20)); + + // Verify free balance removal of account 1. + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 130); + + // Verify that TotalIssuance tracks balance removal when free balance is too low. 
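+ // 220 total minus the 90 dusted from account 1 (now below the 100 ED) leaves 130.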
+ assert_eq!(Balances::total_issuance(), 130); + }); +} + +#[test] +fn burn_must_work() { + ExtBuilder::default().monied(true).build_and_execute_with(|| { + let init_total_issuance = Balances::total_issuance(); + let imbalance = Balances::burn(10); + assert_eq!(Balances::total_issuance(), init_total_issuance - 10); + drop(imbalance); + assert_eq!(Balances::total_issuance(), init_total_issuance); + }); +} + +#[test] +#[should_panic = "the balance of any account should always be at least the existential deposit."] +fn cannot_set_genesis_value_below_ed() { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = 11); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let _ = crate::GenesisConfig:: { balances: vec![(1, 10)] } + .assimilate_storage(&mut t) + .unwrap(); +} + +#[test] +#[should_panic = "duplicate balances in genesis."] +fn cannot_set_genesis_value_twice() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let _ = crate::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (1, 15)] } + .assimilate_storage(&mut t) + .unwrap(); +} + +#[test] +fn existential_deposit_respected_when_reserving() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + // Set balance to free and reserved at the existential deposit + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 101)); + // Check balance + assert_eq!(Balances::free_balance(1), 101); + assert_eq!(Balances::reserved_balance(1), 0); + + // Reserve some free balance + assert_ok!(Balances::reserve(&1, 1)); + // Check balance, the account should be ok. + assert_eq!(Balances::free_balance(1), 100); + assert_eq!(Balances::reserved_balance(1), 1); + + // Cannot reserve any more of the free balance. + assert_noop!(Balances::reserve(&1, 1), DispatchError::ConsumerRemaining); + }); +} + +#[test] +fn slash_fails_when_account_needed() { + ExtBuilder::default().existential_deposit(50).build_and_execute_with(|| { + // Set balance to free and reserved at the existential deposit + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 52)); + assert_ok!(Balances::reserve(&1, 1)); + // Check balance + assert_eq!(Balances::free_balance(1), 51); + assert_eq!(Balances::reserved_balance(1), 1); + + // Slash a small amount + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); + + // The account should be dead. + assert_eq!(Balances::free_balance(1), 50); + assert_eq!(Balances::reserved_balance(1), 1); + + // Slashing again doesn't work since we require the ED + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(0), 1)); + + // The account should be dead. + assert_eq!(Balances::free_balance(1), 50); + assert_eq!(Balances::reserved_balance(1), 1); + }); +} + +#[test] +fn account_deleted_when_just_dust() { + ExtBuilder::default().existential_deposit(50).build_and_execute_with(|| { + // Set balance to free and reserved at the existential deposit + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 50)); + // Check balance + assert_eq!(Balances::free_balance(1), 50); + + // Slash a small amount + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); + + // The account should be dead. 
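+ // The remaining 49 is below the existential deposit of 50, so the account is reaped and its balance reads as zero.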
+ assert_eq!(Balances::free_balance(1), 0); + }); +} + +#[test] +fn emit_events_with_reserve_and_unreserve() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 100); + + System::set_block_number(2); + assert_ok!(Balances::reserve(&1, 10)); + + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Reserved { + who: 1, + amount: 10, + })); + + System::set_block_number(3); + assert!(Balances::unreserve(&1, 5).is_zero()); + + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { + who: 1, + amount: 5, + })); + + System::set_block_number(4); + assert_eq!(Balances::unreserve(&1, 6), 1); + + // should only unreserve 5 + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { + who: 1, + amount: 5, + })); + }); +} + +#[test] +fn emit_events_with_changing_locks() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 100); + System::reset_events(); + + // Locks = [] --> [10] + Balances::set_lock(*b"LOCK_000", &1, 10, WithdrawReasons::TRANSFER); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Locked { who: 1, amount: 10 })]); + + // Locks = [10] --> [15] + Balances::set_lock(*b"LOCK_000", &1, 15, WithdrawReasons::TRANSFER); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Locked { who: 1, amount: 5 })]); + + // Locks = [15] --> [15, 20] + Balances::set_lock(*b"LOCK_001", &1, 20, WithdrawReasons::TRANSACTION_PAYMENT); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Locked { who: 1, amount: 5 })]); + + // Locks = [15, 20] --> [17, 20] + Balances::set_lock(*b"LOCK_000", &1, 17, WithdrawReasons::TRANSACTION_PAYMENT); + for event in events() { + match event { + RuntimeEvent::Balances(crate::Event::Locked { .. }) => { + assert!(false, "unexpected lock event") + }, + RuntimeEvent::Balances(crate::Event::Unlocked { .. 
}) => { + assert!(false, "unexpected unlock event") + }, + _ => continue, + } + } + + // Locks = [17, 20] --> [17, 15] + Balances::set_lock(*b"LOCK_001", &1, 15, WithdrawReasons::TRANSFER); + assert_eq!( + events(), + [RuntimeEvent::Balances(crate::Event::Unlocked { who: 1, amount: 3 })] + ); + + // Locks = [17, 15] --> [15] + Balances::remove_lock(*b"LOCK_000", &1); + assert_eq!( + events(), + [RuntimeEvent::Balances(crate::Event::Unlocked { who: 1, amount: 2 })] + ); + + // Locks = [15] --> [] + Balances::remove_lock(*b"LOCK_001", &1); + assert_eq!( + events(), + [RuntimeEvent::Balances(crate::Event::Unlocked { who: 1, amount: 15 })] + ); + }); +} + +#[test] +fn emit_events_with_existential_deposit() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 100)); + + assert_eq!( + events(), + [ + RuntimeEvent::System(system::Event::NewAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100 }), + ] + ); + + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); + + assert_eq!( + events(), + [ + RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 99 }), + RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 1 }), + ] + ); + }); +} + +#[test] +fn emit_events_with_no_existential_deposit_suicide() { + ExtBuilder::default().existential_deposit(1).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_eq!( + events(), + [ + RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100 }), + RuntimeEvent::System(system::Event::NewAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + ] + ); + + let res = Balances::slash(&1, 100); + assert_eq!(res, (NegativeImbalance::new(100), 0)); + + assert_eq!( + events(), + [ + RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 100 }), + ] + ); + }); +} + +#[test] +fn slash_over_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + // SCENARIO: Over-slash will kill account, and report missing slash amount. 
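+ // The account holds only 1_000 free, so a 1_300 slash can remove at most 1_000 and must report 300 as unslashed.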
+ Balances::make_free_balance_be(&1, 1_000); + // Slashed full free_balance, and reports 300 not slashed + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1000), 300)); + // Account is dead + assert!(!System::account_exists(&1)); + }); +} + +#[test] +fn slash_full_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 1_000), (NegativeImbalance::new(1000), 0)); + // Account is still alive + assert!(!System::account_exists(&1)); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { + who: 1, + amount: 1000, + })); + }); +} + +#[test] +fn slash_partial_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { + who: 1, + amount: 900, + })); + }); +} + +#[test] +fn slash_dusting_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(950), 0)); + assert!(!System::account_exists(&1)); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { + who: 1, + amount: 950, + })); + }); +} + +#[test] +fn slash_does_not_take_from_reserve() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + assert_ok!(Balances::reserve(&1, 100)); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(800), 100)); + assert_eq!(Balances::reserved_balance(&1), 100); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { + who: 1, + amount: 800, + })); + }); +} + +#[test] +fn slash_consumed_slash_full_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests + // Slashed completed in full + assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + }); +} + +#[test] +fn slash_consumed_slash_over_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests + // Slashed completed in full + assert_eq!(Balances::slash(&1, 1_000), (NegativeImbalance::new(900), 100)); + // Account is still alive + assert!(System::account_exists(&1)); + }); +} + +#[test] +fn slash_consumed_slash_partial_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests + // Slashed completed in full + assert_eq!(Balances::slash(&1, 800), (NegativeImbalance::new(800), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + }); +} + +#[test] +fn slash_on_non_existant_works() { + 
ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + // Slash on non-existent account is okay. + assert_eq!(Balances::slash(&12345, 1_300), (NegativeImbalance::new(0), 1300)); + }); +} + +#[test] +fn slash_reserved_slash_partial_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + assert_ok!(Balances::reserve(&1, 900)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 800), (NegativeImbalance::new(800), 0)); + assert_eq!(System::consumers(&1), 1); + assert_eq!(Balances::reserved_balance(&1), 100); + assert_eq!(Balances::free_balance(&1), 100); + }); +} + +#[test] +fn slash_reserved_slash_everything_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + Balances::make_free_balance_be(&1, 1_000); + assert_ok!(Balances::reserve(&1, 900)); + assert_eq!(System::consumers(&1), 1); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); + assert_eq!(System::consumers(&1), 0); + // Account is still alive + assert!(System::account_exists(&1)); + }); +} + +#[test] +fn slash_reserved_overslash_does_not_touch_free_balance() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + // SCENARIO: Over-slash doesn't touch free balance. + Balances::make_free_balance_be(&1, 1_000); + assert_ok!(Balances::reserve(&1, 800)); + // Slashed done + assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(800), 100)); + assert_eq!(Balances::free_balance(&1), 200); + }); +} + +#[test] +fn slash_reserved_on_non_existant_works() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + // Slash on non-existent account is okay. + assert_eq!(Balances::slash_reserved(&12345, 1_300), (NegativeImbalance::new(0), 1300)); + }); +} + +#[test] +fn operations_on_dead_account_should_not_change_state() { + // These functions all use `mutate_account` which may introduce a storage change when + // the account never existed to begin with, and shouldn't exist in the end. + ExtBuilder::default().existential_deposit(1).build_and_execute_with(|| { + assert!(!frame_system::Account::::contains_key(&1337)); + + // Unreserve + assert_storage_noop!(assert_eq!(Balances::unreserve(&1337, 42), 42)); + // Reserve + assert_noop!(Balances::reserve(&1337, 42), Error::::InsufficientBalance); + // Slash Reserve + assert_storage_noop!(assert_eq!(Balances::slash_reserved(&1337, 42).1, 42)); + // Repatriate Reserve + assert_noop!( + Balances::repatriate_reserved(&1337, &1338, 42, Free), + Error::::DeadAccount + ); + // Slash + assert_storage_noop!(assert_eq!(Balances::slash(&1337, 42).1, 42)); + }); +} + +#[test] +#[should_panic = "The existential deposit must be greater than zero!"] +fn zero_ed_is_prohibited() { + // These functions all use `mutate_account` which may introduce a storage change when + // the account never existed to begin with, and shouldn't exist in the end. 
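+ // `integrity_test` asserts that the existential deposit is non-zero, hence the expected panic when it is configured as 0.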
+ ExtBuilder::default().existential_deposit(0).build_and_execute_with(|| { + Balances::integrity_test(); + }); +} + +#[test] +fn named_reserve_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id_1 = TestId::Foo; + let id_2 = TestId::Bar; + let id_3 = TestId::Baz; + + // reserve + + assert_noop!( + Balances::reserve_named(&id_1, &1, 112), + Error::::InsufficientBalance + ); + + assert_ok!(Balances::reserve_named(&id_1, &1, 12)); + + assert_eq!(Balances::reserved_balance(1), 12); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 12); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_ok!(Balances::reserve_named(&id_1, &1, 2)); + + assert_eq!(Balances::reserved_balance(1), 14); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_ok!(Balances::reserve_named(&id_2, &1, 23)); + + assert_eq!(Balances::reserved_balance(1), 37); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_ok!(Balances::reserve(&1, 34)); + + assert_eq!(Balances::reserved_balance(1), 71); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 40); + + assert_noop!(Balances::reserve_named(&id_3, &1, 2), Error::::TooManyReserves); + + // unreserve + + assert_eq!(Balances::unreserve_named(&id_1, &1, 10), 0); + + assert_eq!(Balances::reserved_balance(1), 61); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 4); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::unreserve_named(&id_1, &1, 5), 1); + + assert_eq!(Balances::reserved_balance(1), 57); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::unreserve_named(&id_2, &1, 3), 0); + + assert_eq!(Balances::reserved_balance(1), 54); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); + + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 57); + + // slash_reserved_named + + assert_ok!(Balances::reserve_named(&id_1, &1, 10)); + + assert_eq!(Balances::slash_reserved_named(&id_1, &1, 25).1, 15); + + assert_eq!(Balances::reserved_balance(1), 54); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); + assert_eq!(Balances::total_balance(&1), 101); + + assert_eq!(Balances::slash_reserved_named(&id_2, &1, 5).1, 0); + + assert_eq!(Balances::reserved_balance(1), 49); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); + assert_eq!(Balances::total_balance(&1), 96); + + // repatriate_reserved_named + + let _ = Balances::deposit_creating(&2, 100); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Reserved).unwrap(), 0); + + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 10); + assert_eq!(Balances::reserved_balance(&2), 10); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &2, &1, 11, Reserved).unwrap(), 1); + + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); + 
assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); + assert_eq!(Balances::reserved_balance(&2), 0); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Free).unwrap(), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); + assert_eq!(Balances::free_balance(&2), 110); + + // repatriate_reserved_named to self + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 10, Reserved).unwrap(), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + + assert_eq!(Balances::free_balance(&1), 47); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 15, Free).unwrap(), 10); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_eq!(Balances::free_balance(&1), 52); + }); +} + +#[test] +fn reserve_must_succeed_if_can_reserve_does() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 1); + let _ = Balances::deposit_creating(&2, 2); + assert!(Balances::can_reserve(&1, 1) == Balances::reserve(&1, 1).is_ok()); + assert!(Balances::can_reserve(&2, 1) == Balances::reserve(&2, 1).is_ok()); + }); +} + +#[test] +fn reserved_named_to_yourself_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 110); + + let id = TestId::Foo; + + assert_ok!(Balances::reserve_named(&id, &1, 50)); + assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 50, Free), 0); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + + assert_ok!(Balances::reserve_named(&id, &1, 50)); + assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 60, Free), 10); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + }); +} + +#[test] +fn ensure_reserved_named_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = TestId::Foo; + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 15)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 15); + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 10)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 10); + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 20)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 20); + }); +} + +#[test] +fn unreserve_all_named_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = TestId::Foo; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_eq!(Balances::unreserve_all_named(&id, &1), 15); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::free_balance(&1), 111); + + assert_eq!(Balances::unreserve_all_named(&id, &1), 0); + }); +} + +#[test] +fn slash_all_reserved_named_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = TestId::Foo; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 15); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::free_balance(&1), 96); + + assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 0); + }); +} + +#[test] +fn repatriate_all_reserved_named_should_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + let _ = 
Balances::deposit_creating(&2, 10); + let _ = Balances::deposit_creating(&3, 10); + + let id = TestId::Foo; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_ok!(Balances::repatriate_all_reserved_named(&id, &1, &2, Reserved)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id, &2), 15); + + assert_ok!(Balances::repatriate_all_reserved_named(&id, &2, &3, Free)); + assert_eq!(Balances::reserved_balance_named(&id, &2), 0); + assert_eq!(Balances::free_balance(&3), 25); + }); +} + +#[test] +fn freezing_and_locking_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(>::set_freeze(&TestId::Foo, &1, 4)); + Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 2); + assert_eq!(Balances::account(&1).frozen, 5); + assert_ok!(>::set_freeze(&TestId::Foo, &1, 6)); + assert_eq!(Balances::account(&1).frozen, 6); + assert_ok!(>::set_freeze(&TestId::Foo, &1, 4)); + assert_eq!(Balances::account(&1).frozen, 5); + Balances::set_lock(ID_1, &1, 3, WithdrawReasons::all()); + assert_eq!(Balances::account(&1).frozen, 4); + Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(Balances::account(&1).frozen, 5); + Balances::remove_lock(ID_1, &1); + assert_eq!(Balances::account(&1).frozen, 4); + assert_eq!(System::consumers(&1), 1); + }); +} diff --git a/frame/balances/src/tests/dispatchable_tests.rs b/frame/balances/src/tests/dispatchable_tests.rs new file mode 100644 index 0000000000000..76d0961e577d2 --- /dev/null +++ b/frame/balances/src/tests/dispatchable_tests.rs @@ -0,0 +1,224 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests regarding the functionality of the dispatchables/extrinsics. 
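+//! These drive the pallet's calls (e.g. `transfer_allow_death`, `transfer_keep_alive`, `transfer_all`, `force_set_balance`, `upgrade_accounts`) through dispatch origins rather than trait methods.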
+ +use super::*; +use frame_support::traits::tokens::Preservation::Expendable; +use fungible::{hold::Mutate as HoldMutate, Inspect, Mutate}; + +#[test] +fn default_indexing_on_new_accounts_should_not_work2() { + ExtBuilder::default() + .existential_deposit(10) + .monied(true) + .build_and_execute_with(|| { + // account 5 should not exist + // ext_deposit is 10, value is 9, not satisfies for ext_deposit + assert_noop!( + Balances::transfer_allow_death(Some(1).into(), 5, 9), + TokenError::BelowMinimum, + ); + assert_eq!(Balances::free_balance(1), 100); + }); +} + +#[test] +fn dust_account_removal_should_work() { + ExtBuilder::default() + .existential_deposit(100) + .monied(true) + .build_and_execute_with(|| { + System::inc_account_nonce(&2); + assert_eq!(System::account_nonce(&2), 1); + assert_eq!(Balances::total_balance(&2), 2000); + // index 1 (account 2) becomes zombie + assert_ok!(Balances::transfer_allow_death(Some(2).into(), 5, 1901)); + assert_eq!(Balances::total_balance(&2), 0); + assert_eq!(Balances::total_balance(&5), 1901); + assert_eq!(System::account_nonce(&2), 0); + }); +} + +#[test] +fn balance_transfer_works() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::mint_into(&1, 111); + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 69)); + assert_eq!(Balances::total_balance(&1), 42); + assert_eq!(Balances::total_balance(&2), 69); + }); +} + +#[test] +fn force_transfer_works() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::mint_into(&1, 111); + assert_noop!(Balances::force_transfer(Some(2).into(), 1, 2, 69), BadOrigin,); + assert_ok!(Balances::force_transfer(RawOrigin::Root.into(), 1, 2, 69)); + assert_eq!(Balances::total_balance(&1), 42); + assert_eq!(Balances::total_balance(&2), 69); + }); +} + +#[test] +fn balance_transfer_when_on_hold_should_not_work() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::mint_into(&1, 111); + assert_ok!(Balances::hold(&TestId::Foo, &1, 69)); + assert_noop!( + Balances::transfer_allow_death(Some(1).into(), 2, 69), + TokenError::FundsUnavailable, + ); + }); +} + +#[test] +fn transfer_keep_alive_works() { + ExtBuilder::default().existential_deposit(1).build_and_execute_with(|| { + let _ = Balances::mint_into(&1, 100); + assert_noop!( + Balances::transfer_keep_alive(Some(1).into(), 2, 100), + TokenError::NotExpendable + ); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 0); + }); +} + +#[test] +fn transfer_keep_alive_all_free_succeed() { + ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 300)); + assert_ok!(Balances::hold(&TestId::Foo, &1, 100)); + assert_ok!(Balances::transfer_keep_alive(Some(1).into(), 2, 100)); + assert_eq!(Balances::total_balance(&1), 200); + assert_eq!(Balances::total_balance(&2), 100); + }); +} + +#[test] +fn transfer_all_works_1() { + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + // setup + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 200)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); + // transfer all and allow death + assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); + assert_eq!(Balances::total_balance(&1), 0); + assert_eq!(Balances::total_balance(&2), 200); + }); +} + +#[test] +fn transfer_all_works_2() { + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + // setup + 
assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 200)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); + // transfer all and keep alive + assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 100); + }); +} + +#[test] +fn transfer_all_works_3() { + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + // setup + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 210)); + assert_ok!(Balances::hold(&TestId::Foo, &1, 10)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); + // transfer all and allow death w/ reserved + assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); + assert_eq!(Balances::total_balance(&1), 110); + assert_eq!(Balances::total_balance(&2), 100); + }); +} + +#[test] +fn transfer_all_works_4() { + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + // setup + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 210)); + assert_ok!(Balances::hold(&TestId::Foo, &1, 10)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); + // transfer all and keep alive w/ reserved + assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); + assert_eq!(Balances::total_balance(&1), 110); + assert_eq!(Balances::total_balance(&2), 100); + }); +} + +#[test] +fn set_balance_handles_killing_account() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = Balances::mint_into(&1, 111); + assert_ok!(frame_system::Pallet::::inc_consumers(&1)); + assert_noop!( + Balances::force_set_balance(RuntimeOrigin::root(), 1, 0), + DispatchError::ConsumerRemaining, + ); + }); +} + +#[test] +fn set_balance_handles_total_issuance() { + ExtBuilder::default().build_and_execute_with(|| { + let old_total_issuance = Balances::total_issuance(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1337, 69)); + assert_eq!(Balances::total_issuance(), old_total_issuance + 69); + assert_eq!(Balances::total_balance(&1337), 69); + assert_eq!(Balances::free_balance(&1337), 69); + }); +} + +#[test] +fn upgrade_accounts_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + System::inc_providers(&7); + assert_ok!(::AccountStore::try_mutate_exists( + &7, + |a| -> DispatchResult { + *a = Some(AccountData { + free: 5, + reserved: 5, + frozen: Zero::zero(), + flags: crate::types::ExtraFlags::old_logic(), + }); + Ok(()) + } + )); + assert!(!Balances::account(&7).flags.is_new_logic()); + assert_eq!(System::providers(&7), 1); + assert_eq!(System::consumers(&7), 0); + assert_ok!(Balances::upgrade_accounts(Some(1).into(), vec![7])); + assert!(Balances::account(&7).flags.is_new_logic()); + assert_eq!(System::providers(&7), 1); + assert_eq!(System::consumers(&7), 1); + + >::unreserve(&7, 5); + assert_ok!(>::transfer(&7, &1, 10, Expendable)); + assert_eq!(Balances::total_balance(&7), 0); + assert_eq!(System::providers(&7), 0); + assert_eq!(System::consumers(&7), 0); + }); +} diff --git a/frame/balances/src/tests/fungible_tests.rs b/frame/balances/src/tests/fungible_tests.rs new file mode 100644 index 0000000000000..ab2606c53ff71 --- /dev/null +++ b/frame/balances/src/tests/fungible_tests.rs @@ -0,0 +1,470 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests regarding the functionality of the `fungible` trait set implementations. + +use super::*; +use frame_support::traits::tokens::{ + Fortitude::{Force, Polite}, + Precision::{BestEffort, Exact}, + Preservation::{Expendable, Preserve, Protect}, + Restriction::Free, +}; +use fungible::{Inspect, InspectFreeze, InspectHold, Mutate, MutateFreeze, MutateHold, Unbalanced}; + +#[test] +fn inspect_trait_reducible_balance_basic_works() { + ExtBuilder::default().existential_deposit(10).build_and_execute_with(|| { + Balances::set_balance(&1, 100); + assert_eq!(Balances::reducible_balance(&1, Expendable, Polite), 100); + assert_eq!(Balances::reducible_balance(&1, Protect, Polite), 90); + assert_eq!(Balances::reducible_balance(&1, Preserve, Polite), 90); + assert_eq!(Balances::reducible_balance(&1, Expendable, Force), 100); + assert_eq!(Balances::reducible_balance(&1, Protect, Force), 90); + assert_eq!(Balances::reducible_balance(&1, Preserve, Force), 90); + }); +} + +#[test] +fn inspect_trait_reducible_balance_other_provide_works() { + ExtBuilder::default().existential_deposit(10).build_and_execute_with(|| { + Balances::set_balance(&1, 100); + System::inc_providers(&1); + assert_eq!(Balances::reducible_balance(&1, Expendable, Polite), 100); + assert_eq!(Balances::reducible_balance(&1, Protect, Polite), 100); + assert_eq!(Balances::reducible_balance(&1, Preserve, Polite), 90); + assert_eq!(Balances::reducible_balance(&1, Expendable, Force), 100); + assert_eq!(Balances::reducible_balance(&1, Protect, Force), 100); + assert_eq!(Balances::reducible_balance(&1, Preserve, Force), 90); + }); +} + +#[test] +fn inspect_trait_reducible_balance_frozen_works() { + ExtBuilder::default().existential_deposit(10).build_and_execute_with(|| { + Balances::set_balance(&1, 100); + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 50)); + assert_eq!(Balances::reducible_balance(&1, Expendable, Polite), 50); + assert_eq!(Balances::reducible_balance(&1, Protect, Polite), 50); + assert_eq!(Balances::reducible_balance(&1, Preserve, Polite), 50); + assert_eq!(Balances::reducible_balance(&1, Expendable, Force), 90); + assert_eq!(Balances::reducible_balance(&1, Protect, Force), 90); + assert_eq!(Balances::reducible_balance(&1, Preserve, Force), 90); + }); +} + +#[test] +fn unbalanced_trait_set_balance_works() { + ExtBuilder::default().build_and_execute_with(|| { + assert_eq!(>::balance(&1337), 0); + assert_ok!(Balances::write_balance(&1337, 100)); + assert_eq!(>::balance(&1337), 100); + + assert_ok!(>::hold(&TestId::Foo, &1337, 60)); + assert_eq!(>::balance(&1337), 40); + assert_eq!(>::total_balance_on_hold(&1337), 60); + assert_eq!( + >::balance_on_hold(&TestId::Foo, &1337), + 60 + ); + + assert_noop!(Balances::write_balance(&1337, 0), Error::::InsufficientBalance); + + assert_ok!(Balances::write_balance(&1337, 1)); + assert_eq!(>::balance(&1337), 1); + assert_eq!( + >::balance_on_hold(&TestId::Foo, &1337), + 60 + ); + + 
assert_ok!(>::release(&TestId::Foo, &1337, 60, Exact)); + assert_eq!(>::balance_on_hold(&TestId::Foo, &1337), 0); + assert_eq!(>::total_balance_on_hold(&1337), 0); + }); +} + +#[test] +fn unbalanced_trait_set_total_issuance_works() { + ExtBuilder::default().build_and_execute_with(|| { + assert_eq!(>::total_issuance(), 0); + Balances::set_total_issuance(100); + assert_eq!(>::total_issuance(), 100); + }); +} + +#[test] +fn unbalanced_trait_decrease_balance_simple_works() { + ExtBuilder::default().build_and_execute_with(|| { + // An Account that starts at 100 + assert_ok!(Balances::write_balance(&1337, 100)); + assert_eq!(>::balance(&1337), 100); + // and reserves 50 + assert_ok!(>::hold(&TestId::Foo, &1337, 50)); + assert_eq!(>::balance(&1337), 50); + // and is decreased by 20 + assert_ok!(Balances::decrease_balance(&1337, 20, Exact, Expendable, Polite)); + assert_eq!(>::balance(&1337), 30); + }); +} + +#[test] +fn unbalanced_trait_decrease_balance_works_1() { + ExtBuilder::default().build_and_execute_with(|| { + assert_ok!(Balances::write_balance(&1337, 100)); + assert_eq!(>::balance(&1337), 100); + + assert_noop!( + Balances::decrease_balance(&1337, 101, Exact, Expendable, Polite), + TokenError::FundsUnavailable + ); + assert_eq!(Balances::decrease_balance(&1337, 100, Exact, Expendable, Polite), Ok(100)); + assert_eq!(>::balance(&1337), 0); + }); +} + +#[test] +fn unbalanced_trait_decrease_balance_works_2() { + ExtBuilder::default().build_and_execute_with(|| { + // free: 40, reserved: 60 + assert_ok!(Balances::write_balance(&1337, 100)); + assert_ok!(Balances::hold(&TestId::Foo, &1337, 60)); + assert_eq!(>::balance(&1337), 40); + assert_eq!(Balances::total_balance_on_hold(&1337), 60); + assert_noop!( + Balances::decrease_balance(&1337, 40, Exact, Expendable, Polite), + Error::::InsufficientBalance + ); + assert_eq!(Balances::decrease_balance(&1337, 39, Exact, Expendable, Polite), Ok(39)); + assert_eq!(>::balance(&1337), 1); + assert_eq!(Balances::total_balance_on_hold(&1337), 60); + }); +} + +#[test] +fn unbalanced_trait_decrease_balance_at_most_works_1() { + ExtBuilder::default().build_and_execute_with(|| { + assert_ok!(Balances::write_balance(&1337, 100)); + assert_eq!(>::balance(&1337), 100); + + assert_eq!(Balances::decrease_balance(&1337, 101, BestEffort, Expendable, Polite), Ok(100)); + assert_eq!(>::balance(&1337), 0); + }); +} + +#[test] +fn unbalanced_trait_decrease_balance_at_most_works_2() { + ExtBuilder::default().build_and_execute_with(|| { + assert_ok!(Balances::write_balance(&1337, 99)); + assert_eq!(Balances::decrease_balance(&1337, 99, BestEffort, Expendable, Polite), Ok(99)); + assert_eq!(>::balance(&1337), 0); + }); +} + +#[test] +fn unbalanced_trait_decrease_balance_at_most_works_3() { + ExtBuilder::default().build_and_execute_with(|| { + // free: 40, reserved: 60 + assert_ok!(Balances::write_balance(&1337, 100)); + assert_ok!(Balances::hold(&TestId::Foo, &1337, 60)); + assert_eq!(Balances::free_balance(1337), 40); + assert_eq!(Balances::total_balance_on_hold(&1337), 60); + assert_eq!(Balances::decrease_balance(&1337, 0, BestEffort, Expendable, Polite), Ok(0)); + assert_eq!(Balances::free_balance(1337), 40); + assert_eq!(Balances::total_balance_on_hold(&1337), 60); + assert_eq!(Balances::decrease_balance(&1337, 10, BestEffort, Expendable, Polite), Ok(10)); + assert_eq!(Balances::free_balance(1337), 30); + assert_eq!(Balances::decrease_balance(&1337, 200, BestEffort, Expendable, Polite), Ok(29)); + assert_eq!(>::balance(&1337), 1); + 
assert_eq!(Balances::free_balance(1337), 1); + assert_eq!(Balances::total_balance_on_hold(&1337), 60); + }); +} + +#[test] +fn unbalanced_trait_increase_balance_works() { + ExtBuilder::default().build_and_execute_with(|| { + assert_noop!(Balances::increase_balance(&1337, 0, Exact), TokenError::BelowMinimum); + assert_eq!(Balances::increase_balance(&1337, 1, Exact), Ok(1)); + assert_noop!(Balances::increase_balance(&1337, u64::MAX, Exact), ArithmeticError::Overflow); + }); +} + +#[test] +fn unbalanced_trait_increase_balance_at_most_works() { + ExtBuilder::default().build_and_execute_with(|| { + assert_eq!(Balances::increase_balance(&1337, 0, BestEffort), Ok(0)); + assert_eq!(Balances::increase_balance(&1337, 1, BestEffort), Ok(1)); + assert_eq!(Balances::increase_balance(&1337, u64::MAX, BestEffort), Ok(u64::MAX - 1)); + }); +} + +#[test] +fn freezing_and_holds_should_overlap() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 10)); + assert_ok!(Balances::hold(&TestId::Foo, &1, 9)); + assert_eq!(Balances::account(&1).free, 1); + assert_eq!(System::consumers(&1), 1); + assert_eq!(Balances::account(&1).free, 1); + assert_eq!(Balances::account(&1).frozen, 10); + assert_eq!(Balances::account(&1).reserved, 9); + assert_eq!(Balances::total_balance_on_hold(&1), 9); + }); +} + +#[test] +fn frozen_hold_balance_cannot_be_moved_without_force() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 10)); + assert_ok!(Balances::hold(&TestId::Foo, &1, 9)); + assert_eq!(Balances::reducible_total_balance_on_hold(&1, Force), 9); + assert_eq!(Balances::reducible_total_balance_on_hold(&1, Polite), 0); + let e = TokenError::Frozen; + assert_noop!( + Balances::transfer_on_hold(&TestId::Foo, &1, &2, 1, Exact, Free, Polite), + e + ); + assert_ok!(Balances::transfer_on_hold(&TestId::Foo, &1, &2, 1, Exact, Free, Force)); + }); +} + +#[test] +fn frozen_hold_balance_best_effort_transfer_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); + assert_ok!(Balances::hold(&TestId::Foo, &1, 9)); + assert_eq!(Balances::reducible_total_balance_on_hold(&1, Force), 9); + assert_eq!(Balances::reducible_total_balance_on_hold(&1, Polite), 5); + assert_ok!(Balances::transfer_on_hold( + &TestId::Foo, + &1, + &2, + 10, + BestEffort, + Free, + Polite + )); + assert_eq!(Balances::total_balance(&1), 5); + assert_eq!(Balances::total_balance(&2), 25); + }); +} + +#[test] +fn partial_freezing_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); + assert_eq!(System::consumers(&1), 1); + assert_ok!(>::transfer(&1, &2, 5, Expendable)); + assert_noop!( + >::transfer(&1, &2, 1, Expendable), + TokenError::Frozen + ); + }); +} + +#[test] +fn thaw_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, u64::MAX)); + assert_ok!(Balances::thaw(&TestId::Foo, &1)); + assert_eq!(System::consumers(&1), 0); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &1), 0); + assert_eq!(Balances::account(&1).frozen, 0); + assert_ok!(>::transfer(&1, &2, 10, Expendable)); + }); +} + +#[test] +fn set_freeze_zero_should_work() { + 
ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, u64::MAX)); + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 0)); + assert_eq!(System::consumers(&1), 0); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &1), 0); + assert_eq!(Balances::account(&1).frozen, 0); + assert_ok!(>::transfer(&1, &2, 10, Expendable)); + }); +} + +#[test] +fn set_freeze_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, u64::MAX)); + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); + assert_ok!(>::transfer(&1, &2, 5, Expendable)); + assert_noop!( + >::transfer(&1, &2, 1, Expendable), + TokenError::Frozen + ); + }); +} + +#[test] +fn extend_freeze_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); + assert_ok!(Balances::extend_freeze(&TestId::Foo, &1, 10)); + assert_eq!(Balances::account(&1).frozen, 10); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &1), 10); + assert_noop!( + >::transfer(&1, &2, 1, Expendable), + TokenError::Frozen + ); + }); +} + +#[test] +fn double_freezing_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); + assert_ok!(Balances::set_freeze(&TestId::Bar, &1, 5)); + assert_eq!(System::consumers(&1), 1); + assert_ok!(>::transfer(&1, &2, 5, Expendable)); + assert_noop!( + >::transfer(&1, &2, 1, Expendable), + TokenError::Frozen + ); + }); +} + +#[test] +fn can_hold_entire_balance_when_second_provider() { + ExtBuilder::default() + .existential_deposit(1) + .monied(false) + .build_and_execute_with(|| { + >::set_balance(&1, 100); + assert_noop!(Balances::hold(&TestId::Foo, &1, 100), TokenError::FundsUnavailable); + System::inc_providers(&1); + assert_eq!(System::providers(&1), 2); + assert_ok!(Balances::hold(&TestId::Foo, &1, 100)); + assert_eq!(System::providers(&1), 1); + assert_noop!(System::dec_providers(&1), DispatchError::ConsumerRemaining); + }); +} + +#[test] +fn unholding_frees_hold_slot() { + ExtBuilder::default() + .existential_deposit(1) + .monied(false) + .build_and_execute_with(|| { + >::set_balance(&1, 100); + assert_ok!(Balances::hold(&TestId::Foo, &1, 10)); + assert_ok!(Balances::hold(&TestId::Bar, &1, 10)); + assert_ok!(Balances::release(&TestId::Foo, &1, 10, Exact)); + assert_ok!(Balances::hold(&TestId::Baz, &1, 10)); + }); +} + +#[test] +fn sufficients_work_properly_with_reference_counting() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + // Only run PoC when the system pallet is enabled, since the underlying bug is in the + // system pallet it won't work with BalancesAccountStore + if UseSystem::get() { + // Start with a balance of 100 + >::set_balance(&1, 100); + // Emulate a sufficient, in reality this could be reached by transferring a + // sufficient asset to the account + System::inc_sufficients(&1); + // Spend the same balance multiple times + assert_ok!(>::transfer(&1, &1337, 100, Expendable)); + assert_eq!(Balances::free_balance(&1), 0); + assert_noop!( + >::transfer(&1, &1337, 100, Expendable), + TokenError::FundsUnavailable + ); + } + }); +} + +#[test] +fn emit_events_with_changing_freezes() { + ExtBuilder::default().build_and_execute_with(|| { + let _ = 
Balances::set_balance(&1, 100); + System::reset_events(); + + // Freeze = [] --> [10] + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 10)); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Frozen { who: 1, amount: 10 })]); + + // Freeze = [10] --> [15] + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 15)); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Frozen { who: 1, amount: 5 })]); + + // Freeze = [15] --> [15, 20] + assert_ok!(Balances::set_freeze(&TestId::Bar, &1, 20)); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Frozen { who: 1, amount: 5 })]); + + // Freeze = [15, 20] --> [17, 20] + assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 17)); + for event in events() { + match event { + RuntimeEvent::Balances(crate::Event::Frozen { .. }) => { + assert!(false, "unexpected freeze event") + }, + RuntimeEvent::Balances(crate::Event::Thawed { .. }) => { + assert!(false, "unexpected thaw event") + }, + _ => continue, + } + } + + // Freeze = [17, 20] --> [17, 15] + assert_ok!(Balances::set_freeze(&TestId::Bar, &1, 15)); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Thawed { who: 1, amount: 3 })]); + + // Freeze = [17, 15] --> [15] + assert_ok!(Balances::thaw(&TestId::Foo, &1)); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Thawed { who: 1, amount: 2 })]); + + // Freeze = [15] --> [] + assert_ok!(Balances::thaw(&TestId::Bar, &1)); + assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Thawed { who: 1, amount: 15 })]); + }); +} diff --git a/frame/balances/src/tests/mod.rs b/frame/balances/src/tests/mod.rs new file mode 100644 index 0000000000000..9b451f36d1d94 --- /dev/null +++ b/frame/balances/src/tests/mod.rs @@ -0,0 +1,305 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests. 
+ +#![cfg(test)] + +use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet}; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{ + assert_err, assert_noop, assert_ok, assert_storage_noop, + dispatch::{DispatchInfo, GetDispatchInfo}, + parameter_types, + traits::{ + tokens::fungible, ConstU32, ConstU64, ConstU8, Imbalance as ImbalanceT, OnUnbalanced, + StorageMapShim, StoredMap, + }, + weights::{IdentityFee, Weight}, + RuntimeDebug, +}; +use frame_system::{self as system, RawOrigin}; +use pallet_transaction_payment::{ChargeTransactionPayment, CurrencyAdapter, Multiplier}; +use scale_info::TypeInfo; +use sp_core::H256; +use sp_io; +use sp_runtime::{ + testing::Header, + traits::{BadOrigin, IdentityLookup, SignedExtension, Zero}, + ArithmeticError, DispatchError, DispatchResult, FixedPointNumber, TokenError, +}; + +mod currency_tests; +mod dispatchable_tests; +mod fungible_tests; +mod reentrancy_tests; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +#[derive( + Encode, + Decode, + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + MaxEncodedLen, + TypeInfo, + RuntimeDebug, +)] +pub enum TestId { + Foo, + Bar, + Baz, +} + +frame_support::construct_runtime!( + pub struct Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, + } +); + +parameter_types! { + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_parts(1024, u64::MAX), + ); + pub static ExistentialDeposit: u64 = 1; +} +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u64; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = super::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_transaction_payment::Config for Test { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = CurrencyAdapter, ()>; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = IdentityFee; + type LengthToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +impl Config for Test { + type Balance = u64; + type DustRemoval = DustTrap; + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = TestAccountStore; + type MaxLocks = ConstU32<50>; + type MaxReserves = ConstU32<2>; + type ReserveIdentifier = TestId; + type WeightInfo = (); + type HoldIdentifier = TestId; + type FreezeIdentifier = TestId; + type MaxFreezes = ConstU32<2>; + type MaxHolds = ConstU32<2>; +} + +#[derive(Clone)] +pub struct ExtBuilder { + existential_deposit: u64, + monied: bool, + dust_trap: Option, +} +impl 
Default for ExtBuilder { + fn default() -> Self { + Self { existential_deposit: 1, monied: false, dust_trap: None } + } +} +impl ExtBuilder { + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn monied(mut self, monied: bool) -> Self { + self.monied = monied; + if self.existential_deposit == 0 { + self.existential_deposit = 1; + } + self + } + pub fn dust_trap(mut self, account: u64) -> Self { + self.dust_trap = Some(account); + self + } + pub fn set_associated_consts(&self) { + DUST_TRAP_TARGET.with(|v| v.replace(self.dust_trap)); + EXISTENTIAL_DEPOSIT.with(|v| v.replace(self.existential_deposit)); + } + pub fn build(self) -> sp_io::TestExternalities { + self.set_associated_consts(); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: if self.monied { + vec![ + (1, 10 * self.existential_deposit), + (2, 20 * self.existential_deposit), + (3, 30 * self.existential_deposit), + (4, 40 * self.existential_deposit), + (12, 10 * self.existential_deposit), + ] + } else { + vec![] + }, + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } + pub fn build_and_execute_with(self, f: impl Fn()) { + let other = self.clone(); + UseSystem::set(false); + other.build().execute_with(|| f()); + UseSystem::set(true); + self.build().execute_with(|| f()); + } +} + +parameter_types! { + static DustTrapTarget: Option = None; +} + +pub struct DustTrap; + +impl OnUnbalanced> for DustTrap { + fn on_nonzero_unbalanced(amount: CreditOf) { + match DustTrapTarget::get() { + None => drop(amount), + Some(a) => { + let result = >::resolve(&a, amount); + debug_assert!(result.is_ok()); + }, + } + } +} + +parameter_types! { + pub static UseSystem: bool = false; +} + +type BalancesAccountStore = StorageMapShim, u64, super::AccountData>; +type SystemAccountStore = frame_system::Pallet; + +pub struct TestAccountStore; +impl StoredMap> for TestAccountStore { + fn get(k: &u64) -> super::AccountData { + if UseSystem::get() { + >::get(k) + } else { + >::get(k) + } + } + fn try_mutate_exists>( + k: &u64, + f: impl FnOnce(&mut Option>) -> Result, + ) -> Result { + if UseSystem::get() { + >::try_mutate_exists(k, f) + } else { + >::try_mutate_exists(k, f) + } + } + fn mutate( + k: &u64, + f: impl FnOnce(&mut super::AccountData) -> R, + ) -> Result { + if UseSystem::get() { + >::mutate(k, f) + } else { + >::mutate(k, f) + } + } + fn mutate_exists( + k: &u64, + f: impl FnOnce(&mut Option>) -> R, + ) -> Result { + if UseSystem::get() { + >::mutate_exists(k, f) + } else { + >::mutate_exists(k, f) + } + } + fn insert(k: &u64, t: super::AccountData) -> Result<(), DispatchError> { + if UseSystem::get() { + >::insert(k, t) + } else { + >::insert(k, t) + } + } + fn remove(k: &u64) -> Result<(), DispatchError> { + if UseSystem::get() { + >::remove(k) + } else { + >::remove(k) + } + } +} + +pub fn events() -> Vec { + let evt = System::events().into_iter().map(|evt| evt.event).collect::>(); + System::reset_events(); + evt +} + +/// create a transaction info struct from weight. Handy to avoid building the whole struct. 
+pub fn info_from_weight(w: Weight) -> DispatchInfo { + DispatchInfo { weight: w, ..Default::default() } +} + +#[test] +fn weights_sane() { + let info = crate::Call::<Test>::transfer_allow_death { dest: 10, value: 4 }.get_dispatch_info(); + assert_eq!(<() as crate::WeightInfo>::transfer_allow_death(), info.weight); + + let info = crate::Call::<Test>::force_unreserve { who: 10, amount: 4 }.get_dispatch_info(); + assert_eq!(<() as crate::WeightInfo>::force_unreserve(), info.weight); +} diff --git a/frame/balances/src/tests/reentrancy_tests.rs b/frame/balances/src/tests/reentrancy_tests.rs new file mode 100644 index 0000000000000..e97bf2ed2b706 --- /dev/null +++ b/frame/balances/src/tests/reentrancy_tests.rs @@ -0,0 +1,195 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests regarding the reentrancy functionality. + +use super::*; +use frame_support::traits::tokens::{ + Fortitude::Force, + Precision::BestEffort, + Preservation::{Expendable, Protect}, +}; +use fungible::Balanced; + +#[test] +fn transfer_dust_removal_tst1_should_work() { + ExtBuilder::default() + .existential_deposit(100) + .dust_trap(1) + .build_and_execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 1000)); + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 2, 500)); + + // In this transaction, account 2's free balance + // drops below the existential deposit, + // and the dust balance is removed from account 2. + assert_ok!(Balances::transfer_allow_death(RawOrigin::Signed(2).into(), 3, 450)); + + // As expected, the dust balance is removed. + assert_eq!(Balances::free_balance(&2), 0); + + // As expected, beneficiary account 3 + // received the transferred funds. + assert_eq!(Balances::free_balance(&3), 450); + + // Dust balance is deposited to account 1 + // during the process of dust removal. 
+ assert_eq!(Balances::free_balance(&1), 1050); + + // Verify the events + assert_eq!(System::events().len(), 12); + + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { + from: 2, + to: 3, + amount: 450, + })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { + account: 2, + amount: 50, + })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 50, + })); + }); +} + +#[test] +fn transfer_dust_removal_tst2_should_work() { + ExtBuilder::default() + .existential_deposit(100) + .dust_trap(1) + .build_and_execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 1000)); + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 2, 500)); + + // In this transaction, account 2's free balance + // drops below the existential deposit, + // and the dust balance is removed from account 2. + assert_ok!(Balances::transfer_allow_death(RawOrigin::Signed(2).into(), 1, 450)); + + // As expected, the dust balance is removed. + assert_eq!(Balances::free_balance(&2), 0); + + // Dust balance is deposited to account 1 + // during the process of dust removal. + assert_eq!(Balances::free_balance(&1), 1500); + + // Verify the events + assert_eq!(System::events().len(), 10); + + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { + from: 2, + to: 1, + amount: 450, + })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { + account: 2, + amount: 50, + })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 50, + })); + }); +} + +#[test] +fn repatriating_reserved_balance_dust_removal_should_work() { + ExtBuilder::default() + .existential_deposit(100) + .dust_trap(1) + .build_and_execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 1000)); + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 2, 500)); + + // Transfer funds from account 2 so that its + // free balance drops below the + // existential deposit. + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), 1, 450)); + + // Since the free balance of account 2 is lower than + // the existential deposit, the dust amount is + // removed from account 2. + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(Balances::free_balance(2), 0); + + // Account 1 is credited with the transferred amount + // together with the dust balance during dust + // removal. 
+ assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(1), 1500); + + // Verify the events + assert_eq!(System::events().len(), 10); + + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { + from: 2, + to: 1, + amount: 450, + })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { + account: 2, + amount: 50, + })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 50, + })); + }); +} + +#[test] +fn emit_events_with_no_existential_deposit_suicide_with_dust() { + ExtBuilder::default().existential_deposit(2).build_and_execute_with(|| { + assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 100)); + + assert_eq!( + events(), + [ + RuntimeEvent::System(system::Event::NewAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100 }), + ] + ); + + let res = Balances::withdraw(&1, 98, BestEffort, Protect, Force); + assert_eq!(res.unwrap().peek(), 98); + + // no events + assert_eq!( + events(), + [RuntimeEvent::Balances(crate::Event::Withdraw { who: 1, amount: 98 })] + ); + + let res = Balances::withdraw(&1, 1, BestEffort, Expendable, Force); + assert_eq!(res.unwrap().peek(), 1); + + assert_eq!( + events(), + [ + RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 1 }), + RuntimeEvent::Balances(crate::Event::Withdraw { who: 1, amount: 1 }) + ] + ); + }); +} diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs deleted file mode 100644 index 765ab194d2011..0000000000000 --- a/frame/balances/src/tests_composite.rs +++ /dev/null @@ -1,149 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Test utilities - -#![cfg(test)] - -use crate::{self as pallet_balances, decl_tests, Config, Pallet}; -use frame_support::{ - dispatch::DispatchInfo, - parameter_types, - traits::{ConstU32, ConstU64, ConstU8}, - weights::{IdentityFee, Weight}, -}; -use pallet_transaction_payment::CurrencyAdapter; -use sp_core::H256; -use sp_io; -use sp_runtime::{testing::Header, traits::IdentityLookup}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - } -); - -parameter_types! 
{ - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - frame_support::weights::Weight::from_parts(1024, u64::MAX), - ); - pub static ExistentialDeposit: u64 = 0; -} -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = super::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_transaction_payment::Config for Test { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = CurrencyAdapter, ()>; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = IdentityFee; - type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = (); -} - -impl Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type MaxLocks = (); - type MaxReserves = ConstU32<2>; - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); -} - -pub struct ExtBuilder { - existential_deposit: u64, - monied: bool, -} -impl Default for ExtBuilder { - fn default() -> Self { - Self { existential_deposit: 1, monied: false } - } -} -impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn monied(mut self, monied: bool) -> Self { - self.monied = monied; - self - } - pub fn set_associated_consts(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - } - pub fn build(self) -> sp_io::TestExternalities { - self.set_associated_consts(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: if self.monied { - vec![ - (1, 10 * self.existential_deposit), - (2, 20 * self.existential_deposit), - (3, 30 * self.existential_deposit), - (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit), - ] - } else { - vec![] - }, - } - .assimilate_storage(&mut t) - .unwrap(); - - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } -} - -decl_tests! { Test, ExtBuilder, EXISTENTIAL_DEPOSIT } diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs deleted file mode 100644 index 929f77b540fe1..0000000000000 --- a/frame/balances/src/tests_local.rs +++ /dev/null @@ -1,191 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Test utilities - -#![cfg(test)] - -use crate::{self as pallet_balances, decl_tests, Config, Pallet}; -use frame_support::{ - dispatch::DispatchInfo, - parameter_types, - traits::{ConstU32, ConstU64, ConstU8, StorageMapShim}, - weights::{IdentityFee, Weight}, -}; -use pallet_transaction_payment::CurrencyAdapter; -use sp_core::H256; -use sp_io; -use sp_runtime::{testing::Header, traits::IdentityLookup}; - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub struct Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - } -); - -parameter_types! { - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - frame_support::weights::Weight::from_parts(1024, u64::MAX), - ); - pub static ExistentialDeposit: u64 = 0; -} -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_transaction_payment::Config for Test { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = CurrencyAdapter, ()>; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = IdentityFee; - type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = (); -} - -impl Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = - StorageMapShim, system::Provider, u64, super::AccountData>; - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<2>; - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); -} - -pub struct ExtBuilder { - existential_deposit: u64, - monied: bool, -} -impl Default for ExtBuilder { - fn default() -> Self { - Self { existential_deposit: 1, monied: false } - } -} -impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn monied(mut self, monied: bool) -> Self { - self.monied = monied; - if self.existential_deposit == 0 { - self.existential_deposit = 1; - } - self - } - pub fn 
set_associated_consts(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - } - pub fn build(self) -> sp_io::TestExternalities { - self.set_associated_consts(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: if self.monied { - vec![ - (1, 10 * self.existential_deposit), - (2, 20 * self.existential_deposit), - (3, 30 * self.existential_deposit), - (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit), - ] - } else { - vec![] - }, - } - .assimilate_storage(&mut t) - .unwrap(); - - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } -} - -decl_tests! { Test, ExtBuilder, EXISTENTIAL_DEPOSIT } - -#[test] -fn emit_events_with_no_existential_deposit_suicide_with_dust() { - ::default().existential_deposit(2).build().execute_with(|| { - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); - - assert_eq!( - events(), - [ - RuntimeEvent::System(system::Event::NewAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), - ] - ); - - let res = Balances::slash(&1, 98); - assert_eq!(res, (NegativeImbalance::new(98), 0)); - - // no events - assert_eq!( - events(), - [RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 98 })] - ); - - let res = Balances::slash(&1, 1); - assert_eq!(res, (NegativeImbalance::new(1), 0)); - - assert_eq!( - events(), - [ - RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 1 }), - RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 1 }) - ] - ); - }); -} diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs deleted file mode 100644 index 828dfa23707d0..0000000000000 --- a/frame/balances/src/tests_reentrancy.rs +++ /dev/null @@ -1,264 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Test setup for potential reentracy and lost updates of nested mutations. 
- -#![cfg(test)] - -use crate::{self as pallet_balances, Config}; -use frame_support::{ - parameter_types, - traits::{ConstU32, ConstU64, StorageMapShim}, -}; -use sp_core::H256; -use sp_io; -use sp_runtime::{testing::Header, traits::IdentityLookup}; - -use crate::*; -use frame_support::{ - assert_ok, - traits::{Currency, ReservableCurrency}, -}; -use frame_system::RawOrigin; - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - } -); - -parameter_types! { - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - frame_support::weights::Weight::from_parts(1024, u64::MAX), - ); - pub static ExistentialDeposit: u64 = 0; -} -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -pub struct OnDustRemoval; -impl OnUnbalanced> for OnDustRemoval { - fn on_nonzero_unbalanced(amount: NegativeImbalance) { - assert_ok!(Balances::resolve_into_existing(&1, amount)); - } -} - -impl Config for Test { - type Balance = u64; - type DustRemoval = OnDustRemoval; - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = - StorageMapShim, system::Provider, u64, super::AccountData>; - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<2>; - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); -} - -pub struct ExtBuilder { - existential_deposit: u64, -} -impl Default for ExtBuilder { - fn default() -> Self { - Self { existential_deposit: 1 } - } -} -impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - - pub fn set_associated_consts(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - } - - pub fn build(self) -> sp_io::TestExternalities { - self.set_associated_consts(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![] } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } -} - -#[test] -fn transfer_dust_removal_tst1_should_work() { - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - // Verification of reentrancy in dust removal - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); - - // In this 
transaction, account 2 free balance - // drops below existential balance - // and dust balance is removed from account 2 - assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 3, 450)); - - // As expected dust balance is removed. - assert_eq!(Balances::free_balance(&2), 0); - - // As expected beneficiary account 3 - // received the transfered fund. - assert_eq!(Balances::free_balance(&3), 450); - - // Dust balance is deposited to account 1 - // during the process of dust removal. - assert_eq!(Balances::free_balance(&1), 1050); - - // Verify the events - assert_eq!(System::events().len(), 12); - - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { - from: 2, - to: 3, - amount: 450, - })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { - account: 2, - amount: 50, - })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { - who: 1, - amount: 50, - })); - }); -} - -#[test] -fn transfer_dust_removal_tst2_should_work() { - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - // Verification of reentrancy in dust removal - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); - - // In this transaction, account 2 free balance - // drops below existential balance - // and dust balance is removed from account 2 - assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 1, 450)); - - // As expected dust balance is removed. - assert_eq!(Balances::free_balance(&2), 0); - - // Dust balance is deposited to account 1 - // during the process of dust removal. - assert_eq!(Balances::free_balance(&1), 1500); - - // Verify the events - assert_eq!(System::events().len(), 10); - - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { - from: 2, - to: 1, - amount: 450, - })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { - account: 2, - amount: 50, - })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { - who: 1, - amount: 50, - })); - }); -} - -#[test] -fn repatriating_reserved_balance_dust_removal_should_work() { - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - // Verification of reentrancy in dust removal - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); - - // Reserve a value on account 2, - // Such that free balance is lower than - // Exestintial deposit. - assert_ok!(Balances::reserve(&2, 450)); - - // Transfer of reserved fund from slashed account 2 to - // beneficiary account 1 - assert_ok!(Balances::repatriate_reserved(&2, &1, 450, Status::Free), 0); - - // Since free balance of account 2 is lower than - // existential deposit, dust amount is - // removed from the account 2 - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(Balances::free_balance(2), 0); - - // account 1 is credited with reserved amount - // together with dust balance during dust - // removal. 
- assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(1), 1500); - - // Verify the events - assert_eq!(System::events().len(), 11); - - System::assert_has_event(RuntimeEvent::Balances(crate::Event::ReserveRepatriated { - from: 2, - to: 1, - amount: 450, - destination_status: Status::Free, - })); - - System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { - account: 2, - amount: 50, - })); - - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Deposit { - who: 1, - amount: 50, - })); - }); -} diff --git a/frame/balances/src/types.rs b/frame/balances/src/types.rs new file mode 100644 index 0000000000000..389124402a8f1 --- /dev/null +++ b/frame/balances/src/types.rs @@ -0,0 +1,157 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types used in the pallet. + +use crate::{Config, CreditOf, Event, Pallet}; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::ops::BitOr; +use frame_support::{ + traits::{Imbalance, LockIdentifier, OnUnbalanced, WithdrawReasons}, + RuntimeDebug, +}; +use scale_info::TypeInfo; +use sp_runtime::Saturating; + +/// Simplified reasons for withdrawing balance. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub enum Reasons { + /// Paying system transaction fees. + Fee = 0, + /// Any reason other than paying system transaction fees. + Misc = 1, + /// Any reason at all. + All = 2, +} + +impl From for Reasons { + fn from(r: WithdrawReasons) -> Reasons { + if r == WithdrawReasons::TRANSACTION_PAYMENT { + Reasons::Fee + } else if r.contains(WithdrawReasons::TRANSACTION_PAYMENT) { + Reasons::All + } else { + Reasons::Misc + } + } +} + +impl BitOr for Reasons { + type Output = Reasons; + fn bitor(self, other: Reasons) -> Reasons { + if self == other { + return self + } + Reasons::All + } +} + +/// A single lock on a balance. There can be many of these on an account and they "overlap", so the +/// same balance is frozen by multiple locks. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct BalanceLock { + /// An identifier for this lock. Only one lock may be in existence for each identifier. + pub id: LockIdentifier, + /// The amount which the free balance may not drop below when this lock is in effect. + pub amount: Balance, + /// If true, then the lock remains in effect even for payment of transaction fees. + pub reasons: Reasons, +} + +/// Store named reserved balance. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct ReserveData { + /// The identifier for the named reserve. + pub id: ReserveIdentifier, + /// The amount of the named reserve. + pub amount: Balance, +} + +/// An identifier and balance. 
+#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct IdAmount { + /// An identifier for this item. + pub id: Id, + /// Some amount for this item. + pub amount: Balance, +} + +/// All balance information for an account. +#[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct AccountData { + /// Non-reserved part of the balance which the account holder may be able to control. + /// + /// This is the only balance that matters in terms of most operations on tokens. + pub free: Balance, + /// Balance which is has active holds on it and may not be used at all. + /// + /// This is the sum of all individual holds together with any sums still under the (deprecated) + /// reserves API. + pub reserved: Balance, + /// The amount that `free + reserved` may not drop below when reducing the balance, except for + /// actions where the account owner cannot reasonably benefit from the balance reduction, such + /// as slashing. + pub frozen: Balance, + /// Extra information about this account. The MSB is a flag indicating whether the new ref- + /// counting logic is in place for this account. + pub flags: ExtraFlags, +} + +const IS_NEW_LOGIC: u128 = 0x80000000_00000000_00000000_00000000u128; + +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct ExtraFlags(u128); +impl Default for ExtraFlags { + fn default() -> Self { + Self(IS_NEW_LOGIC) + } +} +impl ExtraFlags { + pub fn old_logic() -> Self { + Self(0) + } + pub fn set_new_logic(&mut self) { + self.0 = self.0 | IS_NEW_LOGIC + } + pub fn is_new_logic(&self) -> bool { + (self.0 & IS_NEW_LOGIC) == IS_NEW_LOGIC + } +} + +impl AccountData { + pub fn usable(&self) -> Balance { + self.free.saturating_sub(self.frozen) + } + + /// The total balance in this account including any that is reserved and ignoring any frozen. + pub fn total(&self) -> Balance { + self.free.saturating_add(self.reserved) + } +} + +pub struct DustCleaner, I: 'static = ()>( + pub(crate) Option<(T::AccountId, CreditOf)>, +); + +impl, I: 'static> Drop for DustCleaner { + fn drop(&mut self) { + if let Some((who, dust)) = self.0.take() { + Pallet::::deposit_event(Event::DustLost { account: who, amount: dust.peek() }); + T::DustRemoval::on_unbalanced(dust); + } + } +} diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index cdc657ce1656c..f35d9c697028b 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-02-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_balances // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/balances/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -49,13 +48,14 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_balances. pub trait WeightInfo { - fn transfer() -> Weight; + fn transfer_allow_death() -> Weight; fn transfer_keep_alive() -> Weight; - fn set_balance_creating() -> Weight; - fn set_balance_killing() -> Weight; + fn force_set_balance_creating() -> Weight; + fn force_set_balance_killing() -> Weight; fn force_transfer() -> Weight; fn transfer_all() -> Weight; fn force_unreserve() -> Weight; + fn upgrade_accounts(u: u32, ) -> Weight; } /// Weights for pallet_balances using the Substrate node and recommended hardware. @@ -63,13 +63,12 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn transfer() -> Weight { + fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_815 nanoseconds. - Weight::from_parts(38_109_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 59_458_000 picoseconds. + Weight::from_parts(60_307_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -79,33 +78,30 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 28_184 nanoseconds. - Weight::from_parts(49_250_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 43_056_000 picoseconds. + Weight::from_parts(43_933_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn set_balance_creating() -> Weight { + fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `206` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_474 nanoseconds. - Weight::from_parts(17_777_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 17_428_000 picoseconds. + Weight::from_parts(17_731_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn set_balance_killing() -> Weight { + fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `206` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 20_962 nanoseconds. 
- Weight::from_parts(21_419_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 22_809_000 picoseconds. + Weight::from_parts(23_225_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -113,11 +109,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `135` + // Measured: `103` // Estimated: `6196` - // Minimum execution time: 39_713 nanoseconds. - Weight::from_parts(40_360_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) + // Minimum execution time: 56_929_000 picoseconds. + Weight::from_parts(57_688_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -127,9 +122,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 34_878 nanoseconds. - Weight::from_parts(35_121_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 49_820_000 picoseconds. + Weight::from_parts(50_832_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -137,27 +131,40 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `206` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_790 nanoseconds. - Weight::from_parts(17_029_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 20_270_000 picoseconds. + Weight::from_parts(20_597_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: System Account (r:999 w:999) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (135 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 19_847_000 picoseconds. + Weight::from_parts(20_053_000, 990) + // Standard Error: 11_643 + .saturating_add(Weight::from_parts(14_563_782, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } } // For backwards compatibility and tests impl WeightInfo for () { /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn transfer() -> Weight { + fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_815 nanoseconds. - Weight::from_parts(38_109_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 59_458_000 picoseconds. 
+ Weight::from_parts(60_307_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -167,33 +174,30 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 28_184 nanoseconds. - Weight::from_parts(49_250_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 43_056_000 picoseconds. + Weight::from_parts(43_933_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn set_balance_creating() -> Weight { + fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `206` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_474 nanoseconds. - Weight::from_parts(17_777_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 17_428_000 picoseconds. + Weight::from_parts(17_731_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn set_balance_killing() -> Weight { + fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `206` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 20_962 nanoseconds. - Weight::from_parts(21_419_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 22_809_000 picoseconds. + Weight::from_parts(23_225_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -201,11 +205,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `135` + // Measured: `103` // Estimated: `6196` - // Minimum execution time: 39_713 nanoseconds. - Weight::from_parts(40_360_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) + // Minimum execution time: 56_929_000 picoseconds. + Weight::from_parts(57_688_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -215,9 +218,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 34_878 nanoseconds. - Weight::from_parts(35_121_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 49_820_000 picoseconds. + Weight::from_parts(50_832_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -225,12 +227,26 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `206` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_790 nanoseconds. - Weight::from_parts(17_029_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) + // Minimum execution time: 20_270_000 picoseconds. 
+ Weight::from_parts(20_597_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: System Account (r:999 w:999) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (135 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 19_847_000 picoseconds. + Weight::from_parts(20_053_000, 990) + // Standard Error: 11_643 + .saturating_add(Weight::from_parts(14_563_782, 0).saturating_mul(u.into())) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } } diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml index 721e24282c45b..9840162149a38 100644 --- a/frame/beefy-mmr/Cargo.toml +++ b/frame/beefy-mmr/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" array-bytes = { version = "4.1", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } binary-merkle-tree = { version = "4.0.0-dev", default-features = false, path = "../../utils/binary-merkle-tree" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml index 3778ca5f37a56..21c96ff952731 100644 --- a/frame/beefy/Cargo.toml +++ b/frame/beefy/Cargo.toml @@ -10,7 +10,7 @@ homepage = "https://substrate.io" [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs index c92966235118e..ceb95263e2436 100644 --- a/frame/beefy/src/mock.rs +++ b/frame/beefy/src/mock.rs @@ -161,6 +161,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); + type HoldIdentifier = (); + type MaxHolds = (); + type FreezeIdentifier = (); + type MaxFreezes = (); } impl pallet_timestamp::Config for Test { diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 9c030c0a2c7ad..5705396de0048 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = linregress = { version = "0.5.1", optional = true } log = { version = "0.4.17", default-features = false } paste = "1.0" -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = 
["derive"] } serde = { version = "1.0.136", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "../support/procedural" } diff --git a/frame/benchmarking/pov/Cargo.toml b/frame/benchmarking/pov/Cargo.toml index d885821bc1fc7..28358142b674a 100644 --- a/frame/benchmarking/pov/Cargo.toml +++ b/frame/benchmarking/pov/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } diff --git a/frame/benchmarking/pov/src/benchmarking.rs b/frame/benchmarking/pov/src/benchmarking.rs index 46a98ae3ff183..27191e37219fd 100644 --- a/frame/benchmarking/pov/src/benchmarking.rs +++ b/frame/benchmarking/pov/src/benchmarking.rs @@ -192,6 +192,7 @@ frame_benchmarking::benchmarks! { // Same as above, but we still expect a mathematical worst case PoV size for the bounded one. storage_value_bounded_and_unbounded_read { + (0..1024).for_each(|i| Map1M::::insert(i, i)); }: { assert!(UnboundedValue::::get().is_none()); assert!(BoundedValue::::get().is_none()); diff --git a/frame/benchmarking/pov/src/lib.rs b/frame/benchmarking/pov/src/lib.rs index dccfe72d3f243..66000e54151b0 100644 --- a/frame/benchmarking/pov/src/lib.rs +++ b/frame/benchmarking/pov/src/lib.rs @@ -120,14 +120,14 @@ pub mod pallet { #[pallet::call] impl Pallet { #[pallet::call_index(0)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn emit_event(_origin: OriginFor) -> DispatchResult { Self::deposit_event(Event::TestEvent); Ok(()) } #[pallet::call_index(1)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn noop(_origin: OriginFor) -> DispatchResult { Ok(()) } diff --git a/frame/benchmarking/pov/src/tests.rs b/frame/benchmarking/pov/src/tests.rs index 19156b93060bc..b908925cccd8e 100644 --- a/frame/benchmarking/pov/src/tests.rs +++ b/frame/benchmarking/pov/src/tests.rs @@ -144,9 +144,9 @@ fn unbounded_read_best_effort() { fn partial_unbounded_read_best_effort() { let w_unbounded = W::storage_value_unbounded_read().proof_size(); let w_bounded = W::storage_value_bounded_read().proof_size(); - let w_partial = W::storage_value_bounded_and_unbounded_read().proof_size(); + let w_both = W::storage_value_bounded_and_unbounded_read().proof_size(); - assert_eq!(w_bounded + w_unbounded, w_partial, "The bounded part increases the PoV"); + assert!(w_both > w_bounded && w_both > w_unbounded, "The bounded part increases the PoV"); } #[test] diff --git a/frame/benchmarking/pov/src/weights.rs b/frame/benchmarking/pov/src/weights.rs index df6dba33b2dd3..f16ac7fbc2733 100644 --- a/frame/benchmarking/pov/src/weights.rs +++ b/frame/benchmarking/pov/src/weights.rs @@ -2,7 +2,7 @@ //! Autogenerated weights for frame_benchmarking_pallet_pov //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-02-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2023-04-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -28,7 +28,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for frame_benchmarking_pallet_pov. pub trait WeightInfo { @@ -61,6 +61,7 @@ pub trait WeightInfo { fn storage_map_partial_unbounded_ignored_read(i: u32, ) -> Weight; fn emit_event() -> Weight; fn noop() -> Weight; + fn storage_iteration() -> Weight; } /// Weights for frame_benchmarking_pallet_pov using the Substrate node and recommended hardware. @@ -72,9 +73,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `1489` - // Minimum execution time: 1_968 nanoseconds. - Weight::from_parts(2_060_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) + // Minimum execution time: 1_706_000 picoseconds. + Weight::from_parts(1_788_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov Value (r:1 w:0) @@ -83,9 +83,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `0` - // Minimum execution time: 1_934 nanoseconds. - Weight::from_parts(2_092_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 1_661_000 picoseconds. + Weight::from_parts(1_718_000, 0) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov Value (r:1 w:0) @@ -95,10 +94,9 @@ impl WeightInfo for SubstrateWeight { fn storage_single_value_ignored_some_read() -> Weight { // Proof Size summary in bytes: // Measured: `160` - // Estimated: `1649` - // Minimum execution time: 2_605 nanoseconds. - Weight::from_parts(2_786_000, 0) - .saturating_add(Weight::from_parts(0, 1649)) + // Estimated: `1489` + // Minimum execution time: 2_226_000 picoseconds. + Weight::from_parts(2_365_000, 1489) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Pov Value (r:1 w:0) @@ -107,9 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `1489` - // Minimum execution time: 2_019 nanoseconds. - Weight::from_parts(2_214_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) + // Minimum execution time: 1_785_000 picoseconds. + Weight::from_parts(1_980_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov Value (r:0 w:1) @@ -118,9 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 279 nanoseconds. - Weight::from_parts(357_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 254_000 picoseconds. + Weight::from_parts(326_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Pov Value (r:0 w:1) @@ -129,9 +125,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 291 nanoseconds. - Weight::from_parts(378_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 239_000 picoseconds. 
+ Weight::from_parts(277_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Pov Map1M (r:1 w:0) @@ -140,9 +135,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1275` // Estimated: `4740` - // Minimum execution time: 5_077 nanoseconds. - Weight::from_parts(5_400_000, 0) - .saturating_add(Weight::from_parts(0, 4740)) + // Minimum execution time: 4_760_000 picoseconds. + Weight::from_parts(5_051_000, 4740) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:1 w:0) @@ -151,9 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1544` // Estimated: `5009` - // Minimum execution time: 5_878 nanoseconds. - Weight::from_parts(6_239_000, 0) - .saturating_add(Weight::from_parts(0, 5009)) + // Minimum execution time: 5_490_000 picoseconds. + Weight::from_parts(5_703_000, 5009) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:1 w:0) @@ -162,9 +155,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2044` // Estimated: `5509` - // Minimum execution time: 7_282 nanoseconds. - Weight::from_parts(8_022_000, 0) - .saturating_add(Weight::from_parts(0, 5509)) + // Minimum execution time: 6_397_000 picoseconds. + Weight::from_parts(7_084_000, 5509) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:100 w:0) @@ -175,19 +167,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 100]`. fn storage_map_read_per_component(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `515 + n * (188 ±0) + m * (188 ±0)` - // Estimated: `1980 + n * (3006 ±0) + m * (2511 ±0)` - // Minimum execution time: 195_406 nanoseconds. - Weight::from_parts(129_093_464, 0) - .saturating_add(Weight::from_parts(0, 1980)) - // Standard Error: 12_134 - .saturating_add(Weight::from_parts(855_330, 0).saturating_mul(n.into())) - // Standard Error: 12_134 - .saturating_add(Weight::from_parts(870_523, 0).saturating_mul(m.into())) + // Measured: `515 + m * (188 ±0) + n * (188 ±0)` + // Estimated: `990 + m * (2511 ±0) + n * (3006 ±0)` + // Minimum execution time: 181_481_000 picoseconds. + Weight::from_parts(129_275_141, 990) + // Standard Error: 13_049 + .saturating_add(Weight::from_parts(787_667, 0).saturating_mul(n.into())) + // Standard Error: 13_049 + .saturating_add(Weight::from_parts(830_378, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } /// Storage: Pov Map1M (r:100 w:0) /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Ignored) @@ -197,19 +188,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 100]`. fn storage_map_read_per_component_one_ignored(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `515 + n * (188 ±0) + m * (188 ±0)` - // Estimated: `1685 + n * (3195 ±0) + m * (189 ±0)` - // Minimum execution time: 195_053 nanoseconds. 
- Weight::from_parts(131_322_479, 0) - .saturating_add(Weight::from_parts(0, 1685)) - // Standard Error: 12_161 - .saturating_add(Weight::from_parts(843_047, 0).saturating_mul(n.into())) - // Standard Error: 12_161 - .saturating_add(Weight::from_parts(858_668, 0).saturating_mul(m.into())) + // Measured: `515 + m * (188 ±0) + n * (188 ±0)` + // Estimated: `1685 + m * (189 ±0) + n * (3006 ±0)` + // Minimum execution time: 181_925_000 picoseconds. + Weight::from_parts(134_416_814, 1685) + // Standard Error: 15_678 + .saturating_add(Weight::from_parts(827_168, 0).saturating_mul(n.into())) + // Standard Error: 15_678 + .saturating_add(Weight::from_parts(813_655, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(Weight::from_parts(0, 3195).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 189).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } /// Storage: Pov Map1M (r:1 w:0) /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) @@ -218,11 +208,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3501` - // Minimum execution time: 22 nanoseconds. - Weight::from_parts(2_334_945, 0) - .saturating_add(Weight::from_parts(0, 3501)) - // Standard Error: 624 - .saturating_add(Weight::from_parts(282_046, 0).saturating_mul(n.into())) + // Minimum execution time: 20_000 picoseconds. + Weight::from_parts(2_006_399, 3501) + // Standard Error: 808 + .saturating_add(Weight::from_parts(263_609, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:100 w:0) @@ -232,11 +221,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `147 + n * (40 ±0)` // Estimated: `990 + n * (2511 ±0)` - // Minimum execution time: 20 nanoseconds. - Weight::from_parts(525_027, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 2_767 - .saturating_add(Weight::from_parts(3_887_350, 0).saturating_mul(n.into())) + // Minimum execution time: 21_000 picoseconds. + Weight::from_parts(3_940_044, 990) + // Standard Error: 4_906 + .saturating_add(Weight::from_parts(3_454_882, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(n.into())) } @@ -247,11 +235,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `21938 + n * (57 ±0)` // Estimated: `990 + n * (2543 ±0)` - // Minimum execution time: 34 nanoseconds. - Weight::from_parts(18_341_393, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 1_312 - .saturating_add(Weight::from_parts(2_053_135, 0).saturating_mul(n.into())) + // Minimum execution time: 28_000 picoseconds. + Weight::from_parts(20_674_869, 990) + // Standard Error: 3_035 + .saturating_add(Weight::from_parts(1_995_730, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2543).saturating_mul(n.into())) } @@ -261,9 +248,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1518` - // Minimum execution time: 1_163 nanoseconds. 
- Weight::from_parts(1_274_000, 0) - .saturating_add(Weight::from_parts(0, 1518)) + // Minimum execution time: 1_091_000 picoseconds. + Weight::from_parts(1_181_000, 1518) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov UnboundedValue (r:1 w:0) @@ -272,9 +258,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 1_167 nanoseconds. - Weight::from_parts(1_367_000, 0) - .saturating_add(Weight::from_parts(0, 1594)) + // Minimum execution time: 1_079_000 picoseconds. + Weight::from_parts(1_176_000, 1594) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov UnboundedValue (r:1 w:0) @@ -283,9 +268,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `0` - // Minimum execution time: 1_155 nanoseconds. - Weight::from_parts(1_248_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 1_101_000 picoseconds. + Weight::from_parts(1_160_000, 0) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov UnboundedValue (r:1 w:0) @@ -294,11 +278,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Pov BoundedValue (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) fn storage_value_bounded_and_unbounded_read() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3112` - // Minimum execution time: 1_424 nanoseconds. - Weight::from_parts(1_601_000, 0) - .saturating_add(Weight::from_parts(0, 3112)) + // Measured: `147` + // Estimated: `1632` + // Minimum execution time: 2_143_000 picoseconds. + Weight::from_parts(2_280_000, 1632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Pov LargeValue (r:1 w:0) @@ -306,13 +289,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 4194304]`. fn measured_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `174 + l * (1 ±0)` - // Estimated: `1656 + l * (1 ±0)` - // Minimum execution time: 1_744 nanoseconds. - Weight::from_parts(1_800_000, 0) - .saturating_add(Weight::from_parts(0, 1656)) - // Standard Error: 4 - .saturating_add(Weight::from_parts(443, 0).saturating_mul(l.into())) + // Measured: `142 + l * (1 ±0)` + // Estimated: `1626 + l * (1 ±0)` + // Minimum execution time: 1_665_000 picoseconds. + Weight::from_parts(1_725_000, 1626) + // Standard Error: 3 + .saturating_add(Weight::from_parts(376, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(l.into())) } @@ -321,13 +303,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 4194304]`. fn mel_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `174 + l * (1 ±0)` + // Measured: `142 + l * (1 ±0)` // Estimated: `4195793` - // Minimum execution time: 1_770 nanoseconds. - Weight::from_parts(1_813_000, 0) - .saturating_add(Weight::from_parts(0, 4195793)) - // Standard Error: 6 - .saturating_add(Weight::from_parts(495, 0).saturating_mul(l.into())) + // Minimum execution time: 1_640_000 picoseconds. 
+ Weight::from_parts(1_724_000, 4195793) + // Standard Error: 4 + .saturating_add(Weight::from_parts(395, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Pov LargeValue (r:1 w:0) @@ -337,15 +318,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 4194304]`. fn measured_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `3428 + l * (4 ±0)` - // Minimum execution time: 2_349 nanoseconds. - Weight::from_parts(2_423_000, 0) - .saturating_add(Weight::from_parts(0, 3428)) - // Standard Error: 11 - .saturating_add(Weight::from_parts(950, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `1655 + l * (2 ±0)` + // Minimum execution time: 2_263_000 picoseconds. + Weight::from_parts(2_358_000, 1655) + // Standard Error: 8 + .saturating_add(Weight::from_parts(737, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } /// Storage: Pov LargeValue (r:1 w:0) /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) @@ -354,13 +334,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 4194304]`. fn mel_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `8391586` - // Minimum execution time: 2_315 nanoseconds. - Weight::from_parts(2_409_000, 0) - .saturating_add(Weight::from_parts(0, 8391586)) - // Standard Error: 12 - .saturating_add(Weight::from_parts(984, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `4195793` + // Minimum execution time: 2_161_000 picoseconds. + Weight::from_parts(2_233_000, 4195793) + // Standard Error: 5 + .saturating_add(Weight::from_parts(639, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Pov LargeValue (r:1 w:0) @@ -370,13 +349,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 4194304]`. fn mel_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `4197507 + l * (2 ±0)` - // Minimum execution time: 2_370 nanoseconds. - Weight::from_parts(2_474_000, 0) - .saturating_add(Weight::from_parts(0, 4197507)) - // Standard Error: 11 - .saturating_add(Weight::from_parts(956, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `4195793 + l * (2 ±0)` + // Minimum execution time: 2_149_000 picoseconds. + Weight::from_parts(2_256_000, 4195793) + // Standard Error: 6 + .saturating_add(Weight::from_parts(677, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } @@ -387,13 +365,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 4194304]`. fn measured_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `4197507 + l * (2 ±0)` - // Minimum execution time: 2_375 nanoseconds. 
- Weight::from_parts(2_420_000, 0) - .saturating_add(Weight::from_parts(0, 4197507)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(914, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `4195793 + l * (2 ±0)` + // Minimum execution time: 2_254_000 picoseconds. + Weight::from_parts(2_319_000, 4195793) + // Standard Error: 5 + .saturating_add(Weight::from_parts(664, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } @@ -404,15 +381,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1000]`. fn storage_map_unbounded_both_measured_read(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `293 + i * (8 ±0)` - // Estimated: `7504 + i * (16 ±0)` - // Minimum execution time: 3_305 nanoseconds. - Weight::from_parts(3_689_335, 0) - .saturating_add(Weight::from_parts(0, 7504)) - // Standard Error: 29 - .saturating_add(Weight::from_parts(638, 0).saturating_mul(i.into())) + // Measured: `229 + i * (8 ±0)` + // Estimated: `3693 + i * (8 ±0)` + // Minimum execution time: 3_071_000 picoseconds. + Weight::from_parts(3_487_712, 3693) + // Standard Error: 26 + .saturating_add(Weight::from_parts(748, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 16).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(i.into())) } /// Storage: Pov Map1M (r:1 w:0) /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) @@ -421,13 +397,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_read(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `260 + i * (4 ±0)` - // Estimated: `7223 + i * (4 ±0)` - // Minimum execution time: 3_469 nanoseconds. - Weight::from_parts(3_878_896, 0) - .saturating_add(Weight::from_parts(0, 7223)) - // Standard Error: 33 - .saturating_add(Weight::from_parts(356, 0).saturating_mul(i.into())) + // Measured: `228 + i * (4 ±0)` + // Estimated: `3692 + i * (4 ±0)` + // Minimum execution time: 3_150_000 picoseconds. + Weight::from_parts(3_582_963, 3692) + // Standard Error: 18 + .saturating_add(Weight::from_parts(380, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } @@ -438,13 +413,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_ignored_read(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `260 + i * (4 ±0)` - // Estimated: `3758 + i * (4 ±0)` - // Minimum execution time: 3_442 nanoseconds. - Weight::from_parts(3_881_051, 0) - .saturating_add(Weight::from_parts(0, 3758)) - // Standard Error: 35 - .saturating_add(Weight::from_parts(384, 0).saturating_mul(i.into())) + // Measured: `228 + i * (4 ±0)` + // Estimated: `3501 + i * (4 ±0)` + // Minimum execution time: 3_092_000 picoseconds. 
+ Weight::from_parts(3_595_328, 3501) + // Standard Error: 20 + .saturating_add(Weight::from_parts(243, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } @@ -452,17 +426,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_619 nanoseconds. - Weight::from_parts(1_728_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 1_705_000 picoseconds. + Weight::from_parts(1_818_000, 0) } fn noop() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 546 nanoseconds. - Weight::from_parts(640_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 533_000 picoseconds. + Weight::from_parts(587_000, 0) + } + /// Storage: Pov UnboundedMapTwox (r:65001 w:0) + /// Proof Skipped: Pov UnboundedMapTwox (max_values: None, max_size: None, mode: Measured) + fn storage_iteration() -> Weight { + // Proof Size summary in bytes: + // Measured: `17985289` + // Estimated: `178863754` + // Minimum execution time: 118_753_057_000 picoseconds. + Weight::from_parts(121_396_503_000, 178863754) + .saturating_add(T::DbWeight::get().reads(65001_u64)) } } @@ -474,9 +456,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `136` // Estimated: `1489` - // Minimum execution time: 1_968 nanoseconds. - Weight::from_parts(2_060_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) + // Minimum execution time: 1_706_000 picoseconds. + Weight::from_parts(1_788_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov Value (r:1 w:0) @@ -485,9 +466,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `136` // Estimated: `0` - // Minimum execution time: 1_934 nanoseconds. - Weight::from_parts(2_092_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 1_661_000 picoseconds. + Weight::from_parts(1_718_000, 0) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov Value (r:1 w:0) @@ -497,10 +477,9 @@ impl WeightInfo for () { fn storage_single_value_ignored_some_read() -> Weight { // Proof Size summary in bytes: // Measured: `160` - // Estimated: `1649` - // Minimum execution time: 2_605 nanoseconds. - Weight::from_parts(2_786_000, 0) - .saturating_add(Weight::from_parts(0, 1649)) + // Estimated: `1489` + // Minimum execution time: 2_226_000 picoseconds. + Weight::from_parts(2_365_000, 1489) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Pov Value (r:1 w:0) @@ -509,9 +488,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `136` // Estimated: `1489` - // Minimum execution time: 2_019 nanoseconds. - Weight::from_parts(2_214_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) + // Minimum execution time: 1_785_000 picoseconds. + Weight::from_parts(1_980_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov Value (r:0 w:1) @@ -520,9 +498,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 279 nanoseconds. - Weight::from_parts(357_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 254_000 picoseconds. 
+ Weight::from_parts(326_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Pov Value (r:0 w:1) @@ -531,9 +508,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 291 nanoseconds. - Weight::from_parts(378_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 239_000 picoseconds. + Weight::from_parts(277_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Pov Map1M (r:1 w:0) @@ -542,9 +518,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1275` // Estimated: `4740` - // Minimum execution time: 5_077 nanoseconds. - Weight::from_parts(5_400_000, 0) - .saturating_add(Weight::from_parts(0, 4740)) + // Minimum execution time: 4_760_000 picoseconds. + Weight::from_parts(5_051_000, 4740) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:1 w:0) @@ -553,9 +528,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1544` // Estimated: `5009` - // Minimum execution time: 5_878 nanoseconds. - Weight::from_parts(6_239_000, 0) - .saturating_add(Weight::from_parts(0, 5009)) + // Minimum execution time: 5_490_000 picoseconds. + Weight::from_parts(5_703_000, 5009) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:1 w:0) @@ -564,9 +538,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2044` // Estimated: `5509` - // Minimum execution time: 7_282 nanoseconds. - Weight::from_parts(8_022_000, 0) - .saturating_add(Weight::from_parts(0, 5509)) + // Minimum execution time: 6_397_000 picoseconds. + Weight::from_parts(7_084_000, 5509) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:100 w:0) @@ -577,19 +550,18 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 100]`. fn storage_map_read_per_component(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `515 + n * (188 ±0) + m * (188 ±0)` - // Estimated: `1980 + n * (3006 ±0) + m * (2511 ±0)` - // Minimum execution time: 195_406 nanoseconds. - Weight::from_parts(129_093_464, 0) - .saturating_add(Weight::from_parts(0, 1980)) - // Standard Error: 12_134 - .saturating_add(Weight::from_parts(855_330, 0).saturating_mul(n.into())) - // Standard Error: 12_134 - .saturating_add(Weight::from_parts(870_523, 0).saturating_mul(m.into())) + // Measured: `515 + m * (188 ±0) + n * (188 ±0)` + // Estimated: `990 + m * (2511 ±0) + n * (3006 ±0)` + // Minimum execution time: 181_481_000 picoseconds. + Weight::from_parts(129_275_141, 990) + // Standard Error: 13_049 + .saturating_add(Weight::from_parts(787_667, 0).saturating_mul(n.into())) + // Standard Error: 13_049 + .saturating_add(Weight::from_parts(830_378, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } /// Storage: Pov Map1M (r:100 w:0) /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Ignored) @@ -599,19 +571,18 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 100]`. 
fn storage_map_read_per_component_one_ignored(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `515 + n * (188 ±0) + m * (188 ±0)` - // Estimated: `1685 + n * (3195 ±0) + m * (189 ±0)` - // Minimum execution time: 195_053 nanoseconds. - Weight::from_parts(131_322_479, 0) - .saturating_add(Weight::from_parts(0, 1685)) - // Standard Error: 12_161 - .saturating_add(Weight::from_parts(843_047, 0).saturating_mul(n.into())) - // Standard Error: 12_161 - .saturating_add(Weight::from_parts(858_668, 0).saturating_mul(m.into())) + // Measured: `515 + m * (188 ±0) + n * (188 ±0)` + // Estimated: `1685 + m * (189 ±0) + n * (3006 ±0)` + // Minimum execution time: 181_925_000 picoseconds. + Weight::from_parts(134_416_814, 1685) + // Standard Error: 15_678 + .saturating_add(Weight::from_parts(827_168, 0).saturating_mul(n.into())) + // Standard Error: 15_678 + .saturating_add(Weight::from_parts(813_655, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(Weight::from_parts(0, 3195).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 189).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } /// Storage: Pov Map1M (r:1 w:0) /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) @@ -620,11 +591,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3501` - // Minimum execution time: 22 nanoseconds. - Weight::from_parts(2_334_945, 0) - .saturating_add(Weight::from_parts(0, 3501)) - // Standard Error: 624 - .saturating_add(Weight::from_parts(282_046, 0).saturating_mul(n.into())) + // Minimum execution time: 20_000 picoseconds. + Weight::from_parts(2_006_399, 3501) + // Standard Error: 808 + .saturating_add(Weight::from_parts(263_609, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov Map1M (r:100 w:0) @@ -634,11 +604,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `147 + n * (40 ±0)` // Estimated: `990 + n * (2511 ±0)` - // Minimum execution time: 20 nanoseconds. - Weight::from_parts(525_027, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 2_767 - .saturating_add(Weight::from_parts(3_887_350, 0).saturating_mul(n.into())) + // Minimum execution time: 21_000 picoseconds. + Weight::from_parts(3_940_044, 990) + // Standard Error: 4_906 + .saturating_add(Weight::from_parts(3_454_882, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(n.into())) } @@ -649,11 +618,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `21938 + n * (57 ±0)` // Estimated: `990 + n * (2543 ±0)` - // Minimum execution time: 34 nanoseconds. - Weight::from_parts(18_341_393, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 1_312 - .saturating_add(Weight::from_parts(2_053_135, 0).saturating_mul(n.into())) + // Minimum execution time: 28_000 picoseconds. 
+ Weight::from_parts(20_674_869, 990) + // Standard Error: 3_035 + .saturating_add(Weight::from_parts(1_995_730, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2543).saturating_mul(n.into())) } @@ -663,9 +631,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1518` - // Minimum execution time: 1_163 nanoseconds. - Weight::from_parts(1_274_000, 0) - .saturating_add(Weight::from_parts(0, 1518)) + // Minimum execution time: 1_091_000 picoseconds. + Weight::from_parts(1_181_000, 1518) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov UnboundedValue (r:1 w:0) @@ -674,9 +641,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 1_167 nanoseconds. - Weight::from_parts(1_367_000, 0) - .saturating_add(Weight::from_parts(0, 1594)) + // Minimum execution time: 1_079_000 picoseconds. + Weight::from_parts(1_176_000, 1594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov UnboundedValue (r:1 w:0) @@ -685,9 +651,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `0` - // Minimum execution time: 1_155 nanoseconds. - Weight::from_parts(1_248_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 1_101_000 picoseconds. + Weight::from_parts(1_160_000, 0) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov UnboundedValue (r:1 w:0) @@ -696,11 +661,10 @@ impl WeightInfo for () { /// Proof: Pov BoundedValue (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) fn storage_value_bounded_and_unbounded_read() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3112` - // Minimum execution time: 1_424 nanoseconds. - Weight::from_parts(1_601_000, 0) - .saturating_add(Weight::from_parts(0, 3112)) + // Measured: `147` + // Estimated: `1632` + // Minimum execution time: 2_143_000 picoseconds. + Weight::from_parts(2_280_000, 1632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Pov LargeValue (r:1 w:0) @@ -708,13 +672,12 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 4194304]`. fn measured_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `174 + l * (1 ±0)` - // Estimated: `1656 + l * (1 ±0)` - // Minimum execution time: 1_744 nanoseconds. - Weight::from_parts(1_800_000, 0) - .saturating_add(Weight::from_parts(0, 1656)) - // Standard Error: 4 - .saturating_add(Weight::from_parts(443, 0).saturating_mul(l.into())) + // Measured: `142 + l * (1 ±0)` + // Estimated: `1626 + l * (1 ±0)` + // Minimum execution time: 1_665_000 picoseconds. + Weight::from_parts(1_725_000, 1626) + // Standard Error: 3 + .saturating_add(Weight::from_parts(376, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(l.into())) } @@ -723,13 +686,12 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 4194304]`. fn mel_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `174 + l * (1 ±0)` + // Measured: `142 + l * (1 ±0)` // Estimated: `4195793` - // Minimum execution time: 1_770 nanoseconds. 
- Weight::from_parts(1_813_000, 0) - .saturating_add(Weight::from_parts(0, 4195793)) - // Standard Error: 6 - .saturating_add(Weight::from_parts(495, 0).saturating_mul(l.into())) + // Minimum execution time: 1_640_000 picoseconds. + Weight::from_parts(1_724_000, 4195793) + // Standard Error: 4 + .saturating_add(Weight::from_parts(395, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Pov LargeValue (r:1 w:0) @@ -739,15 +701,14 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 4194304]`. fn measured_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `3428 + l * (4 ±0)` - // Minimum execution time: 2_349 nanoseconds. - Weight::from_parts(2_423_000, 0) - .saturating_add(Weight::from_parts(0, 3428)) - // Standard Error: 11 - .saturating_add(Weight::from_parts(950, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `1655 + l * (2 ±0)` + // Minimum execution time: 2_263_000 picoseconds. + Weight::from_parts(2_358_000, 1655) + // Standard Error: 8 + .saturating_add(Weight::from_parts(737, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } /// Storage: Pov LargeValue (r:1 w:0) /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) @@ -756,13 +717,12 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 4194304]`. fn mel_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `8391586` - // Minimum execution time: 2_315 nanoseconds. - Weight::from_parts(2_409_000, 0) - .saturating_add(Weight::from_parts(0, 8391586)) - // Standard Error: 12 - .saturating_add(Weight::from_parts(984, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `4195793` + // Minimum execution time: 2_161_000 picoseconds. + Weight::from_parts(2_233_000, 4195793) + // Standard Error: 5 + .saturating_add(Weight::from_parts(639, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Pov LargeValue (r:1 w:0) @@ -772,13 +732,12 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 4194304]`. fn mel_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `4197507 + l * (2 ±0)` - // Minimum execution time: 2_370 nanoseconds. - Weight::from_parts(2_474_000, 0) - .saturating_add(Weight::from_parts(0, 4197507)) - // Standard Error: 11 - .saturating_add(Weight::from_parts(956, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `4195793 + l * (2 ±0)` + // Minimum execution time: 2_149_000 picoseconds. + Weight::from_parts(2_256_000, 4195793) + // Standard Error: 6 + .saturating_add(Weight::from_parts(677, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } @@ -789,13 +748,12 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 4194304]`. 
fn measured_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `235 + l * (2 ±0)` - // Estimated: `4197507 + l * (2 ±0)` - // Minimum execution time: 2_375 nanoseconds. - Weight::from_parts(2_420_000, 0) - .saturating_add(Weight::from_parts(0, 4197507)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(914, 0).saturating_mul(l.into())) + // Measured: `171 + l * (2 ±0)` + // Estimated: `4195793 + l * (2 ±0)` + // Minimum execution time: 2_254_000 picoseconds. + Weight::from_parts(2_319_000, 4195793) + // Standard Error: 5 + .saturating_add(Weight::from_parts(664, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } @@ -806,15 +764,14 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 1000]`. fn storage_map_unbounded_both_measured_read(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `293 + i * (8 ±0)` - // Estimated: `7504 + i * (16 ±0)` - // Minimum execution time: 3_305 nanoseconds. - Weight::from_parts(3_689_335, 0) - .saturating_add(Weight::from_parts(0, 7504)) - // Standard Error: 29 - .saturating_add(Weight::from_parts(638, 0).saturating_mul(i.into())) + // Measured: `229 + i * (8 ±0)` + // Estimated: `3693 + i * (8 ±0)` + // Minimum execution time: 3_071_000 picoseconds. + Weight::from_parts(3_487_712, 3693) + // Standard Error: 26 + .saturating_add(Weight::from_parts(748, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 16).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(i.into())) } /// Storage: Pov Map1M (r:1 w:0) /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) @@ -823,13 +780,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_read(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `260 + i * (4 ±0)` - // Estimated: `7223 + i * (4 ±0)` - // Minimum execution time: 3_469 nanoseconds. - Weight::from_parts(3_878_896, 0) - .saturating_add(Weight::from_parts(0, 7223)) - // Standard Error: 33 - .saturating_add(Weight::from_parts(356, 0).saturating_mul(i.into())) + // Measured: `228 + i * (4 ±0)` + // Estimated: `3692 + i * (4 ±0)` + // Minimum execution time: 3_150_000 picoseconds. + Weight::from_parts(3_582_963, 3692) + // Standard Error: 18 + .saturating_add(Weight::from_parts(380, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } @@ -840,13 +796,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_ignored_read(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `260 + i * (4 ±0)` - // Estimated: `3758 + i * (4 ±0)` - // Minimum execution time: 3_442 nanoseconds. - Weight::from_parts(3_881_051, 0) - .saturating_add(Weight::from_parts(0, 3758)) - // Standard Error: 35 - .saturating_add(Weight::from_parts(384, 0).saturating_mul(i.into())) + // Measured: `228 + i * (4 ±0)` + // Estimated: `3501 + i * (4 ±0)` + // Minimum execution time: 3_092_000 picoseconds. 
+ Weight::from_parts(3_595_328, 3501) + // Standard Error: 20 + .saturating_add(Weight::from_parts(243, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } @@ -854,16 +809,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_619 nanoseconds. - Weight::from_parts(1_728_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 1_705_000 picoseconds. + Weight::from_parts(1_818_000, 0) } fn noop() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 546 nanoseconds. - Weight::from_parts(640_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 533_000 picoseconds. + Weight::from_parts(587_000, 0) + } + /// Storage: Pov UnboundedMapTwox (r:65001 w:0) + /// Proof Skipped: Pov UnboundedMapTwox (max_values: None, max_size: None, mode: Measured) + fn storage_iteration() -> Weight { + // Proof Size summary in bytes: + // Measured: `17985289` + // Estimated: `178863754` + // Minimum execution time: 118_753_057_000 picoseconds. + Weight::from_parts(121_396_503_000, 178863754) + .saturating_add(RocksDbWeight::get().reads(65001_u64)) } } diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs index 11d4ba5011a2f..25336b6974d9f 100644 --- a/frame/benchmarking/src/baseline.rs +++ b/frame/benchmarking/src/baseline.rs @@ -160,12 +160,11 @@ pub mod mock { impl super::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { - use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr}; - use sp_std::sync::Arc; + use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let mut ext = sp_io::TestExternalities::new(t); - ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()) as SyncCryptoStorePtr)); + ext.register_extension(KeystoreExt::new(MemoryKeystore::new())); ext } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 7110c378d581e..b11e164fe8e44 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -160,6 +160,13 @@ pub use v1::*; /// The underscore will be substituted with the name of the benchmark (i.e. the name of the /// function in the benchmark function definition). 
/// +/// In case of a `force_origin` where you want to elevate the privileges of the provided origin, +/// this is the general syntax: +/// ```ignore +/// #[extrinsic_call] +/// _(force_origin as T::RuntimeOrigin, 0u32.into(), 0); +/// ``` +/// /// Regardless of whether `#[extrinsic_call]` or `#[block]` is used, this attribute also serves /// the purpose of designating the boundary between the setup code portion of the benchmark /// (everything before the `#[extrinsic_call]` or `#[block]` attribute) and the verification diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 453e8e125cbb3..80b631bb73668 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -29,7 +29,7 @@ use sp_runtime::{ use sp_std::prelude::*; use std::cell::RefCell; -#[frame_support::pallet] +#[frame_support::pallet(dev_mode)] mod pallet_test { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index d49d7cc817c6b..92d78b9ecbbe1 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -61,7 +61,7 @@ mod pallet_test { ::OtherEvent: Into<>::RuntimeEvent>, { #[pallet::call_index(0)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { let _sender = ensure_signed(origin)?; Value::::put(n); @@ -69,7 +69,7 @@ mod pallet_test { } #[pallet::call_index(1)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn dummy(origin: OriginFor, _n: u32) -> DispatchResult { let _sender = ensure_none(origin)?; Ok(()) diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index b05feb5900a65..25e2702f702d1 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for frame_benchmarking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=frame_benchmarking // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=frame_benchmarking -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/benchmarking/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -65,24 +64,24 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 168_000 picoseconds. - Weight::from_parts(237_577, 0) + // Minimum execution time: 173_000 picoseconds. + Weight::from_parts(205_895, 0) } /// The range of component `i` is `[0, 1000000]`. 
fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 169_000 picoseconds. - Weight::from_parts(224_111, 0) + // Minimum execution time: 180_000 picoseconds. + Weight::from_parts(206_967, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 175_000 picoseconds. - Weight::from_parts(229_708, 0) + // Minimum execution time: 174_000 picoseconds. + Weight::from_parts(214_304, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { @@ -90,24 +89,24 @@ impl WeightInfo for SubstrateWeight { // Measured: `0` // Estimated: `0` // Minimum execution time: 173_000 picoseconds. - Weight::from_parts(229_855, 0) + Weight::from_parts(207_804, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_286_627_000 picoseconds. - Weight::from_parts(21_405_011_000, 0) + // Minimum execution time: 21_173_551_000 picoseconds. + Weight::from_parts(21_256_886_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 239_000 picoseconds. - Weight::from_parts(661_987, 0) - // Standard Error: 24_324 - .saturating_add(Weight::from_parts(47_322_399, 0).saturating_mul(i.into())) + // Minimum execution time: 208_000 picoseconds. + Weight::from_parts(1_227_077, 0) + // Standard Error: 9_390 + .saturating_add(Weight::from_parts(47_152_841, 0).saturating_mul(i.into())) } } @@ -118,24 +117,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 168_000 picoseconds. - Weight::from_parts(237_577, 0) + // Minimum execution time: 173_000 picoseconds. + Weight::from_parts(205_895, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 169_000 picoseconds. - Weight::from_parts(224_111, 0) + // Minimum execution time: 180_000 picoseconds. + Weight::from_parts(206_967, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 175_000 picoseconds. - Weight::from_parts(229_708, 0) + // Minimum execution time: 174_000 picoseconds. + Weight::from_parts(214_304, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { @@ -143,23 +142,23 @@ impl WeightInfo for () { // Measured: `0` // Estimated: `0` // Minimum execution time: 173_000 picoseconds. - Weight::from_parts(229_855, 0) + Weight::from_parts(207_804, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_286_627_000 picoseconds. - Weight::from_parts(21_405_011_000, 0) + // Minimum execution time: 21_173_551_000 picoseconds. + Weight::from_parts(21_256_886_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 239_000 picoseconds. 
- Weight::from_parts(661_987, 0) - // Standard Error: 24_324 - .saturating_add(Weight::from_parts(47_322_399, 0).saturating_mul(i.into())) + // Minimum execution time: 208_000 picoseconds. + Weight::from_parts(1_227_077, 0) + // Standard Error: 9_390 + .saturating_add(Weight::from_parts(47_152_841, 0).saturating_mul(i.into())) } } diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index dcb1adae05777..a2f39ccd7ec0b 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "derive", ] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 04465a25eb891..0675328c3d3c2 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -57,9 +57,12 @@ fn setup_bounty<T: Config<I>, I: 'static>( let fee = value / 2u32.into(); let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); - let _ = T::Currency::make_free_balance_be(&caller, deposit); + let _ = T::Currency::make_free_balance_be(&caller, deposit + T::Currency::minimum_balance()); let curator = account("curator", u, SEED); - let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); + let _ = T::Currency::make_free_balance_be( + &curator, + fee / 2u32.into() + T::Currency::minimum_balance(), + ); let reason = vec![0; d as usize]; (caller, curator, fee, value, reason) } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index 27aa0858523ec..ef3da7564874e 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -100,6 +100,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); @@ -754,7 +758,7 @@ fn award_and_claim_bounty_works() { System::set_block_number(5); <Treasury as OnInitialize<u64>>::on_initialize(5); - assert_ok!(Balances::transfer( + assert_ok!(Balances::transfer_allow_death( RuntimeOrigin::signed(0), Bounties::bounty_account_id(0), 10 @@ -832,7 +836,7 @@ fn cancel_and_refund() { System::set_block_number(2); <Treasury as OnInitialize<u64>>::on_initialize(2); - assert_ok!(Balances::transfer( + assert_ok!(Balances::transfer_allow_death( RuntimeOrigin::signed(0), Bounties::bounty_account_id(0), 10 diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index f0ea78b5f1e8d..5a84adf08210c 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //!
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -75,12 +75,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `308` - // Estimated: `3102` - // Minimum execution time: 22_787 nanoseconds. - Weight::from_parts(23_898_632, 3102) - // Standard Error: 141 - .saturating_add(Weight::from_parts(568, 0).saturating_mul(d.into())) + // Measured: `276` + // Estimated: `3593` + // Minimum execution time: 30_793_000 picoseconds. + Weight::from_parts(31_509_544, 3593) + // Standard Error: 168 + .saturating_add(Weight::from_parts(2_219, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -90,10 +90,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `400` - // Estimated: `3549` - // Minimum execution time: 10_526 nanoseconds. - Weight::from_parts(10_729_000, 3549) + // Measured: `368` + // Estimated: `3642` + // Minimum execution time: 12_471_000 picoseconds. + Weight::from_parts(12_677_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,10 +101,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `420` - // Estimated: `2652` - // Minimum execution time: 9_193 nanoseconds. - Weight::from_parts(9_455_000, 2652) + // Measured: `388` + // Estimated: `3642` + // Minimum execution time: 10_560_000 picoseconds. + Weight::from_parts(10_744_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -114,10 +114,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `628` - // Estimated: `5255` - // Minimum execution time: 22_592 nanoseconds. - Weight::from_parts(22_952_000, 5255) + // Measured: `564` + // Estimated: `3642` + // Minimum execution time: 30_980_000 picoseconds. + Weight::from_parts(31_354_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -127,10 +127,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `624` - // Estimated: `5255` - // Minimum execution time: 20_920 nanoseconds. - Weight::from_parts(21_369_000, 5255) + // Measured: `560` + // Estimated: `3642` + // Minimum execution time: 29_257_000 picoseconds. 
+ Weight::from_parts(29_656_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -140,10 +140,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `570` - // Estimated: `5143` - // Minimum execution time: 17_853 nanoseconds. - Weight::from_parts(18_280_000, 5143) + // Measured: `538` + // Estimated: `3642` + // Minimum execution time: 20_662_000 picoseconds. + Weight::from_parts(20_956_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -157,10 +157,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `998` - // Estimated: `12964` - // Minimum execution time: 67_538 nanoseconds. - Weight::from_parts(67_974_000, 12964) + // Measured: `902` + // Estimated: `8799` + // Minimum execution time: 119_287_000 picoseconds. + Weight::from_parts(121_468_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -174,10 +174,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `646` - // Estimated: `7746` - // Minimum execution time: 28_380 nanoseconds. - Weight::from_parts(28_859_000, 7746) + // Measured: `582` + // Estimated: `3642` + // Minimum execution time: 37_759_000 picoseconds. + Weight::from_parts(38_185_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -191,10 +191,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `914` - // Estimated: `10349` - // Minimum execution time: 47_739 nanoseconds. - Weight::from_parts(48_388_000, 10349) + // Measured: `818` + // Estimated: `6196` + // Minimum execution time: 80_332_000 picoseconds. + Weight::from_parts(81_328_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -202,10 +202,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `2652` - // Minimum execution time: 14_188 nanoseconds. - Weight::from_parts(14_801_000, 2652) + // Measured: `424` + // Estimated: `3642` + // Minimum execution time: 16_301_000 picoseconds. + Weight::from_parts(16_611_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -218,17 +218,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `31 + b * (360 ±0)` - // Estimated: `897 + b * (7858 ±0)` - // Minimum execution time: 4_685 nanoseconds. 
- Weight::from_parts(9_932_840, 897) - // Standard Error: 14_301 - .saturating_add(Weight::from_parts(27_178_347, 0).saturating_mul(b.into())) + // Measured: `4 + b * (297 ±0)` + // Estimated: `1887 + b * (5206 ±0)` + // Minimum execution time: 5_430_000 picoseconds. + Weight::from_parts(4_463_976, 1887) + // Standard Error: 43_695 + .saturating_add(Weight::from_parts(39_370_113, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 7858).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } } @@ -245,12 +245,12 @@ impl WeightInfo for () { /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `308` - // Estimated: `3102` - // Minimum execution time: 22_787 nanoseconds. - Weight::from_parts(23_898_632, 3102) - // Standard Error: 141 - .saturating_add(Weight::from_parts(568, 0).saturating_mul(d.into())) + // Measured: `276` + // Estimated: `3593` + // Minimum execution time: 30_793_000 picoseconds. + Weight::from_parts(31_509_544, 3593) + // Standard Error: 168 + .saturating_add(Weight::from_parts(2_219, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -260,10 +260,10 @@ impl WeightInfo for () { /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `400` - // Estimated: `3549` - // Minimum execution time: 10_526 nanoseconds. - Weight::from_parts(10_729_000, 3549) + // Measured: `368` + // Estimated: `3642` + // Minimum execution time: 12_471_000 picoseconds. + Weight::from_parts(12_677_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -271,10 +271,10 @@ impl WeightInfo for () { /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `420` - // Estimated: `2652` - // Minimum execution time: 9_193 nanoseconds. - Weight::from_parts(9_455_000, 2652) + // Measured: `388` + // Estimated: `3642` + // Minimum execution time: 10_560_000 picoseconds. + Weight::from_parts(10_744_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -284,10 +284,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `628` - // Estimated: `5255` - // Minimum execution time: 22_592 nanoseconds. - Weight::from_parts(22_952_000, 5255) + // Measured: `564` + // Estimated: `3642` + // Minimum execution time: 30_980_000 picoseconds. 
+ Weight::from_parts(31_354_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -297,10 +297,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `624` - // Estimated: `5255` - // Minimum execution time: 20_920 nanoseconds. - Weight::from_parts(21_369_000, 5255) + // Measured: `560` + // Estimated: `3642` + // Minimum execution time: 29_257_000 picoseconds. + Weight::from_parts(29_656_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -310,10 +310,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `570` - // Estimated: `5143` - // Minimum execution time: 17_853 nanoseconds. - Weight::from_parts(18_280_000, 5143) + // Measured: `538` + // Estimated: `3642` + // Minimum execution time: 20_662_000 picoseconds. + Weight::from_parts(20_956_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -327,10 +327,10 @@ impl WeightInfo for () { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `998` - // Estimated: `12964` - // Minimum execution time: 67_538 nanoseconds. - Weight::from_parts(67_974_000, 12964) + // Measured: `902` + // Estimated: `8799` + // Minimum execution time: 119_287_000 picoseconds. + Weight::from_parts(121_468_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -344,10 +344,10 @@ impl WeightInfo for () { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `646` - // Estimated: `7746` - // Minimum execution time: 28_380 nanoseconds. - Weight::from_parts(28_859_000, 7746) + // Measured: `582` + // Estimated: `3642` + // Minimum execution time: 37_759_000 picoseconds. + Weight::from_parts(38_185_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -361,10 +361,10 @@ impl WeightInfo for () { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `914` - // Estimated: `10349` - // Minimum execution time: 47_739 nanoseconds. - Weight::from_parts(48_388_000, 10349) + // Measured: `818` + // Estimated: `6196` + // Minimum execution time: 80_332_000 picoseconds. + Weight::from_parts(81_328_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -372,10 +372,10 @@ impl WeightInfo for () { /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `2652` - // Minimum execution time: 14_188 nanoseconds. 
- Weight::from_parts(14_801_000, 2652) + // Measured: `424` + // Estimated: `3642` + // Minimum execution time: 16_301_000 picoseconds. + Weight::from_parts(16_611_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -388,16 +388,16 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `31 + b * (360 ±0)` - // Estimated: `897 + b * (7858 ±0)` - // Minimum execution time: 4_685 nanoseconds. - Weight::from_parts(9_932_840, 897) - // Standard Error: 14_301 - .saturating_add(Weight::from_parts(27_178_347, 0).saturating_mul(b.into())) + // Measured: `4 + b * (297 ±0)` + // Estimated: `1887 + b * (5206 ±0)` + // Minimum execution time: 5_430_000 picoseconds. + Weight::from_parts(4_463_976, 1887) + // Standard Error: 43_695 + .saturating_add(Weight::from_parts(39_370_113, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 7858).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } } diff --git a/frame/child-bounties/Cargo.toml b/frame/child-bounties/Cargo.toml index 8d29b7619759d..48fa222616152 100644 --- a/frame/child-bounties/Cargo.toml +++ b/frame/child-bounties/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "derive", ] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index 0a83f7c394905..e49d9c836125d 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -63,9 +63,12 @@ fn setup_bounty( let fee = value / 2u32.into(); let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); - let _ = T::Currency::make_free_balance_be(&caller, deposit); + let _ = T::Currency::make_free_balance_be(&caller, deposit + T::Currency::minimum_balance()); let curator = account("curator", user, SEED); - let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); + let _ = T::Currency::make_free_balance_be( + &curator, + fee / 2u32.into() + T::Currency::minimum_balance(), + ); let reason = vec![0; description as usize]; (caller, curator, fee, value, reason) } @@ -73,7 +76,10 @@ fn setup_bounty( fn setup_child_bounty(user: u32, description: u32) -> BenchmarkChildBounty { let (caller, curator, fee, value, reason) = setup_bounty::(user, description); let child_curator = account("child-curator", user, SEED); - let _ = T::Currency::make_free_balance_be(&child_curator, fee / 2u32.into()); + let _ = T::Currency::make_free_balance_be( + &child_curator, + fee / 2u32.into() + T::Currency::minimum_balance(), + ); 
let child_bounty_value = (value - fee) / 4u32.into(); let child_bounty_fee = child_bounty_value / 2u32.into(); diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index 0172468ec55d6..a936312aec868 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -35,7 +35,7 @@ use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, - Perbill, Permill, + Perbill, Permill, TokenError, }; use super::Event as ChildBountiesEvent; @@ -103,6 +103,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); @@ -248,7 +252,7 @@ fn add_child_bounty() { assert_noop!( ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 50, b"12345-p1".to_vec()), - pallet_balances::Error::::KeepAlive, + TokenError::NotExpendable, ); assert_noop!( diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index eeb14419678d7..be30e80a19f27 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_child_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -74,14 +74,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: ChildBounties ChildBounties (r:0 w:1) /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) /// The range of component `d` is `[0, 300]`. - fn add_child_bounty(d: u32, ) -> Weight { + fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `742` - // Estimated: `10848` - // Minimum execution time: 46_743 nanoseconds. - Weight::from_parts(47_762_924, 10848) - // Standard Error: 135 - .saturating_add(Weight::from_parts(599, 0).saturating_mul(d.into())) + // Measured: `678` + // Estimated: `6196` + // Minimum execution time: 69_784_000 picoseconds. + Weight::from_parts(71_225_354, 6196) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -93,10 +91,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `796` - // Estimated: `7775` - // Minimum execution time: 16_417 nanoseconds. - Weight::from_parts(16_712_000, 7775) + // Measured: `732` + // Estimated: `3642` + // Minimum execution time: 19_008_000 picoseconds. 
+ Weight::from_parts(19_219_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -108,10 +106,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `974` - // Estimated: `7875` - // Minimum execution time: 25_548 nanoseconds. - Weight::from_parts(25_919_000, 7875) + // Measured: `878` + // Estimated: `3642` + // Minimum execution time: 35_457_000 picoseconds. + Weight::from_parts(36_088_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -123,10 +121,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `974` - // Estimated: `7875` - // Minimum execution time: 27_645 nanoseconds. - Weight::from_parts(27_947_000, 7875) + // Measured: `878` + // Estimated: `3642` + // Minimum execution time: 38_244_000 picoseconds. + Weight::from_parts(38_611_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -136,10 +134,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `839` - // Estimated: `5272` - // Minimum execution time: 20_002 nanoseconds. - Weight::from_parts(20_512_000, 5272) + // Measured: `775` + // Estimated: `3642` + // Minimum execution time: 22_762_000 picoseconds. + Weight::from_parts(23_249_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -153,10 +151,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `744` - // Estimated: `12920` - // Minimum execution time: 63_752 nanoseconds. - Weight::from_parts(64_179_000, 12920) + // Measured: `648` + // Estimated: `8799` + // Minimum execution time: 112_019_000 picoseconds. + Weight::from_parts(113_190_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -174,10 +172,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1106` - // Estimated: `15472` - // Minimum execution time: 47_388 nanoseconds. - Weight::from_parts(47_946_000, 15472) + // Measured: `978` + // Estimated: `6196` + // Minimum execution time: 72_477_000 picoseconds. + Weight::from_parts(73_573_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -195,10 +193,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1325` - // Estimated: `18075` - // Minimum execution time: 58_008 nanoseconds. 
- Weight::from_parts(58_586_000, 18075) + // Measured: `1165` + // Estimated: `8799` + // Minimum execution time: 91_049_000 picoseconds. + Weight::from_parts(91_874_000, 8799) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -219,14 +217,12 @@ impl WeightInfo for () { /// Storage: ChildBounties ChildBounties (r:0 w:1) /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) /// The range of component `d` is `[0, 300]`. - fn add_child_bounty(d: u32, ) -> Weight { + fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `742` - // Estimated: `10848` - // Minimum execution time: 46_743 nanoseconds. - Weight::from_parts(47_762_924, 10848) - // Standard Error: 135 - .saturating_add(Weight::from_parts(599, 0).saturating_mul(d.into())) + // Measured: `678` + // Estimated: `6196` + // Minimum execution time: 69_784_000 picoseconds. + Weight::from_parts(71_225_354, 6196) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -238,10 +234,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `796` - // Estimated: `7775` - // Minimum execution time: 16_417 nanoseconds. - Weight::from_parts(16_712_000, 7775) + // Measured: `732` + // Estimated: `3642` + // Minimum execution time: 19_008_000 picoseconds. + Weight::from_parts(19_219_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -253,10 +249,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `974` - // Estimated: `7875` - // Minimum execution time: 25_548 nanoseconds. - Weight::from_parts(25_919_000, 7875) + // Measured: `878` + // Estimated: `3642` + // Minimum execution time: 35_457_000 picoseconds. + Weight::from_parts(36_088_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -268,10 +264,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `974` - // Estimated: `7875` - // Minimum execution time: 27_645 nanoseconds. - Weight::from_parts(27_947_000, 7875) + // Measured: `878` + // Estimated: `3642` + // Minimum execution time: 38_244_000 picoseconds. + Weight::from_parts(38_611_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -281,10 +277,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `839` - // Estimated: `5272` - // Minimum execution time: 20_002 nanoseconds. - Weight::from_parts(20_512_000, 5272) + // Measured: `775` + // Estimated: `3642` + // Minimum execution time: 22_762_000 picoseconds. 
+ Weight::from_parts(23_249_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -298,10 +294,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `744` - // Estimated: `12920` - // Minimum execution time: 63_752 nanoseconds. - Weight::from_parts(64_179_000, 12920) + // Measured: `648` + // Estimated: `8799` + // Minimum execution time: 112_019_000 picoseconds. + Weight::from_parts(113_190_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -319,10 +315,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1106` - // Estimated: `15472` - // Minimum execution time: 47_388 nanoseconds. - Weight::from_parts(47_946_000, 15472) + // Measured: `978` + // Estimated: `6196` + // Minimum execution time: 72_477_000 picoseconds. + Weight::from_parts(73_573_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -340,10 +336,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1325` - // Estimated: `18075` - // Minimum execution time: 58_008 nanoseconds. - Weight::from_parts(58_586_000, 18075) + // Measured: `1165` + // Estimated: `8799` + // Minimum execution time: 91_049_000 picoseconds. + Weight::from_parts(91_874_000, 8799) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 26cd5a8ab44cc..7ae8817474a47 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index d4b5af1bc1937..bcd203c3894a3 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -646,5 +646,5 @@ benchmarks_instance_pallet! 
{ assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } - impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Collective, crate::tests::ExtBuilder::default().build(), crate::tests::Test); } diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 414db9f24e6e2..4679bbdd50530 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -50,14 +50,14 @@ use sp_std::{marker::PhantomData, prelude::*, result, vec}; use frame_support::{ codec::{Decode, Encode, MaxEncodedLen}, dispatch::{ - DispatchError, DispatchResultWithPostInfo, Dispatchable, GetDispatchInfo, Pays, - PostDispatchInfo, + DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable, GetDispatchInfo, + Pays, PostDispatchInfo, }, ensure, traits::{ Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, }, - weights::{OldWeight, Weight}, + weights::Weight, }; #[cfg(test)] @@ -217,6 +217,10 @@ pub mod pallet { /// Origin allowed to set collective members type SetMembersOrigin: EnsureOrigin<::RuntimeOrigin>; + + /// The maximum weight of a dispatch call that can be proposed and executed. + #[pallet::constant] + type MaxProposalWeight: Get; } #[pallet::genesis_config] @@ -242,6 +246,10 @@ pub mod pallet { self.members.len(), "Members cannot contain duplicate accounts." ); + assert!( + self.members.len() <= T::MaxMembers::get() as usize, + "Members length cannot exceed MaxMembers.", + ); Pallet::::initialize_members(&self.members) } @@ -341,6 +349,15 @@ pub mod pallet { WrongProposalLength, } + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), &'static str> { + Self::do_try_state()?; + Ok(()) + } + } + // Note that councillor operations are assigned to the operational class. #[pallet::call] impl, I: 'static> Pallet { @@ -557,59 +574,7 @@ pub mod pallet { } } - /// Close a vote that is either approved, disapproved or whose voting period has ended. - /// - /// May be called by any signed account in order to finish voting and close the proposal. - /// - /// If called before the end of the voting period it will only close the vote if it is - /// has enough votes to be approved or disapproved. - /// - /// If called after the end of the voting period abstentions are counted as rejections - /// unless there is a prime member set and the prime member cast an approval. - /// - /// If the close operation completes successfully with disapproval, the transaction fee will - /// be waived. Otherwise execution of the approved operation will be charged to the caller. - /// - /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed - /// proposal. - /// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via - /// `storage::read` so it is `size_of::() == 4` larger than the pure length. - /// - /// ## Complexity - /// - `O(B + M + P1 + P2)` where: - /// - `B` is `proposal` size in bytes (length-fee-bounded) - /// - `M` is members-count (code- and governance-bounded) - /// - `P1` is the complexity of `proposal` preimage. 
- /// - `P2` is proposal-count (code-bounded) - #[pallet::call_index(4)] - #[pallet::weight(( - { - let b = *length_bound; - let m = T::MaxMembers::get(); - let p1 = *proposal_weight_bound; - let p2 = T::MaxProposals::get(); - T::WeightInfo::close_early_approved(b, m, p2) - .max(T::WeightInfo::close_early_disapproved(m, p2)) - .max(T::WeightInfo::close_approved(b, m, p2)) - .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1.into()) - }, - DispatchClass::Operational - ))] - #[allow(deprecated)] - #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `close`")] - pub fn close_old_weight( - origin: OriginFor, - proposal_hash: T::Hash, - #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: OldWeight, - #[pallet::compact] length_bound: u32, - ) -> DispatchResultWithPostInfo { - let proposal_weight_bound: Weight = proposal_weight_bound.into(); - let _ = ensure_signed(origin)?; - - Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) - } + // Index 4 was `close_old_weight`; it was removed due to weights v1 deprecation. /// Disapprove a proposal, close, and remove it from the system, regardless of its current /// state. @@ -710,6 +675,11 @@ impl, I: 'static> Pallet { ) -> Result<(u32, DispatchResultWithPostInfo), DispatchError> { let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); + let proposal_weight = proposal.get_dispatch_info().weight; + ensure!( + proposal_weight.all_lte(T::MaxProposalWeight::get()), + Error::::WrongProposalWeight + ); let proposal_hash = T::Hashing::hash_of(&proposal); ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); @@ -732,6 +702,11 @@ impl, I: 'static> Pallet { ) -> Result<(u32, u32), DispatchError> { let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); + let proposal_weight = proposal.get_dispatch_info().weight; + ensure!( + proposal_weight.all_lte(T::MaxProposalWeight::get()), + Error::::WrongProposalWeight + ); let proposal_hash = T::Hashing::hash_of(&proposal); ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); @@ -968,6 +943,111 @@ impl, I: 'static> Pallet { }); num_proposals as u32 } + + /// Ensure the correctness of the state of this pallet. + /// + /// The following expectations must always hold. + /// + /// ## Expectations: + /// + /// Looking at proposals: + /// + /// * Each hash of a proposal that is stored inside `Proposals` must have a + /// call mapped to it inside the `ProposalOf` storage map. + /// * `ProposalCount` must always be greater than or equal to the number of + /// proposals inside the `Proposals` storage value. `ProposalCount` can be + /// greater because the count is not decremented when a proposal is removed. + /// * The count of `ProposalOf` entries must match the count of `Proposals`. + /// + /// Looking at votes: + /// * The sum of aye and nay votes for a proposal can never exceed + /// `MaxMembers`. + /// * The proposal index inside the `Voting` storage map must be unique. + /// * All proposal hashes inside `Voting` must exist in `Proposals`. + /// + /// Looking at members: + /// * The members count must never exceed `MaxMembers`. + /// * All the members must be sorted by value. + /// + /// Looking at prime account: + /// * The prime account must be a member of the collective.
+ #[cfg(any(feature = "try-runtime", test))] + fn do_try_state() -> DispatchResult { + Self::proposals().into_iter().try_for_each(|proposal| -> DispatchResult { + ensure!( + Self::proposal_of(proposal).is_some(), + DispatchError::Other( + "Proposal hash from `Proposals` is not found inside the `ProposalOf` mapping." + ) + ); + Ok(()) + })?; + + ensure!( + Self::proposals().into_iter().count() <= Self::proposal_count() as usize, + DispatchError::Other("The actual number of proposals is greater than `ProposalCount`") + ); + ensure!( + Self::proposals().into_iter().count() == >::iter_keys().count(), + DispatchError::Other("Proposal count inside `Proposals` is not equal to the proposal count in `ProposalOf`") + ); + + Self::proposals().into_iter().try_for_each(|proposal| -> DispatchResult { + if let Some(votes) = Self::voting(proposal) { + let ayes = votes.ayes.len(); + let nays = votes.nays.len(); + + ensure!( + ayes.saturating_add(nays) <= T::MaxMembers::get() as usize, + DispatchError::Other("The sum of ayes and nays is greater than `MaxMembers`") + ); + } + Ok(()) + })?; + + let mut proposal_indices = vec![]; + Self::proposals().into_iter().try_for_each(|proposal| -> DispatchResult { + if let Some(votes) = Self::voting(proposal) { + let proposal_index = votes.index; + ensure!( + !proposal_indices.contains(&proposal_index), + DispatchError::Other("The proposal index is not unique.") + ); + proposal_indices.push(proposal_index); + } + Ok(()) + })?; + + >::iter_keys().try_for_each(|proposal_hash| -> DispatchResult { + ensure!( + Self::proposals().contains(&proposal_hash), + DispatchError::Other( + "`Proposals` doesn't contain the proposal hash from the `Voting` storage map." + ) + ); + Ok(()) + })?; + + ensure!( + Self::members().len() <= T::MaxMembers::get() as usize, + DispatchError::Other("The member count is greater than `MaxMembers`.") + ); + + ensure!( + Self::members().windows(2).all(|members| members[0] <= members[1]), + DispatchError::Other("The members are not sorted by value.") + ); + + if let Some(prime) = Self::prime() { + ensure!( + Self::members().contains(&prime), + DispatchError::Other("Prime account is not a member.") + ); + } + + Ok(()) + } } impl, I: 'static> ChangeMembers for Pallet { @@ -1031,6 +1111,8 @@ impl, I: 'static> InitializeMembers for Pallet fn initialize_members(members: &[T::AccountId]) { if !members.is_empty() { assert!(>::get().is_empty(), "Members are already initialized!"); + let mut members = members.to_vec(); + members.sort(); >::put(members); } } diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index 79f6cc27fadbd..1165ebcec2287 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -51,7 +51,7 @@ frame_support::construct_runtime!( mod mock_democracy { pub use pallet::*; - #[frame_support::pallet] + #[frame_support::pallet(dev_mode)] pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -86,14 +86,18 @@ mod mock_democracy { } pub type MaxMembers = ConstU32<100>; +type AccountId = u64; parameter_types! 
{ pub const MotionDuration: u64 = 3; pub const MaxProposals: u32 = 257; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::MAX); + pub static MaxProposalWeight: Weight = default_max_proposal_weight(); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); + type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; @@ -102,7 +106,7 @@ impl frame_system::Config for Test { type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; - type AccountId = u64; + type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; type RuntimeEvent = RuntimeEvent; @@ -127,6 +131,7 @@ impl Config for Test { type DefaultVote = PrimeDefaultVote; type WeightInfo = (); type SetMembersOrigin = EnsureRoot; + type MaxProposalWeight = MaxProposalWeight; } impl Config for Test { type RuntimeOrigin = RuntimeOrigin; @@ -138,6 +143,7 @@ impl Config for Test { type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; type WeightInfo = (); type SetMembersOrigin = EnsureRoot; + type MaxProposalWeight = MaxProposalWeight; } impl mock_democracy::Config for Test { type RuntimeEvent = RuntimeEvent; @@ -153,25 +159,50 @@ impl Config for Test { type DefaultVote = PrimeDefaultVote; type WeightInfo = (); type SetMembersOrigin = EnsureRoot; + type MaxProposalWeight = MaxProposalWeight; } -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig { - collective: pallet_collective::GenesisConfig { - members: vec![1, 2, 3], - phantom: Default::default(), - }, - collective_majority: pallet_collective::GenesisConfig { - members: vec![1, 2, 3, 4, 5], - phantom: Default::default(), - }, - default_collective: Default::default(), +pub struct ExtBuilder { + collective_members: Vec, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { collective_members: vec![1, 2, 3] } + } +} + +impl ExtBuilder { + fn set_collective_members(mut self, collective_members: Vec) -> Self { + self.collective_members = collective_members; + self + } + + pub fn build(self) -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = GenesisConfig { + collective: pallet_collective::GenesisConfig { + members: self.collective_members, + phantom: Default::default(), + }, + collective_majority: pallet_collective::GenesisConfig { + members: vec![1, 2, 3, 4, 5], + phantom: Default::default(), + }, + default_collective: Default::default(), + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(|| System::set_block_number(1)); + ext + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + Collective::do_try_state().unwrap(); + }) } - .build_storage() - .unwrap() - .into(); - ext.execute_with(|| System::set_block_number(1)); - ext } fn make_proposal(value: u64) -> RuntimeCall { @@ -184,17 +215,62 @@ fn record(event: RuntimeEvent) -> EventRecord { EventRecord { phase: Phase::Initialization, event, topics: vec![] } } +fn default_max_proposal_weight() -> Weight { + sp_runtime::Perbill::from_percent(80) * BlockWeights::get().max_block +} + #[test] fn motions_basic_environment_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(Collective::members(), vec![1, 2, 3]); assert_eq!(*Collective::proposals(), Vec::::new()); }); } +#[test] +fn initialize_members_sorts_members() { 
+ let unsorted_members = vec![3, 2, 4, 1]; + let expected_members = vec![1, 2, 3, 4]; + ExtBuilder::default() + .set_collective_members(unsorted_members) + .build_and_execute(|| { + assert_eq!(Collective::members(), expected_members); + }); +} + +#[test] +fn proposal_weight_limit_works() { + ExtBuilder::default().build_and_execute(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + + assert_ok!(Collective::propose( + RuntimeOrigin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); + + // set a small limit for max proposal weight. + MaxProposalWeight::set(Weight::from_parts(1, 1)); + assert_noop!( + Collective::propose( + RuntimeOrigin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + ), + Error::::WrongProposalWeight + ); + + // reset the max weight to default. + MaxProposalWeight::set(default_max_proposal_weight()); + }); +} + #[test] fn close_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -262,7 +338,7 @@ fn close_works() { #[test] fn proposal_weight_limit_works_on_approve() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = RuntimeCall::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, @@ -304,7 +380,7 @@ fn proposal_weight_limit_works_on_approve() { #[test] fn proposal_weight_limit_ignored_on_disapprove() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = RuntimeCall::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, @@ -334,7 +410,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { #[test] fn close_with_prime_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -402,7 +478,7 @@ fn close_with_prime_works() { #[test] fn close_with_voting_prime_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -472,7 +548,7 @@ fn close_with_voting_prime_works() { #[test] fn close_with_no_prime_but_majority_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -552,7 +628,7 @@ fn close_with_no_prime_but_majority_works() { #[test] fn removal_of_old_voters_votes_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); @@ -600,7 +676,7 @@ fn removal_of_old_voters_votes_works() { #[test] fn removal_of_old_voters_votes_works_with_set_members() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); @@ -658,7 
+734,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { #[test] fn propose_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = proposal.blake2_256().into(); @@ -690,7 +766,7 @@ fn propose_works() { #[test] fn limit_active_proposals() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { for i in 0..MaxProposals::get() { let proposal = make_proposal(i as u64); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); @@ -717,7 +793,7 @@ fn limit_active_proposals() { #[test] fn correct_validate_and_get_proposal() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = RuntimeCall::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, @@ -763,7 +839,7 @@ fn correct_validate_and_get_proposal() { #[test] fn motions_ignoring_non_collective_proposals_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_noop!( @@ -780,7 +856,7 @@ fn motions_ignoring_non_collective_proposals_works() { #[test] fn motions_ignoring_non_collective_votes_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); @@ -799,7 +875,7 @@ fn motions_ignoring_non_collective_votes_works() { #[test] fn motions_ignoring_bad_index_collective_vote_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(3); let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); @@ -819,7 +895,7 @@ fn motions_ignoring_bad_index_collective_vote_works() { #[test] fn motions_vote_after_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); @@ -888,7 +964,7 @@ fn motions_vote_after_works() { #[test] fn motions_all_first_vote_free_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); @@ -946,7 +1022,7 @@ fn motions_all_first_vote_free_works() { #[test] fn motions_reproposing_disapproved_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -978,7 +1054,7 @@ fn motions_reproposing_disapproved_works() { #[test] fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {}); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -1108,7 +1184,7 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { #[test] fn 
motions_disapproval_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -1167,7 +1243,7 @@ fn motions_disapproval_works() { #[test] fn motions_approval_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -1228,7 +1304,7 @@ fn motions_approval_works() { #[test] fn motion_with_no_votes_closes_with_disapproval() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; @@ -1289,7 +1365,7 @@ fn close_disapprove_does_not_care_about_weight_or_len() { // This test confirms that if you close a proposal that would be disapproved, // we do not care about the proposal length or proposal weight since it will // not be read from storage or executed. - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); @@ -1321,7 +1397,7 @@ fn close_disapprove_does_not_care_about_weight_or_len() { #[test] fn disapprove_proposal_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); @@ -1367,6 +1443,19 @@ fn disapprove_proposal_works() { }) } +#[should_panic(expected = "Members length cannot exceed MaxMembers.")] +#[test] +fn genesis_build_panics_with_too_many_members() { + let max_members: u32 = MaxMembers::get(); + let too_many_members = (1..=max_members as u64 + 1).collect::>(); + pallet_collective::GenesisConfig:: { + members: too_many_members, + phantom: Default::default(), + } + .build_storage() + .unwrap(); +} + #[test] #[should_panic(expected = "Members cannot contain duplicate accounts.")] fn genesis_build_panics_with_duplicate_members() { @@ -1380,7 +1469,7 @@ fn genesis_build_panics_with_duplicate_members() { #[test] fn migration_v4() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { use frame_support::traits::PalletInfoAccess; let old_pallet = "OldCollective"; diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index df233fc248e3d..bf739daca0931 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -76,20 +76,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 100]`. 
fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + m * (3233 ±0) + p * (3223 ±0)` - // Estimated: `16586 + m * (7809 ±24) + p * (10238 ±24)` - // Minimum execution time: 17_093 nanoseconds. - Weight::from_parts(17_284_000, 16586) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(5_143_145, 0).saturating_mul(m.into())) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(7_480_941, 0).saturating_mul(p.into())) + // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` + // Estimated: `15861 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 19_398_000 picoseconds. + Weight::from_parts(19_542_000, 15861) + // Standard Error: 71_395 + .saturating_add(Weight::from_parts(5_630_062, 0).saturating_mul(m.into())) + // Standard Error: 71_395 + .saturating_add(Weight::from_parts(8_634_133, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 7809).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 10238).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) } /// Storage: Council Members (r:1 w:0) /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) @@ -97,14 +97,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `730 + m * (32 ±0)` - // Minimum execution time: 15_972 nanoseconds. - Weight::from_parts(14_971_445, 730) - // Standard Error: 32 - .saturating_add(Weight::from_parts(1_775, 0).saturating_mul(b.into())) - // Standard Error: 334 - .saturating_add(Weight::from_parts(17_052, 0).saturating_mul(m.into())) + // Measured: `202 + m * (32 ±0)` + // Estimated: `1688 + m * (32 ±0)` + // Minimum execution time: 17_579_000 picoseconds. + Weight::from_parts(16_874_624, 1688) + // Standard Error: 34 + .saturating_add(Weight::from_parts(1_617, 0).saturating_mul(b.into())) + // Standard Error: 353 + .saturating_add(Weight::from_parts(19_759, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -116,16 +116,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `3440 + m * (64 ±0)` - // Minimum execution time: 17_950 nanoseconds. - Weight::from_parts(17_019_558, 3440) - // Standard Error: 41 - .saturating_add(Weight::from_parts(1_807, 0).saturating_mul(b.into())) - // Standard Error: 432 - .saturating_add(Weight::from_parts(27_986, 0).saturating_mul(m.into())) + // Measured: `202 + m * (32 ±0)` + // Estimated: `3668 + m * (32 ±0)` + // Minimum execution time: 20_339_000 picoseconds. 
+ Weight::from_parts(19_534_549, 3668) + // Standard Error: 45 + .saturating_add(Weight::from_parts(1_636, 0).saturating_mul(b.into())) + // Standard Error: 469 + .saturating_add(Weight::from_parts(28_178, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } /// Storage: Council Members (r:1 w:0) /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) @@ -142,20 +142,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `556 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `6355 + m * (165 ±0) + p * (180 ±0)` - // Minimum execution time: 24_817 nanoseconds. - Weight::from_parts(24_778_955, 6355) - // Standard Error: 73 - .saturating_add(Weight::from_parts(2_355, 0).saturating_mul(b.into())) - // Standard Error: 765 - .saturating_add(Weight::from_parts(20_518, 0).saturating_mul(m.into())) - // Standard Error: 755 - .saturating_add(Weight::from_parts(85_670, 0).saturating_mul(p.into())) + // Measured: `492 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3884 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 27_793_000 picoseconds. + Weight::from_parts(28_095_462, 3884) + // Standard Error: 82 + .saturating_add(Weight::from_parts(2_646, 0).saturating_mul(b.into())) + // Standard Error: 861 + .saturating_add(Weight::from_parts(22_332, 0).saturating_mul(m.into())) + // Standard Error: 850 + .saturating_add(Weight::from_parts(121_560, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 165).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Council Members (r:1 w:0) /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) @@ -164,15 +164,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1006 + m * (64 ±0)` - // Estimated: `4980 + m * (128 ±0)` - // Minimum execution time: 19_790 nanoseconds. - Weight::from_parts(20_528_275, 4980) - // Standard Error: 651 - .saturating_add(Weight::from_parts(48_856, 0).saturating_mul(m.into())) + // Measured: `941 + m * (64 ±0)` + // Estimated: `4405 + m * (64 ±0)` + // Minimum execution time: 23_096_000 picoseconds. + Weight::from_parts(23_793_304, 4405) + // Standard Error: 675 + .saturating_add(Weight::from_parts(51_741, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 128).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -186,18 +186,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `626 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `5893 + m * (260 ±0) + p * (144 ±0)` - // Minimum execution time: 25_564 nanoseconds. - Weight::from_parts(25_535_497, 5893) - // Standard Error: 610 - .saturating_add(Weight::from_parts(27_956, 0).saturating_mul(m.into())) - // Standard Error: 595 - .saturating_add(Weight::from_parts(84_835, 0).saturating_mul(p.into())) + // Measured: `530 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `3975 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 29_635_000 picoseconds. + Weight::from_parts(29_574_124, 3975) + // Standard Error: 755 + .saturating_add(Weight::from_parts(29_126, 0).saturating_mul(m.into())) + // Standard Error: 737 + .saturating_add(Weight::from_parts(123_438, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 260).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -212,21 +212,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `962 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `9164 + b * (4 ±0) + m * (264 ±0) + p * (160 ±0)` - // Minimum execution time: 36_515 nanoseconds. - Weight::from_parts(36_626_648, 9164) - // Standard Error: 98 - .saturating_add(Weight::from_parts(2_295, 0).saturating_mul(b.into())) - // Standard Error: 1_036 - .saturating_add(Weight::from_parts(22_182, 0).saturating_mul(m.into())) - // Standard Error: 1_010 - .saturating_add(Weight::from_parts(100_034, 0).saturating_mul(p.into())) + // Measured: `832 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4149 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 41_934_000 picoseconds. + Weight::from_parts(44_022_379, 4149) + // Standard Error: 105 + .saturating_add(Weight::from_parts(2_266, 0).saturating_mul(b.into())) + // Standard Error: 1_112 + .saturating_add(Weight::from_parts(18_074, 0).saturating_mul(m.into())) + // Standard Error: 1_084 + .saturating_add(Weight::from_parts(132_405, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 264).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 160).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -242,18 +242,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. 
fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `646 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `7095 + m * (325 ±0) + p * (180 ±0)` - // Minimum execution time: 28_858 nanoseconds. - Weight::from_parts(28_050_047, 7095) - // Standard Error: 614 - .saturating_add(Weight::from_parts(34_031, 0).saturating_mul(m.into())) - // Standard Error: 599 - .saturating_add(Weight::from_parts(85_744, 0).saturating_mul(p.into())) + // Measured: `550 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `3995 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 33_146_000 picoseconds. + Weight::from_parts(31_957_128, 3995) + // Standard Error: 2_321 + .saturating_add(Weight::from_parts(31_272, 0).saturating_mul(m.into())) + // Standard Error: 2_264 + .saturating_add(Weight::from_parts(156_129, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 325).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -270,21 +270,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `982 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `10565 + b * (5 ±0) + m * (330 ±0) + p * (200 ±0)` - // Minimum execution time: 38_608 nanoseconds. - Weight::from_parts(39_948_329, 10565) - // Standard Error: 84 - .saturating_add(Weight::from_parts(2_045, 0).saturating_mul(b.into())) - // Standard Error: 895 - .saturating_add(Weight::from_parts(22_669, 0).saturating_mul(m.into())) - // Standard Error: 872 - .saturating_add(Weight::from_parts(95_525, 0).saturating_mul(p.into())) + // Measured: `852 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4169 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 44_278_000 picoseconds. + Weight::from_parts(46_039_907, 4169) + // Standard Error: 100 + .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(b.into())) + // Standard Error: 1_062 + .saturating_add(Weight::from_parts(25_055, 0).saturating_mul(m.into())) + // Standard Error: 1_035 + .saturating_add(Weight::from_parts(136_282, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 330).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 200).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Council Proposals (r:1 w:1) /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) @@ -295,15 +295,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `391 + p * (32 ±0)` - // Estimated: `1668 + p * (96 ±0)` - // Minimum execution time: 14_785 nanoseconds. 
- Weight::from_parts(16_393_818, 1668) - // Standard Error: 612 - .saturating_add(Weight::from_parts(76_786, 0).saturating_mul(p.into())) + // Measured: `359 + p * (32 ±0)` + // Estimated: `1844 + p * (32 ±0)` + // Minimum execution time: 16_500_000 picoseconds. + Weight::from_parts(18_376_538, 1844) + // Standard Error: 755 + .saturating_add(Weight::from_parts(113_189, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) } } @@ -322,20 +322,20 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 100]`. fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + m * (3233 ±0) + p * (3223 ±0)` - // Estimated: `16586 + m * (7809 ±24) + p * (10238 ±24)` - // Minimum execution time: 17_093 nanoseconds. - Weight::from_parts(17_284_000, 16586) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(5_143_145, 0).saturating_mul(m.into())) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(7_480_941, 0).saturating_mul(p.into())) + // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` + // Estimated: `15861 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 19_398_000 picoseconds. + Weight::from_parts(19_542_000, 15861) + // Standard Error: 71_395 + .saturating_add(Weight::from_parts(5_630_062, 0).saturating_mul(m.into())) + // Standard Error: 71_395 + .saturating_add(Weight::from_parts(8_634_133, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 7809).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 10238).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) } /// Storage: Council Members (r:1 w:0) /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) @@ -343,14 +343,14 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `730 + m * (32 ±0)` - // Minimum execution time: 15_972 nanoseconds. - Weight::from_parts(14_971_445, 730) - // Standard Error: 32 - .saturating_add(Weight::from_parts(1_775, 0).saturating_mul(b.into())) - // Standard Error: 334 - .saturating_add(Weight::from_parts(17_052, 0).saturating_mul(m.into())) + // Measured: `202 + m * (32 ±0)` + // Estimated: `1688 + m * (32 ±0)` + // Minimum execution time: 17_579_000 picoseconds. + Weight::from_parts(16_874_624, 1688) + // Standard Error: 34 + .saturating_add(Weight::from_parts(1_617, 0).saturating_mul(b.into())) + // Standard Error: 353 + .saturating_add(Weight::from_parts(19_759, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -362,16 +362,16 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. 
fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `3440 + m * (64 ±0)` - // Minimum execution time: 17_950 nanoseconds. - Weight::from_parts(17_019_558, 3440) - // Standard Error: 41 - .saturating_add(Weight::from_parts(1_807, 0).saturating_mul(b.into())) - // Standard Error: 432 - .saturating_add(Weight::from_parts(27_986, 0).saturating_mul(m.into())) + // Measured: `202 + m * (32 ±0)` + // Estimated: `3668 + m * (32 ±0)` + // Minimum execution time: 20_339_000 picoseconds. + Weight::from_parts(19_534_549, 3668) + // Standard Error: 45 + .saturating_add(Weight::from_parts(1_636, 0).saturating_mul(b.into())) + // Standard Error: 469 + .saturating_add(Weight::from_parts(28_178, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } /// Storage: Council Members (r:1 w:0) /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) @@ -388,20 +388,20 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `556 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `6355 + m * (165 ±0) + p * (180 ±0)` - // Minimum execution time: 24_817 nanoseconds. - Weight::from_parts(24_778_955, 6355) - // Standard Error: 73 - .saturating_add(Weight::from_parts(2_355, 0).saturating_mul(b.into())) - // Standard Error: 765 - .saturating_add(Weight::from_parts(20_518, 0).saturating_mul(m.into())) - // Standard Error: 755 - .saturating_add(Weight::from_parts(85_670, 0).saturating_mul(p.into())) + // Measured: `492 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3884 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 27_793_000 picoseconds. + Weight::from_parts(28_095_462, 3884) + // Standard Error: 82 + .saturating_add(Weight::from_parts(2_646, 0).saturating_mul(b.into())) + // Standard Error: 861 + .saturating_add(Weight::from_parts(22_332, 0).saturating_mul(m.into())) + // Standard Error: 850 + .saturating_add(Weight::from_parts(121_560, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 165).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Council Members (r:1 w:0) /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) @@ -410,15 +410,15 @@ impl WeightInfo for () { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1006 + m * (64 ±0)` - // Estimated: `4980 + m * (128 ±0)` - // Minimum execution time: 19_790 nanoseconds. - Weight::from_parts(20_528_275, 4980) - // Standard Error: 651 - .saturating_add(Weight::from_parts(48_856, 0).saturating_mul(m.into())) + // Measured: `941 + m * (64 ±0)` + // Estimated: `4405 + m * (64 ±0)` + // Minimum execution time: 23_096_000 picoseconds. 
+ Weight::from_parts(23_793_304, 4405) + // Standard Error: 675 + .saturating_add(Weight::from_parts(51_741, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 128).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -432,18 +432,18 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `626 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `5893 + m * (260 ±0) + p * (144 ±0)` - // Minimum execution time: 25_564 nanoseconds. - Weight::from_parts(25_535_497, 5893) - // Standard Error: 610 - .saturating_add(Weight::from_parts(27_956, 0).saturating_mul(m.into())) - // Standard Error: 595 - .saturating_add(Weight::from_parts(84_835, 0).saturating_mul(p.into())) + // Measured: `530 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `3975 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 29_635_000 picoseconds. + Weight::from_parts(29_574_124, 3975) + // Standard Error: 755 + .saturating_add(Weight::from_parts(29_126, 0).saturating_mul(m.into())) + // Standard Error: 737 + .saturating_add(Weight::from_parts(123_438, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 260).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -458,21 +458,21 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `962 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `9164 + b * (4 ±0) + m * (264 ±0) + p * (160 ±0)` - // Minimum execution time: 36_515 nanoseconds. - Weight::from_parts(36_626_648, 9164) - // Standard Error: 98 - .saturating_add(Weight::from_parts(2_295, 0).saturating_mul(b.into())) - // Standard Error: 1_036 - .saturating_add(Weight::from_parts(22_182, 0).saturating_mul(m.into())) - // Standard Error: 1_010 - .saturating_add(Weight::from_parts(100_034, 0).saturating_mul(p.into())) + // Measured: `832 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4149 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 41_934_000 picoseconds. 
+ Weight::from_parts(44_022_379, 4149) + // Standard Error: 105 + .saturating_add(Weight::from_parts(2_266, 0).saturating_mul(b.into())) + // Standard Error: 1_112 + .saturating_add(Weight::from_parts(18_074, 0).saturating_mul(m.into())) + // Standard Error: 1_084 + .saturating_add(Weight::from_parts(132_405, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 264).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 160).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -488,18 +488,18 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `646 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `7095 + m * (325 ±0) + p * (180 ±0)` - // Minimum execution time: 28_858 nanoseconds. - Weight::from_parts(28_050_047, 7095) - // Standard Error: 614 - .saturating_add(Weight::from_parts(34_031, 0).saturating_mul(m.into())) - // Standard Error: 599 - .saturating_add(Weight::from_parts(85_744, 0).saturating_mul(p.into())) + // Measured: `550 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `3995 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 33_146_000 picoseconds. + Weight::from_parts(31_957_128, 3995) + // Standard Error: 2_321 + .saturating_add(Weight::from_parts(31_272, 0).saturating_mul(m.into())) + // Standard Error: 2_264 + .saturating_add(Weight::from_parts(156_129, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 325).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Council Voting (r:1 w:1) /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) @@ -516,21 +516,21 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `982 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `10565 + b * (5 ±0) + m * (330 ±0) + p * (200 ±0)` - // Minimum execution time: 38_608 nanoseconds. - Weight::from_parts(39_948_329, 10565) - // Standard Error: 84 - .saturating_add(Weight::from_parts(2_045, 0).saturating_mul(b.into())) - // Standard Error: 895 - .saturating_add(Weight::from_parts(22_669, 0).saturating_mul(m.into())) - // Standard Error: 872 - .saturating_add(Weight::from_parts(95_525, 0).saturating_mul(p.into())) + // Measured: `852 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4169 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 44_278_000 picoseconds. 
+ Weight::from_parts(46_039_907, 4169) + // Standard Error: 100 + .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(b.into())) + // Standard Error: 1_062 + .saturating_add(Weight::from_parts(25_055, 0).saturating_mul(m.into())) + // Standard Error: 1_035 + .saturating_add(Weight::from_parts(136_282, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 330).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 200).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Council Proposals (r:1 w:1) /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) @@ -541,14 +541,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `391 + p * (32 ±0)` - // Estimated: `1668 + p * (96 ±0)` - // Minimum execution time: 14_785 nanoseconds. - Weight::from_parts(16_393_818, 1668) - // Standard Error: 612 - .saturating_add(Weight::from_parts(76_786, 0).saturating_mul(p.into())) + // Measured: `359 + p * (32 ±0)` + // Estimated: `1844 + p * (32 ±0)` + // Minimum execution time: 16_500_000 picoseconds. + Weight::from_parts(18_376_538, 1844) + // Standard Error: 755 + .saturating_add(Weight::from_parts(113_189, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) } } diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index e19326b785b2b..6ab60846fd412 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -19,15 +19,15 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4", default-features = false } wasm-instrument = { version = "0.4", default-features = false } serde = { version = "1", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } -wasmi = { version = "0.20", default-features = false } -wasmparser = { package = "wasmparser-nostd", version = "0.91", default-features = false } +wasmi = { version = "0.28", default-features = false } +wasmparser = { package = "wasmparser-nostd", version = "0.100", default-features = false } impl-trait-for-tuples = "0.2" # Only used in benchmarking to generate random contract code diff --git a/frame/contracts/README.md b/frame/contracts/README.md index 4df7d9449682d..13c5e7253c1d8 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -1,11 +1,11 @@ -# Contract Module +# Contracts Module -The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. +The Contracts module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. 
- [`Call`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/enum.Call.html) - [`Config`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/trait.Config.html) - [`Error`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/enum.Error.html) -- [`Event`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/enum.Error.html) +- [`Event`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/enum.Event.html) ## Overview @@ -63,9 +63,9 @@ directly. This makes sure that by default `pallet-contracts` does not expose any When setting up the `Schedule` for your runtime make sure to set `InstructionWeights::fallback` to a non zero value. The default is `0` and prevents the upload of any non deterministic code. -An indeterministic code can be deployed on-chain by passing `Determinism::AllowIndeterministic` -to `upload_code`. A determinstic contract can then delegate call into it if and only if it -is ran by using `bare_call` and passing `Determinism::AllowIndeterministic` to it. **Never use +An indeterministic code can be deployed on-chain by passing `Determinism::Relaxed` +to `upload_code`. A deterministic contract can then delegate call into it if and only if it +is ran by using `bare_call` and passing `Determinism::Relaxed` to it. **Never use this argument when the contract is called from an on-chain transaction.** ## Interface @@ -106,14 +106,14 @@ Look for the `define_env!` macro invocation. This module executes WebAssembly smart contracts. These can potentially be written in any language that compiles to web assembly. However, using a language that specifically targets this module -will make things a lot easier. One such language is [`ink`](https://github.com/paritytech/ink) +will make things a lot easier. One such language is [`ink!`](https://use.ink) which is an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing WebAssembly based smart contracts in the Rust programming language. ## Debugging Contracts can emit messages to the client when called as RPC through the `seal_debug_message` -API. This is exposed in ink! via +API. This is exposed in [ink!](https://use.ink) via [`ink_env::debug_message()`](https://paritytech.github.io/ink/ink_env/fn.debug_message.html). Those messages are gathered into an internal buffer and send to the RPC client. @@ -135,6 +135,18 @@ to `error` in order to prevent them from spamming the console. `--dev`: Use a dev chain spec `--tmp`: Use temporary storage for chain data (the chain state is deleted on exit) +## Host function tracing + +For contract authors, it can be a helpful debugging tool to see which host functions are called, with which arguments, and what the result was. + +In order to see these messages on the node console, the log level for the `runtime::contracts::strace` target needs to be raised to the `trace` level. + +Example: + +```bash +cargo run --release -- --dev -lerror,runtime::contracts::strace=trace,runtime::contracts=debug +``` + ## Unstable Interfaces Driven by the desire to have an iterative approach in developing new contract interfaces diff --git a/frame/contracts/benchmarks/README.md b/frame/contracts/benchmarks/README.md index a4b15bd840db4..a621dd65d5931 100644 --- a/frame/contracts/benchmarks/README.md +++ b/frame/contracts/benchmarks/README.md @@ -1,6 +1,6 @@ # Benchmarks -This directory contains real world (ink!, solang) contracts which are used in macro benchmarks. 
+This directory contains real world ([ink!](https://use.ink), [solang](https://github.com/hyperledger/solang)) contracts which are used in macro benchmarks. Those benchmarks are not used to determine weights but rather to compare different contract languages and execution engines with larger wasm modules. diff --git a/frame/contracts/fixtures/call.wat b/frame/contracts/fixtures/call.wat new file mode 100644 index 0000000000000..4558b2c6409b9 --- /dev/null +++ b/frame/contracts/fixtures/call.wat @@ -0,0 +1,39 @@ +;; This calls another contract as passed as its account id. +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy")) + + (func (export "call") + ;; Store length of input buffer. + (i32.store (i32.const 0) (i32.const 512)) + + ;; Copy input at address 4. + (call $seal_input (i32.const 4) (i32.const 0)) + + ;; Call passed contract. + (call $assert (i32.eqz + (call $seal_call + (i32.const 0) ;; No flags + (i32.const 8) ;; Pointer to "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 512) ;; Pointer to the buffer with value to transfer + (i32.const 4) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + )) + ) +) diff --git a/frame/contracts/fixtures/call_with_limit.wat b/frame/contracts/fixtures/call_with_limit.wat index abb8708267271..04da59551a8ce 100644 --- a/frame/contracts/fixtures/call_with_limit.wat +++ b/frame/contracts/fixtures/call_with_limit.wat @@ -1,8 +1,8 @@ -;; This expects [account_id, gas_limit] as input and calls the account_id with the supplied gas_limit. +;; This expects [account_id, ref_time, proof_size] as input and calls the account_id with the supplied 2D Weight limit. ;; It returns the result of the call as output data. (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal2" "call" (func $seal_call (param i32 i32 i64 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -13,24 +13,25 @@ (func (export "deploy")) (func (export "call") - ;; Receive the encoded call + gas_limit + ;; Receive the encoded account_id, ref_time, proof_size (call $seal_input (i32.const 4) ;; Pointer to the input buffer - (i32.const 0) ;; Size of the length buffer + (i32.const 0) ;; Pointer to the length of the input buffer ) (i32.store (i32.const 0) (call $seal_call + (i32.const 0) ;; Set no flag. (i32.const 4) ;; Pointer to "callee" address. - (i32.const 32) ;; Length of "callee" address. - (i64.load (i32.const 36)) ;; How much gas to devote for the execution. + (i64.load (i32.const 36)) ;; How much ref_time to devote for the execution. + (i64.load (i32.const 44)) ;; How much proof_size to devote for the execution. + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 0) ;; Length of the buffer with value to transfer. 
(i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer + (i32.const 0) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Ptr to output buffer len - ) + (i32.const 0) ;; Length is ignored in this case + ) ) (call $seal_return (i32.const 0) (i32.const 0) (i32.const 4)) ) diff --git a/frame/contracts/fixtures/caller_contract.wat b/frame/contracts/fixtures/caller_contract.wat index f9caf49f29ca8..929171b9a26f6 100644 --- a/frame/contracts/fixtures/caller_contract.wat +++ b/frame/contracts/fixtures/caller_contract.wat @@ -1,9 +1,9 @@ (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_balance" (func $seal_balance (param i32 i32))) - (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate - (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + (import "seal2" "call" (func $seal_call (param i32 i32 i64 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal2" "instantiate" (func $seal_instantiate + (param i32 i64 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) )) (import "env" "memory" (memory 1 1)) @@ -43,18 +43,18 @@ (set_local $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. + (i64.const 0) ;; How much proof_size weight to devote for the execution. 0 = all. + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 9) ;; Pointer to input data buffer address (i32.const 7) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max sentinel value: do not copy address - (i32.const 0) ;; Length is ignored in this case + (i32.const 4294967295) ;; u32 max sentinel value: do not copy address + (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - (i32.const 0) ;; salt_ptr - (i32.const 0) ;; salt_le + (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le ) ) @@ -63,22 +63,47 @@ (i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted ) - ;; Fail to deploy the contract due to insufficient gas. + ;; Fail to deploy the contract due to insufficient ref_time weight. (set_local $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. - (i64.const 1) ;; Supply too little gas + (i64.const 1) ;; Supply too little ref_time weight + (i64.const 0) ;; How much proof_size weight to devote for the execution. 0 = all. + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. 
(i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max sentinel value: do not copy address - (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - (i32.const 0) ;; salt_ptr - (i32.const 0) ;; salt_le + (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le + + ) + ) + + ;; Check for special trap exit status. + (call $assert + (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + ) + + ;; Fail to deploy the contract due to insufficient ref_time weight. + (set_local $exit_code + (call $seal_instantiate + (i32.const 24) ;; Pointer to the code hash. + (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. + (i64.const 1) ;; Supply too little proof_size weight + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Pointer to input data buffer address + (i32.const 8) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy address + (i32.const 0) ;; Length is ignored in this case + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le ) ) @@ -98,18 +123,18 @@ (set_local $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. + (i64.const 0) ;; How much proof_size weight to devote for the execution. 0 = all. + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer - (i32.const 16) ;; Pointer to the address output buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length - (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - (i32.const 0) ;; salt_ptr - (i32.const 0) ;; salt_le + (i32.const 16) ;; Pointer to the address output buffer + (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le ) ) @@ -139,15 +164,16 @@ ;; Call the new contract and expect it to return failing exit code. (set_local $exit_code (call $seal_call + (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. - (i32.const 32) ;; Length of "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. + (i64.const 0) ;; How much proof_size weight to devote for the execution. 0 = all. + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. 
(i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 9) ;; Pointer to input data buffer address (i32.const 7) ;; Length of input data buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer - (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len + (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) @@ -167,18 +193,40 @@ ) ) - ;; Fail to call the contract due to insufficient gas. + ;; Fail to call the contract due to insufficient ref_time weight. (set_local $exit_code (call $seal_call + (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. - (i32.const 32) ;; Length of "callee" address. - (i64.const 1) ;; Supply too little gas + (i64.const 1) ;; Supply too little ref_time weight + (i64.const 0) ;; How much proof_size weight to devote for the execution. 0 = all. + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this cas + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this cas + ) + ) + + ;; Check for special trap exit status. + (call $assert + (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + ) + + ;; Fail to call the contract due to insufficient proof_size weight. + (set_local $exit_code + (call $seal_call + (i32.const 0) ;; Set no flag + (i32.const 16) ;; Pointer to "callee" address. + (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. + (i64.const 1) ;; Supply too little proof_size weight + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Pointer to input data buffer address + (i32.const 8) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this cas ) ) @@ -202,15 +250,16 @@ ;; Call the contract successfully. (set_local $exit_code (call $seal_call + (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. - (i32.const 32) ;; Length of "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. + (i64.const 0) ;; How much proof_size weight to devote for the execution. 0 = all. + (i32.const 0xffffffff) ;; u32 max sentinel value: pass no deposit limit. (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. 
(i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer - (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len + (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) diff --git a/frame/contracts/fixtures/create_storage_and_call.wat b/frame/contracts/fixtures/create_storage_and_call.wat index 2a1e53f7ce08a..5592e7e96a980 100644 --- a/frame/contracts/fixtures/create_storage_and_call.wat +++ b/frame/contracts/fixtures/create_storage_and_call.wat @@ -2,7 +2,7 @@ (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) - (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "seal2" "call" (func $seal_call (param i32 i32 i64 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func $assert (param i32) @@ -20,7 +20,10 @@ ;; store length of input buffer (i32.store (i32.const 0) (i32.const 512)) - ;; copy input at address 4 + ;; copy input at address 4: + ;; first 4 bytes for the size of the storage to be created in callee + ;; next 32 bytes are for the callee address + ;; next bytes for the encoded deposit limit (call $seal_input (i32.const 4) (i32.const 0)) ;; create 4 byte of storage before calling @@ -34,8 +37,10 @@ (call $assert (i32.eqz (call $seal_call (i32.const 0) ;; No flags - (i32.const 8) ;; Pointer to "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 8) ;; Pointer to "callee" address + (i64.const 0) ;; How much ref_time to devote for the execution. 0 = all + (i64.const 0) ;; How much proof_limit to devote for the execution. 0 = all + (i32.const 40) ;; Pointer to the storage deposit limit (i32.const 512) ;; Pointer to the buffer with value to transfer (i32.const 4) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer diff --git a/frame/contracts/fixtures/create_storage_and_instantiate.wat b/frame/contracts/fixtures/create_storage_and_instantiate.wat new file mode 100644 index 0000000000000..cd7202478437b --- /dev/null +++ b/frame/contracts/fixtures/create_storage_and_instantiate.wat @@ -0,0 +1,66 @@ +;; This instantiates another contract and passes some input to its constructor. 
+(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) + (import "seal2" "instantiate" (func $seal_instantiate + (param i32 i64 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 8) send 10_000 balance + (data (i32.const 48) "\10\27\00\00\00\00\00\00") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy")) + + (func (export "call") + ;; store length of input buffer + (i32.store (i32.const 0) (i32.const 512)) + ;; store length of contract address + (i32.store (i32.const 84) (i32.const 32)) + + ;; copy input at address 4 + (call $seal_input (i32.const 4) (i32.const 0)) + + ;; memory layout is: + ;; [0,4): size of input buffer + ;; [4,8): size of the storage to be created in callee + ;; [8,40): the code hash of the contract to instantiate + ;; [40,48): for the encoded deposit limit + ;; [48,52): value to transfer + ;; [52,84): address of the deployed contract + ;; [84,88): len of the address + + ;; instantiate a contract + (call $assert (i32.eqz +;; (i32.store +;; (i32.const 64) + (call $seal_instantiate + (i32.const 8) ;; Pointer to the code hash. + (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. + (i64.const 0) ;; How much proof_size weight to devote for the execution. 0 = all. + (i32.const 40) ;; Pointer to the storage deposit limit + (i32.const 48) ;; Pointer to the buffer with value to transfer + (i32.const 4) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer + (i32.const 52) ;; Pointer to where to copy address + (i32.const 84) ;; Pointer to address len ptr + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_len + ) + )) + ;; return the deployed contract address + (call $seal_return (i32.const 0) (i32.const 52) (i32.const 32)) + ) +) diff --git a/frame/contracts/fixtures/sr25519_verify.wat b/frame/contracts/fixtures/sr25519_verify.wat new file mode 100644 index 0000000000000..2da1ceb87eab0 --- /dev/null +++ b/frame/contracts/fixtures/sr25519_verify.wat @@ -0,0 +1,55 @@ +;; This contract: +;; 1) Reads signature, message and public key from the input +;; 2) Calls and return the result of sr25519_verify + +(module + ;; import the host functions from the seal0 module + (import "seal0" "sr25519_verify" (func $sr25519_verify (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + + ;; give the program 1 page of memory + (import "env" "memory" (memory 1 1)) + + ;; [0, 4) length of signature + message + public key - 64 + 11 + 32 = 107 bytes + ;; write the length of the input (6b = 107) bytes at offset 0 + (data (i32.const 0) "\6b") + + (func (export "deploy")) + + (func (export "call") + ;; define local variables + (local $signature_ptr i32) + (local $pub_key_ptr i32) + (local $message_len i32) + (local $message_ptr i32) + + ;; set the pointers to the memory locations + ;; Memory layout during `call` + ;; [10, 74) signature + ;; [74, 106) public key + ;; [106, 117) message (11 bytes) + (local.set $signature_ptr (i32.const 10)) + (local.set $pub_key_ptr (i32.const 74)) + 
(local.set $message_ptr (i32.const 106)) + + ;; store the input into the memory, starting at the signature and + ;; up to 107 bytes stored at offset 0 + (call $seal_input (local.get $signature_ptr) (i32.const 0)) + + ;; call sr25519_verify and store the return code + (i32.store + (i32.const 0) + (call $sr25519_verify + (local.get $signature_ptr) + (local.get $pub_key_ptr) + (i32.const 11) + (local.get $message_ptr) + ) + ) + + ;; exit with success and take transfer return code to the output buffer + (call $seal_return (i32.const 0) (i32.const 0) (i32.const 4)) + ) +) + diff --git a/frame/contracts/fixtures/store.wat b/frame/contracts/fixtures/store_call.wat similarity index 100% rename from frame/contracts/fixtures/store.wat rename to frame/contracts/fixtures/store_call.wat diff --git a/frame/contracts/fixtures/store_deploy.wat b/frame/contracts/fixtures/store_deploy.wat new file mode 100644 index 0000000000000..cc428e9623bfb --- /dev/null +++ b/frame/contracts/fixtures/store_deploy.wat @@ -0,0 +1,45 @@ +;; Stores a value of the passed size in constructor. +(module + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "env" "memory" (memory 16 16)) + + ;; [0, 32) storage key + (data (i32.const 0) "\01") + + ;; [32, 36) buffer where input is copied (expected size of storage item) + + ;; [36, 40) size of the input buffer + (data (i32.const 36) "\04") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy") + (call $seal_input (i32.const 32) (i32.const 36)) + + ;; assert input size == 4 + (call $assert + (i32.eq + (i32.load (i32.const 36)) + (i32.const 4) + ) + ) + + ;; place a value in storage, the size of which is specified by the call input. + ;; we don't care about the contents of the storage item + (call $seal_set_storage + (i32.const 0) ;; Pointer to storage key + (i32.const 0) ;; Pointer to value + (i32.load (i32.const 32)) ;; Size of value + ) + ) + + (func (export "call")) +) diff --git a/frame/contracts/primitives/Cargo.toml b/frame/contracts/primitives/Cargo.toml index 2460edc5a7b2f..b643f03a9d511 100644 --- a/frame/contracts/primitives/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } # Substrate Dependencies (This crate should not rely on frame) diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/primitives/src/lib.rs index ee415cb822257..e73ceb031e184 100644 --- a/frame/contracts/primitives/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -131,7 +131,7 @@ pub struct InstantiateReturnValue { pub account_id: AccountId, } -/// The result of succesfully uploading a contract. +/// The result of successfully uploading a contract. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct CodeUploadReturnValue { /// The key under which the new code is stored. 
@@ -239,7 +239,7 @@ where } } - /// If the amount of deposit (this type) is constrained by a `limit` this calcuates how + /// If the amount of deposit (this type) is constrained by a `limit` this calculates how /// much balance (if any) is still available from this limit. /// /// # Note diff --git a/frame/contracts/proc-macro/Cargo.toml b/frame/contracts/proc-macro/Cargo.toml index 08dafcc6a8fca..c700d1e335639 100644 --- a/frame/contracts/proc-macro/Cargo.toml +++ b/frame/contracts/proc-macro/Cargo.toml @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = "1" -quote = "1" -syn = { version = "1.0.98", features = ["full"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full"] } [dev-dependencies] diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index 1b530a409b933..a6a8187bc8aaa 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Procedural macroses used in the contracts module. +//! Procedural macros used in the contracts module. //! //! Most likely you should use the [`#[define_env]`][`macro@define_env`] attribute macro which hides //! boilerplate of defining external environment for a wasm module. @@ -209,13 +209,13 @@ impl HostFn { "only #[version()], #[unstable], #[prefixed_alias] and #[deprecated] attributes are allowed."; let span = item.span(); let mut attrs = item.attrs.clone(); - attrs.retain(|a| !a.path.is_ident("doc")); + attrs.retain(|a| !a.path().is_ident("doc")); let mut maybe_version = None; let mut is_stable = true; let mut alias_to = None; let mut not_deprecated = true; while let Some(attr) = attrs.pop() { - let ident = attr.path.get_ident().ok_or(err(span, msg))?.to_string(); + let ident = attr.path().get_ident().ok_or(err(span, msg))?.to_string(); match ident.as_str() { "version" => { if maybe_version.is_some() { @@ -377,7 +377,7 @@ impl EnvDef { _ => None, }; - let selector = |a: &syn::Attribute| a.path.is_ident("prefixed_alias"); + let selector = |a: &syn::Attribute| a.path().is_ident("prefixed_alias"); let aliases = items .iter() @@ -401,7 +401,7 @@ impl EnvDef { } fn is_valid_special_arg(idx: usize, arg: &FnArg) -> bool { - let pat = if let FnArg::Typed(pat) = arg { pat } else { return false }; + let FnArg::Typed(pat) = arg else { return false }; let ident = if let syn::Pat::Ident(ref ident) = *pat.pat { &ident.ident } else { return false }; let name_ok = match idx { 0 => ident == "ctx" || ident == "_ctx", @@ -434,7 +434,7 @@ fn expand_func_doc(func: &HostFn) -> TokenStream2 { ); quote! { #[doc = #alias_doc] } } else { - let docs = func.item.attrs.iter().filter(|a| a.path.is_ident("doc")).map(|d| { + let docs = func.item.attrs.iter().filter(|a| a.path().is_ident("doc")).map(|d| { let docs = d.to_token_stream(); quote! { #docs } }); @@ -527,7 +527,7 @@ fn expand_docs(def: &EnvDef) -> TokenStream2 { } } -/// Expands environment definiton. +/// Expands environment definition. /// Should generate source code for: /// - implementations of the host functions to be added to the wasm runtime environment (see /// `expand_impls()`). 
@@ -596,6 +596,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) let impls = def.host_funcs.iter().map(|f| { // skip the context and memory argument let params = f.item.sig.inputs.iter().skip(2); + let (module, name, body, wasm_output, output) = ( f.module(), &f.name, @@ -606,6 +607,39 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) let is_stable = f.is_stable; let not_deprecated = f.not_deprecated; + // wrapped host function body call with host function traces + // see https://github.com/paritytech/substrate/tree/master/frame/contracts#host-function-tracing + let wrapped_body_with_trace = { + let trace_fmt_args = params.clone().filter_map(|arg| match arg { + syn::FnArg::Receiver(_) => None, + syn::FnArg::Typed(p) => { + match *p.pat.clone() { + syn::Pat::Ident(ref pat_ident) => Some(pat_ident.ident.clone()), + _ => None, + } + }, + }); + + let params_fmt_str = trace_fmt_args.clone().map(|s| format!("{s}: {{:?}}")).collect::>().join(", "); + let trace_fmt_str = format!("{}::{}({}) = {{:?}}\n", module, name, params_fmt_str); + + quote! { + if ::log::log_enabled!(target: "runtime::contracts::strace", ::log::Level::Trace) { + let result = #body; + { + use sp_std::fmt::Write; + let mut w = sp_std::Writer::default(); + let _ = core::write!(&mut w, #trace_fmt_str, #( #trace_fmt_args, )* result); + let msg = core::str::from_utf8(&w.inner()).unwrap_or_default(); + ctx.ext().append_debug_buffer(msg); + } + result + } else { + #body + } + } + }; + // If we don't expand blocks (implementing for `()`) we change a few things: // - We replace any code by unreachable! // - Allow unused variables as the code that uses is not expanded @@ -613,11 +647,11 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) let inner = if expand_blocks { quote! { || #output { let (memory, ctx) = __caller__ - .host_data() + .data() .memory() .expect("Memory must be set when setting up host data; qed") .data_and_store_mut(&mut __caller__); - #body + #wrapped_body_with_trace } } } else { quote! { || -> #wasm_output { @@ -630,7 +664,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) let map_err = if expand_blocks { quote! { |reason| { - ::wasmi::core::Trap::host(reason) + ::wasmi::core::Trap::from(reason) } } } else { @@ -778,7 +812,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) #[proc_macro_attribute] pub fn define_env(attr: TokenStream, item: TokenStream) -> TokenStream { if !attr.is_empty() && !(attr.to_string() == "doc".to_string()) { - let msg = r#"Invalid `define_env` attribute macro: expected either no attributes or a single `doc` attibute: + let msg = r#"Invalid `define_env` attribute macro: expected either no attributes or a single `doc` attribute: - `#[define_env]` - `#[define_env(doc)]`"#; let span = TokenStream2::from(attr).span(); diff --git a/frame/contracts/src/address.rs b/frame/contracts/src/address.rs index 1c25af21e8cb7..e36fc6fffd6f7 100644 --- a/frame/contracts/src/address.rs +++ b/frame/contracts/src/address.rs @@ -32,7 +32,7 @@ use sp_runtime::traits::{Hash, TrailingZeroInput}; pub trait AddressGenerator { /// The address of a contract based on the given instantiate parameters. /// - /// Changing the formular for an already deployed chain is fine as long as no collisons + /// Changing the formular for an already deployed chain is fine as long as no collisions /// with the old formular. Changes only affect existing contracts. 
fn contract_address( deploying_address: &T::AccountId, diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index aa44cc3976a2c..07f24f385b035 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -39,7 +39,7 @@ use wasm_instrument::{ }, }; -/// The location where to put the genrated code. +/// The location where to put the generated code. pub enum Location { /// Generate all code into the `call` exported function. Call, @@ -512,7 +512,7 @@ pub fn max_pages() -> u32 { fn inject_gas_metering(module: Module) -> Module { let schedule = T::Schedule::get(); - let gas_rules = schedule.rules(&module, Determinism::Deterministic); + let gas_rules = schedule.rules(Determinism::Enforced); let backend = gas_metering::host_function::Injector::new("seal0", "gas"); gas_metering::inject(module, backend, &gas_rules).unwrap() } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index c18d9ffb4b584..6ebfb18509b1c 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -30,8 +30,7 @@ use self::{ sandbox::Sandbox, }; use crate::{ - exec::{AccountIdOf, FixSizedKey, VarSizedKey}, - schedule::{API_BENCHMARK_BATCH_SIZE, INSTR_BENCHMARK_BATCH_SIZE}, + exec::{AccountIdOf, Key}, wasm::CallFlags, Pallet as Contracts, *, }; @@ -46,11 +45,18 @@ use sp_runtime::{ use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::{BlockType, BrTableData, Instruction, ValueType}; -/// How many batches we do per API benchmark. -const API_BENCHMARK_BATCHES: u32 = 20; +/// How many runs we do per API benchmark. +/// +/// This is picked more or less arbitrary. We experimented with different numbers until +/// the results appeared to be stable. Reducing the number would speed up the benchmarks +/// but might make the results less precise. +const API_BENCHMARK_RUNS: u32 = 1600; -/// How many batches we do per Instruction benchmark. -const INSTR_BENCHMARK_BATCHES: u32 = 50; +/// How many runs we do per instruction benchmark. +/// +/// Same rationale as for [`API_BENCHMARK_RUNS`]. The number is bigger because instruction +/// benchmarks are faster. +const INSTR_BENCHMARK_RUNS: u32 = 5000; /// An instantiated and deployed contract. struct Contract { @@ -129,10 +135,10 @@ where } /// Store the supplied storage items into this contracts storage. - fn store(&self, items: &Vec<(FixSizedKey, Vec)>) -> Result<(), &'static str> { + fn store(&self, items: &Vec<([u8; 32], Vec)>) -> Result<(), &'static str> { let info = self.info()?; for item in items { - info.write(&item.0 as &FixSizedKey, Some(item.1.clone()), None, false) + info.write(&Key::Fix(item.0), Some(item.1.clone()), None, false) .map_err(|_| "Failed to write storage to restoration dest")?; } >::insert(&self.account_id, info); @@ -208,19 +214,7 @@ benchmarks! { on_initialize_per_trie_key { let k in 0..1024; let instance = Contract::::with_storage(WasmModule::dummy(), k, T::Schedule::get().limits.payload_len)?; - instance.info()?.queue_trie_for_deletion()?; - }: { - ContractInfo::::process_deletion_queue_batch(Weight::MAX) - } - - #[pov_mode = Measured] - on_initialize_per_queue_item { - let q in 0..1024.min(T::DeletionQueueDepth::get()); - for i in 0 .. 
q { - let instance = Contract::::with_index(i, WasmModule::dummy(), vec![])?; - instance.info()?.queue_trie_for_deletion()?; - ContractInfoOf::::remove(instance.account_id); - } + instance.info()?.queue_trie_for_deletion(); }: { ContractInfo::::process_deletion_queue_batch(Weight::MAX) } @@ -244,7 +238,7 @@ benchmarks! { // the sandbox. This does **not** include the actual execution for which the gas meter // is responsible. This is achieved by generating all code to the `deploy` function // which is in the wasm module but not executed on `call`. - // The results are supposed to be used as `call_with_code_kb(c) - call_with_code_kb(0)`. + // The results are supposed to be used as `call_with_code_per_byte(c) - call_with_code_per_byte(0)`. #[pov_mode = Measured] call_with_code_per_byte { let c in 0 .. T::MaxCodeLen::get(); @@ -263,9 +257,9 @@ benchmarks! { // we don't benchmark the actual execution of this code but merely what it takes to load // a code of that size into the sandbox. // - // `c`: Size of the code in kilobytes. - // `i`: Size of the input in kilobytes. - // `s`: Size of the salt in kilobytes. + // `c`: Size of the code in bytes. + // `i`: Size of the input in bytes. + // `s`: Size of the salt in bytes. // // # Note // @@ -299,8 +293,8 @@ benchmarks! { } // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. - // `i`: Size of the input in kilobytes. - // `s`: Size of the salt in kilobytes. + // `i`: Size of the input in bytes. + // `s`: Size of the salt in bytes. #[pov_mode = Measured] instantiate { let i in 0 .. code::max_pages::() * 64 * 1024; @@ -333,7 +327,7 @@ benchmarks! { // The dummy contract used here does not do this. The costs for the data copy is billed as // part of `seal_input`. The costs for invoking a contract of a specific size are not part // of this benchmark because we cannot know the size of the contract when issuing a call - // transaction. See `invoke_per_code_kb` for this. + // transaction. See `call_with_code_per_byte` for this. #[pov_mode = Measured] call { let data = vec![42u8; 1024]; @@ -349,7 +343,7 @@ benchmarks! { }: _(origin, callee, value, Weight::MAX, None, data) verify { let deposit = T::Currency::free_balance(&deposit_account); - // value and value transfered via call should be removed from the caller + // value and value transferred via call should be removed from the caller assert_eq!( T::Currency::free_balance(&instance.caller), caller_funding::() - instance.value - value - deposit - Pallet::::min_balance(), @@ -362,7 +356,7 @@ benchmarks! { // This constructs a contract that is maximal expensive to instrument. // It creates a maximum number of metering blocks per byte. - // `c`: Size of the code in kilobytes. + // `c`: Size of the code in bytes. // // # Note // @@ -375,7 +369,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); let origin = RawOrigin::Signed(caller.clone()); - }: _(origin, code, None, Determinism::Deterministic) + }: _(origin, code, None, Determinism::Enforced) verify { // uploading the code reserves some balance in the callers account assert!(T::Currency::reserved_balance(&caller) > 0u32.into()); @@ -391,7 +385,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. 
} = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); - let uploaded = >::bare_upload_code(caller.clone(), code, None, Determinism::Deterministic)?; + let uploaded = >::bare_upload_code(caller.clone(), code, None, Determinism::Enforced)?; assert_eq!(uploaded.code_hash, hash); assert_eq!(uploaded.deposit, T::Currency::reserved_balance(&caller)); assert!(>::code_exists(&hash)); @@ -419,17 +413,17 @@ benchmarks! { #[pov_mode = Measured] seal_caller { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_caller", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_caller", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_is_contract { - let r in 0 .. API_BENCHMARK_BATCHES; - let accounts = (0 .. r * API_BENCHMARK_BATCH_SIZE) + let r in 0 .. API_BENCHMARK_RUNS; + let accounts = (0 .. r) .map(|n| account::("account", n, 0)) .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); @@ -448,7 +442,7 @@ benchmarks! { value: accounts_bytes }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, account_len as u32), // address_ptr Regular(Instruction::Call(0)), Regular(Instruction::Drop), @@ -466,8 +460,8 @@ benchmarks! { #[pov_mode = Measured] seal_code_hash { - let r in 0 .. API_BENCHMARK_BATCHES; - let accounts = (0 .. r * API_BENCHMARK_BATCH_SIZE) + let r in 0 .. API_BENCHMARK_RUNS; + let accounts = (0 .. r) .map(|n| account::("account", n, 0)) .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); @@ -492,7 +486,7 @@ benchmarks! { value: accounts_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(36, account_len as u32), // address_ptr Regular(Instruction::I32Const(4)), // ptr to output data Regular(Instruction::I32Const(0)), // ptr to output length @@ -512,16 +506,16 @@ benchmarks! { #[pov_mode = Measured] seal_own_code_hash { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_own_code_hash", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_own_code_hash", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_caller_is_origin { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -530,7 +524,7 @@ benchmarks! { params: vec![], return_type: Some(ValueType::I32), }], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::Call(0), Instruction::Drop, ])), @@ -542,85 +536,86 @@ benchmarks! { #[pov_mode = Measured] seal_address { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_address", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_address", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_gas_left { - let r in 0 .. 
API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_gas_left", r * API_BENCHMARK_BATCH_SIZE + "seal1", "gas_left", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_balance { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_balance", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_balance", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_value_transferred { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_value_transferred", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_value_transferred", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_minimum_balance { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_minimum_balance", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_minimum_balance", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_block_number { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_block_number", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_block_number", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_now { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_now", r * API_BENCHMARK_BATCH_SIZE + "seal0", "seal_now", r ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] seal_weight_to_fee { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let pages = code::max_pages::(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_weight_to_fee", - params: vec![ValueType::I64, ValueType::I32, ValueType::I32], + module: "seal1", + name: "weight_to_fee", + params: vec![ValueType::I64, ValueType::I64, ValueType::I32, ValueType::I32], return_type: None, }], data_segments: vec![DataSegment { offset: 0, value: (pages * 64 * 1024 - 4).to_le_bytes().to_vec(), }], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::I64Const(500_000), + Instruction::I64Const(300_000), Instruction::I32Const(4), Instruction::I32Const(0), Instruction::Call(0), @@ -633,7 +628,7 @@ benchmarks! { #[pov_mode = Measured] seal_gas { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let code = WasmModule::::from(ModuleDefinition { imported_functions: vec![ImportedFunction { module: "seal0", @@ -641,7 +636,7 @@ benchmarks! 
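// A minimal sketch: `seal1::weight_to_fee` takes two I64 arguments because a
// FRAME weight has two components; the benchmark pushes the arbitrary sample
// values 500_000 (ref_time) and 300_000 (proof_size). This is a simplified
// stand-in for the real `Weight` type, shown only to make the argument order
// explicit.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
}

// The host side would reconstruct the benchmark's weight roughly as
// `Weight::from_parts(500_000, 300_000)`.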
{ params: vec![ValueType::I64], return_type: None, }], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::I64Const(42), Instruction::Call(0), ])), @@ -654,7 +649,7 @@ benchmarks! { #[pov_mode = Measured] seal_input { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -669,7 +664,7 @@ benchmarks! { value: 0u32.to_le_bytes().to_vec(), }, ], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::I32Const(4), // ptr where to store output Instruction::I32Const(0), // ptr to length Instruction::Call(0), @@ -681,10 +676,9 @@ benchmarks! { }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] - seal_input_per_kb { - let n in 0 .. code::max_pages::() * 64; - let pages = code::max_pages::(); - let buffer_size = pages * 64 * 1024 - 4; + seal_input_per_byte { + let n in 0 .. code::max_pages::() * 64 * 1024; + let buffer_size = code::max_pages::() * 64 * 1024 - 4; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -699,15 +693,16 @@ benchmarks! { value: buffer_size.to_le_bytes().to_vec(), }, ], - call_body: Some(body::repeated(API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::plain(vec![ Instruction::I32Const(4), // ptr where to store output Instruction::I32Const(0), // ptr to length Instruction::Call(0), + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; - let data = vec![42u8; (n * 1024).min(buffer_size) as usize]; + let data = vec![42u8; n.min(buffer_size) as usize]; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, data) @@ -738,8 +733,8 @@ benchmarks! { }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] - seal_return_per_kb { - let n in 0 .. code::max_pages::() * 64; + seal_return_per_byte { + let n in 0 .. code::max_pages::() * 64 * 1024; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -751,7 +746,7 @@ benchmarks! { call_body: Some(body::plain(vec![ Instruction::I32Const(0), // flags Instruction::I32Const(0), // data_ptr - Instruction::I32Const((n * 1024) as i32), // data_len + Instruction::I32Const(n as i32), // data_len Instruction::Call(0), Instruction::End, ])), @@ -792,15 +787,15 @@ benchmarks! 
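// A minimal sketch of the two body constructors the benchmarks switch between:
// `plain` emits the instruction sequence once (the caller supplies the trailing
// `End`, as in `seal_input_per_byte` above), while `repeated` clones the
// sequence `r` times and appends the terminator itself. These are simplified,
// dependency-free stand-ins for the helpers in `benchmarking/code.rs`; `I` is a
// placeholder for the wasm `Instruction` type.
fn plain<I>(instructions: Vec<I>) -> Vec<I> {
    instructions
}

fn repeated<I: Clone>(repetitions: u32, instructions: &[I], end: I) -> Vec<I> {
    let mut body: Vec<I> = instructions
        .iter()
        .cloned()
        .cycle()
        .take(instructions.len() * repetitions as usize)
        .collect();
    body.push(end);
    body
}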
{ let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); let deposit_account = instance.info()?.deposit_account().clone(); - assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); + assert_eq!(>::total_balance(&beneficiary), 0u32.into()); assert_eq!(T::Currency::free_balance(&instance.account_id), Pallet::::min_balance() * 2u32.into()); assert_ne!(T::Currency::free_balance(&deposit_account), 0u32.into()); }: call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]) verify { if r > 0 { - assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); - assert_eq!(T::Currency::total_balance(&deposit_account), 0u32.into()); - assert_eq!(T::Currency::total_balance(&beneficiary), Pallet::::min_balance() * 2u32.into()); + assert_eq!(>::total_balance(&instance.account_id), 0u32.into()); + assert_eq!(>::total_balance(&deposit_account), 0u32.into()); + assert_eq!(>::total_balance(&beneficiary), Pallet::::min_balance() * 2u32.into()); } } @@ -809,7 +804,7 @@ benchmarks! { // used. #[pov_mode = Measured] seal_random { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let pages = code::max_pages::(); let subject_len = T::Schedule::get().limits.subject_len; assert!(subject_len < 1024); @@ -827,7 +822,7 @@ benchmarks! { value: (pages * 64 * 1024 - subject_len - 4).to_le_bytes().to_vec(), }, ], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::I32Const(4), // subject_ptr Instruction::I32Const(subject_len as i32), // subject_len Instruction::I32Const((subject_len + 4) as i32), // out_ptr @@ -844,7 +839,7 @@ benchmarks! { // We benchmark for the worst case (largest event). #[pov_mode = Measured] seal_deposit_event { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -853,7 +848,7 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::I32Const(0), // topics_ptr Instruction::I32Const(0), // topics_len Instruction::I32Const(0), // data_ptr @@ -868,16 +863,13 @@ benchmarks! { // Benchmark the overhead that topics generate. // `t`: Number of topics - // `n`: Size of event payload in kb + // `n`: Size of event payload in bytes #[pov_mode = Measured] - seal_deposit_event_per_topic_and_kb { + seal_deposit_event_per_topic_and_byte { let t in 0 .. T::Schedule::get().limits.event_topics; - let n in 0 .. T::Schedule::get().limits.payload_len / 1024; - let mut topics = (0..API_BENCHMARK_BATCH_SIZE) - .map(|n| (n * t..n * t + t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode()) - .peekable(); - let topics_len = topics.peek().map(|i| i.len()).unwrap_or(0); - let topics = topics.flatten().collect(); + let n in 0 .. T::Schedule::get().limits.payload_len; + let topics = (0..t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode(); + let topics_len = topics.len(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -892,12 +884,13 @@ benchmarks! 
{ value: topics, }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, topics_len as u32), // topics_ptr - Regular(Instruction::I32Const(topics_len as i32)), // topics_len - Regular(Instruction::I32Const(0)), // data_ptr - Regular(Instruction::I32Const((n * 1024) as i32)), // data_len - Regular(Instruction::Call(0)), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // topics_ptr + Instruction::I32Const(topics_len as i32), // topics_len + Instruction::I32Const(0), // data_ptr + Instruction::I32Const(n as i32), // data_len + Instruction::Call(0), + Instruction::End, ])), .. Default::default() }); @@ -910,7 +903,7 @@ benchmarks! { // against an excessive use. #[pov_mode = Measured] seal_debug_message { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory { min_pages: 1, max_pages: 1 }), imported_functions: vec![ImportedFunction { @@ -919,7 +912,7 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::I32Const(0), // value_ptr Instruction::I32Const(0), // value_len Instruction::Call(0), @@ -937,15 +930,15 @@ benchmarks! { None, vec![], true, - Determinism::Deterministic, + Determinism::Enforced, ) .result?; } - seal_debug_message_per_kb { - // Vary size of input in kilobytes up to maximum allowed contract memory + seal_debug_message_per_byte { + // Vary size of input in bytes up to maximum allowed contract memory // or maximum allowed debug buffer size, whichever is less. - let i in 0 .. (T::Schedule::get().limits.memory_pages * 64).min(T::MaxDebugBufferLen::get() / 1024); + let i in 0 .. (T::Schedule::get().limits.memory_pages * 64 * 1024).min(T::MaxDebugBufferLen::get()); // We benchmark versus messages containing printable ASCII codes. // About 1Kb goes to the instrumented contract code instructions, // whereas all the space left we use for the initialization of the debug messages data. @@ -969,7 +962,7 @@ benchmarks! { ], call_body: Some(body::plain(vec![ Instruction::I32Const(0), // value_ptr - Instruction::I32Const((i * 1024) as i32), // value_len increments by i Kb + Instruction::I32Const(i as i32), // value_len Instruction::Call(0), Instruction::Drop, Instruction::End, @@ -986,7 +979,7 @@ benchmarks! { None, vec![], true, - Determinism::Deterministic, + Determinism::Enforced, ) .result?; } @@ -994,15 +987,20 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. // The contract is a bit more complex because it needs to use different keys in order // to generate unique storage accesses. However, it is still dominated by the storage - // accesses. We store all the keys that we are about to write at beforehand + // accesses. We store something at all the keys that we are about to write to // because re-writing at an existing key is always more expensive than writing - // it at a virgin key. + // to an key with no data behind it. + // + // # Note + // + // We need to use a smaller `r` because the keys are big and writing them all into the wasm + // might exceed the code size. #[skip_meta] #[pov_mode = Measured] seal_set_storage { - let r in 0 .. API_BENCHMARK_BATCHES/2; + let r in 0 .. API_BENCHMARK_RUNS/2; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) + let keys = (0 .. 
r) .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) .collect::>(); @@ -1021,7 +1019,7 @@ benchmarks! { value: keys_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, max_key_len as u32), // key_ptr Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const(0)), // value_ptr @@ -1035,7 +1033,7 @@ benchmarks! { let info = instance.info()?; for key in keys { info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1047,14 +1045,10 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] - seal_set_storage_per_new_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + seal_set_storage_per_new_byte { + let n in 0 .. T::Schedule::get().limits.payload_len; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); + let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1066,43 +1060,38 @@ benchmarks! { data_segments: vec![ DataSegment { offset: 0, - value: key_bytes, + value: key.clone(), }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const((n * 2048) as i32)), // value_len increments by 2kb up to max payload_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len + Instruction::I32Const(0), // value_ptr + Instruction::I32Const(n as i32), // value_len + Instruction::Call(0), + Instruction::Drop, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; - for key in keys { - info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } + info.write( + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + Some(vec![]), + None, + false, + ) + .map_err(|_| "Failed to write to storage during setup.")?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[skip_meta] #[pov_mode = Measured] - seal_set_storage_per_old_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + seal_set_storage_per_old_byte { + let n in 0 .. T::Schedule::get().limits.payload_len; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. 
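// A minimal sketch of the unified `Key` type these benchmarks now use
// (replacing the former `FixSizedKey`/`VarSizedKey` pair). This is a simplified
// stand-in, not the pallet's actual definition; in the real type the
// variable-length variant is bounded by `T::MaxStorageKeyLen` rather than the
// explicit `max_len` parameter used here.
enum Key {
    /// Legacy fixed 32-byte storage key, as in `Key::Fix(item.0)` further up.
    Fix([u8; 32]),
    /// Variable-length storage key, built via `try_from_var`.
    Var(Vec<u8>),
}

impl Key {
    fn try_from_var(key: Vec<u8>, max_len: usize) -> Result<Self, &'static str> {
        if key.len() <= max_len {
            Ok(Self::Var(key))
        } else {
            Err("Key has wrong length")
        }
    }
}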
n * API_BENCHMARK_BATCH_SIZE) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); + let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1114,43 +1103,42 @@ benchmarks! { data_segments: vec![ DataSegment { offset: 0, - value: key_bytes, + value: key.clone(), }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // value_len is 0 as testing vs pre-existing value len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len + Instruction::I32Const(0), // value_ptr + Instruction::I32Const(0), // value_len is 0 as testing vs pre-existing value len + Instruction::Call(0), + Instruction::Drop, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; - for key in keys { - info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } + info.write( + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; n as usize]), + None, + false, + ) + .map_err(|_| "Failed to write to storage during setup.")?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) // Similar to seal_set_storage. We store all the keys that we are about to // delete beforehand in order to prevent any optimizations that could occur when // deleting a non existing key. We generate keys of a maximum length, and have to - // reduce batch size in order to make resulting contract code size less than MaxCodeLen. + // the amount of runs in order to make resulting contract code size less than MaxCodeLen. #[skip_meta] #[pov_mode = Measured] seal_clear_storage { - let r in 0 .. API_BENCHMARK_BATCHES/2; + let r in 0 .. API_BENCHMARK_RUNS/2; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) + let keys = (0 .. r) .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) .collect::>(); @@ -1169,7 +1157,7 @@ benchmarks! { value: key_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, max_key_len as u32), // key_ptr Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::Call(0)), @@ -1181,7 +1169,7 @@ benchmarks! { let info = instance.info()?; for key in keys { info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1194,14 +1182,10 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] - seal_clear_storage_per_kb { - let n in 0 .. 
T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + seal_clear_storage_per_byte { + let n in 0 .. T::Schedule::get().limits.payload_len; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); + let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1213,28 +1197,27 @@ benchmarks! { data_segments: vec![ DataSegment { offset: 0, - value: key_bytes, + value: key.clone(), }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len + Instruction::Call(0), + Instruction::Drop, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; - for key in keys { - info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } + info.write( + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; n as usize]), + None, + false, + ) + .map_err(|_| "Failed to write to storage during setup.")?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) @@ -1242,9 +1225,9 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] seal_get_storage { - let r in 0 .. API_BENCHMARK_BATCHES/2; + let r in 0 .. API_BENCHMARK_RUNS/2; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) + let keys = (0 .. r) .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) .collect::>(); @@ -1268,8 +1251,8 @@ benchmarks! { value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, max_key_len as u32), // key_ptr + call_body: Some(body::repeated_dyn(r, vec![ + Counter(0, max_key_len), // key_ptr Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr @@ -1282,7 +1265,7 @@ benchmarks! { let info = instance.info()?; for key in keys { info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1295,15 +1278,10 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] - seal_get_storage_per_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + seal_get_storage_per_byte { + let n in 0 .. T::Schedule::get().limits.payload_len; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. 
n * API_BENCHMARK_BATCH_SIZE) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_bytes_len = key_bytes.len(); + let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1315,34 +1293,33 @@ benchmarks! { data_segments: vec![ DataSegment { offset: 0, - value: key_bytes, + value: key.clone(), }, DataSegment { - offset: key_bytes_len as u32, + offset: max_key_len, value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len + Instruction::I32Const((max_key_len + 4) as i32), // out_ptr + Instruction::I32Const(max_key_len as i32), // out_len_ptr + Instruction::Call(0), + Instruction::Drop, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; - for key in keys { - info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } + info.write( + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; n as usize]), + None, + false, + ) + .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) @@ -1351,9 +1328,9 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] seal_contains_storage { - let r in 0 .. API_BENCHMARK_BATCHES/2; + let r in 0 .. API_BENCHMARK_RUNS/2; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) + let keys = (0 .. r) .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) .collect::>(); @@ -1373,7 +1350,7 @@ benchmarks! { value: key_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, max_key_len as u32), // key_ptr Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::Call(0)), @@ -1385,7 +1362,7 @@ benchmarks! { let info = instance.info()?; for key in keys { info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1398,14 +1375,10 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] - seal_contains_storage_per_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + seal_contains_storage_per_byte { + let n in 0 .. 
T::Schedule::get().limits.payload_len; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); + let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1417,28 +1390,27 @@ benchmarks! { data_segments: vec![ DataSegment { offset: 0, - value: key_bytes, + value: key.clone(), }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len + Instruction::Call(0), + Instruction::Drop, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; - for key in keys { - info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } + info.write( + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; n as usize]), + None, + false, + ) + .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) @@ -1446,9 +1418,9 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] seal_take_storage { - let r in 0 .. API_BENCHMARK_BATCHES/2; + let r in 0 .. API_BENCHMARK_RUNS/2; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) + let keys = (0 .. r) .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) .collect::>(); @@ -1472,7 +1444,7 @@ benchmarks! { value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, max_key_len as u32), // key_ptr Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr @@ -1486,7 +1458,7 @@ benchmarks! { let info = instance.info()?; for key in keys { info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1499,15 +1471,10 @@ benchmarks! { #[skip_meta] #[pov_mode = Measured] - seal_take_storage_per_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + seal_take_storage_per_byte { + let n in 0 .. T::Schedule::get().limits.payload_len; let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. 
n * API_BENCHMARK_BATCH_SIZE) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_bytes_len = key_bytes.len(); + let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1519,34 +1486,33 @@ benchmarks! { data_segments: vec![ DataSegment { offset: 0, - value: key_bytes, + value: key.clone(), }, DataSegment { - offset: key_bytes_len as u32, + offset: max_key_len, value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len + Instruction::I32Const((max_key_len + 4) as i32), // out_ptr + Instruction::I32Const(max_key_len as i32), // out_len_ptr + Instruction::Call(0), + Instruction::Drop, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; - for key in keys { - info.write( - &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } + info.write( + &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; n as usize]), + None, + false, + ) + .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) @@ -1554,8 +1520,8 @@ benchmarks! { // We transfer to unique accounts. #[pov_mode = Measured] seal_transfer { - let r in 0 .. API_BENCHMARK_BATCHES; - let accounts = (0..r * API_BENCHMARK_BATCH_SIZE) + let r in 0 .. API_BENCHMARK_RUNS; + let accounts = (0..r) .map(|i| account::("receiver", i, 0)) .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); @@ -1582,7 +1548,7 @@ benchmarks! { value: account_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(value_len as u32, account_len as u32), // account_ptr Regular(Instruction::I32Const(account_len as i32)), // account_len Regular(Instruction::I32Const(0)), // value_ptr @@ -1593,40 +1559,52 @@ benchmarks! { .. 
Default::default() }); let instance = Contract::::new(code, vec![])?; - instance.set_balance(value * (r * API_BENCHMARK_BATCH_SIZE + 1).into()); + instance.set_balance(value * (r + 1).into()); let origin = RawOrigin::Signed(instance.caller.clone()); for account in &accounts { - assert_eq!(T::Currency::total_balance(account), 0u32.into()); + assert_eq!(>::total_balance(account), 0u32.into()); } }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) verify { for account in &accounts { - assert_eq!(T::Currency::total_balance(account), value); + assert_eq!(>::total_balance(account), value); } } // We call unique accounts. + // + // This is a slow call: We redeuce the number of runs. #[pov_mode = Measured] seal_call { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS / 2; let dummy_code = WasmModule::::dummy_with_bytes(0); - let callees = (0..r * API_BENCHMARK_BATCH_SIZE) + let callees = (0..r) .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![])) .collect::, _>>()?; let callee_len = callees.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0); let callee_bytes = callees.iter().flat_map(|x| x.account_id.encode()).collect(); let value: BalanceOf = 0u32.into(); let value_bytes = value.encode(); - let value_len = value_bytes.len(); + let value_len = BalanceOf::::max_encoded_len() as u32; + // Set an own limit every 2nd call + let own_limit = (u32::MAX - 100).into(); + let deposits = (0..r) + .map(|i| if i % 2 == 0 { 0u32.into() } else { own_limit } ) + .collect::>>(); + let deposits_bytes: Vec = deposits.iter().flat_map(|i| i.encode()).collect(); + let deposits_len = deposits_bytes.len() as u32; + let deposit_len = value_len.clone(); + let callee_offset = value_len + deposits_len; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_call", + module: "seal2", + name: "call", params: vec![ ValueType::I32, ValueType::I32, ValueType::I64, + ValueType::I64, ValueType::I32, ValueType::I32, ValueType::I32, @@ -1642,16 +1620,21 @@ benchmarks! { value: value_bytes, }, DataSegment { - offset: value_len as u32, + offset: value_len, + value: deposits_bytes, + }, + DataSegment { + offset: callee_offset, value: callee_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ - Counter(value_len as u32, callee_len as u32), // callee_ptr - Regular(Instruction::I32Const(callee_len as i32)), // callee_len - Regular(Instruction::I64Const(0)), // gas + call_body: Some(body::repeated_dyn(r, vec![ + Regular(Instruction::I32Const(0)), // flags + Counter(callee_offset, callee_len as u32), // callee_ptr + Regular(Instruction::I64Const(0)), // ref_time weight + Regular(Instruction::I64Const(0)), // proof_size weight + Counter(value_len, deposit_len as u32), // deposit_limit_ptr Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(value_len as i32)), // value_len Regular(Instruction::I32Const(0)), // input_data_ptr Regular(Instruction::I32Const(0)), // input_data_len Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr @@ -1663,12 +1646,13 @@ benchmarks! 
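// A minimal sketch of the argument list pushed for the new `seal2::call` import
// above, in declaration order. This is a descriptive stand-in to make the
// expanded signature readable, not the pallet's actual ABI type. The benchmark
// alternates the per-call storage deposit limit between zero and a large own
// limit, which is why the extrinsic is invoked with an overall limit of
// `Some(u32::MAX.into())`.
struct Seal2CallArgs {
    flags: u32,             // `CallFlags` bits; 0 in this benchmark
    callee_ptr: u32,        // pointer to the encoded callee `AccountId`
    ref_time: u64,          // weight limit, time component (0 here)
    proof_size: u64,        // weight limit, proof-size component (0 here)
    deposit_limit_ptr: u32, // pointer to the encoded storage deposit limit
    value_ptr: u32,         // pointer to the encoded transferred balance
    input_data_ptr: u32,
    input_data_len: u32,
    output_ptr: u32,        // SENTINEL here, i.e. the output is not copied back
    output_len_ptr: u32,
}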
{ }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::MAX, Some(BalanceOf::::from(u32::MAX).into()), vec![]) + // This is a slow call: We redeuce the number of runs. #[pov_mode = Measured] seal_delegate_call { - let r in 0 .. API_BENCHMARK_BATCHES; - let hashes = (0..r * API_BENCHMARK_BATCH_SIZE) + let r in 0 .. API_BENCHMARK_RUNS / 2; + let hashes = (0..r) .map(|i| { let code = WasmModule::::dummy_with_bytes(i); Contracts::::store_code_raw(code.code, whitelisted_caller())?; @@ -1701,7 +1685,7 @@ benchmarks! { value: hashes_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Regular(Instruction::I32Const(0)), // flags Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr Regular(Instruction::I32Const(0)), // input_data_ptr @@ -1719,14 +1703,10 @@ benchmarks! { }: call(origin, callee, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] - seal_call_per_transfer_clone_kb { + seal_call_per_transfer_clone_byte { let t in 0 .. 1; - let c in 0 .. code::max_pages::() * 64; - let callees = (0..API_BENCHMARK_BATCH_SIZE) - .map(|i| Contract::with_index(i + 1, >::dummy(), vec![])) - .collect::, _>>()?; - let callee_len = callees.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0); - let callee_bytes = callees.iter().flat_map(|x| x.account_id.encode()).collect::>(); + let c in 0 .. code::max_pages::() * 64 * 1024; + let callee = Contract::with_index(5, >::dummy(), vec![])?; let value: BalanceOf = t.into(); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1754,39 +1734,41 @@ benchmarks! { }, DataSegment { offset: value_len as u32, - value: callee_bytes, + value: callee.account_id.encode(), }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Regular(Instruction::I32Const(CallFlags::CLONE_INPUT.bits() as i32)), // flags - Counter(value_len as u32, callee_len as u32), // callee_ptr - Regular(Instruction::I64Const(0)), // gas - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), + call_body: Some(body::plain(vec![ + Instruction::I32Const(CallFlags::CLONE_INPUT.bits() as i32), // flags + Instruction::I32Const(value_len as i32), // callee_ptr + Instruction::I64Const(0), // gas + Instruction::I32Const(0), // value_ptr + Instruction::I32Const(0), // input_data_ptr + Instruction::I32Const(0), // input_data_len + Instruction::I32Const(SENTINEL as i32), // output_ptr + Instruction::I32Const(0), // output_len_ptr + Instruction::Call(0), + Instruction::Drop, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - let bytes = vec![42; (c * 1024) as usize]; + let bytes = vec![42; c as usize]; }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, bytes) // We assume that every instantiate sends at least the minimum balance. + // This is a slow call: we reduce the number of runs. #[pov_mode = Measured] seal_instantiate { - let r in 0 .. 
API_BENCHMARK_BATCHES; - let hashes = (0..r * API_BENCHMARK_BATCH_SIZE) + let r in 1 .. API_BENCHMARK_RUNS / 2; + let hashes = (0..r) .map(|i| { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), call_body: Some(body::plain(vec![ - // we need to add this in order to make contracts unique - // so that they can be deployed from the same sender + // We need to add this in order to make contracts unique, + // so that they can be deployed from the same sender. Instruction::I32Const(i as i32), Instruction::Drop, Instruction::End, @@ -1799,27 +1781,24 @@ benchmarks! { .collect::, &'static str>>()?; let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); - let hashes_len = hashes_bytes.len(); + let hashes_len = &hashes_bytes.len(); let value = Pallet::::min_balance(); assert!(value > 0u32.into()); let value_bytes = value.encode(); - let value_len = value_bytes.len(); + let value_len = BalanceOf::::max_encoded_len(); let addr_len = T::AccountId::max_encoded_len(); - - // offsets where to place static data in contract memory - let value_offset = 0; - let hashes_offset = value_offset + value_len; + // Offsets where to place static data in contract memory. + let hashes_offset = value_len; let addr_len_offset = hashes_offset + hashes_len; let addr_offset = addr_len_offset + addr_len; - let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_instantiate", + module: "seal2", + name: "instantiate", params: vec![ ValueType::I32, - ValueType::I32, + ValueType::I64, ValueType::I64, ValueType::I32, ValueType::I32, @@ -1836,7 +1815,7 @@ benchmarks! { }], data_segments: vec![ DataSegment { - offset: value_offset as u32, + offset: 0, value: value_bytes, }, DataSegment { @@ -1848,12 +1827,12 @@ benchmarks! { value: addr_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr - Regular(Instruction::I32Const(hash_len as i32)), // code_hash_len - Regular(Instruction::I64Const(0)), // gas - Regular(Instruction::I32Const(value_offset as i32)), // value_ptr - Regular(Instruction::I32Const(value_len as i32)), // value_len + Regular(Instruction::I64Const(0)), // ref_time weight + Regular(Instruction::I64Const(0)), // proof_size weight + Regular(Instruction::I32Const(SENTINEL as i32)), // deposit limit ptr: use parent's limit + Regular(Instruction::I32Const(0)), // value_ptr Regular(Instruction::I32Const(0)), // input_data_ptr Regular(Instruction::I32Const(0)), // input_data_len Regular(Instruction::I32Const(addr_offset as i32)), // address_ptr @@ -1861,14 +1840,14 @@ benchmarks! { Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr Regular(Instruction::I32Const(0)), // output_len_ptr Regular(Instruction::I32Const(0)), // salt_ptr - Regular(Instruction::I32Const(0)), // salt_ptr_len + Regular(Instruction::I32Const(0)), // salt_len_ptr Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), .. 
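// A minimal sketch of the static-data layout the `seal_instantiate` benchmark
// builds in contract memory, mirroring the offset computation above. Concrete
// sizes are illustrative; the real ones come from `max_encoded_len()`.
fn instantiate_layout(value_len: usize, hashes_len: usize, addr_len: usize) -> (usize, usize, usize) {
    let hashes_offset = value_len;                    // code hashes follow the encoded value at offset 0
    let addr_len_offset = hashes_offset + hashes_len; // then the little-endian address length word
    let addr_offset = addr_len_offset + addr_len;     // then the buffer receiving the new contract's address
    (hashes_offset, addr_len_offset, addr_offset)
}

// For example, with a 16-byte balance encoding, 32-byte code hashes, r = 10 runs
// and 32-byte account ids this yields (16, 336, 368).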
Default::default() }); let instance = Contract::::new(code, vec![])?; - instance.set_balance((value + Pallet::::min_balance()) * (r * API_BENCHMARK_BATCH_SIZE + 1).into()); + instance.set_balance((value + Pallet::::min_balance()) * (r + 1).into()); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr.clone(); let addresses = hashes @@ -1892,37 +1871,24 @@ benchmarks! { } #[pov_mode = Measured] - seal_instantiate_per_transfer_input_salt_kb { + seal_instantiate_per_transfer_input_salt_byte { let t in 0 .. 1; - let i in 0 .. (code::max_pages::() - 1) * 64; - let s in 0 .. (code::max_pages::() - 1) * 64; + let i in 0 .. (code::max_pages::() - 1) * 64 * 1024; + let s in 0 .. (code::max_pages::() - 1) * 64 * 1024; let callee_code = WasmModule::::dummy(); let hash = callee_code.hash; let hash_bytes = callee_code.hash.encode(); let hash_len = hash_bytes.len(); Contracts::::store_code_raw(callee_code.code, whitelisted_caller())?; - let salts = (0..API_BENCHMARK_BATCH_SIZE).map(|x| x.encode()).collect::>(); - let salt_len = salts.get(0).map(|x| x.len()).unwrap_or(0); - let salt_bytes = salts.iter().cloned().flatten().collect::>(); - let salts_len = salt_bytes.len(); - let value: BalanceOf = t.into(); + let value: BalanceOf = t.into(); let value_bytes = value.encode(); - let value_len = value_bytes.len(); - let addr_len = T::AccountId::max_encoded_len(); - - // offsets where to place static data in contract memory - let salt_offset = 0; - let value_offset = salts_len; - let hash_offset = value_offset + value_len; - let addr_len_offset = hash_offset + hash_len; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", + module: "seal1", name: "seal_instantiate", params: vec![ - ValueType::I32, ValueType::I32, ValueType::I64, ValueType::I32, @@ -1934,73 +1900,63 @@ benchmarks! 
{ ValueType::I32, ValueType::I32, ValueType::I32, - ValueType::I32, ], return_type: Some(ValueType::I32), }], data_segments: vec![ DataSegment { - offset: salt_offset as u32, - value: salt_bytes, - }, - DataSegment { - offset: value_offset as u32, - value: value_bytes, - }, - DataSegment { - offset: hash_offset as u32, + offset: 0, value: hash_bytes, }, DataSegment { - offset: addr_len_offset as u32, - value: (addr_len as u32).to_le_bytes().into(), + offset: hash_len as u32, + value: value_bytes, }, ], - call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Regular(Instruction::I32Const(hash_offset as i32)), // code_hash_ptr - Regular(Instruction::I32Const(hash_len as i32)), // code_hash_len - Regular(Instruction::I64Const(0)), // gas - Regular(Instruction::I32Const(value_offset as i32)), // value_ptr - Regular(Instruction::I32Const(value_len as i32)), // value_len - Counter(salt_offset as u32, salt_len as u32), // input_data_ptr - Regular(Instruction::I32Const((i * 1024) as i32)), // input_data_len - Regular(Instruction::I32Const((addr_len_offset + addr_len) as i32)), // address_ptr - Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr - Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Counter(salt_offset as u32, salt_len as u32), // salt_ptr - Regular(Instruction::I32Const((s * 1024) as i32)), // salt_len - Regular(Instruction::Call(0)), - Regular(Instruction::I32Eqz), - Regular(Instruction::If(BlockType::NoResult)), - Regular(Instruction::Nop), - Regular(Instruction::Else), - Regular(Instruction::Unreachable), - Regular(Instruction::End), + call_body: Some(body::plain(vec![ + Instruction::I32Const(0 as i32), // code_hash_ptr + Instruction::I64Const(0), // gas + Instruction::I32Const(hash_len as i32), // value_ptr + Instruction::I32Const(0 as i32), // input_data_ptr + Instruction::I32Const(i as i32), // input_data_len + Instruction::I32Const(SENTINEL as i32), // address_ptr + Instruction::I32Const(0), // address_len_ptr + Instruction::I32Const(SENTINEL as i32), // output_ptr + Instruction::I32Const(0), // output_len_ptr + Instruction::I32Const(0 as i32), // salt_ptr + Instruction::I32Const(s as i32), // salt_len + Instruction::Call(0), + Instruction::I32Eqz, + Instruction::If(BlockType::NoResult), + Instruction::Nop, + Instruction::Else, + Instruction::Unreachable, + Instruction::End, + Instruction::End, ])), .. Default::default() }); let instance = Contract::::new(code, vec![])?; - instance.set_balance((value + Pallet::::min_balance()) * (API_BENCHMARK_BATCH_SIZE + 1).into()); + instance.set_balance(value + (Pallet::::min_balance() * 2u32.into())); let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) // Only the overhead of calling the function itself with minimal arguments. #[pov_mode = Measured] seal_hash_sha2_256 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_sha2_256", r * API_BENCHMARK_BATCH_SIZE, 0, + "seal_hash_sha2_256", r, 0, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - // `n`: Input to hash in kilobytes + // `n`: Input to hash in bytes #[pov_mode = Measured] - seal_hash_sha2_256_per_kb { - let n in 0 .. code::max_pages::() * 64; + seal_hash_sha2_256_per_byte { + let n in 0 .. 
code::max_pages::() * 64 * 1024; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_sha2_256", API_BENCHMARK_BATCH_SIZE, n * 1024, + "seal_hash_sha2_256", 1, n, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) @@ -2008,19 +1964,19 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. #[pov_mode = Measured] seal_hash_keccak_256 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_keccak_256", r * API_BENCHMARK_BATCH_SIZE, 0, + "seal_hash_keccak_256", r, 0, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - // `n`: Input to hash in kilobytes + // `n`: Input to hash in bytes #[pov_mode = Measured] - seal_hash_keccak_256_per_kb { - let n in 0 .. code::max_pages::() * 64; + seal_hash_keccak_256_per_byte { + let n in 0 .. code::max_pages::() * 64 * 1024; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_keccak_256", API_BENCHMARK_BATCH_SIZE, n * 1024, + "seal_hash_keccak_256", 1, n, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) @@ -2028,19 +1984,19 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. #[pov_mode = Measured] seal_hash_blake2_256 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_blake2_256", r * API_BENCHMARK_BATCH_SIZE, 0, + "seal_hash_blake2_256", r, 0, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - // `n`: Input to hash in kilobytes + // `n`: Input to hash in bytes #[pov_mode = Measured] - seal_hash_blake2_256_per_kb { - let n in 0 .. code::max_pages::() * 64; + seal_hash_blake2_256_per_byte { + let n in 0 .. code::max_pages::() * 64 * 1024; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_blake2_256", API_BENCHMARK_BATCH_SIZE, n * 1024, + "seal_hash_blake2_256", 1, n, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) @@ -2048,32 +2004,138 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. #[pov_mode = Measured] seal_hash_blake2_128 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_RUNS; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_blake2_128", r * API_BENCHMARK_BATCH_SIZE, 0, + "seal_hash_blake2_128", r, 0, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - // `n`: Input to hash in kilobytes + // `n`: Input to hash in bytes #[pov_mode = Measured] - seal_hash_blake2_128_per_kb { - let n in 0 .. code::max_pages::() * 64; + seal_hash_blake2_128_per_byte { + let n in 0 .. code::max_pages::() * 64 * 1024; let instance = Contract::::new(WasmModule::hasher( - "seal_hash_blake2_128", API_BENCHMARK_BATCH_SIZE, n * 1024, + "seal_hash_blake2_128", 1, n, ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + // `n`: Message input length to verify in bytes. + #[pov_mode = Measured] + seal_sr25519_verify_per_byte { + let n in 0 .. 
T::MaxCodeLen::get() - 255; // need some buffer so the code size does not + // exceed the max code size. + + let message = (0..n).zip((32u8..127u8).cycle()).map(|(_, c)| c).collect::>(); + let message_len = message.len() as i32; + + let key_type = sp_core::crypto::KeyTypeId(*b"code"); + let pub_key = sp_io::crypto::sr25519_generate(key_type, None); + let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature"); + let sig = AsRef::<[u8; 64]>::as_ref(&sig).to_vec(); + + let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + imported_functions: vec![ImportedFunction { + module: "seal0", + name: "sr25519_verify", + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], + return_type: Some(ValueType::I32), + }], + data_segments: vec![ + DataSegment { + offset: 0, + value: sig, + }, + DataSegment { + offset: 64, + value: pub_key.to_vec(), + }, + DataSegment { + offset: 96, + value: message, + }, + ], + call_body: Some(body::plain(vec![ + Instruction::I32Const(0), // signature_ptr + Instruction::I32Const(64), // pub_key_ptr + Instruction::I32Const(message_len), // message_len + Instruction::I32Const(96), // message_ptr + Instruction::Call(0), + Instruction::Drop, + Instruction::End, + ])), + .. Default::default() + }); + + let instance = Contract::::new(code, vec![])?; + let origin = RawOrigin::Signed(instance.caller.clone()); + }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + // Only calling the function itself with valid arguments. // It generates different private keys and signatures for the message "Hello world". + // This is a slow call: We reduce the number of runs. + #[pov_mode = Measured] + seal_sr25519_verify { + let r in 0 .. API_BENCHMARK_RUNS / 10; + + let message = b"Hello world".to_vec(); + let message_len = message.len() as i32; + let key_type = sp_core::crypto::KeyTypeId(*b"code"); + let sig_params = (0..r) + .map(|i| { + let pub_key = sp_io::crypto::sr25519_generate(key_type, None); + let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature"); + let data: [u8; 96] = [AsRef::<[u8]>::as_ref(&sig), AsRef::<[u8]>::as_ref(&pub_key)].concat().try_into().unwrap(); + data + }) + .flatten() + .collect::>(); + let sig_params_len = sig_params.len() as i32; + + let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + imported_functions: vec![ImportedFunction { + module: "seal0", + name: "sr25519_verify", + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], + return_type: Some(ValueType::I32), + }], + data_segments: vec![ + DataSegment { + offset: 0, + value: sig_params + }, + DataSegment { + offset: sig_params_len as u32, + value: message, + }, + ], + call_body: Some(body::repeated_dyn(r, vec![ + Counter(0, 96), // signature_ptr + Counter(64, 96), // pub_key_ptr + Regular(Instruction::I32Const(message_len)), // message_len + Regular(Instruction::I32Const(sig_params_len)), // message_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ])), + .. Default::default() + }); + let instance = Contract::::new(code, vec![])?; + let origin = RawOrigin::Signed(instance.caller.clone()); + }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + + // Only calling the function itself with valid arguments. + // It generates different private keys and signatures for the message "Hello world". + // This is a slow call: We reduce the number of runs. 
#[pov_mode = Measured] seal_ecdsa_recover { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_RUNS / 10; let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes()); let key_type = sp_core::crypto::KeyTypeId(*b"code"); - let signatures = (0..r * API_BENCHMARK_BATCH_SIZE) + let signatures = (0..r) .map(|i| { let pub_key = sp_io::crypto::ecdsa_generate(key_type, None); let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &message_hash).expect("Generates signature"); @@ -2101,7 +2163,7 @@ benchmarks! { value: signatures, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(32, 65), // signature_ptr Regular(Instruction::I32Const(0)), // message_hash_ptr Regular(Instruction::I32Const(signatures_bytes_len + 32)), // output_len_ptr @@ -2116,11 +2178,12 @@ benchmarks! { // Only calling the function itself for the list of // generated different ECDSA keys. + // This is a slow call: We reduce the number of runs. #[pov_mode = Measured] seal_ecdsa_to_eth_address { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_RUNS / 10; let key_type = sp_core::crypto::KeyTypeId(*b"code"); - let pub_keys_bytes = (0..r * API_BENCHMARK_BATCH_SIZE) + let pub_keys_bytes = (0..r) .flat_map(|_| { sp_io::crypto::ecdsa_generate(key_type, None).0 }) @@ -2140,7 +2203,7 @@ benchmarks! { value: pub_keys_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, 33), // pub_key_ptr Regular(Instruction::I32Const(pub_keys_bytes_len)), // out_ptr Regular(Instruction::Call(0)), @@ -2154,8 +2217,8 @@ benchmarks! { #[pov_mode = Measured] seal_set_code_hash { - let r in 0 .. API_BENCHMARK_BATCHES; - let code_hashes = (0..r * API_BENCHMARK_BATCH_SIZE) + let r in 0 .. API_BENCHMARK_RUNS; + let code_hashes = (0..r) .map(|i| { let new_code = WasmModule::::dummy_with_bytes(i); Contracts::::store_code_raw(new_code.code, whitelisted_caller())?; @@ -2182,7 +2245,7 @@ benchmarks! { value: code_hashes_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, code_hash_len as u32), // code_hash_ptr Regular(Instruction::Call(0)), Regular(Instruction::Drop), @@ -2195,7 +2258,7 @@ benchmarks! { #[pov_mode = Measured] seal_reentrance_count { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -2204,7 +2267,7 @@ benchmarks! { params: vec![], return_type: Some(ValueType::I32), }], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::Call(0), Instruction::Drop, ])), @@ -2216,9 +2279,9 @@ benchmarks! { #[pov_mode = Measured] seal_account_reentrance_count { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let dummy_code = WasmModule::::dummy_with_bytes(0); - let accounts = (0..r * API_BENCHMARK_BATCH_SIZE) + let accounts = (0..r) .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![])) .collect::, _>>()?; let account_id_len = accounts.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0); @@ -2237,7 +2300,7 @@ benchmarks!
{ value: account_id_bytes, }, ], - call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Counter(0, account_id_len as u32), // account_ptr Regular(Instruction::Call(0)), Regular(Instruction::Drop), @@ -2250,7 +2313,7 @@ benchmarks! { #[pov_mode = Measured] seal_instantiation_nonce { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_RUNS; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -2259,7 +2322,7 @@ benchmarks! { params: vec![], return_type: Some(ValueType::I64), }], - call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::Call(0), Instruction::Drop, ])), @@ -2273,7 +2336,7 @@ benchmarks! { // the same amount of time. We follow that `t.load` and `drop` both have the weight // of this benchmark / 2. We need to make this assumption because there is no way // to measure them on their own using a valid wasm module. We need their individual - // values to derive the weight of individual instructions (by substraction) from + // values to derive the weight of individual instructions (by subtraction) from // benchmarks that include those for parameter pushing and return type dropping. // We call the weight of `t.load` and `drop`: `w_param`. // The weight that would result from the respective benchmark we call: `w_bench`. @@ -2281,9 +2344,9 @@ benchmarks! { // w_i{32,64}const = w_drop = w_bench / 2 #[pov_mode = Ignored] instr_i64const { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomI64Repeated(1), Regular(Instruction::Drop), ])), @@ -2296,10 +2359,10 @@ benchmarks! { // w_i{32,64}load = w_bench - 2 * w_param #[pov_mode = Ignored] instr_i64load { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomUnaligned(0, code::max_pages::() * 64 * 1024 - 8), Regular(Instruction::I64Load(3, 0)), Regular(Instruction::Drop), @@ -2313,10 +2376,10 @@ benchmarks! { // w_i{32,64}store{...} = w_bench - 2 * w_param #[pov_mode = Ignored] instr_i64store { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomUnaligned(0, code::max_pages::() * 64 * 1024 - 8), RandomI64Repeated(1), Regular(Instruction::I64Store(3, 0)), @@ -2330,9 +2393,9 @@ benchmarks! { // w_select = w_bench - 4 * w_param #[pov_mode = Ignored] instr_select { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomI64Repeated(1), RandomI64Repeated(1), RandomI32(0, 2), @@ -2348,9 +2411,9 @@ benchmarks! 
{ // w_if = w_bench - 3 * w_param #[pov_mode = Ignored] instr_if { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomI32(0, 2), Regular(Instruction::If(BlockType::Value(ValueType::I64))), RandomI64Repeated(1), @@ -2369,9 +2432,9 @@ benchmarks! { // Block instructions are not counted. #[pov_mode = Ignored] instr_br { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Regular(Instruction::Block(BlockType::NoResult)), Regular(Instruction::Block(BlockType::NoResult)), Regular(Instruction::Block(BlockType::NoResult)), @@ -2396,9 +2459,9 @@ benchmarks! { // Block instructions are not counted. #[pov_mode = Ignored] instr_br_if { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Regular(Instruction::Block(BlockType::NoResult)), Regular(Instruction::Block(BlockType::NoResult)), Regular(Instruction::Block(BlockType::NoResult)), @@ -2424,13 +2487,13 @@ benchmarks! { // Block instructions are not counted. #[pov_mode = Ignored] instr_br_table { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let table = Box::new(BrTableData { table: Box::new([1, 1, 1]), default: 1, }); let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ Regular(Instruction::Block(BlockType::NoResult)), Regular(Instruction::Block(BlockType::NoResult)), Regular(Instruction::Block(BlockType::NoResult)), @@ -2465,21 +2528,22 @@ benchmarks! { default: 0, }); let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(INSTR_BENCHMARK_BATCH_SIZE, vec![ - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - RandomI32(0, (e + 1) as i32), // Make sure the default entry is also used - Regular(Instruction::BrTable(table)), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), + call_body: Some(body::plain(vec![ + Instruction::Block(BlockType::NoResult), + Instruction::Block(BlockType::NoResult), + Instruction::Block(BlockType::NoResult), + Instruction::I32Const((e / 2) as i32), + Instruction::BrTable(table), + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + Instruction::End, ])), .. Default::default() })); @@ -2490,7 +2554,7 @@ benchmarks! { // w_call = w_bench - 2 * w_param #[pov_mode = Ignored] instr_call { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. 
INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { // We need to make use of the stack here in order to trigger stack height // instrumentation. @@ -2499,7 +2563,7 @@ benchmarks! { Instruction::Drop, Instruction::End, ])), - call_body: Some(body::repeated(r * INSTR_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::Call(2), // call aux ])), .. Default::default() @@ -2511,7 +2575,7 @@ benchmarks! { // w_call_indrect = w_bench - 3 * w_param #[pov_mode = Ignored] instr_call_indirect { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let num_elements = T::Schedule::get().limits.table_size; use self::code::TableSegment; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { @@ -2522,7 +2586,7 @@ benchmarks! { Instruction::Drop, Instruction::End, ])), - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomI32(0, num_elements as i32), Regular(Instruction::CallIndirect(0, 0)), // we only have one sig: 0 ])), @@ -2536,39 +2600,6 @@ benchmarks! { sbox.invoke(); } - // w_instr_call_indirect_per_param = w_bench - 1 * w_param - // Calling a function indirectly causes it to go through a thunk function whose runtime - // linearly depend on the amount of parameters to this function. - // Please note that this is not necessary with a direct call. - #[pov_mode = Ignored] - instr_call_indirect_per_param { - let p in 0 .. T::Schedule::get().limits.parameters; - let num_elements = T::Schedule::get().limits.table_size; - use self::code::TableSegment; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - // We need to make use of the stack here in order to trigger stack height - // instrumentation. - aux_body: Some(body::plain(vec![ - Instruction::I64Const(42), - Instruction::Drop, - Instruction::End, - ])), - aux_arg_num: p, - call_body: Some(body::repeated_dyn(INSTR_BENCHMARK_BATCH_SIZE, vec![ - RandomI64Repeated(p as usize), - RandomI32(0, num_elements as i32), - Regular(Instruction::CallIndirect(p.min(1), 0)), // aux signature: 1 or 0 - ])), - table: Some(TableSegment { - num_elements, - function_index: 2, // aux - }), - .. Default::default() - })); - }: { - sbox.invoke(); - } - // w_per_local = w_bench #[pov_mode = Ignored] instr_call_per_local { @@ -2579,8 +2610,9 @@ benchmarks! { body::inject_locals(&mut aux_body, l); let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { aux_body: Some(aux_body), - call_body: Some(body::repeated(INSTR_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::plain(vec![ Instruction::Call(2), // call aux + Instruction::End, ])), .. Default::default() })); @@ -2591,9 +2623,9 @@ benchmarks! { // w_local_get = w_bench - 1 * w_param #[pov_mode = Ignored] instr_local_get { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let max_locals = T::Schedule::get().limits.locals; - let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + let mut call_body = body::repeated_dyn(r, vec![ RandomGetLocal(0, max_locals), Regular(Instruction::Drop), ]); @@ -2609,9 +2641,9 @@ benchmarks! { // w_local_set = w_bench - 1 * w_param #[pov_mode = Ignored] instr_local_set { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. 
INSTR_BENCHMARK_RUNS; let max_locals = T::Schedule::get().limits.locals; - let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + let mut call_body = body::repeated_dyn(r, vec![ RandomI64Repeated(1), RandomSetLocal(0, max_locals), ]); @@ -2627,9 +2659,9 @@ benchmarks! { // w_local_tee = w_bench - 2 * w_param #[pov_mode = Ignored] instr_local_tee { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let max_locals = T::Schedule::get().limits.locals; - let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + let mut call_body = body::repeated_dyn(r, vec![ RandomI64Repeated(1), RandomTeeLocal(0, max_locals), Regular(Instruction::Drop), @@ -2646,10 +2678,10 @@ benchmarks! { // w_global_get = w_bench - 1 * w_param #[pov_mode = Ignored] instr_global_get { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let max_globals = T::Schedule::get().limits.globals; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomGetGlobal(0, max_globals), Regular(Instruction::Drop), ])), @@ -2663,10 +2695,10 @@ benchmarks! { // w_global_set = w_bench - 1 * w_param #[pov_mode = Ignored] instr_global_set { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let max_globals = T::Schedule::get().limits.globals; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomI64Repeated(1), RandomSetGlobal(0, max_globals), ])), @@ -2680,10 +2712,10 @@ benchmarks! { // w_memory_get = w_bench - 1 * w_param #[pov_mode = Ignored] instr_memory_current { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), - call_body: Some(body::repeated(r * INSTR_BENCHMARK_BATCH_SIZE, &[ + call_body: Some(body::repeated(r, &[ Instruction::CurrentMemory(0), Instruction::Drop ])), @@ -2700,14 +2732,13 @@ benchmarks! { // depends on how much memory is already allocated. #[pov_mode = Ignored] instr_memory_grow { - let r in 0 .. 1; - let max_pages = ImportedMemory::max::().max_pages; + let r in 0 .. ImportedMemory::max::().max_pages; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory { min_pages: 0, - max_pages, + max_pages: ImportedMemory::max::().max_pages, }), - call_body: Some(body::repeated(r * max_pages, &[ + call_body: Some(body::repeated(r, &[ Instruction::I32Const(1), Instruction::GrowMemory(0), Instruction::Drop, @@ -2723,10 +2754,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64clz { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::unary_instr( Instruction::I64Clz, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2734,10 +2765,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64ctz { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::unary_instr( Instruction::I64Ctz, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2745,10 +2776,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64popcnt { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. 
INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::unary_instr( Instruction::I64Popcnt, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2756,10 +2787,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64eqz { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::unary_instr( Instruction::I64Eqz, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2767,9 +2798,9 @@ benchmarks! { #[pov_mode = Ignored] instr_i64extendsi32 { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomI32Repeated(1), Regular(Instruction::I64ExtendSI32), Regular(Instruction::Drop), @@ -2782,9 +2813,9 @@ benchmarks! { #[pov_mode = Ignored] instr_i64extendui32 { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r, vec![ RandomI32Repeated(1), Regular(Instruction::I64ExtendUI32), Regular(Instruction::Drop), @@ -2797,10 +2828,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i32wrapi64 { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::unary_instr( Instruction::I32WrapI64, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2811,10 +2842,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64eq { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Eq, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2822,10 +2853,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64ne { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Ne, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2833,10 +2864,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64lts { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64LtS, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2844,10 +2875,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64ltu { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64LtU, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2855,10 +2886,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64gts { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64GtS, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2866,10 +2897,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64gtu { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64GtU, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2877,10 +2908,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64les { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. 
INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64LeS, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2888,10 +2919,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64leu { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64LeU, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2899,10 +2930,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64ges { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64GeS, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2910,10 +2941,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64geu { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64GeU, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2921,10 +2952,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64add { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Add, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2932,10 +2963,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64sub { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Sub, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2943,10 +2974,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64mul { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Mul, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2954,10 +2985,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64divs { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64DivS, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2965,10 +2996,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64divu { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64DivU, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2976,10 +3007,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64rems { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64RemS, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2987,10 +3018,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64remu { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64RemU, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -2998,10 +3029,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64and { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64And, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -3009,10 +3040,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64or { - let r in 0 .. 
INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Or, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -3020,10 +3051,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64xor { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Xor, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -3031,10 +3062,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64shl { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Shl, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -3042,10 +3073,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64shrs { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64ShrS, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -3053,10 +3084,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64shru { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64ShrU, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -3064,10 +3095,10 @@ benchmarks! { #[pov_mode = Ignored] instr_i64rotl { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Rotl, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); @@ -3075,18 +3106,18 @@ benchmarks! { #[pov_mode = Ignored] instr_i64rotr { - let r in 0 .. INSTR_BENCHMARK_BATCHES; + let r in 0 .. INSTR_BENCHMARK_RUNS; let mut sbox = Sandbox::from(&WasmModule::::binary_instr( Instruction::I64Rotr, - r * INSTR_BENCHMARK_BATCH_SIZE, + r, )); }: { sbox.invoke(); } - // This is no benchmark. It merely exist to have an easy way to pretty print the curently + // This is no benchmark. It merely exists to have an easy way to pretty print the currently // configured `Schedule` during benchmark development. - // It can be outputed using the following command: + // It can be outputted using the following command: // cargo run --manifest-path=bin/node/cli/Cargo.toml \ // --features runtime-benchmarks -- benchmark pallet --extra --dev --execution=native \ // -p pallet_contracts -e print_schedule --no-median-slopes --no-min-squares @@ -3095,16 +3126,12 @@ benchmarks!
{ print_schedule { #[cfg(feature = "std")] { - let weight_limit = T::DeletionWeightLimit::get(); - let max_queue_depth = T::DeletionQueueDepth::get() as usize; - let empty_queue_throughput = ContractInfo::::deletion_budget(0, weight_limit); - let full_queue_throughput = ContractInfo::::deletion_budget(max_queue_depth, weight_limit); + let max_weight = ::BlockWeights::get().max_block; + let (weight_per_key, key_budget) = ContractInfo::::deletion_budget(max_weight); println!("{:#?}", Schedule::::default()); println!("###############################################"); - println!("Lazy deletion weight per key: {}", empty_queue_throughput.0); - println!("Lazy deletion throughput per block (empty queue, full queue): {}, {}", - empty_queue_throughput.1, full_queue_throughput.1, - ); + println!("Lazy deletion weight per key: {weight_per_key}"); + println!("Lazy deletion throughput per block: {key_budget}"); } #[cfg(not(feature = "std"))] Err("Run this bench with a native runtime in order to see the schedule.")?; @@ -3144,7 +3171,7 @@ benchmarks! { None, data, false, - Determinism::Deterministic, + Determinism::Enforced, ) .result?; } @@ -3193,7 +3220,7 @@ benchmarks! { None, data, false, - Determinism::Deterministic, + Determinism::Enforced, ) .result?; } diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index 56d41a759391b..6d1f3df90f23e 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -66,8 +66,8 @@ //! //! # Example //! -//! The ink! repository maintains an -//! [end-to-end example](https://github.com/paritytech/ink/tree/master/examples/rand-extension) +//! The ink-examples repository maintains an +//! [end-to-end example](https://github.com/paritytech/ink-examples/tree/main/rand-extension) //! on how to use a chain extension in order to provide new features to ink! contracts. use crate::{ @@ -138,7 +138,7 @@ pub trait ChainExtension { /// /// # Note /// -/// Currently, we support tuples of up to ten registred chain extensions. If more chain extensions +/// Currently, we support tuples of up to ten registered chain extensions. If more chain extensions /// are needed consider opening an issue. pub trait RegisteredChainExtension: ChainExtension { /// The extensions globally unique identifier. 
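For readers unfamiliar with the tuple registration that the chain-extension doc comment above refers to, a minimal sketch follows. It is illustrative only and not part of this diff: the extension names (`RandExt`, `PriceExt`), their bodies, and the exact shape of the `call` signature are assumptions about `pallet_contracts::chain_extension`; only the `ChainExtension`/`RegisteredChainExtension` traits, the unique `ID` constant, and the "tuple of up to ten extensions" rule are taken from the text above.

use pallet_contracts::chain_extension::{
	ChainExtension, Environment, Ext, InitState, RegisteredChainExtension, RetVal,
};
use sp_runtime::DispatchError;

/// Hypothetical extension #1 (e.g. a randomness source for contracts).
#[derive(Default)]
pub struct RandExt;

impl<T: pallet_contracts::Config> ChainExtension<T> for RandExt {
	fn call<E: Ext<T = T>>(&mut self, env: Environment<E, InitState>) -> Result<RetVal, DispatchError> {
		// A real extension would dispatch on `env.func_id()`, read its input
		// buffer and charge weight; this sketch only signals "success, status 0".
		let _func_id = env.func_id();
		Ok(RetVal::Converging(0))
	}
}

impl<T: pallet_contracts::Config> RegisteredChainExtension<T> for RandExt {
	// Globally unique identifier contracts use to address this extension.
	const ID: u16 = 1;
}

/// Hypothetical extension #2 with its own identifier.
#[derive(Default)]
pub struct PriceExt;

impl<T: pallet_contracts::Config> ChainExtension<T> for PriceExt {
	fn call<E: Ext<T = T>>(&mut self, env: Environment<E, InitState>) -> Result<RetVal, DispatchError> {
		let _func_id = env.func_id();
		Ok(RetVal::Converging(0))
	}
}

impl<T: pallet_contracts::Config> RegisteredChainExtension<T> for PriceExt {
	const ID: u16 = 2;
}

// Both extensions would then be registered as a tuple in the runtime's
// pallet-contracts configuration (up to ten such extensions are supported):
//
//     type ChainExtension = (RandExt, PriceExt);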
diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 37be12d50d689..d0a650e0a0ef6 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -19,23 +19,31 @@ use crate::{ gas::GasMeter, storage::{self, DepositAccount, WriteOutcome}, BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, DebugBufferVec, Determinism, Error, - Event, Nonce, Pallet as Contracts, Schedule, System, + Event, Nonce, Pallet as Contracts, Schedule, System, LOG_TARGET, }; use frame_support::{ crypto::ecdsa::ECDSAExt, - dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable}, + dispatch::{ + fmt::Debug, DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable, + }, storage::{with_transaction, TransactionOutcome}, - traits::{Contains, Currency, ExistenceRequirement, OriginTrait, Randomness, Time}, + traits::{ + tokens::{Fortitude::Polite, Preservation::Expendable}, + Contains, Currency, ExistenceRequirement, OriginTrait, Randomness, Time, + }, weights::Weight, Blake2_128Concat, BoundedVec, StorageHasher, }; use frame_system::RawOrigin; use pallet_contracts_primitives::ExecReturnValue; use smallvec::{Array, SmallVec}; -use sp_core::ecdsa::Public as ECDSAPublic; +use sp_core::{ + ecdsa::Public as ECDSAPublic, + sr25519::{Public as SR25519Public, Signature as SR25519Signature}, +}; use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; -use sp_runtime::traits::{Convert, Hash}; -use sp_std::{marker::PhantomData, mem, prelude::*}; +use sp_runtime::traits::{Convert, Hash, Zero}; +use sp_std::{marker::PhantomData, mem, prelude::*, vec::Vec}; pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; @@ -46,32 +54,39 @@ pub type ExecResult = Result; /// A type that represents a topic of an event. At the moment a hash is used. pub type TopicOf = ::Hash; -/// Type for fix sized storage key. -pub type FixSizedKey = [u8; 32]; - /// Type for variable sized storage key. Used for transparent hashing. -pub type VarSizedKey = BoundedVec::MaxStorageKeyLen>; - -/// Trait for hashing storage keys. -pub trait StorageKey -where - T: Config, -{ - fn hash(&self) -> Vec; +type VarSizedKey = BoundedVec::MaxStorageKeyLen>; + +/// Combined key type for both fixed and variable sized storage keys. +pub enum Key { + /// Variant for fixed sized keys. + Fix([u8; 32]), + /// Variant for variable sized keys. + Var(VarSizedKey), } -impl StorageKey for FixSizedKey { - fn hash(&self) -> Vec { - blake2_256(self.as_slice()).to_vec() +impl Key { + /// Copies self into a new vec. + pub fn to_vec(&self) -> Vec { + match self { + Key::Fix(v) => v.to_vec(), + Key::Var(v) => v.to_vec(), + } } -} -impl StorageKey for VarSizedKey -where - T: Config, -{ - fn hash(&self) -> Vec { - Blake2_128Concat::hash(self.as_slice()) + pub fn hash(&self) -> Vec { + match self { + Key::Fix(v) => blake2_256(v.as_slice()).to_vec(), + Key::Var(v) => Blake2_128Concat::hash(v.as_slice()), + } + } + + pub fn try_from_fix(v: Vec) -> Result> { + <[u8; 32]>::try_from(v).map(Self::Fix) + } + + pub fn try_from_var(v: Vec) -> Result> { + VarSizedKey::::try_from(v).map(Self::Var) } } @@ -84,14 +99,14 @@ where pub enum ErrorOrigin { /// Caller error origin. /// - /// The error happened in the current exeuction context rather than in the one + /// The error happened in the current execution context rather than in the one /// of the contract that is called into. Caller, /// The error happened during execution of the called contract. 
Callee, } -/// Error returned by contract exection. +/// Error returned by contract execution. #[cfg_attr(test, derive(Debug, PartialEq))] pub struct ExecError { /// The reason why the execution failed. @@ -124,6 +139,7 @@ pub trait Ext: sealing::Sealed { fn call( &mut self, gas_limit: Weight, + deposit_limit: BalanceOf, to: AccountIdOf, value: BalanceOf, input_data: Vec, @@ -147,6 +163,7 @@ pub trait Ext: sealing::Sealed { fn instantiate( &mut self, gas_limit: Weight, + deposit_limit: BalanceOf, code: CodeHash, value: BalanceOf, input_data: Vec, @@ -169,44 +186,19 @@ pub trait Ext: sealing::Sealed { /// /// Returns `None` if the `key` wasn't previously set by `set_storage` or /// was deleted. - fn get_storage(&mut self, key: &FixSizedKey) -> Option>; - - /// This is a variation of `get_storage()` to be used with transparent hashing. - /// These two will be merged into a single function after some refactoring is done. - /// Returns the storage entry of the executing account by the given `key`. - /// - /// Returns `None` if the `key` wasn't previously set by `set_storage` or - /// was deleted. - fn get_storage_transparent(&mut self, key: &VarSizedKey) -> Option>; - - /// Returns `Some(len)` (in bytes) if a storage item exists at `key`. - /// - /// Returns `None` if the `key` wasn't previously set by `set_storage` or - /// was deleted. - fn get_storage_size(&mut self, key: &FixSizedKey) -> Option; + fn get_storage(&mut self, key: &Key) -> Option>; - /// This is the variation of `get_storage_size()` to be used with transparent hashing. - /// These two will be merged into a single function after some refactoring is done. /// Returns `Some(len)` (in bytes) if a storage item exists at `key`. /// /// Returns `None` if the `key` wasn't previously set by `set_storage` or /// was deleted. - fn get_storage_size_transparent(&mut self, key: &VarSizedKey) -> Option; + fn get_storage_size(&mut self, key: &Key) -> Option; /// Sets the storage entry by the given key to the specified value. If `value` is `None` then /// the storage entry is deleted. fn set_storage( &mut self, - key: &FixSizedKey, - value: Option>, - take_old: bool, - ) -> Result; - - /// This is the variation of `set_storage()` to be used with transparent hashing. - /// These two will be merged into a single function after some refactoring is done. - fn set_storage_transparent( - &mut self, - key: &VarSizedKey, + key: &Key, value: Option>, take_old: bool, ) -> Result; @@ -287,6 +279,9 @@ pub trait Ext: sealing::Sealed { /// Recovers ECDSA compressed public key based on signature and message hash. fn ecdsa_recover(&self, signature: &[u8; 65], message_hash: &[u8; 32]) -> Result<[u8; 33], ()>; + /// Verify a sr25519 signature. + fn sr25519_verify(&self, signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> bool; + /// Returns Ethereum address from the ECDSA compressed public key. fn ecdsa_to_eth_address(&self, pk: &[u8; 33]) -> Result<[u8; 20], ()>; @@ -680,7 +675,7 @@ where schedule, value, debug_message, - Determinism::Deterministic, + Determinism::Enforced, )?; let account_id = stack.top_frame().account_id.clone(); stack.run(executable, input_data).map(|ret| (account_id, ret)) @@ -701,8 +696,9 @@ where args, value, gas_meter, - storage_meter, Weight::zero(), + storage_meter, + BalanceOf::::zero(), schedule, determinism, )?; @@ -728,12 +724,13 @@ where /// /// This does not take `self` because when constructing the first frame `self` is /// not initialized, yet. 
- fn new_frame( + fn new_frame( frame_args: FrameArgs, value_transferred: BalanceOf, gas_meter: &mut GasMeter, - storage_meter: &mut storage::meter::GenericMeter, gas_limit: Weight, + storage_meter: &mut storage::meter::GenericMeter, + deposit_limit: BalanceOf, schedule: &Schedule, determinism: Determinism, ) -> Result<(Frame, E, Option), ExecError> { @@ -774,10 +771,10 @@ where }, }; - // `AllowIndeterminism` will only be ever set in case of off-chain execution. + // `Relaxed` will only be ever set in case of off-chain execution. // Instantiations are never allowed even when executing off-chain. if !(executable.is_deterministic() || - (matches!(determinism, Determinism::AllowIndeterminism) && + (matches!(determinism, Determinism::Relaxed) && matches!(entry_point, ExportedFunction::Call))) { return Err(Error::::Indeterministic.into()) @@ -790,7 +787,7 @@ where account_id, entry_point, nested_gas: gas_meter.nested(gas_limit)?, - nested_storage: storage_meter.nested(), + nested_storage: storage_meter.nested(deposit_limit), allows_reentry: true, }; @@ -803,6 +800,7 @@ where frame_args: FrameArgs, value_transferred: BalanceOf, gas_limit: Weight, + deposit_limit: BalanceOf, ) -> Result { if self.frames.len() == T::CallStack::size() { return Err(Error::::MaxCallDepthReached.into()) @@ -826,8 +824,9 @@ where frame_args, value_transferred, nested_gas, - nested_storage, gas_limit, + nested_storage, + deposit_limit, self.schedule, self.determinism, )?; @@ -868,8 +867,10 @@ where return Ok(output) } - // Storage limit is enforced as late as possible (when the last frame returns) so that - // the ordering of storage accesses does not matter. + // Storage limit is normally enforced as late as possible (when the last frame returns) + // so that the ordering of storage accesses does not matter. + // (However, if a special limit was set for a sub-call, it should be enforced right + // after the sub-call returned. See below for this case of enforcement). if self.frames.is_empty() { let frame = &mut self.first_frame; frame.contract_info.load(&frame.account_id); @@ -878,7 +879,7 @@ where } let frame = self.top_frame(); - let account_id = &frame.account_id; + let account_id = &frame.account_id.clone(); match (entry_point, delegated_code_hash) { (ExportedFunction::Constructor, _) => { // It is not allowed to terminate a contract inside its constructor. @@ -886,6 +887,13 @@ where return Err(Error::::TerminatedInConstructor.into()) } + // If a special limit was set for the sub-call, we enforce it here. + // This is needed because contract constructor might write to storage. + // The sub-call will be rolled back in case the limit is exhausted. + let frame = self.top_frame_mut(); + let contract = frame.contract_info.as_contract(); + frame.nested_storage.enforce_subcall_limit(contract)?; + // Deposit an instantiation event. Contracts::::deposit_event( vec![T::Hashing::hash_of(self.caller()), T::Hashing::hash_of(account_id)], @@ -902,9 +910,15 @@ where ); }, (ExportedFunction::Call, None) => { + // If a special limit was set for the sub-call, we enforce it here. + // The sub-call will be rolled back in case the limit is exhausted. 
+ let frame = self.top_frame_mut(); + let contract = frame.contract_info.as_contract(); + frame.nested_storage.enforce_subcall_limit(contract)?; + let caller = self.caller(); Contracts::::deposit_event( - vec![T::Hashing::hash_of(caller), T::Hashing::hash_of(account_id)], + vec![T::Hashing::hash_of(caller), T::Hashing::hash_of(&account_id)], Event::Called { caller: caller.clone(), contract: account_id.clone() }, ); }, @@ -1011,7 +1025,7 @@ where } else { if let Some((msg, false)) = self.debug_message.as_ref().map(|m| (m, m.is_empty())) { log::debug!( - target: "runtime::contracts", + target: LOG_TARGET, "Execution finished with debug buffer: {}", core::str::from_utf8(msg).unwrap_or(""), ); @@ -1116,6 +1130,7 @@ where fn call( &mut self, gas_limit: Weight, + deposit_limit: BalanceOf, to: T::AccountId, value: BalanceOf, input_data: Vec, @@ -1144,6 +1159,7 @@ where FrameArgs::Call { dest: to, cached_info, delegated_call: None }, value, gas_limit, + deposit_limit, )?; self.run(executable, input_data) }; @@ -1175,6 +1191,7 @@ where }, value, Weight::zero(), + BalanceOf::::zero(), )?; self.run(executable, input_data) } @@ -1182,6 +1199,7 @@ where fn instantiate( &mut self, gas_limit: Weight, + deposit_limit: BalanceOf, code_hash: CodeHash, value: BalanceOf, input_data: Vec, @@ -1199,6 +1217,7 @@ where }, value, gas_limit, + deposit_limit, )?; let account_id = self.top_frame().account_id.clone(); self.run(executable, input_data).map(|ret| (account_id, ret)) @@ -1216,10 +1235,10 @@ where T::Currency::transfer( &frame.account_id, beneficiary, - T::Currency::reducible_balance(&frame.account_id, false), + T::Currency::reducible_balance(&frame.account_id, Expendable, Polite), ExistenceRequirement::AllowDeath, )?; - info.queue_trie_for_deletion()?; + info.queue_trie_for_deletion(); ContractInfoOf::::remove(&frame.account_id); E::remove_user(info.code_hash); Contracts::::deposit_event( @@ -1236,46 +1255,23 @@ where Self::transfer(ExistenceRequirement::KeepAlive, &self.top_frame().account_id, to, value) } - fn get_storage(&mut self, key: &FixSizedKey) -> Option> { - self.top_frame_mut().contract_info().read(key) - } - - fn get_storage_transparent(&mut self, key: &VarSizedKey) -> Option> { + fn get_storage(&mut self, key: &Key) -> Option> { self.top_frame_mut().contract_info().read(key) } - fn get_storage_size(&mut self, key: &FixSizedKey) -> Option { - self.top_frame_mut().contract_info().size(key) - } - - fn get_storage_size_transparent(&mut self, key: &VarSizedKey) -> Option { - self.top_frame_mut().contract_info().size(key) + fn get_storage_size(&mut self, key: &Key) -> Option { + self.top_frame_mut().contract_info().size(key.into()) } fn set_storage( &mut self, - key: &FixSizedKey, - value: Option>, - take_old: bool, - ) -> Result { - let frame = self.top_frame_mut(); - frame.contract_info.get(&frame.account_id).write( - key, - value, - Some(&mut frame.nested_storage), - take_old, - ) - } - - fn set_storage_transparent( - &mut self, - key: &VarSizedKey, + key: &Key, value: Option>, take_old: bool, ) -> Result { let frame = self.top_frame_mut(); frame.contract_info.get(&frame.account_id).write( - key, + key.into(), value, Some(&mut frame.nested_storage), take_old, @@ -1363,7 +1359,7 @@ where .try_extend(&mut msg.bytes()) .map_err(|_| { log::debug!( - target: "runtime::contracts", + target: LOG_TARGET, "Debug buffer (of {} bytes) exhausted!", DebugBufferVec::::bound(), ) @@ -1385,6 +1381,14 @@ where secp256k1_ecdsa_recover_compressed(signature, message_hash).map_err(|_| ()) } + fn sr25519_verify(&self, 
signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> bool { + sp_io::crypto::sr25519_verify( + &SR25519Signature(*signature), + message, + &SR25519Public(*pub_key), + ) + } + fn ecdsa_to_eth_address(&self, pk: &[u8; 33]) -> Result<[u8; 20], ()> { ECDSAPublic(*pk).to_eth_address() } @@ -1645,7 +1649,7 @@ mod tests { value, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ), Ok(_) ); @@ -1688,7 +1692,7 @@ mod tests { place_contract(&dest, success_ch); set_balance(&origin, 100); let balance = get_balance(&dest); - let mut storage_meter = storage::meter::Meter::new(&origin, Some(0), 55).unwrap(); + let mut storage_meter = storage::meter::Meter::new(&origin, Some(0), value).unwrap(); let _ = MockStack::run_call( origin.clone(), @@ -1699,7 +1703,7 @@ mod tests { value, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ) .unwrap(); @@ -1741,7 +1745,7 @@ mod tests { value, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ) .unwrap(); @@ -1777,7 +1781,7 @@ mod tests { 55, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ) .unwrap(); @@ -1829,7 +1833,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); let output = result.unwrap(); @@ -1862,7 +1866,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); let output = result.unwrap(); @@ -1893,7 +1897,7 @@ mod tests { 0, vec![1, 2, 3, 4], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); @@ -1941,7 +1945,7 @@ mod tests { let value = Default::default(); let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. - let r = ctx.ext.call(Weight::zero(), BOB, 0, vec![], true); + let r = ctx.ext.call(Weight::zero(), BalanceOf::::zero(), BOB, 0, vec![], true); ReachedBottom::mutate(|reached_bottom| { if !*reached_bottom { @@ -1973,7 +1977,7 @@ mod tests { value, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); @@ -1995,7 +1999,11 @@ mod tests { WitnessedCallerBob::mutate(|caller| *caller = Some(ctx.ext.caller().clone())); // Call into CHARLIE contract. 
- assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!( + ctx.ext + .call(Weight::zero(), BalanceOf::::zero(), CHARLIE, 0, vec![], true), + Ok(_) + ); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -2019,7 +2027,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); @@ -2053,7 +2061,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); @@ -2083,7 +2091,7 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); @@ -2111,7 +2119,7 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); @@ -2129,7 +2137,8 @@ mod tests { // ALICE is the origin of the call stack assert!(ctx.ext.caller_is_origin()); // BOB calls CHARLIE - ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true) + ctx.ext + .call(Weight::zero(), BalanceOf::::zero(), CHARLIE, 0, vec![], true) }); ExtBuilder::default().build().execute_with(|| { @@ -2147,7 +2156,7 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); @@ -2160,7 +2169,11 @@ mod tests { assert_eq!(*ctx.ext.address(), BOB); // Call into charlie contract. - assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!( + ctx.ext + .call(Weight::zero(), BalanceOf::::zero(), CHARLIE, 0, vec![], true), + Ok(_) + ); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -2183,7 +2196,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); @@ -2311,6 +2324,7 @@ mod tests { .ext .instantiate( Weight::zero(), + BalanceOf::::zero(), dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2342,7 +2356,7 @@ mod tests { min_balance * 10, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ), Ok(_) ); @@ -2375,6 +2389,7 @@ mod tests { assert_matches!( ctx.ext.instantiate( Weight::zero(), + BalanceOf::::zero(), dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2407,7 +2422,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ), Ok(_) ); @@ -2455,7 +2470,7 @@ mod tests { #[test] fn in_memory_changes_not_discarded() { // Call stack: BOB -> CHARLIE (trap) -> BOB' (success) - // This tests verfies some edge case of the contract info cache: + // This tests verifies some edge case of the contract info cache: // We change some value in our contract info before calling into a contract // that calls into ourself. This triggers a case where BOBs contract info // is written to storage and invalidated by the successful execution of BOB'. 
@@ -2467,13 +2482,26 @@ mod tests { let info = ctx.ext.contract_info(); assert_eq!(info.storage_byte_deposit, 0); info.storage_byte_deposit = 42; - assert_eq!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), exec_trapped()); + assert_eq!( + ctx.ext.call( + Weight::zero(), + BalanceOf::::zero(), + CHARLIE, + 0, + vec![], + true + ), + exec_trapped() + ); assert_eq!(ctx.ext.contract_info().storage_byte_deposit, 42); } exec_success() }); let code_charlie = MockLoader::insert(Call, |ctx, _| { - assert!(ctx.ext.call(Weight::zero(), BOB, 0, vec![99], true).is_ok()); + assert!(ctx + .ext + .call(Weight::zero(), BalanceOf::::zero(), BOB, 0, vec![99], true) + .is_ok()); exec_trapped() }); @@ -2493,7 +2521,7 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); @@ -2503,7 +2531,7 @@ mod tests { fn recursive_call_during_constructor_fails() { let code = MockLoader::insert(Constructor, |ctx, _| { assert_matches!( - ctx.ext.call(Weight::zero(), ctx.ext.address().clone(), 0, vec![], true), + ctx.ext.call(Weight::zero(), BalanceOf::::zero(), ctx.ext.address().clone(), 0, vec![], true), Err(ExecError{error, ..}) if error == >::ContractNotFound.into() ); exec_success() @@ -2559,7 +2587,7 @@ mod tests { 0, vec![], Some(&mut debug_buffer), - Determinism::Deterministic, + Determinism::Enforced, ) .unwrap(); }); @@ -2593,7 +2621,7 @@ mod tests { 0, vec![], Some(&mut debug_buffer), - Determinism::Deterministic, + Determinism::Enforced, ); assert!(result.is_err()); }); @@ -2630,7 +2658,7 @@ mod tests { 0, vec![], Some(&mut debug_buf_after), - Determinism::Deterministic, + Determinism::Enforced, ) .unwrap(); assert_eq!(debug_buf_before, debug_buf_after); @@ -2642,7 +2670,7 @@ mod tests { // call the contract passed as input with disabled reentry let code_bob = MockLoader::insert(Call, |ctx, _| { let dest = Decode::decode(&mut ctx.input_data.as_ref()).unwrap(); - ctx.ext.call(Weight::zero(), dest, 0, vec![], false) + ctx.ext.call(Weight::zero(), BalanceOf::::zero(), dest, 0, vec![], false) }); let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); @@ -2663,7 +2691,7 @@ mod tests { 0, CHARLIE.encode(), None, - Determinism::Deterministic + Determinism::Enforced )); // Calling into oneself fails @@ -2677,7 +2705,7 @@ mod tests { 0, BOB.encode(), None, - Determinism::Deterministic + Determinism::Enforced ) .map_err(|e| e.error), >::ReentranceDenied, @@ -2689,15 +2717,17 @@ mod tests { fn call_deny_reentry() { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { - ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], false) + ctx.ext + .call(Weight::zero(), BalanceOf::::zero(), CHARLIE, 0, vec![], false) } else { exec_success() } }); // call BOB with input set to '1' - let code_charlie = - MockLoader::insert(Call, |ctx, _| ctx.ext.call(Weight::zero(), BOB, 0, vec![1], true)); + let code_charlie = MockLoader::insert(Call, |ctx, _| { + ctx.ext.call(Weight::zero(), BalanceOf::::zero(), BOB, 0, vec![1], true) + }); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -2716,7 +2746,7 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic + Determinism::Enforced ) .map_err(|e| e.error), >::ReentranceDenied, @@ -2751,7 +2781,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ) .unwrap(); @@ -2792,8 +2822,10 @@ mod tests { RuntimeCall::System(SysCall::remark_with_event { remark: b"Hello".to_vec() }); // transfers are disallowed by the 
`TestFiler` (see below) - let forbidden_call = - RuntimeCall::Balances(BalanceCall::transfer { dest: CHARLIE, value: 22 }); + let forbidden_call = RuntimeCall::Balances(BalanceCall::transfer_allow_death { + dest: CHARLIE, + value: 22, + }); // simple cases: direct call assert_err!( @@ -2813,7 +2845,7 @@ mod tests { }); TestFilter::set_filter(|call| match call { - RuntimeCall::Balances(pallet_balances::Call::transfer { .. }) => false, + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) => false, _ => true, }); @@ -2834,7 +2866,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ) .unwrap(); @@ -2884,6 +2916,7 @@ mod tests { ctx.ext .instantiate( Weight::zero(), + BalanceOf::::zero(), fail_code, ctx.ext.minimum_balance() * 100, vec![], @@ -2897,6 +2930,7 @@ mod tests { .ext .instantiate( Weight::zero(), + BalanceOf::::zero(), success_code, ctx.ext.minimum_balance() * 100, vec![], @@ -2905,7 +2939,9 @@ mod tests { .unwrap(); // a plain call should not influence the account counter - ctx.ext.call(Weight::zero(), account_id, 0, vec![], false).unwrap(); + ctx.ext + .call(Weight::zero(), BalanceOf::::zero(), account_id, 0, vec![], false) + .unwrap(); exec_success() }); @@ -2986,35 +3022,41 @@ mod tests { let code_hash = MockLoader::insert(Call, |ctx, _| { // Write assert_eq!( - ctx.ext.set_storage(&[1; 32], Some(vec![1, 2, 3]), false), + ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![4, 5, 6]), true), + Ok(WriteOutcome::New) + ); + assert_eq!(ctx.ext.set_storage(&Key::Fix([3; 32]), None, false), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&Key::Fix([4; 32]), None, true), Ok(WriteOutcome::New)); + assert_eq!( + ctx.ext.set_storage(&Key::Fix([5; 32]), Some(vec![]), false), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage(&[2; 32], Some(vec![4, 5, 6]), true), + ctx.ext.set_storage(&Key::Fix([6; 32]), Some(vec![]), true), Ok(WriteOutcome::New) ); - assert_eq!(ctx.ext.set_storage(&[3; 32], None, false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage(&[4; 32], None, true), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage(&[5; 32], Some(vec![]), false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage(&[6; 32], Some(vec![]), true), Ok(WriteOutcome::New)); // Overwrite assert_eq!( - ctx.ext.set_storage(&[1; 32], Some(vec![42]), false), + ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![42]), false), Ok(WriteOutcome::Overwritten(3)) ); assert_eq!( - ctx.ext.set_storage(&[2; 32], Some(vec![48]), true), + ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![48]), true), Ok(WriteOutcome::Taken(vec![4, 5, 6])) ); - assert_eq!(ctx.ext.set_storage(&[3; 32], None, false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage(&[4; 32], None, true), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&Key::Fix([3; 32]), None, false), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&Key::Fix([4; 32]), None, true), Ok(WriteOutcome::New)); assert_eq!( - ctx.ext.set_storage(&[5; 32], Some(vec![]), false), + ctx.ext.set_storage(&Key::Fix([5; 32]), Some(vec![]), false), Ok(WriteOutcome::Overwritten(0)) ); assert_eq!( - ctx.ext.set_storage(&[6; 32], Some(vec![]), true), + ctx.ext.set_storage(&Key::Fix([6; 32]), Some(vec![]), true), Ok(WriteOutcome::Taken(vec![])) ); @@ -3037,58 +3079,58 @@ mod tests { 0, vec![], None, - Determinism::Deterministic + Determinism::Enforced )); }); } 
#[test] - fn set_storage_transparent_works() { + fn set_storage_varsized_key_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { // Write assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([1; 64].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([1; 64].to_vec()).unwrap(), Some(vec![1, 2, 3]), false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([2; 19].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([2; 19].to_vec()).unwrap(), Some(vec![4, 5, 6]), true ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([3; 19].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([3; 19].to_vec()).unwrap(), None, false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([4; 64].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([4; 64].to_vec()).unwrap(), None, true ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([5; 30].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([5; 30].to_vec()).unwrap(), Some(vec![]), false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([6; 128].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([6; 128].to_vec()).unwrap(), Some(vec![]), true ), @@ -3097,48 +3139,48 @@ mod tests { // Overwrite assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([1; 64].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([1; 64].to_vec()).unwrap(), Some(vec![42, 43, 44]), false ), Ok(WriteOutcome::Overwritten(3)) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([2; 19].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([2; 19].to_vec()).unwrap(), Some(vec![48]), true ), Ok(WriteOutcome::Taken(vec![4, 5, 6])) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([3; 19].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([3; 19].to_vec()).unwrap(), None, false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([4; 64].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([4; 64].to_vec()).unwrap(), None, true ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([5; 30].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([5; 30].to_vec()).unwrap(), Some(vec![]), false ), Ok(WriteOutcome::Overwritten(0)) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([6; 128].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([6; 128].to_vec()).unwrap(), Some(vec![]), true ), @@ -3164,7 +3206,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic + Determinism::Enforced )); }); } @@ -3173,13 +3215,16 @@ mod tests { fn get_storage_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { assert_eq!( - ctx.ext.set_storage(&[1; 32], Some(vec![1, 2, 3]), false), + ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![]), false), Ok(WriteOutcome::New) ); - assert_eq!(ctx.ext.set_storage(&[2; 32], Some(vec![]), false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.get_storage(&[1; 32]), Some(vec![1, 2, 3])); - 
assert_eq!(ctx.ext.get_storage(&[2; 32]), Some(vec![])); - assert_eq!(ctx.ext.get_storage(&[3; 32]), None); + assert_eq!(ctx.ext.get_storage(&Key::Fix([1; 32])), Some(vec![1, 2, 3])); + assert_eq!(ctx.ext.get_storage(&Key::Fix([2; 32])), Some(vec![])); + assert_eq!(ctx.ext.get_storage(&Key::Fix([3; 32])), None); exec_success() }); @@ -3200,7 +3245,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic + Determinism::Enforced )); }); } @@ -3209,13 +3254,16 @@ mod tests { fn get_storage_size_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { assert_eq!( - ctx.ext.set_storage(&[1; 32], Some(vec![1, 2, 3]), false), + ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![]), false), Ok(WriteOutcome::New) ); - assert_eq!(ctx.ext.set_storage(&[2; 32], Some(vec![]), false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.get_storage_size(&[1; 32]), Some(3)); - assert_eq!(ctx.ext.get_storage_size(&[2; 32]), Some(0)); - assert_eq!(ctx.ext.get_storage_size(&[3; 32]), None); + assert_eq!(ctx.ext.get_storage_size(&Key::Fix([1; 32])), Some(3)); + assert_eq!(ctx.ext.get_storage_size(&Key::Fix([2; 32])), Some(0)); + assert_eq!(ctx.ext.get_storage_size(&Key::Fix([3; 32])), None); exec_success() }); @@ -3236,46 +3284,40 @@ mod tests { 0, vec![], None, - Determinism::Deterministic + Determinism::Enforced )); }); } #[test] - fn get_storage_transparent_works() { + fn get_storage_varsized_key_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([1; 19].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([1; 19].to_vec()).unwrap(), Some(vec![1, 2, 3]), false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([2; 16].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([2; 16].to_vec()).unwrap(), Some(vec![]), false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.get_storage_transparent( - &VarSizedKey::::try_from([1; 19].to_vec()).unwrap() - ), + ctx.ext.get_storage(&Key::::try_from_var([1; 19].to_vec()).unwrap()), Some(vec![1, 2, 3]) ); assert_eq!( - ctx.ext.get_storage_transparent( - &VarSizedKey::::try_from([2; 16].to_vec()).unwrap() - ), + ctx.ext.get_storage(&Key::::try_from_var([2; 16].to_vec()).unwrap()), Some(vec![]) ); assert_eq!( - ctx.ext.get_storage_transparent( - &VarSizedKey::::try_from([3; 8].to_vec()).unwrap() - ), + ctx.ext.get_storage(&Key::::try_from_var([3; 8].to_vec()).unwrap()), None ); @@ -3298,46 +3340,40 @@ mod tests { 0, vec![], None, - Determinism::Deterministic + Determinism::Enforced )); }); } #[test] - fn get_storage_size_transparent_works() { + fn get_storage_size_varsized_key_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([1; 19].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([1; 19].to_vec()).unwrap(), Some(vec![1, 2, 3]), false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from([2; 16].to_vec()).unwrap(), + ctx.ext.set_storage( + &Key::::try_from_var([2; 16].to_vec()).unwrap(), Some(vec![]), false ), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.get_storage_size_transparent( - &VarSizedKey::::try_from([1; 19].to_vec()).unwrap() - ), + ctx.ext.get_storage_size(&Key::::try_from_var([1; 19].to_vec()).unwrap()), Some(3) ); assert_eq!( - 
ctx.ext.get_storage_size_transparent( - &VarSizedKey::::try_from([2; 16].to_vec()).unwrap() - ), + ctx.ext.get_storage_size(&Key::::try_from_var([2; 16].to_vec()).unwrap()), Some(0) ); assert_eq!( - ctx.ext.get_storage_size_transparent( - &VarSizedKey::::try_from([3; 8].to_vec()).unwrap() - ), + ctx.ext.get_storage_size(&Key::::try_from_var([3; 8].to_vec()).unwrap()), None ); @@ -3360,7 +3396,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic + Determinism::Enforced )); }); } @@ -3392,7 +3428,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); @@ -3405,19 +3441,35 @@ mod tests { let code_hash = MockLoader::insert(Call, move |ctx, _| { // It is set to one when this contract was instantiated by `place_contract` assert_eq!(ctx.ext.nonce(), 1); - // Should not change without any instantation in-between + // Should not change without any instantiation in-between assert_eq!(ctx.ext.nonce(), 1); // Should not change with a failed instantiation assert_err!( - ctx.ext.instantiate(Weight::zero(), fail_code, 0, vec![], &[],), + ctx.ext.instantiate( + Weight::zero(), + BalanceOf::::zero(), + fail_code, + 0, + vec![], + &[], + ), ExecError { error: >::ContractTrapped.into(), origin: ErrorOrigin::Callee } ); assert_eq!(ctx.ext.nonce(), 1); - // Successful instantation increments - ctx.ext.instantiate(Weight::zero(), success_code, 0, vec![], &[]).unwrap(); + // Successful instantiation increments + ctx.ext + .instantiate( + Weight::zero(), + BalanceOf::::zero(), + success_code, + 0, + vec![], + &[], + ) + .unwrap(); assert_eq!(ctx.ext.nonce(), 2); exec_success() }); @@ -3438,7 +3490,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic + Determinism::Enforced )); }); } @@ -3468,7 +3520,7 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result, Ok(_)); }); diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 4f5d7ba305fc1..5a82a4a42b4a9 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Contract Pallet +//! # Contracts Pallet //! -//! The Contract module provides functionality for the runtime to deploy and execute WebAssembly +//! The Contracts module provides functionality for the runtime to deploy and execute WebAssembly //! smart-contracts. //! //! - [`Config`] @@ -73,21 +73,20 @@ //! //! ## Usage //! -//! The Contract module is a work in progress. The following examples show how this Contract module +//! The Contracts module is a work in progress. The following examples show how this module //! can be used to instantiate and call contracts. //! -//! * [`ink`](https://github.com/paritytech/ink) is +//! * [`ink!`](https://use.ink) is //! an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing //! WebAssembly based smart contracts in the Rust programming language. 
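Most of the mechanical churn in the exec tests above stems from one signature change: `Ext::call` and `Ext::instantiate` now take an explicit storage deposit limit directly after the gas limit. Judging from the storage meter changes later in this diff, a zero limit means the nested meter derives its allowance from whatever the parent has left, while a non-zero limit is enforced when the sub-call returns and the sub-call is rolled back if it exhausted it. A hedged sketch under those assumptions, reusing the `Test` runtime, the `CHARLIE` account and an `ext` handle from the mock tests:

    // Dedicated cap: the callee may consume at most 1_000 units of storage deposit;
    // the cap is checked once the sub-call returns, independent of the caller's own limit.
    let _capped =
        ext.call(Weight::zero(), BalanceOf::<Test>::from(1_000u32), CHARLIE, 0, vec![], true);
    // Zero keeps the old behaviour: no dedicated limit, the sub-call simply shares
    // whatever deposit allowance remains in the parent meter.
    let _shared =
        ext.call(Weight::zero(), BalanceOf::<Test>::zero(), CHARLIE, 0, vec![], true);

The `instantiate` calls in the nonce test directly above take the same extra argument in the same position.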
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "1024")] -#[macro_use] -mod gas; mod address; mod benchmarking; mod exec; +mod gas; mod migration; mod schedule; mod storage; @@ -100,9 +99,9 @@ pub mod weights; mod tests; use crate::{ - exec::{AccountIdOf, ErrorOrigin, ExecError, Executable, Stack as ExecStack}, + exec::{AccountIdOf, ErrorOrigin, ExecError, Executable, Key, Stack as ExecStack}, gas::GasMeter, - storage::{meter::Meter as StorageMeter, ContractInfo, DeletedContract}, + storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{OwnerInfo, PrefabWasmModule, TryInstantiate}, weights::WeightInfo, }; @@ -115,7 +114,7 @@ use frame_support::{ tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time, }, - weights::{OldWeight, Weight}, + weights::Weight, BoundedVec, WeakBoundedVec, }; use frame_system::Pallet as System; @@ -131,7 +130,7 @@ use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; pub use crate::{ address::{AddressGenerator, DefaultAddressGenerator}, - exec::{Frame, VarSizedKey as StorageKey}, + exec::Frame, migration::Migration, pallet::*, schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, @@ -150,6 +149,12 @@ type RelaxedCodeVec = WeakBoundedVec::MaxCodeLen>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type DebugBufferVec = BoundedVec::MaxDebugBufferLen>; +/// The old weight type. +/// +/// This is a copy of the [`frame_support::weights::OldWeight`] type since the contracts pallet +/// needs to support it indefinitely. +type OldWeight = u64; + /// Used as a sentinel value when reading and writing contract memory. /// /// It is usually used to signal `None` to a contract when only a primitive is allowed @@ -158,6 +163,13 @@ type DebugBufferVec = BoundedVec::MaxDebugBufferLen>; /// that this value makes sense for a memory location or length. const SENTINEL: u32 = u32::MAX; +/// The target that is used for the log output emitted by this crate. +/// +/// Hence you can use this target to selectively increase the log level for this crate. +/// +/// Example: `RUST_LOG=runtime::contracts=debug my_code --dev` +const LOG_TARGET: &str = "runtime::contracts"; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -187,7 +199,7 @@ pub mod pallet { type Randomness: Randomness; /// The currency in which fees are paid and contract balances are held. - type Currency: ReservableCurrency + type Currency: ReservableCurrency // TODO: Move to fungible traits + Inspect>; /// The overarching event type. @@ -245,33 +257,6 @@ pub mod pallet { /// memory usage of your runtime. type CallStack: Array>; - /// The maximum number of contracts that can be pending for deletion. - /// - /// When a contract is deleted by calling `seal_terminate` it becomes inaccessible - /// immediately, but the deletion of the storage items it has accumulated is performed - /// later. The contract is put into the deletion queue. This defines how many - /// contracts can be queued up at the same time. If that limit is reached `seal_terminate` - /// will fail. The action must be retried in a later block in that case. - /// - /// The reasons for limiting the queue depth are: - /// - /// 1. The queue is in storage in order to be persistent between blocks. We want to limit - /// the amount of storage that can be consumed. - /// 2. The queue is stored in a vector and needs to be decoded as a whole when reading - /// it at the end of each block. 
Longer queues take more weight to decode and hence - /// limit the amount of items that can be deleted per block. - #[pallet::constant] - type DeletionQueueDepth: Get; - - /// The maximum amount of weight that can be consumed per block for lazy trie removal. - /// - /// The amount of weight that is dedicated per block to work on the deletion queue. Larger - /// values allow more trie keys to be deleted in each block but reduce the amount of - /// weight that is left for transactions. See [`Self::DeletionQueueDepth`] for more - /// information about the deletion queue. - #[pallet::constant] - type DeletionWeightLimit: Get; - /// The amount of balance a caller has to pay for each byte of storage. /// /// # Note @@ -280,6 +265,10 @@ pub mod pallet { #[pallet::constant] type DepositPerByte: Get>; + /// Fallback value to limit the storage deposit if it's not being set by the caller. + #[pallet::constant] + type DefaultDepositLimit: Get>; + /// The amount of balance a caller has to pay for each storage item. /// /// # Note @@ -329,28 +318,9 @@ pub mod pallet { .saturating_add(T::WeightInfo::on_process_deletion_queue_batch()) } - fn on_initialize(_block: T::BlockNumber) -> Weight { - // We want to process the deletion_queue in the on_idle hook. Only in the case - // that the queue length has reached its maximal depth, we process it here. - let max_len = T::DeletionQueueDepth::get() as usize; - let queue_len = >::decode_len().unwrap_or(0); - if queue_len >= max_len { - // We do not want to go above the block limit and rather avoid lazy deletion - // in that case. This should only happen on runtime upgrades. - let weight_limit = T::BlockWeights::get() - .max_block - .saturating_sub(System::::block_weight().total()) - .min(T::DeletionWeightLimit::get()); - ContractInfo::::process_deletion_queue_batch(weight_limit) - .saturating_add(T::WeightInfo::on_process_deletion_queue_batch()) - } else { - T::WeightInfo::on_process_deletion_queue_batch() - } - } - fn integrity_test() { - // Total runtime memory is expected to have 128Mb upper limit - const MAX_RUNTIME_MEM: u32 = 1024 * 1024 * 128; + // Total runtime memory limit + let max_runtime_mem: u32 = T::Schedule::get().limits.runtime_memory; // Memory limits for a single contract: // Value stack size: 1Mb per contract, default defined in wasmi const MAX_STACK_SIZE: u32 = 1024 * 1024; @@ -384,10 +354,10 @@ pub mod pallet { // This gives us the following formula: // // `(MaxCodeLen * 18 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth < - // MAX_RUNTIME_MEM/2` + // max_runtime_mem/2` // // Hence the upper limit for the `MaxCodeLen` can be defined as follows: - let code_len_limit = MAX_RUNTIME_MEM + let code_len_limit = max_runtime_mem .saturating_div(2) .saturating_div(max_call_depth) .saturating_sub(max_heap_size) @@ -403,7 +373,7 @@ pub mod pallet { T::MaxCodeLen::get(), ); - // Debug buffer should at least be large enough to accomodate a simple error message + // Debug buffer should at least be large enough to accommodate a simple error message const MIN_DEBUG_BUF_SIZE: u32 = 256; assert!( T::MaxDebugBufferLen::get() > MIN_DEBUG_BUF_SIZE, @@ -509,9 +479,9 @@ pub mod pallet { /// the in storage version to the current /// [`InstructionWeights::version`](InstructionWeights). /// - /// - `determinism`: If this is set to any other value but [`Determinism::Deterministic`] - /// then the only way to use this code is to delegate call into it from an offchain - /// execution. Set to [`Determinism::Deterministic`] if in doubt. 
+ /// - `determinism`: If this is set to any other value but [`Determinism::Enforced`] then + /// the only way to use this code is to delegate call into it from an offchain execution. + /// Set to [`Determinism::Enforced`] if in doubt. /// /// # Note /// @@ -625,8 +595,8 @@ pub mod pallet { storage_deposit_limit: storage_deposit_limit.map(Into::into), debug_message: None, }; - let mut output = CallInput:: { dest, determinism: Determinism::Deterministic } - .run_guarded(common); + let mut output = + CallInput:: { dest, determinism: Determinism::Enforced }.run_guarded(common); if let Ok(retval) = &output.result { if retval.did_revert() { output.result = Err(>::ContractReverted.into()); @@ -860,12 +830,6 @@ pub mod pallet { /// in this error. Note that this usually shouldn't happen as deploying such contracts /// is rejected. NoChainExtension, - /// Removal of a contract failed because the deletion queue is full. - /// - /// This can happen when calling `seal_terminate`. - /// The queue is filled by deleting contracts and emptied by a fixed amount each block. - /// Trying again during another block is the only way to resolve this issue. - DeletionQueueFull, /// A contract with the same AccountId already exists. DuplicateContract, /// A contract self destructed in its constructor. @@ -891,7 +855,7 @@ pub mod pallet { /// The contract's code was found to be invalid during validation or instrumentation. /// /// The most likely cause of this is that an API was used which is not supported by the - /// node. This hapens if an older node is used with a new version of ink!. Try updating + /// node. This happens if an older node is used with a new version of ink!. Try updating /// your node to the newest available version. /// /// A more detailed error can be found on the node console if debug messages are enabled @@ -949,10 +913,15 @@ pub mod pallet { /// Evicted contracts that await child trie deletion. /// /// Child trie deletion is a heavy operation depending on the amount of storage items - /// stored in said trie. Therefore this operation is performed lazily in `on_initialize`. + /// stored in said trie. Therefore this operation is performed lazily in `on_idle`. + #[pallet::storage] + pub(crate) type DeletionQueue = StorageMap<_, Twox64Concat, u32, TrieId>; + + /// A pair of monotonic counters used to track the latest contract marked for deletion + /// and the latest deleted contract in queue. #[pallet::storage] - pub(crate) type DeletionQueue = - StorageValue<_, BoundedVec, ValueQuery>; + pub(crate) type DeletionQueueCounter = + StorageValue<_, DeletionQueueManager, ValueQuery>; } /// Context of a contract invocation. @@ -987,7 +956,7 @@ struct InternalOutput { result: Result, } -/// Helper trait to wrap contract execution entry points into a signle function +/// Helper trait to wrap contract execution entry points into a single function /// [`Invokable::run_guarded`]. trait Invokable { /// What is returned as a result of a successful invocation. @@ -1096,7 +1065,7 @@ impl Invokable for InstantiateInput { binary.clone(), &schedule, common.origin.clone(), - Determinism::Deterministic, + Determinism::Enforced, TryInstantiate::Skip, ) .map_err(|(err, msg)| { @@ -1265,7 +1234,9 @@ impl Pallet { ContractInfoOf::::get(&address).ok_or(ContractAccessError::DoesntExist)?; let maybe_value = contract_info.read( - &StorageKey::::try_from(key).map_err(|_| ContractAccessError::KeyDecodingFailed)?, + &Key::::try_from_var(key) + .map_err(|_| ContractAccessError::KeyDecodingFailed)? 
+ .into(), ); Ok(maybe_value) } @@ -1326,7 +1297,7 @@ impl Pallet { /// Used by backwards compatible extrinsics. We cannot just set the proof_size weight limit to /// zero or an old `Call` will just fail with OutOfGas. fn compat_weight_limit(gas_limit: OldWeight) -> Weight { - Weight::from_parts(gas_limit.0, u64::from(T::MaxCodeLen::get()) * 2) + Weight::from_parts(gas_limit, u64::from(T::MaxCodeLen::get()) * 2) } } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index cd8f1dd1c9cd4..96a4c3203474c 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -394,7 +394,7 @@ mod v9 { initial: old.initial, maximum: old.maximum, code: old.code, - determinism: Determinism::Deterministic, + determinism: Determinism::Enforced, }) }); } @@ -464,7 +464,7 @@ mod post_checks { fn v9() -> Result<(), &'static str> { for value in CodeStorage::::iter_values() { ensure!( - value.determinism == Determinism::Deterministic, + value.determinism == Determinism::Enforced, "All pre-existing codes need to be deterministic." ); } diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 71faf08c23d6a..aef2fa1ca7dc1 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -27,18 +27,9 @@ use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; use sp_runtime::RuntimeDebug; -use sp_std::{marker::PhantomData, vec::Vec}; +use sp_std::marker::PhantomData; use wasm_instrument::{gas_metering, parity_wasm::elements}; -/// How many API calls are executed in a single batch. The reason for increasing the amount -/// of API calls in batches (per benchmark component increase) is so that the linear regression -/// has an easier time determining the contribution of that component. -pub const API_BENCHMARK_BATCH_SIZE: u32 = 80; - -/// How many instructions are executed in a single batch. The reasoning is the same -/// as for `API_BENCHMARK_BATCH_SIZE`. -pub const INSTR_BENCHMARK_BATCH_SIZE: u32 = 100; - /// Definition of the cost schedule and other parameterizations for the wasm vm. /// /// Its [`Default`] implementation is the designated way to initialize this type. It uses @@ -136,6 +127,10 @@ pub struct Limits { /// The maximum size of a storage value and event payload in bytes. pub payload_len: u32, + + /// The maximum node runtime memory. This is for integrity checks only and does not affect the + /// real setting. + pub runtime_memory: u32, } impl Limits { @@ -148,7 +143,7 @@ impl Limits { /// Describes the weight for all categories of supported wasm instructions. /// /// There there is one field for each wasm instruction that describes the weight to -/// execute one instruction of that name. There are a few execptions: +/// execute one instruction of that name. There are a few exceptions: /// /// 1. If there is a i64 and a i32 variant of an instruction we use the weight /// of the former for both. @@ -184,9 +179,9 @@ pub struct InstructionWeights { pub version: u32, /// Weight to be used for instructions which don't have benchmarks assigned. /// - /// This weight is used whenever a code is uploaded with [`Determinism::AllowIndeterminism`] + /// This weight is used whenever a code is uploaded with [`Determinism::Relaxed`] /// and an instruction (usually a float instruction) is encountered. This weight is **not** - /// used if a contract is uploaded with [`Determinism::Deterministic`]. If this field is set to + /// used if a contract is uploaded with [`Determinism::Enforced`]. 
If this field is set to /// `0` (the default) only deterministic codes are allowed to be uploaded. pub fallback: u32, pub i64const: u32, @@ -200,7 +195,6 @@ pub struct InstructionWeights { pub br_table_per_entry: u32, pub call: u32, pub call_indirect: u32, - pub call_indirect_per_param: u32, pub call_per_local: u32, pub local_get: u32, pub local_set: u32, @@ -419,6 +413,12 @@ pub struct HostFnWeights { /// Weight of calling `seal_ecdsa_to_eth_address`. pub ecdsa_to_eth_address: Weight, + /// Weight of calling `sr25519_verify`. + pub sr25519_verify: Weight, + + /// Weight per byte of calling `sr25519_verify`. + pub sr25519_verify_per_byte: Weight, + /// Weight of calling `reentrance_count`. pub reentrance_count: Weight, @@ -451,66 +451,22 @@ macro_rules! cost_args { } } -macro_rules! cost_batched_args { - ($name:ident, $( $arg: expr ),+) => { - cost_args!($name, $( $arg ),+) / u64::from(API_BENCHMARK_BATCH_SIZE) - } -} - -macro_rules! cost_instr_no_params_with_batch_size { - ($name:ident, $batch_size:expr) => { - (cost_args!($name, 1).ref_time() / u64::from($batch_size)) as u32 - }; -} - -macro_rules! cost_instr_with_batch_size { - ($name:ident, $num_params:expr, $batch_size:expr) => { - cost_instr_no_params_with_batch_size!($name, $batch_size).saturating_sub( - (cost_instr_no_params_with_batch_size!(instr_i64const, $batch_size) / 2) - .saturating_mul($num_params), - ) - }; -} - -macro_rules! cost_instr { - ($name:ident, $num_params:expr) => { - cost_instr_with_batch_size!($name, $num_params, INSTR_BENCHMARK_BATCH_SIZE) +macro_rules! cost_instr_no_params { + ($name:ident) => { + cost_args!($name, 1).ref_time() as u32 }; } -macro_rules! cost_byte_args { - ($name:ident, $( $arg: expr ),+) => { - cost_args!($name, $( $arg ),+) / 1024 - } -} - -macro_rules! cost_byte_batched_args { - ($name:ident, $( $arg: expr ),+) => { - cost_batched_args!($name, $( $arg ),+) / 1024 - } -} - macro_rules! cost { ($name:ident) => { cost_args!($name, 1) }; } -macro_rules! cost_batched { - ($name:ident) => { - cost_batched_args!($name, 1) - }; -} - -macro_rules! cost_byte { - ($name:ident) => { - cost_byte_args!($name, 1) - }; -} - -macro_rules! cost_byte_batched { - ($name:ident) => { - cost_byte_batched_args!($name, 1) +macro_rules! 
cost_instr { + ($name:ident, $num_params:expr) => { + cost_instr_no_params!($name) + .saturating_sub((cost_instr_no_params!(instr_i64const) / 2).saturating_mul($num_params)) }; } @@ -527,13 +483,13 @@ impl Default for Limits { br_table_size: 256, subject_len: 32, payload_len: 16 * 1024, + runtime_memory: 1024 * 1024 * 128, } } } impl Default for InstructionWeights { fn default() -> Self { - let max_pages = Limits::default().memory_pages; Self { version: 4, fallback: 0, @@ -548,7 +504,6 @@ impl Default for InstructionWeights { br_table_per_entry: cost_instr!(instr_br_table_per_entry, 0), call: cost_instr!(instr_call, 2), call_indirect: cost_instr!(instr_call_indirect, 3), - call_indirect_per_param: cost_instr!(instr_call_indirect_per_param, 0), call_per_local: cost_instr!(instr_call_per_local, 0), local_get: cost_instr!(instr_local_get, 1), local_set: cost_instr!(instr_local_set, 1), @@ -556,7 +511,7 @@ impl Default for InstructionWeights { global_get: cost_instr!(instr_global_get, 1), global_set: cost_instr!(instr_global_set, 1), memory_current: cost_instr!(instr_memory_current, 1), - memory_grow: cost_instr_with_batch_size!(instr_memory_grow, 1, max_pages), + memory_grow: cost_instr!(instr_memory_grow, 1), i64clz: cost_instr!(instr_i64clz, 2), i64ctz: cost_instr!(instr_i64ctz, 2), i64popcnt: cost_instr!(instr_i64popcnt, 2), @@ -597,89 +552,87 @@ impl Default for InstructionWeights { impl Default for HostFnWeights { fn default() -> Self { Self { - caller: cost_batched!(seal_caller), - is_contract: cost_batched!(seal_is_contract), - code_hash: cost_batched!(seal_code_hash), - own_code_hash: cost_batched!(seal_own_code_hash), - caller_is_origin: cost_batched!(seal_caller_is_origin), - address: cost_batched!(seal_address), - gas_left: cost_batched!(seal_gas_left), - balance: cost_batched!(seal_balance), - value_transferred: cost_batched!(seal_value_transferred), - minimum_balance: cost_batched!(seal_minimum_balance), - block_number: cost_batched!(seal_block_number), - now: cost_batched!(seal_now), - weight_to_fee: cost_batched!(seal_weight_to_fee), + caller: cost!(seal_caller), + is_contract: cost!(seal_is_contract), + code_hash: cost!(seal_code_hash), + own_code_hash: cost!(seal_own_code_hash), + caller_is_origin: cost!(seal_caller_is_origin), + address: cost!(seal_address), + gas_left: cost!(seal_gas_left), + balance: cost!(seal_balance), + value_transferred: cost!(seal_value_transferred), + minimum_balance: cost!(seal_minimum_balance), + block_number: cost!(seal_block_number), + now: cost!(seal_now), + weight_to_fee: cost!(seal_weight_to_fee), // Manually remove proof size from basic block cost. // // Due to imperfect benchmarking some host functions incur a small // amount of proof size. Usually this is ok. However, charging a basic block is such // a frequent operation that this would be a vast overestimation. 
- gas: cost_batched!(seal_gas).set_proof_size(0), - input: cost_batched!(seal_input), - input_per_byte: cost_byte_batched!(seal_input_per_kb), + gas: cost!(seal_gas).set_proof_size(0), + input: cost!(seal_input), + input_per_byte: cost!(seal_input_per_byte), r#return: cost!(seal_return), - return_per_byte: cost_byte!(seal_return_per_kb), + return_per_byte: cost!(seal_return_per_byte), terminate: cost!(seal_terminate), - random: cost_batched!(seal_random), - deposit_event: cost_batched!(seal_deposit_event), - deposit_event_per_topic: cost_batched_args!(seal_deposit_event_per_topic_and_kb, 1, 0), - deposit_event_per_byte: cost_byte_batched_args!( - seal_deposit_event_per_topic_and_kb, - 0, - 1 - ), - debug_message: cost_batched!(seal_debug_message), - debug_message_per_byte: cost_byte!(seal_debug_message_per_kb), - set_storage: cost_batched!(seal_set_storage), - set_code_hash: cost_batched!(seal_set_code_hash), - set_storage_per_new_byte: cost_byte_batched!(seal_set_storage_per_new_kb), - set_storage_per_old_byte: cost_byte_batched!(seal_set_storage_per_old_kb), - clear_storage: cost_batched!(seal_clear_storage), - clear_storage_per_byte: cost_byte_batched!(seal_clear_storage_per_kb), - contains_storage: cost_batched!(seal_contains_storage), - contains_storage_per_byte: cost_byte_batched!(seal_contains_storage_per_kb), - get_storage: cost_batched!(seal_get_storage), - get_storage_per_byte: cost_byte_batched!(seal_get_storage_per_kb), - take_storage: cost_batched!(seal_take_storage), - take_storage_per_byte: cost_byte_batched!(seal_take_storage_per_kb), - transfer: cost_batched!(seal_transfer), - call: cost_batched!(seal_call), - delegate_call: cost_batched!(seal_delegate_call), - call_transfer_surcharge: cost_batched_args!(seal_call_per_transfer_clone_kb, 1, 0), - call_per_cloned_byte: cost_byte_batched_args!(seal_call_per_transfer_clone_kb, 0, 1), - instantiate: cost_batched!(seal_instantiate), - instantiate_transfer_surcharge: cost_batched_args!( - seal_instantiate_per_transfer_input_salt_kb, + random: cost!(seal_random), + deposit_event: cost!(seal_deposit_event), + deposit_event_per_topic: cost_args!(seal_deposit_event_per_topic_and_byte, 1, 0), + deposit_event_per_byte: cost_args!(seal_deposit_event_per_topic_and_byte, 0, 1), + debug_message: cost!(seal_debug_message), + debug_message_per_byte: cost!(seal_debug_message_per_byte), + set_storage: cost!(seal_set_storage), + set_code_hash: cost!(seal_set_code_hash), + set_storage_per_new_byte: cost!(seal_set_storage_per_new_byte), + set_storage_per_old_byte: cost!(seal_set_storage_per_old_byte), + clear_storage: cost!(seal_clear_storage), + clear_storage_per_byte: cost!(seal_clear_storage_per_byte), + contains_storage: cost!(seal_contains_storage), + contains_storage_per_byte: cost!(seal_contains_storage_per_byte), + get_storage: cost!(seal_get_storage), + get_storage_per_byte: cost!(seal_get_storage_per_byte), + take_storage: cost!(seal_take_storage), + take_storage_per_byte: cost!(seal_take_storage_per_byte), + transfer: cost!(seal_transfer), + call: cost!(seal_call), + delegate_call: cost!(seal_delegate_call), + call_transfer_surcharge: cost_args!(seal_call_per_transfer_clone_byte, 1, 0), + call_per_cloned_byte: cost_args!(seal_call_per_transfer_clone_byte, 0, 1), + instantiate: cost!(seal_instantiate), + instantiate_transfer_surcharge: cost_args!( + seal_instantiate_per_transfer_input_salt_byte, 1, 0, 0 ), - instantiate_per_input_byte: cost_byte_batched_args!( - seal_instantiate_per_transfer_input_salt_kb, + instantiate_per_input_byte: 
cost_args!( + seal_instantiate_per_transfer_input_salt_byte, 0, 1, 0 ), - instantiate_per_salt_byte: cost_byte_batched_args!( - seal_instantiate_per_transfer_input_salt_kb, + instantiate_per_salt_byte: cost_args!( + seal_instantiate_per_transfer_input_salt_byte, 0, 0, 1 ), - hash_sha2_256: cost_batched!(seal_hash_sha2_256), - hash_sha2_256_per_byte: cost_byte_batched!(seal_hash_sha2_256_per_kb), - hash_keccak_256: cost_batched!(seal_hash_keccak_256), - hash_keccak_256_per_byte: cost_byte_batched!(seal_hash_keccak_256_per_kb), - hash_blake2_256: cost_batched!(seal_hash_blake2_256), - hash_blake2_256_per_byte: cost_byte_batched!(seal_hash_blake2_256_per_kb), - hash_blake2_128: cost_batched!(seal_hash_blake2_128), - hash_blake2_128_per_byte: cost_byte_batched!(seal_hash_blake2_128_per_kb), - ecdsa_recover: cost_batched!(seal_ecdsa_recover), - ecdsa_to_eth_address: cost_batched!(seal_ecdsa_to_eth_address), - reentrance_count: cost_batched!(seal_reentrance_count), - account_reentrance_count: cost_batched!(seal_account_reentrance_count), - instantiation_nonce: cost_batched!(seal_instantiation_nonce), + hash_sha2_256: cost!(seal_hash_sha2_256), + hash_sha2_256_per_byte: cost!(seal_hash_sha2_256_per_byte), + hash_keccak_256: cost!(seal_hash_keccak_256), + hash_keccak_256_per_byte: cost!(seal_hash_keccak_256_per_byte), + hash_blake2_256: cost!(seal_hash_blake2_256), + hash_blake2_256_per_byte: cost!(seal_hash_blake2_256_per_byte), + hash_blake2_128: cost!(seal_hash_blake2_128), + hash_blake2_128_per_byte: cost!(seal_hash_blake2_128_per_byte), + ecdsa_recover: cost!(seal_ecdsa_recover), + sr25519_verify: cost!(seal_sr25519_verify), + sr25519_verify_per_byte: cost!(seal_sr25519_verify_per_byte), + ecdsa_to_eth_address: cost!(seal_ecdsa_to_eth_address), + reentrance_count: cost!(seal_reentrance_count), + account_reentrance_count: cost!(seal_account_reentrance_count), + instantiation_nonce: cost!(seal_instantiation_nonce), _phantom: PhantomData, } } @@ -687,29 +640,12 @@ impl Default for HostFnWeights { struct ScheduleRules<'a, T: Config> { schedule: &'a Schedule, - params: Vec, determinism: Determinism, } impl Schedule { - pub(crate) fn rules( - &self, - module: &elements::Module, - determinism: Determinism, - ) -> impl gas_metering::Rules + '_ { - ScheduleRules { - schedule: self, - params: module - .type_section() - .iter() - .flat_map(|section| section.types()) - .map(|func| { - let elements::Type::Function(func) = func; - func.params().len() as u32 - }) - .collect(), - determinism, - } + pub(crate) fn rules(&self, determinism: Determinism) -> impl gas_metering::Rules + '_ { + ScheduleRules { schedule: self, determinism } } } @@ -717,7 +653,6 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { fn instruction_cost(&self, instruction: &elements::Instruction) -> Option { use self::elements::Instruction::*; let w = &self.schedule.instruction_weights; - let max_params = self.schedule.limits.parameters; let weight = match *instruction { End | Unreachable | Return | Else => 0, @@ -753,7 +688,7 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { SetGlobal(_) => w.global_set, CurrentMemory(_) => w.memory_current, GrowMemory(_) => w.memory_grow, - CallIndirect(idx, _) => *self.params.get(idx as usize).unwrap_or(&max_params), + CallIndirect(_, _) => w.call_indirect, BrTable(ref data) => w .br_table .saturating_add(w.br_table_per_entry.saturating_mul(data.table.len() as u32)), @@ -793,8 +728,7 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { // Returning 
None makes the gas instrumentation fail which we intend for // unsupported or unknown instructions. Offchain we might allow indeterminism and hence // use the fallback weight for those instructions. - _ if matches!(self.determinism, Determinism::AllowIndeterminism) && w.fallback > 0 => - w.fallback, + _ if matches!(self.determinism, Determinism::Relaxed) && w.fallback > 0 => w.fallback, _ => return None, }; Some(weight) diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 26162b55ca393..769caef0736fe 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -20,17 +20,17 @@ pub mod meter; use crate::{ - exec::{AccountIdOf, StorageKey}, + exec::{AccountIdOf, Key}, weights::WeightInfo, - AddressGenerator, BalanceOf, CodeHash, Config, ContractInfoOf, DeletionQueue, Error, Pallet, - TrieId, SENTINEL, + AddressGenerator, BalanceOf, CodeHash, Config, ContractInfoOf, DeletionQueue, + DeletionQueueCounter, Error, Pallet, TrieId, SENTINEL, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::{DispatchError, DispatchResult}, + dispatch::DispatchError, storage::child::{self, ChildInfo}, weights::Weight, - RuntimeDebugNoBound, + DefaultNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_io::KillStorageResult; @@ -38,7 +38,7 @@ use sp_runtime::{ traits::{Hash, Saturating, Zero}, RuntimeDebug, }; -use sp_std::{ops::Deref, prelude::*}; +use sp_std::{marker::PhantomData, ops::Deref, prelude::*}; /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account. @@ -132,7 +132,7 @@ impl ContractInfo { /// /// The read is performed from the `trie_id` only. The `address` is not necessary. If the /// contract doesn't store under the given `key` `None` is returned. - pub fn read>(&self, key: &K) -> Option> { + pub fn read(&self, key: &Key) -> Option> { child::get_raw(&self.child_trie_info(), key.hash().as_slice()) } @@ -140,7 +140,7 @@ impl ContractInfo { /// /// Returns `None` if the `key` wasn't previously set by `set_storage` or /// was deleted. - pub fn size>(&self, key: &K) -> Option { + pub fn size(&self, key: &Key) -> Option { child::len(&self.child_trie_info(), key.hash().as_slice()) } @@ -151,9 +151,9 @@ impl ContractInfo { /// /// This function also records how much storage was created or removed if a `storage_meter` /// is supplied. It should only be absent for testing or benchmarking code. - pub fn write>( + pub fn write( &self, - key: &K, + key: &Key, new_value: Option>, storage_meter: Option<&mut meter::NestedMeter>, take: bool, @@ -204,27 +204,21 @@ impl ContractInfo { /// Push a contract's trie to the deletion queue for lazy removal. /// /// You must make sure that the contract is also removed when queuing the trie for deletion. - pub fn queue_trie_for_deletion(&self) -> DispatchResult { - >::try_append(DeletedContract { trie_id: self.trie_id.clone() }) - .map_err(|_| >::DeletionQueueFull.into()) + pub fn queue_trie_for_deletion(&self) { + DeletionQueueManager::::load().insert(self.trie_id.clone()); } /// Calculates the weight that is necessary to remove one key from the trie and how many - /// of those keys can be deleted from the deletion queue given the supplied queue length - /// and weight limit. - pub fn deletion_budget(queue_len: usize, weight_limit: Weight) -> (Weight, u32) { + /// of those keys can be deleted from the deletion queue given the supplied weight limit. 
+ pub fn deletion_budget(weight_limit: Weight) -> (Weight, u32) { let base_weight = T::WeightInfo::on_process_deletion_queue_batch(); - let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - - T::WeightInfo::on_initialize_per_queue_item(0); let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) - T::WeightInfo::on_initialize_per_trie_key(0); - let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as u64); // `weight_per_key` being zero makes no sense and would constitute a failure to // benchmark properly. We opt for not removing any keys at all in this case. let key_budget = weight_limit .saturating_sub(base_weight) - .saturating_sub(decoding_weight) .checked_div_per_component(&weight_per_key) .unwrap_or(0) as u32; @@ -235,13 +229,13 @@ impl ContractInfo { /// /// It returns the amount of weight used for that task. pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { - let queue_len = >::decode_len().unwrap_or(0); - if queue_len == 0 { + let mut queue = >::load(); + + if queue.is_empty() { return Weight::zero() } - let (weight_per_key, mut remaining_key_budget) = - Self::deletion_budget(queue_len, weight_limit); + let (weight_per_key, mut remaining_key_budget) = Self::deletion_budget(weight_limit); // We want to check whether we have enough weight to decode the queue before // proceeding. Too little weight for decoding might happen during runtime upgrades @@ -250,30 +244,25 @@ impl ContractInfo { return weight_limit } - let mut queue = >::get(); + while remaining_key_budget > 0 { + let Some(entry) = queue.next() else { break }; - while !queue.is_empty() && remaining_key_budget > 0 { - // Cannot panic due to loop condition - let trie = &mut queue[0]; #[allow(deprecated)] let outcome = child::kill_storage( - &ChildInfo::new_default(&trie.trie_id), + &ChildInfo::new_default(&entry.trie_id), Some(remaining_key_budget), ); - let keys_removed = match outcome { + + match outcome { // This happens when our budget wasn't large enough to remove all keys. - KillStorageResult::SomeRemaining(c) => c, - KillStorageResult::AllRemoved(c) => { - // We do not care to preserve order. The contract is deleted already and - // no one waits for the trie to be deleted. - queue.swap_remove(0); - c + KillStorageResult::SomeRemaining(_) => return weight_limit, + KillStorageResult::AllRemoved(keys_removed) => { + entry.remove(); + remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed); }, }; - remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed); } - >::put(queue); weight_limit.saturating_sub(weight_per_key.saturating_mul(u64::from(remaining_key_budget))) } @@ -281,25 +270,9 @@ impl ContractInfo { pub fn load_code_hash(account: &AccountIdOf) -> Option> { >::get(account).map(|i| i.code_hash) } - - /// Fill up the queue in order to exercise the limits during testing. - #[cfg(test)] - pub fn fill_queue_with_dummies() { - use frame_support::{traits::Get, BoundedVec}; - let queue: Vec = (0..T::DeletionQueueDepth::get()) - .map(|_| DeletedContract { trie_id: TrieId::default() }) - .collect(); - let bounded: BoundedVec<_, _> = queue.try_into().map_err(|_| ()).unwrap(); - >::put(bounded); - } } -#[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct DeletedContract { - pub(crate) trie_id: TrieId, -} - -/// Information about what happended to the pre-existing value when calling [`ContractInfo::write`]. +/// Information about what happened to the pre-existing value when calling [`ContractInfo::write`]. 
#[cfg_attr(test, derive(Debug, PartialEq))] pub enum WriteOutcome { /// No value existed at the specified key. @@ -352,3 +325,84 @@ impl Deref for DepositAccount { &self.0 } } + +/// Manage the removal of contracts storage that are marked for deletion. +/// +/// When a contract is deleted by calling `seal_terminate` it becomes inaccessible +/// immediately, but the deletion of the storage items it has accumulated is performed +/// later by pulling the contract from the queue in the `on_idle` hook. +#[derive(Encode, Decode, TypeInfo, MaxEncodedLen, DefaultNoBound, Clone)] +#[scale_info(skip_type_params(T))] +pub struct DeletionQueueManager { + /// Counter used as a key for inserting a new deleted contract in the queue. + /// The counter is incremented after each insertion. + insert_counter: u32, + /// The index used to read the next element to be deleted in the queue. + /// The counter is incremented after each deletion. + delete_counter: u32, + + _phantom: PhantomData, +} + +/// View on a contract that is marked for deletion. +struct DeletionQueueEntry<'a, T: Config> { + /// the trie id of the contract to delete. + trie_id: TrieId, + + /// A mutable reference on the queue so that the contract can be removed, and none can be added + /// or read in the meantime. + queue: &'a mut DeletionQueueManager, +} + +impl<'a, T: Config> DeletionQueueEntry<'a, T> { + /// Remove the contract from the deletion queue. + fn remove(self) { + >::remove(self.queue.delete_counter); + self.queue.delete_counter = self.queue.delete_counter.wrapping_add(1); + >::set(self.queue.clone()); + } +} + +impl DeletionQueueManager { + /// Load the `DeletionQueueCounter`, so we can perform read or write operations on the + /// DeletionQueue storage. + fn load() -> Self { + >::get() + } + + /// Returns `true` if the queue contains no elements. + fn is_empty(&self) -> bool { + self.insert_counter.wrapping_sub(self.delete_counter) == 0 + } + + /// Insert a contract in the deletion queue. + fn insert(&mut self, trie_id: TrieId) { + >::insert(self.insert_counter, trie_id); + self.insert_counter = self.insert_counter.wrapping_add(1); + >::set(self.clone()); + } + + /// Fetch the next contract to be deleted. 
+ /// + /// Note: + /// we use the delete counter to get the next value to read from the queue and thus don't pay + /// the cost of an extra call to `sp_io::storage::next_key` to lookup the next entry in the map + fn next(&mut self) -> Option> { + if self.is_empty() { + return None + } + + let entry = >::get(self.delete_counter); + entry.map(|trie_id| DeletionQueueEntry { trie_id, queue: self }) + } +} + +#[cfg(test)] +impl DeletionQueueManager { + pub fn from_test_values(insert_counter: u32, delete_counter: u32) -> Self { + Self { insert_counter, delete_counter, _phantom: Default::default() } + } + pub fn as_test_tuple(&self) -> (u32, u32) { + (self.insert_counter, self.delete_counter) + } +} diff --git a/frame/contracts/src/storage/meter.rs b/frame/contracts/src/storage/meter.rs index ef6fb3277c11b..b8bbd6dc4178b 100644 --- a/frame/contracts/src/storage/meter.rs +++ b/frame/contracts/src/storage/meter.rs @@ -19,17 +19,23 @@ use crate::{ storage::{ContractInfo, DepositAccount}, - BalanceOf, Config, Error, Inspect, Pallet, System, + BalanceOf, Config, Error, Inspect, Pallet, System, LOG_TARGET, }; use codec::Encode; use frame_support::{ - dispatch::DispatchError, + dispatch::{fmt::Debug, DispatchError}, ensure, - traits::{tokens::WithdrawConsequence, Currency, ExistenceRequirement, Get}, + traits::{ + tokens::{Fortitude::Polite, Preservation::Protect, WithdrawConsequence}, + Currency, ExistenceRequirement, Get, + }, DefaultNoBound, RuntimeDebugNoBound, }; use pallet_contracts_primitives::StorageDeposit as Deposit; -use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128}; +use sp_runtime::{ + traits::{Saturating, Zero}, + FixedPointNumber, FixedU128, +}; use sp_std::{marker::PhantomData, vec::Vec}; /// Deposit that uses the native currency's balance type. @@ -93,17 +99,24 @@ pub enum ReservingExt {} pub trait State: private::Sealed {} /// State parameter that constitutes a meter that is in its root state. -pub enum Root {} +#[derive(Default, Debug)] +pub struct Root; /// State parameter that constitutes a meter that is in its nested state. -pub enum Nested {} +/// Its value indicates whether the nested meter has its own limit. +#[derive(DefaultNoBound, RuntimeDebugNoBound)] +pub enum Nested { + #[default] + DerivedLimit, + OwnLimit, +} impl State for Root {} impl State for Nested {} /// A type that allows the metering of consumed or freed storage of a single contract call stack. #[derive(DefaultNoBound, RuntimeDebugNoBound)] -pub struct RawMeter { +pub struct RawMeter { /// The limit of how much balance this meter is allowed to consume. limit: BalanceOf, /// The amount of balance that was used in this meter and all of its already absorbed children. @@ -115,8 +128,10 @@ pub struct RawMeter { /// We only have one charge per contract hence the size of this vector is /// limited by the maximum call depth. charges: Vec>, - /// Type parameters are only used in impls. - _phantom: PhantomData<(E, S)>, + /// We store the nested state to determine if it has a special limit for sub-call. + nested: S, + /// Type parameter only used in impls. + _phantom: PhantomData, } /// This type is used to describe a storage change when charging from the meter. @@ -211,6 +226,9 @@ impl Diff { /// this we can do all the refunds before doing any charge. This way a plain account can use /// more deposit than it has balance as along as it is covered by a refund. This /// essentially makes the order of storage changes irrelevant with regard to the deposit system. 
+/// The only exception is when a special (tougher) deposit limit is specified for a cross-contract +/// call. In that case the limit is enforced once the call is returned, rolling it back if +/// exhausted. #[derive(RuntimeDebugNoBound, Clone)] struct Charge { deposit_account: DepositAccount, @@ -252,16 +270,24 @@ impl RawMeter where T: Config, E: Ext, - S: State, + S: State + Default + Debug, { - /// Create a new child that has its `limit` set to whatever is remaining of it. + /// Create a new child that has its `limit`. + /// Passing `0` as the limit is interpreted as to take whatever is remaining from its parent. /// /// This is called whenever a new subcall is initiated in order to track the storage /// usage for this sub call separately. This is necessary because we want to exchange balance /// with the current contract we are interacting with. - pub fn nested(&self) -> RawMeter { + pub fn nested(&self, limit: BalanceOf) -> RawMeter { debug_assert!(self.is_alive()); - RawMeter { limit: self.available(), ..Default::default() } + // If a special limit is specified higher than it is available, + // we want to enforce the lesser limit to the nested meter, to fail in the sub-call. + let limit = self.available().min(limit); + if limit.is_zero() { + RawMeter { limit: self.available(), ..Default::default() } + } else { + RawMeter { limit, nested: Nested::OwnLimit, ..Default::default() } + } } /// Absorb a child that was spawned to handle a sub call. @@ -290,8 +316,8 @@ where .total_deposit .saturating_add(&absorbed.total_deposit) .saturating_add(&own_deposit); + self.charges.extend_from_slice(&absorbed.charges); if !own_deposit.is_zero() { - self.charges.extend_from_slice(&absorbed.charges); self.charges.push(Charge { deposit_account, amount: own_deposit, @@ -394,7 +420,7 @@ where self.total_deposit = deposit.clone(); info.storage_base_deposit = deposit.charge_or_zero(); - // Usually, deposit charges are deferred to be able to coalesce them with refunds. + // Normally, deposit charges are deferred to be able to coalesce them with refunds. // However, we need to charge immediately so that the account is created before // charges possibly below the ed are collected and fail. E::charge( @@ -426,8 +452,10 @@ where /// /// # Note /// - /// We only need to call this **once** for every call stack and not for every cross contract - /// call. Hence this is only called when the last call frame returns. + /// We normally need to call this **once** for every call stack and not for every cross contract + /// call. However, if a dedicated limit is specified for a sub-call, this needs to be called + /// once the sub-call has returned. For this, the [`Self::enforce_subcall_limit`] wrapper is + /// used. pub fn enforce_limit( &mut self, info: Option<&mut ContractInfo>, @@ -445,6 +473,18 @@ where } Ok(()) } + + /// This is a wrapper around [`Self::enforce_limit`] to use on the exit from a sub-call to + /// enforce its special limit if needed. + pub fn enforce_subcall_limit( + &mut self, + info: Option<&mut ContractInfo>, + ) -> Result<(), DispatchError> { + match self.nested { + Nested::OwnLimit => self.enforce_limit(info), + Nested::DerivedLimit => Ok(()), + } + } } impl Ext for ReservingExt { @@ -456,10 +496,11 @@ impl Ext for ReservingExt { // We are sending the `min_leftover` and the `min_balance` from the origin // account as part of a contract call. Hence origin needs to have those left over // as free balance after accounting for all deposits. 
- let max = T::Currency::reducible_balance(origin, true) + let max = T::Currency::reducible_balance(origin, Protect, Polite) .saturating_sub(min_leftover) .saturating_sub(Pallet::::min_balance()); - let limit = limit.unwrap_or(max); + let default = max.min(T::DefaultDepositLimit::get()); + let limit = limit.unwrap_or(default); ensure!( limit <= max && matches!(T::Currency::can_withdraw(origin, limit), WithdrawConsequence::Success), @@ -499,7 +540,7 @@ impl Ext for ReservingExt { ); if let Err(err) = result { log::error!( - target: "runtime::contracts", + target: LOG_TARGET, "Failed to transfer storage deposit {:?} from origin {:?} to deposit account {:?}: {:?}", amount, origin, deposit_account, err, ); @@ -513,7 +554,7 @@ impl Ext for ReservingExt { // we make sure to leave at least the ed in the free balance. // // The sender always has enough balance because we track it in the `ContractInfo` and - // never send more back than we have. Noone has access to the deposit account. Hence no + // never send more back than we have. No one has access to the deposit account. Hence no // other interaction with this account takes place. Deposit::Refund(amount) => { if terminated { @@ -528,7 +569,7 @@ impl Ext for ReservingExt { ); if matches!(result, Err(_)) { log::error!( - target: "runtime::contracts", + target: LOG_TARGET, "Failed to refund storage deposit {:?} from deposit account {:?} to origin {:?}: {:?}", amount, deposit_account, origin, result, ); @@ -670,7 +711,7 @@ mod tests { assert_eq!(meter.available(), 1_000); // an empty charge does not create a `Charge` entry - let mut nested0 = meter.nested(); + let mut nested0 = meter.nested(BalanceOf::::zero()); nested0.charge(&Default::default()); meter.absorb(nested0, DepositAccount(BOB), None); @@ -692,7 +733,7 @@ mod tests { let mut nested0_info = new_info(StorageInfo { bytes: 100, items: 5, bytes_deposit: 100, items_deposit: 10 }); - let mut nested0 = meter.nested(); + let mut nested0 = meter.nested(BalanceOf::::zero()); nested0.charge(&Diff { bytes_added: 108, bytes_removed: 5, @@ -703,13 +744,13 @@ mod tests { let mut nested1_info = new_info(StorageInfo { bytes: 100, items: 10, bytes_deposit: 100, items_deposit: 20 }); - let mut nested1 = nested0.nested(); + let mut nested1 = nested0.nested(BalanceOf::::zero()); nested1.charge(&Diff { items_removed: 5, ..Default::default() }); nested0.absorb(nested1, DepositAccount(CHARLIE), Some(&mut nested1_info)); let mut nested2_info = new_info(StorageInfo { bytes: 100, items: 7, bytes_deposit: 100, items_deposit: 20 }); - let mut nested2 = nested0.nested(); + let mut nested2 = nested0.nested(BalanceOf::::zero()); nested2.charge(&Diff { items_removed: 7, ..Default::default() }); nested0.absorb(nested2, DepositAccount(CHARLIE), Some(&mut nested2_info)); @@ -757,7 +798,7 @@ mod tests { let mut meter = TestMeter::new(&ALICE, Some(1_000), 0).unwrap(); assert_eq!(meter.available(), 1_000); - let mut nested0 = meter.nested(); + let mut nested0 = meter.nested(BalanceOf::::zero()); nested0.charge(&Diff { bytes_added: 5, bytes_removed: 1, @@ -768,7 +809,7 @@ mod tests { let mut nested1_info = new_info(StorageInfo { bytes: 100, items: 10, bytes_deposit: 100, items_deposit: 20 }); - let mut nested1 = nested0.nested(); + let mut nested1 = nested0.nested(BalanceOf::::zero()); nested1.charge(&Diff { items_removed: 5, ..Default::default() }); nested1.charge(&Diff { bytes_added: 20, ..Default::default() }); nested1.terminate(&nested1_info); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 
d74b08243df4b..697b58f15c609 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -16,43 +16,44 @@ // limitations under the License. use self::test_utils::hash; +use crate as pallet_contracts; use crate::{ chain_extension::{ ChainExtension, Environment, Ext, InitState, RegisteredChainExtension, Result as ExtensionResult, RetVal, ReturnFlags, SysConfig, }, - exec::{FixSizedKey, Frame}, + exec::{Frame, Key}, + storage::DeletionQueueManager, tests::test_utils::{get_contract, get_contract_checked}, wasm::{Determinism, PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, BalanceOf, Code, CodeStorage, Config, ContractInfo, ContractInfoOf, DefaultAddressGenerator, - DeletionQueue, Error, Pallet, Schedule, + DeletionQueueCounter, Error, Pallet, Schedule, }; use assert_matches::assert_matches; use codec::Encode; use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_noop, assert_ok, - dispatch::{DispatchClass, DispatchErrorWithPostInfo, PostDispatchInfo}, + dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, parameter_types, storage::child, traits::{ - ConstU32, ConstU64, Contains, Currency, ExistenceRequirement, Get, LockableCurrency, - OnIdle, OnInitialize, ReservableCurrency, WithdrawReasons, + ConstU32, ConstU64, Contains, Currency, ExistenceRequirement, LockableCurrency, OnIdle, + OnInitialize, WithdrawReasons, }, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; -use frame_system::{self as system, EventRecord, Phase}; +use frame_system::{EventRecord, Phase}; use pretty_assertions::{assert_eq, assert_ne}; +use sp_core::ByteArray; use sp_io::hashing::blake2_256; -use sp_keystore::{testing::KeyStore, KeystoreExt}; +use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ testing::{Header, H256}, traits::{BlakeTwo256, Convert, Hash, IdentityLookup}, - AccountId32, + AccountId32, TokenError, }; -use std::{ops::Deref, sync::Arc}; - -use crate as pallet_contracts; +use std::ops::Deref; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -72,7 +73,19 @@ frame_support::construct_runtime!( } ); -#[macro_use] +macro_rules! assert_return_code { + ( $x:expr , $y:expr $(,)? ) => {{ + assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); + }}; +} + +macro_rules! assert_refcount { + ( $code_hash:expr , $should:expr $(,)? ) => {{ + let is = crate::OwnerInfoOf::::get($code_hash).map(|m| m.refcount()).unwrap(); + assert_eq!(is, $should); + }}; +} + pub mod test_utils { use super::{Balances, Hash, SysConfig, Test}; use crate::{exec::AccountIdOf, CodeHash, Config, ContractInfo, ContractInfoOf, Nonce}; @@ -104,18 +117,6 @@ pub mod test_utils { pub fn hash(s: &S) -> <::Hashing as Hash>::Output { <::Hashing as Hash>::hash_of(s) } - macro_rules! assert_return_code { - ( $x:expr , $y:expr $(,)? ) => {{ - assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); - }}; - } - - macro_rules! assert_refcount { - ( $code_hash:expr , $should:expr $(,)? ) => {{ - let is = crate::OwnerInfoOf::::get($code_hash).map(|m| m.refcount()).unwrap(); - assert_eq!(is, $should); - }}; - } } impl Test { @@ -318,6 +319,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_timestamp::Config for Test { @@ -340,6 +345,8 @@ parameter_types! 
{ }; pub static DepositPerByte: BalanceOf = 1; pub const DepositPerItem: BalanceOf = 2; + // We need this one set high enough for running benchmarks. + pub static DefaultDepositLimit: BalanceOf = 10_000_000; } impl Convert> for Test { @@ -379,7 +386,6 @@ impl Contains for TestFilter { } parameter_types! { - pub const DeletionWeightLimit: Weight = GAS_LIMIT; pub static UnstableInterface: bool = true; } @@ -395,11 +401,10 @@ impl Config for Test { type WeightInfo = (); type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); - type DeletionQueueDepth = ConstU32<1024>; - type DeletionWeightLimit = DeletionWeightLimit; type Schedule = MySchedule; type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; + type DefaultDepositLimit = DefaultDepositLimit; type AddressGenerator = DefaultAddressGenerator; type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; @@ -440,7 +445,7 @@ impl ExtBuilder { .assimilate_storage(&mut t) .unwrap(); let mut ext = sp_io::TestExternalities::new(t); - ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); + ext.register_extension(KeystoreExt::new(MemoryKeystore::new())); ext.execute_with(|| System::set_block_number(1)); ext } @@ -454,7 +459,14 @@ fn compile_module(fixture_name: &str) -> wat::Result<(Vec, ::KeepAlive, + TokenError::Frozen, ); assert_eq!(::Currency::total_balance(&addr), total_balance); @@ -1106,7 +1113,7 @@ fn cannot_self_destruct_through_draning() { #[test] fn cannot_self_destruct_through_storage_refund_after_price_change() { - let (wasm, _code_hash) = compile_module::("store").unwrap(); + let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); let min_balance = ::Currency::minimum_balance(); @@ -1317,7 +1324,7 @@ fn destroy_contract_and_transfer_funds() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create code hash for bob to instantiate let _ = Balances::deposit_creating(&ALICE, 1_000_000); - Contracts::bare_upload_code(ALICE, callee_wasm, None, Determinism::Deterministic).unwrap(); + Contracts::bare_upload_code(ALICE, callee_wasm, None, Determinism::Enforced).unwrap(); // This deploys the BOB contract, which in turn deploys the CHARLIE contract during // construction. @@ -1427,7 +1434,7 @@ fn crypto_hashes() { None, params, false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1469,26 +1476,7 @@ fn transfer_return_code() { None, vec![], false, - Determinism::Deterministic, - ) - .result - .unwrap(); - assert_return_code!(result, RuntimeReturnCode::TransferFailed); - - // Contract has enough total balance in order to not go below the min balance - // threshold when transfering 100 balance but this balance is reserved so - // the transfer still fails. 
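The assert_return_code! helper used in the test above decodes the 4-byte little-endian value a contract returns in its output data and compares it with the expected numeric return code. A minimal sketch of that decoding step, assuming a stand-in ReturnValue type and an illustrative code value rather than the pallet's own definitions:

// Decode a contract's 4-byte little-endian return value, mirroring the
// `assert_return_code!` macro defined at module scope above.
struct ReturnValue {
    data: Vec<u8>, // stand-in for the pallet's execution return value
}

fn return_code(ret: &ReturnValue) -> u32 {
    // Panics if the buffer is not exactly 4 bytes, just like the macro does.
    u32::from_le_bytes(ret.data[..].try_into().unwrap())
}

fn main() {
    // 5 is an illustrative value only, not necessarily `TransferFailed`.
    let ret = ReturnValue { data: 5u32.to_le_bytes().to_vec() };
    assert_eq!(return_code(&ret), 5);
}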
- Balances::make_free_balance_be(&addr, min_balance + 100); - Balances::reserve(&addr, min_balance + 100).unwrap(); - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1529,7 +1517,7 @@ fn call_return_code() { None, AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1563,30 +1551,7 @@ fn call_return_code() { .cloned() .collect(), false, - Determinism::Deterministic, - ) - .result - .unwrap(); - assert_return_code!(result, RuntimeReturnCode::TransferFailed); - - // Contract has enough total balance in order to not go below the min balance - // threshold when transfering 100 balance but this balance is reserved so - // the transfer still fails. - Balances::make_free_balance_be(&addr_bob, min_balance + 100); - Balances::reserve(&addr_bob, min_balance + 100).unwrap(); - let result = Contracts::bare_call( - ALICE, - addr_bob.clone(), - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&addr_django) - .iter() - .chain(&0u32.to_le_bytes()) - .cloned() - .collect(), - false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1606,7 +1571,7 @@ fn call_return_code() { .cloned() .collect(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1625,7 +1590,7 @@ fn call_return_code() { .cloned() .collect(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1677,26 +1642,7 @@ fn instantiate_return_code() { None, callee_hash.clone(), false, - Determinism::Deterministic, - ) - .result - .unwrap(); - assert_return_code!(result, RuntimeReturnCode::TransferFailed); - - // Contract has enough total balance in order to not go below the min_balance - // threshold when transfering the balance but this balance is reserved so - // the transfer still fails. 
- Balances::make_free_balance_be(&addr, min_balance + 10_000); - Balances::reserve(&addr, min_balance + 10_000).unwrap(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - callee_hash.clone(), - false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1712,7 +1658,7 @@ fn instantiate_return_code() { None, vec![0; 33], false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1727,7 +1673,7 @@ fn instantiate_return_code() { None, callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1742,7 +1688,7 @@ fn instantiate_return_code() { None, callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1829,7 +1775,7 @@ fn chain_extension_works() { None, input.clone(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert_eq!(TestExtension::last_seen_buffer(), input); assert_eq!(result.result.unwrap().data, input); @@ -1843,7 +1789,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1859,7 +1805,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert_ok!(result.result); let gas_consumed = result.gas_consumed; @@ -1871,7 +1817,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); @@ -1883,7 +1829,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); @@ -1897,7 +1843,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1915,7 +1861,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -1976,7 +1922,7 @@ fn chain_extension_temp_storage_works() { None, input.clone(), false, - Determinism::Deterministic + Determinism::Enforced ) .result ); @@ -2034,25 +1980,6 @@ fn lazy_removal_works() { }); } -#[test] -fn lazy_removal_on_full_queue_works_on_initialize() { - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Fill the deletion queue with dummy values, so that on_initialize attempts - // to clear the queue - ContractInfo::::fill_queue_with_dummies(); - - let queue_len_initial = >::decode_len().unwrap_or(0); - - // Run the lazy removal - Contracts::on_initialize(System::block_number()); - - let queue_len_after_on_initialize = >::decode_len().unwrap_or(0); - - // Queue length should be decreased after call of on_initialize() - assert!(queue_len_initial - queue_len_after_on_initialize > 0); - }); -} - #[test] fn lazy_batch_removal_works() { let (code, _hash) = compile_module::("self_destruct").unwrap(); @@ 
-2116,7 +2043,7 @@ fn lazy_removal_partial_remove_works() { // We create a contract with some extra keys above the weight limit let extra_keys = 7u32; let weight_limit = Weight::from_parts(5_000_000_000, 0); - let (_, max_keys) = ContractInfo::::deletion_budget(1, weight_limit); + let (_, max_keys) = ContractInfo::::deletion_budget(weight_limit); let vals: Vec<_> = (0..max_keys + extra_keys) .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) .collect(); @@ -2145,7 +2072,7 @@ fn lazy_removal_partial_remove_works() { // Put value into the contracts child trie for val in &vals { - info.write(&val.0 as &FixSizedKey, Some(val.2.clone()), None, false).unwrap(); + info.write(&Key::Fix(val.0), Some(val.2.clone()), None, false).unwrap(); } >::insert(&addr, info.clone()); @@ -2201,33 +2128,6 @@ fn lazy_removal_partial_remove_works() { }); } -#[test] -fn lazy_removal_does_no_run_on_full_queue_and_full_block() { - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Fill up the block which should prevent the lazy storage removal from running. - System::register_extra_weight_unchecked( - ::BlockWeights::get().max_block, - DispatchClass::Mandatory, - ); - - // Fill the deletion queue with dummy values, so that on_initialize attempts - // to clear the queue - ContractInfo::::fill_queue_with_dummies(); - - // Check that on_initialize() tries to perform lazy removal but removes nothing - // as no more weight is left for that. - let weight_used = Contracts::on_initialize(System::block_number()); - let base = <::WeightInfo as WeightInfo>::on_process_deletion_queue_batch(); - assert_eq!(weight_used, base); - - // Check that the deletion queue is still full after execution of the - // on_initialize() hook. - let max_len: u32 = ::DeletionQueueDepth::get(); - let queue_len: u32 = >::decode_len().unwrap_or(0).try_into().unwrap(); - assert_eq!(max_len, queue_len); - }); -} - #[test] fn lazy_removal_does_no_run_on_low_remaining_weight() { let (code, _hash) = compile_module::("self_destruct").unwrap(); @@ -2271,7 +2171,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { // But value should be still there as the lazy removal did not run, yet. 
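The lazy-removal tests above fill a contract's child trie with entries keyed by fixed 32-byte hashes and then write them through the new unified Key::Fix variant. A small sketch of how those keys and values are built, assuming the same sp-io and parity-scale-codec dependencies this test file already imports:

use codec::Encode;
use sp_io::hashing::blake2_256;

fn main() {
    // Each entry pairs a fixed 32-byte key (the blake2_256 hash of the
    // SCALE-encoded index) with the encoded index as the stored value.
    let vals: Vec<([u8; 32], u32, Vec<u8>)> =
        (0..10u32).map(|i| (blake2_256(&i.encode()), i, i.encode())).collect();

    // Every key is a fixed 32-byte hash, suitable for `Key::Fix`.
    assert!(vals.iter().all(|(key, _, _)| key.len() == 32));
    // A u32 SCALE-encodes to its 4 little-endian bytes.
    assert_eq!(vals[3].2, 3u32.to_le_bytes().to_vec());
}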
assert_matches!(child::get(trie, &[99]), Some(42)); - // Assign a remaining weight which is too low for a successfull deletion of the contract + // Assign a remaining weight which is too low for a successful deletion of the contract let low_remaining_weight = <::WeightInfo as WeightInfo>::on_process_deletion_queue_batch(); @@ -2321,7 +2221,7 @@ fn lazy_removal_does_not_use_all_weight() { .account_id; let info = get_contract(&addr); - let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(1, weight_limit); + let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(weight_limit); // We create a contract with one less storage item than we can remove within the limit let vals: Vec<_> = (0..max_keys - 1) @@ -2330,7 +2230,7 @@ fn lazy_removal_does_not_use_all_weight() { // Put value into the contracts child trie for val in &vals { - info.write(&val.0 as &FixSizedKey, Some(val.2.clone()), None, false).unwrap(); + info.write(&Key::Fix(val.0), Some(val.2.clone()), None, false).unwrap(); } >::insert(&addr, info.clone()); @@ -2376,40 +2276,75 @@ fn lazy_removal_does_not_use_all_weight() { } #[test] -fn deletion_queue_full() { +fn deletion_queue_ring_buffer_overflow() { let (code, _hash) = compile_module::("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let mut ext = ExtBuilder::default().existential_deposit(50).build(); + + // setup the deletion queue with custom counters + ext.execute_with(|| { + let queue = DeletionQueueManager::from_test_values(u32::MAX - 1, u32::MAX - 1); + >::set(queue); + }); + + // commit the changes to the storage + ext.commit_all().unwrap(); + + ext.execute_with(|| { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let mut tries: Vec = vec![]; - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - false, - ) - .result - .unwrap() - .account_id; + // add 3 contracts to the deletion queue + for i in 0..3u8 { + let addr = Contracts::bare_instantiate( + ALICE, + min_balance * 100, + GAS_LIMIT, + None, + Code::Upload(code.clone()), + vec![], + vec![i], + false, + ) + .result + .unwrap() + .account_id; - // fill the deletion queue up until its limit - ContractInfo::::fill_queue_with_dummies(); + let info = get_contract(&addr); + let trie = &info.child_trie_info(); - // Terminate the contract should fail - assert_err_ignore_postinfo!( - Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), - Error::::DeletionQueueFull, - ); + // Put value into the contracts child trie + child::put(trie, &[99], &42); - // Contract should exist because removal failed - get_contract(&addr); - }); -} + // Terminate the contract. Contract info should be gone, but value should be still + // there as the lazy removal did not run, yet. 
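The test being assembled above seeds both deletion-queue counters with u32::MAX - 1 and then queues and purges three contracts, so each counter is expected to wrap around to 1. The modular arithmetic behind that expectation, sketched with explicit wrapping adds (whether the pallet uses wrapping_add internally is not visible in this diff):

fn main() {
    let start: u32 = u32::MAX - 1;
    let mut insert_counter = start;
    let mut delete_counter = start;

    // Three contracts are queued for deletion ...
    for _ in 0..3 {
        insert_counter = insert_counter.wrapping_add(1);
    }
    // ... and a single `on_idle` pass later removes all three tries.
    for _ in 0..3 {
        delete_counter = delete_counter.wrapping_add(1);
    }

    // u32::MAX - 1 -> u32::MAX -> 0 -> 1
    assert_eq!((insert_counter, delete_counter), (1, 1));
}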
+ assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + None, + vec![] + )); + assert!(!>::contains_key(&addr)); + assert_matches!(child::get(trie, &[99]), Some(42)); + + tries.push(trie.clone()) + } + + // Run single lazy removal + Contracts::on_idle(System::block_number(), Weight::MAX); + + // The single lazy removal should have removed all queued tries + for trie in tries.iter() { + assert_matches!(child::get::(trie, &[99]), None); + } + + // insert and delete counter values should go from u32::MAX - 1 to 1 + assert_eq!(>::get().as_test_tuple(), (1, 1)); + }) +} #[test] fn refcounter() { let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); @@ -2537,7 +2472,7 @@ fn reinstrument_does_charge() { None, zero.clone(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert!(!result0.result.unwrap().did_revert()); @@ -2549,7 +2484,7 @@ fn reinstrument_does_charge() { None, zero.clone(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert!(!result1.result.unwrap().did_revert()); @@ -2571,7 +2506,7 @@ fn reinstrument_does_charge() { None, zero.clone(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert!(!result2.result.unwrap().did_revert()); assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); @@ -2610,7 +2545,7 @@ fn debug_message_works() { None, vec![], true, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result.result, Ok(_)); @@ -2646,7 +2581,7 @@ fn debug_message_logging_disabled() { None, vec![], false, - Determinism::Deterministic, + Determinism::Enforced, ); assert_matches!(result.result, Ok(_)); // the dispatchables always run without debugging @@ -2682,7 +2617,7 @@ fn debug_message_invalid_utf8() { None, vec![], true, - Determinism::Deterministic, + Determinism::Enforced, ); assert_ok!(result.result); assert!(result.debug_message.is_empty()); @@ -2696,7 +2631,6 @@ fn gas_estimation_nested_call_fixed_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); - let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); let addr_caller = Contracts::bare_instantiate( ALICE, @@ -2730,6 +2664,7 @@ fn gas_estimation_nested_call_fixed_limit() { .iter() .cloned() .chain((GAS_LIMIT / 5).ref_time().to_le_bytes()) + .chain((GAS_LIMIT / 5).proof_size().to_le_bytes()) .collect(); // Call in order to determine the gas that is required for this call @@ -2741,27 +2676,41 @@ fn gas_estimation_nested_call_fixed_limit() { None, input.clone(), false, - Determinism::Deterministic, + Determinism::Enforced, ); assert_ok!(&result.result); // We have a subcall with a fixed gas limit. This constitutes precharging. - assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); + assert!(result.gas_required.all_gt(result.gas_consumed)); // Make the same call using the estimated gas. Should succeed. assert_ok!( Contracts::bare_call( ALICE, - addr_caller, + addr_caller.clone(), 0, result.gas_required, Some(result.storage_deposit.charge_or_zero()), - input, + input.clone(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result ); + + // Make the same call using proof_size a but less than estimated. Should fail with OutOfGas. 
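Gas is now checked in two dimensions, ref_time and proof_size, which is why the assertions above use all_gt and why undercutting the estimated proof_size by a single unit can trip OutOfGas. A simplified stand-in for sp_weights::Weight covering just the two operations visible here, with illustrative figures:

#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    // Strictly greater in *both* dimensions, not just one of them.
    fn all_gt(self, other: Weight) -> bool {
        self.ref_time > other.ref_time && self.proof_size > other.proof_size
    }

    fn sub_proof_size(self, amount: u64) -> Weight {
        Weight { proof_size: self.proof_size - amount, ..self }
    }
}

fn main() {
    // Pre-charging for the fixed-limit subcall makes the estimate exceed
    // actual consumption in both dimensions.
    let consumed = Weight { ref_time: 1_000, proof_size: 100 };
    let required = Weight { ref_time: 1_500, proof_size: 120 };
    assert!(required.all_gt(consumed));

    // Supplying one unit less proof size than estimated is what the test
    // above does to provoke OutOfGas.
    assert_eq!(required.sub_proof_size(1), Weight { ref_time: 1_500, proof_size: 119 });
}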
+ let result = Contracts::bare_call( + ALICE, + addr_caller, + 0, + result.gas_required.sub_proof_size(1), + Some(result.storage_deposit.charge_or_zero()), + input, + false, + Determinism::Enforced, + ) + .result; + assert_err!(result, >::OutOfGas); }); } @@ -2805,7 +2754,7 @@ fn gas_estimation_call_runtime() { // Call something trivial with a huge gas limit so that we can observe the effects // of pre-charging. This should create a difference between consumed and required. - let call = RuntimeCall::Balances(pallet_balances::Call::transfer { + let call = RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: addr_callee, value: min_balance * 10, }); @@ -2817,7 +2766,7 @@ fn gas_estimation_call_runtime() { None, call.encode(), false, - Determinism::Deterministic, + Determinism::Enforced, ); // contract encodes the result of the dispatch runtime let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); @@ -2834,7 +2783,7 @@ fn gas_estimation_call_runtime() { None, call.encode(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result ); @@ -2842,7 +2791,7 @@ fn gas_estimation_call_runtime() { } #[test] -fn gas_call_runtime_reentrancy_guarded() { +fn call_runtime_reentrancy_guarded() { let (caller_code, _caller_hash) = compile_module::("call_runtime").unwrap(); let (callee_code, _callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -2897,7 +2846,7 @@ fn gas_call_runtime_reentrancy_guarded() { None, call.encode(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -2959,7 +2908,7 @@ fn ecdsa_recover() { None, params, false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -2968,6 +2917,72 @@ fn ecdsa_recover() { }) } +#[test] +fn sr25519_verify() { + let (wasm, _code_hash) = compile_module::("sr25519_verify").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Instantiate the sr25519_verify contract. 
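The sr25519_verify fixture instantiated below is driven by a single input buffer laid out as signature, then public key, then message. A small sketch of that layout with placeholder bytes (the real test passes Alice's actual signature over "hello world"):

// Assemble the input the fixture expects: 64-byte signature ++ 32-byte
// public key ++ raw message bytes.
fn encode_sr25519_params(signature: &[u8; 64], public_key: &[u8; 32], message: &[u8]) -> Vec<u8> {
    let mut params = Vec::with_capacity(64 + 32 + message.len());
    params.extend_from_slice(signature);
    params.extend_from_slice(public_key);
    params.extend_from_slice(message);
    params
}

fn main() {
    let params = encode_sr25519_params(&[1; 64], &[2; 32], b"hello world");
    assert_eq!(params.len(), 64 + 32 + 11);
}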
+ let addr = Contracts::bare_instantiate( + ALICE, + 100_000, + GAS_LIMIT, + None, + Code::Upload(wasm), + vec![], + vec![], + false, + ) + .result + .unwrap() + .account_id; + + let call_with = |message: &[u8; 11]| { + // Alice's signature for "hello world" + #[rustfmt::skip] + let signature: [u8; 64] = [ + 184, 49, 74, 238, 78, 165, 102, 252, 22, 92, 156, 176, 124, 118, 168, 116, 247, + 99, 0, 94, 2, 45, 9, 170, 73, 222, 182, 74, 60, 32, 75, 64, 98, 174, 69, 55, 83, + 85, 180, 98, 208, 75, 231, 57, 205, 62, 4, 105, 26, 136, 172, 17, 123, 99, 90, 255, + 228, 54, 115, 63, 30, 207, 205, 131, + ]; + + // Alice's public key + #[rustfmt::skip] + let public_key: [u8; 32] = [ + 212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, + 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125, + ]; + + let mut params = vec![]; + params.extend_from_slice(&signature); + params.extend_from_slice(&public_key); + params.extend_from_slice(message); + + >::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + params, + false, + Determinism::Enforced, + ) + .result + .unwrap() + }; + + // verification should succeed for "hello world" + assert_return_code!(call_with(&b"hello world"), RuntimeReturnCode::Success); + + // verification should fail for other messages + assert_return_code!(call_with(&b"hello worlD"), RuntimeReturnCode::Sr25519VerifyFailed); + }) +} + #[test] fn upload_code_works() { let (wasm, code_hash) = compile_module::("dummy").unwrap(); @@ -2983,7 +2998,7 @@ fn upload_code_works() { RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)), - Determinism::Deterministic, + Determinism::Enforced, )); assert!(>::contains_key(code_hash)); @@ -3023,7 +3038,7 @@ fn upload_code_limit_too_low() { RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(100)), - Determinism::Deterministic + Determinism::Enforced ), >::StorageDepositLimitExhausted, ); @@ -3047,7 +3062,7 @@ fn upload_code_not_enough_balance() { RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)), - Determinism::Deterministic + Determinism::Enforced ), >::StorageDepositNotEnoughFunds, ); @@ -3070,7 +3085,7 @@ fn remove_code_works() { RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)), - Determinism::Deterministic, + Determinism::Enforced, )); assert!(>::contains_key(code_hash)); @@ -3125,7 +3140,7 @@ fn remove_code_wrong_origin() { RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)), - Determinism::Deterministic, + Determinism::Enforced, )); assert_noop!( @@ -3567,6 +3582,59 @@ fn storage_deposit_works() { }); } +#[test] +fn storage_deposit_callee_works() { + let (wasm_caller, _code_hash_caller) = compile_module::("call").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module::("store_call").unwrap(); + const ED: u64 = 200; + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Create both contracts: Constructors do nothing. 
+ let addr_caller = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm_caller), + vec![], + vec![], + false, + ) + .result + .unwrap() + .account_id; + let addr_callee = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm_callee), + vec![], + vec![], + false, + ) + .result + .unwrap() + .account_id; + + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller, + 0, + GAS_LIMIT, + None, + (100u32, &addr_callee).encode() + )); + + let callee = get_contract(&addr_callee); + let deposit = ED + DepositPerByte::get() * 100 + DepositPerItem::get() * 1; + + assert_eq!(test_utils::get_balance(callee.deposit_account()), deposit); + assert_eq!(callee.total_deposit(), deposit); + }); +} + #[test] fn set_code_extrinsic() { let (wasm, code_hash) = compile_module::("dummy").unwrap(); @@ -3595,7 +3663,7 @@ fn set_code_extrinsic() { RuntimeOrigin::signed(ALICE), new_wasm, None, - Determinism::Deterministic + Determinism::Enforced )); // Drop previous events @@ -3715,7 +3783,7 @@ fn contract_reverted() { RuntimeOrigin::signed(ALICE), wasm.clone(), None, - Determinism::Deterministic + Determinism::Enforced )); // Calling extrinsic: revert leads to an error @@ -3803,7 +3871,7 @@ fn contract_reverted() { None, input, false, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -3823,7 +3891,7 @@ fn code_rejected_error_works() { RuntimeOrigin::signed(ALICE), wasm.clone(), None, - Determinism::Deterministic + Determinism::Enforced ), >::CodeRejected, ); @@ -3849,7 +3917,7 @@ fn code_rejected_error_works() { RuntimeOrigin::signed(ALICE), wasm.clone(), None, - Determinism::Deterministic + Determinism::Enforced ), >::CodeRejected, ); @@ -3899,7 +3967,7 @@ fn set_code_hash() { RuntimeOrigin::signed(ALICE), new_wasm.clone(), None, - Determinism::Deterministic + Determinism::Enforced )); System::reset_events(); @@ -3913,7 +3981,7 @@ fn set_code_hash() { None, new_code_hash.as_ref().to_vec(), true, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -3928,7 +3996,7 @@ fn set_code_hash() { None, vec![], true, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -3970,7 +4038,7 @@ fn set_code_hash() { #[test] fn storage_deposit_limit_is_enforced() { - let (wasm, _code_hash) = compile_module::("store").unwrap(); + let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); let min_balance = ::Currency::minimum_balance(); @@ -3994,15 +4062,47 @@ fn storage_deposit_limit_is_enforced() { assert_eq!(get_contract(&addr).total_deposit(), min_balance); assert_eq!(::Currency::total_balance(&addr), min_balance); - // Create 100 bytes of storage with a price of per byte + // Create 1 byte of storage with a price of per byte, + // setting insufficient deposit limit, as it requires 3 Balance: + // 2 for the item added + 1 for the new storage item. assert_err_ignore_postinfo!( Contracts::call( RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, - Some(codec::Compact(1)), - 100u32.to_le_bytes().to_vec() + Some(codec::Compact(2)), + 1u32.to_le_bytes().to_vec() + ), + >::StorageDepositLimitExhausted, + ); + + // To check that deposit limit fallbacks to DefaultDepositLimit, + // we customize it here. 
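The deposit figures asserted above follow directly from the test configuration (DepositPerByte = 1, DepositPerItem = 2) plus the existential deposit held on the contract's deposit account. A worked example of that arithmetic:

fn storage_deposit(ed: u64, per_byte: u64, per_item: u64, bytes: u64, items: u64) -> u64 {
    ed + per_byte * bytes + per_item * items
}

fn main() {
    // Callee stores a single 100-byte item with ED = 200: 200 + 100 + 2 = 302.
    assert_eq!(storage_deposit(200, 1, 2, 100, 1), 302);

    // Ignoring ED, one new 1-byte item costs 1 + 2 = 3 Balance, which is why
    // a deposit limit of 2 is rejected in the limit-enforcement test above.
    assert_eq!(storage_deposit(0, 1, 2, 1, 1), 3);
}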
+ DEFAULT_DEPOSIT_LIMIT.with(|c| *c.borrow_mut() = 3); + + // Create 1 byte of storage, should cost 3 Balance: + // 2 for the item added + 1 for the new storage item. + // Should pass as it fallbacks to DefaultDepositLimit. + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + None, + 1u32.to_le_bytes().to_vec() + )); + + // Use 4 more bytes of the storage for the same item, which requires 4 Balance. + // Should fail as DefaultDepositLimit is 3 and hence isn't enough. + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + None, + 5u32.to_le_bytes().to_vec() ), >::StorageDepositLimitExhausted, ); @@ -4010,10 +4110,10 @@ fn storage_deposit_limit_is_enforced() { } #[test] -fn storage_deposit_limit_is_enforced_late() { +fn deposit_limit_in_nested_calls() { let (wasm_caller, _code_hash_caller) = compile_module::("create_storage_and_call").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module::("store").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); @@ -4056,27 +4156,28 @@ fn storage_deposit_limit_is_enforced_late() { 100u32.to_le_bytes().to_vec() )); - // We do not remove any storage but require 14 bytes of storage for the new - // storage created in the immediate contract. + // We do not remove any storage but add a storage item of 12 bytes in the caller + // contract. This would cost 12 + 2 = 14 Balance. + // The nested call doesn't get a special limit, which is set by passing 0 to it. + // This should fail as the specified parent's limit is less than the cost: 13 < + // 14. assert_err_ignore_postinfo!( Contracts::call( RuntimeOrigin::signed(ALICE), addr_caller.clone(), 0, GAS_LIMIT, - Some(codec::Compact(5)), - 100u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), + Some(codec::Compact(13)), + (100u32, &addr_callee, 0u64).encode(), ), >::StorageDepositLimitExhausted, ); - - // Allow for the additional 14 bytes but demand an additional byte in the callee contract. + // Now we specify the parent's limit high enough to cover the caller's storage additions. + // However, we use a single byte more in the callee, hence the storage deposit should be 15 + // Balance. + // The nested call doesn't get a special limit, which is set by passing 0 to it. + // This should fail as the specified parent's limit is less than the cost: 14 + // < 15. assert_err_ignore_postinfo!( Contracts::call( RuntimeOrigin::signed(ALICE), @@ -4084,19 +4185,31 @@ fn storage_deposit_limit_is_enforced_late() { 0, GAS_LIMIT, Some(codec::Compact(14)), - 101u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), + (101u32, &addr_callee, 0u64).encode(), + ), + >::StorageDepositLimitExhausted, + ); + + // Now we specify the parent's limit high enough to cover both the caller's and callee's + // storage additions. However, we set a special deposit limit of 1 Balance for the nested + // call. This should fail as callee adds up 2 bytes to the storage, meaning that the nested + // call should have a deposit limit of at least 2 Balance. The sub-call should be rolled + // back, which is covered by the next test case. 
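The nested-call cases above come down to simple bookkeeping against the parent's deposit limit: the caller's new 12-byte item costs 14 Balance, any extra byte the callee appends adds 1 more, and a sub-call that grows the callee's item by two bytes needs a special limit of at least 2. A worked example with the test's prices (DepositPerByte = 1, DepositPerItem = 2):

fn main() {
    let per_byte = 1u64;
    let per_item = 2u64;

    // Caller creates one new 12-byte storage item.
    let caller_cost = 12 * per_byte + per_item;
    assert_eq!(caller_cost, 14); // a parent limit of 13 is rejected

    // Callee additionally grows its existing item by one byte.
    assert_eq!(caller_cost + per_byte, 15); // a parent limit of 14 is still rejected

    // When the callee grows its item by two bytes, the nested call needs a
    // special limit of at least 2, so passing 1 makes the sub-call fail.
    assert_eq!(2 * per_byte, 2);
}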
+ assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(16)), + (102u32, &addr_callee, 1u64).encode(), ), >::StorageDepositLimitExhausted, ); - // Refund in the callee contract but not enough to cover the 14 balance required by the - // caller. + // Refund in the callee contract but not enough to cover the 14 Balance required by the + // caller. Note that if previous sub-call wouldn't roll back, this call would pass making + // the test case fail. We don't set a special limit for the nested call here. assert_err_ignore_postinfo!( Contracts::call( RuntimeOrigin::signed(ALICE), @@ -4104,59 +4217,195 @@ fn storage_deposit_limit_is_enforced_late() { 0, GAS_LIMIT, Some(codec::Compact(0)), - 87u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), + (87u32, &addr_callee, 0u64).encode(), ), >::StorageDepositLimitExhausted, ); let _ = Balances::make_free_balance_be(&ALICE, 1_000); - // Send more than the sender has balance. + // Require more than the sender's balance. + // We don't set a special limit for the nested call. assert_err_ignore_postinfo!( Contracts::call( RuntimeOrigin::signed(ALICE), addr_caller.clone(), 0, GAS_LIMIT, - Some(codec::Compact(50)), - 1_200u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), + None, + (1200u32, &addr_callee, 1u64).encode(), ), >::StorageDepositLimitExhausted, ); - // Same as above but allow for the additional balance. + // Same as above but allow for the additional deposit of 1 Balance in parent. + // We set the special deposit limit of 1 Balance for the nested call, which isn't + // enforced as callee frees up storage. This should pass. assert_ok!(Contracts::call( RuntimeOrigin::signed(ALICE), addr_caller.clone(), 0, GAS_LIMIT, Some(codec::Compact(1)), - 87u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), + (87u32, &addr_callee, 1u64).encode(), )); }); } +#[test] +fn deposit_limit_in_nested_instantiate() { + let (wasm_caller, _code_hash_caller) = + compile_module::("create_storage_and_instantiate").unwrap(); + let (wasm_callee, code_hash_callee) = compile_module::("store_deploy").unwrap(); + const ED: u64 = 5; + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = Balances::deposit_creating(&BOB, 1_000_000); + // Create caller contract + let addr_caller = Contracts::bare_instantiate( + ALICE, + 10_000u64, // this balance is later passed to the deployed contract + GAS_LIMIT, + None, + Code::Upload(wasm_caller), + vec![], + vec![], + false, + ) + .result + .unwrap() + .account_id; + // Deploy a contract to get its occupied storage size + let addr = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm_callee), + vec![0, 0, 0, 0], + vec![], + false, + ) + .result + .unwrap() + .account_id; + + let callee_info_len = ContractInfoOf::::get(&addr).unwrap().encoded_size() as u64; + + // We don't set a special deposit limit for the nested instantiation. 
+ // + // The deposit limit set for the parent is insufficient for the instantiation, which + // requires: + // - callee_info_len + 2 for storing the new contract info, + // - ED for deployed contract account, + // - 2 for the storage item of 0 bytes being created in the callee constructor + // or (callee_info_len + 2 + ED + 2) Balance in total. + // + // Provided the limit is set to be 1 Balance less, + // this call should fail on the return from the caller contract. + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(BOB), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(callee_info_len + 2 + ED + 1)), + (0u32, &code_hash_callee, 0u64).encode(), + ), + >::StorageDepositLimitExhausted, + ); + // The charges made on instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Now we give enough limit for the instantiation itself, but require for 1 more storage + // byte in the constructor. Hence +1 Balance to the limit is needed. This should fail on the + // return from constructor. + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(BOB), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(callee_info_len + 2 + ED + 2)), + (1u32, &code_hash_callee, 0u64).encode(), + ), + >::StorageDepositLimitExhausted, + ); + // The charges made on the instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Now we set enough limit in parent call, but an insufficient limit for child instantiate. + // This should fail during the charging for the instantiation in + // `RawMeter::charge_instantiate()` + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(BOB), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(callee_info_len + 2 + ED + 2)), + (0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode(), + ), + >::StorageDepositLimitExhausted, + ); + // The charges made on the instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Same as above but requires for single added storage + // item of 1 byte to be covered by the limit, which implies 3 more Balance. + // Now we set enough limit for the parent call, but insufficient limit for child + // instantiate. This should fail right after the constructor execution. + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(BOB), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(callee_info_len + 2 + ED + 3)), // enough parent limit + (1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode(), + ), + >::StorageDepositLimitExhausted, + ); + // The charges made on the instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Set enough deposit limit for the child instantiate. This should succeed. + let result = Contracts::bare_call( + BOB, + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(callee_info_len + 2 + ED + 4).into()), + (1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode(), + false, + Determinism::Enforced, + ); + + let returned = result.result.unwrap(); + // All balance of the caller except ED has been transferred to the callee. + // No deposit has been taken from it. + assert_eq!(::Currency::free_balance(&addr_caller), ED); + // Get address of the deployed contract. 
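The limits probed in this test are sums of the components spelled out in the comments above: the stored contract info (one item of callee_info_len bytes), the existential deposit for the new account, and the storage item written by the constructor. A worked sketch with the test's prices (DepositPerByte = 1, DepositPerItem = 2, ED = 5); callee_info_len is a stand-in value, the test measures the real encoded length:

fn main() {
    let per_byte = 1u64;
    let per_item = 2u64;
    let ed = 5u64;
    let callee_info_len = 100u64; // stand-in for the measured encoded length

    // Storing the new contract info: callee_info_len bytes plus one item.
    let contract_info = callee_info_len * per_byte + per_item;
    assert_eq!(contract_info, callee_info_len + 2);

    // Constructor writes an empty (0-byte) item: just the per-item charge.
    let min_limit = contract_info + ed + per_item;
    assert_eq!(min_limit, callee_info_len + 2 + ed + 2);

    // A 1-byte item instead of an empty one adds one more Balance, giving
    // the "+ 3" limit used for the successful call above.
    assert_eq!(min_limit + per_byte, callee_info_len + 2 + ed + 3);
}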
+ let addr_callee = AccountId32::from_slice(&returned.data[0..32]).unwrap(); + // 10_000 should be sent to callee from the caller contract, plus ED to be sent from the + // origin. + assert_eq!(::Currency::free_balance(&addr_callee), 10_000 + ED); + // The origin should be charged with: + // - callee instantiation deposit = (callee_info_len + 2) + // - callee account ED + // - for writing an item of 1 byte to storage = 3 Balance + assert_eq!( + ::Currency::free_balance(&BOB), + 1_000_000 - (callee_info_len + 2 + ED + 3) + ); + // Check that deposit due to be charged still includes these 3 Balance + assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3),) + }); +} + #[test] fn deposit_limit_honors_liquidity_restrictions() { - let (wasm, _code_hash) = compile_module::("store").unwrap(); + let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); let _ = Balances::deposit_creating(&BOB, 1_000); @@ -4200,7 +4449,7 @@ fn deposit_limit_honors_liquidity_restrictions() { #[test] fn deposit_limit_honors_existential_deposit() { - let (wasm, _code_hash) = compile_module::("store").unwrap(); + let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); let _ = Balances::deposit_creating(&BOB, 1_000); @@ -4243,7 +4492,7 @@ fn deposit_limit_honors_existential_deposit() { #[test] fn deposit_limit_honors_min_leftover() { - let (wasm, _code_hash) = compile_module::("store").unwrap(); + let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); let _ = Balances::deposit_creating(&BOB, 1_000); @@ -4268,7 +4517,7 @@ fn deposit_limit_honors_min_leftover() { assert_eq!(get_contract(&addr).total_deposit(), min_balance); assert_eq!(::Currency::total_balance(&addr), min_balance); - // check that the minumum leftover (value send) is considered + // check that the minimum leftover (value send) is considered assert_err_ignore_postinfo!( Contracts::call( RuntimeOrigin::signed(BOB), @@ -4325,7 +4574,7 @@ fn cannot_instantiate_indeterministic_code() { RuntimeOrigin::signed(ALICE), wasm.clone(), None, - Determinism::Deterministic + Determinism::Enforced ), >::CodeRejected, ); @@ -4335,7 +4584,7 @@ fn cannot_instantiate_indeterministic_code() { RuntimeOrigin::signed(ALICE), wasm, None, - Determinism::AllowIndeterminism, + Determinism::Relaxed, )); assert_err_ignore_postinfo!( Contracts::instantiate( @@ -4389,7 +4638,7 @@ fn cannot_instantiate_indeterministic_code() { None, code_hash.encode(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result, >::Indeterministic, @@ -4405,7 +4654,7 @@ fn cannot_instantiate_indeterministic_code() { None, code_hash.encode(), false, - Determinism::AllowIndeterminism, + Determinism::Relaxed, ) .result, >::Indeterministic, @@ -4425,7 +4674,7 @@ fn cannot_set_code_indeterministic_code() { RuntimeOrigin::signed(ALICE), wasm, None, - Determinism::AllowIndeterminism, + Determinism::Relaxed, )); // Create the contract that will call `seal_set_code_hash` @@ -4453,7 +4702,7 @@ fn cannot_set_code_indeterministic_code() { None, code_hash.encode(), false, - Determinism::AllowIndeterminism, + Determinism::Relaxed, ) .result, >::Indeterministic, @@ -4473,7 
+4722,7 @@ fn delegate_call_indeterministic_code() { RuntimeOrigin::signed(ALICE), wasm, None, - Determinism::AllowIndeterminism, + Determinism::Relaxed, )); // Create the contract that will call `seal_delegate_call` @@ -4501,7 +4750,7 @@ fn delegate_call_indeterministic_code() { None, code_hash.encode(), false, - Determinism::Deterministic, + Determinism::Enforced, ) .result, >::Indeterministic, @@ -4517,7 +4766,7 @@ fn delegate_call_indeterministic_code() { None, code_hash.encode(), false, - Determinism::AllowIndeterminism, + Determinism::Relaxed, ) .result ); @@ -4556,7 +4805,7 @@ fn reentrance_count_works_with_call() { None, input, true, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -4595,7 +4844,7 @@ fn reentrance_count_works_with_delegated_call() { None, input, true, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -4647,7 +4896,7 @@ fn account_reentrance_count_works() { None, contract_addr.encode(), true, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); @@ -4660,7 +4909,7 @@ fn account_reentrance_count_works() { None, another_contract_addr.encode(), true, - Determinism::Deterministic, + Determinism::Enforced, ) .result .unwrap(); diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index f8c5a6ed96c57..5be28a301d8f4 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -43,7 +43,7 @@ use crate::{ exec::{ExecResult, Executable, ExportedFunction, Ext}, gas::GasMeter, AccountIdOf, BalanceOf, CodeHash, CodeVec, Config, Error, OwnerInfoOf, RelaxedCodeVec, - Schedule, + Schedule, LOG_TARGET, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::dispatch::{DispatchError, DispatchResult}; @@ -127,7 +127,7 @@ pub enum Determinism { /// allowed. /// /// Dispatchables always use this mode in order to make on-chain execution deterministic. - Deterministic, + Enforced, /// Allow calling or uploading an indeterministic code. /// /// This is only possible when calling into `pallet-contracts` directly via @@ -136,7 +136,7 @@ pub enum Determinism { /// # Note /// /// **Never** use this mode for on-chain execution. - AllowIndeterminism, + Relaxed, } impl ExportedFunction { @@ -186,7 +186,7 @@ impl PrefabWasmModule { code_cache::try_remove::(origin, code_hash) } - /// Returns whether there is a deposit to be payed for this module. + /// Returns whether there is a deposit to be paid for this module. /// /// Returns `0` if the module is already in storage and hence no deposit will /// be charged when storing it. 
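The Determinism variants are renamed throughout this diff: Enforced replaces Deterministic (the mode every dispatchable uses so on-chain execution stays deterministic) and Relaxed replaces AllowIndeterminism (only reachable off-chain, e.g. via a bare call from RPC). A sketch of the renamed enum and the kind of check it feeds, assuming the same two-variant shape as the pallet's type:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Determinism {
    /// Only deterministic instructions are allowed; used by all dispatchables.
    Enforced,
    /// Indeterministic code is permitted; never for on-chain execution.
    Relaxed,
}

fn is_deterministic(mode: Determinism) -> bool {
    matches!(mode, Determinism::Enforced)
}

fn main() {
    assert!(is_deterministic(Determinism::Enforced));
    assert!(!is_deterministic(Determinism::Relaxed));
}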
@@ -225,7 +225,7 @@ impl PrefabWasmModule { let engine = Engine::new(&config); let module = Module::new(&engine, code)?; let mut store = Store::new(&engine, host_state); - let mut linker = Linker::new(); + let mut linker = Linker::new(&engine); E::define( &mut store, &mut linker, @@ -326,16 +326,16 @@ impl Executable for PrefabWasmModule { }, ) .map_err(|msg| { - log::debug!(target: "runtime::contracts", "failed to instantiate code: {}", msg); + log::debug!(target: LOG_TARGET, "failed to instantiate code: {}", msg); Error::::CodeRejected })?; - store.state_mut().set_memory(memory); + store.data_mut().set_memory(memory); let exported_func = instance .get_export(&store, function.identifier()) .and_then(|export| export.into_func()) .ok_or_else(|| { - log::error!(target: "runtime::contracts", "failed to find entry point"); + log::error!(target: LOG_TARGET, "failed to find entry point"); Error::::CodeRejected })?; @@ -346,7 +346,7 @@ impl Executable for PrefabWasmModule { let result = exported_func.call(&mut store, &[], &mut []); - store.into_state().to_execution_result(result) + store.into_data().to_execution_result(result) } fn code_hash(&self) -> &CodeHash { @@ -358,7 +358,7 @@ impl Executable for PrefabWasmModule { } fn is_deterministic(&self) -> bool { - matches!(self.determinism, Determinism::Deterministic) + matches!(self.determinism, Determinism::Enforced) } } @@ -366,10 +366,7 @@ impl Executable for PrefabWasmModule { mod tests { use super::*; use crate::{ - exec::{ - AccountIdOf, BlockNumberOf, ErrorOrigin, ExecError, Executable, Ext, FixSizedKey, - SeedOf, VarSizedKey, - }, + exec::{AccountIdOf, BlockNumberOf, ErrorOrigin, ExecError, Executable, Ext, Key, SeedOf}, gas::GasMeter, storage::WriteOutcome, tests::{RuntimeCall, Test, ALICE, BOB}, @@ -377,9 +374,7 @@ mod tests { }; use assert_matches::assert_matches; use frame_support::{ - assert_err, assert_ok, - dispatch::DispatchResultWithPostInfo, - weights::{OldWeight, Weight}, + assert_err, assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight, }; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; @@ -439,6 +434,7 @@ mod tests { gas_meter: GasMeter, debug_buffer: Vec, ecdsa_recover: RefCell>, + sr25519_verify: RefCell, [u8; 32])>>, code_hashes: Vec>, } @@ -463,6 +459,7 @@ mod tests { gas_meter: GasMeter::new(Weight::from_parts(10_000_000_000, 10 * 1024 * 1024)), debug_buffer: Default::default(), ecdsa_recover: Default::default(), + sr25519_verify: Default::default(), } } } @@ -473,6 +470,7 @@ mod tests { fn call( &mut self, _gas_limit: Weight, + _deposit_limit: BalanceOf, to: AccountIdOf, value: u64, data: Vec, @@ -492,6 +490,7 @@ mod tests { fn instantiate( &mut self, gas_limit: Weight, + _deposit_limit: BalanceOf, code_hash: CodeHash, value: u64, data: Vec, @@ -521,40 +520,15 @@ mod tests { self.terminations.push(TerminationEntry { beneficiary: beneficiary.clone() }); Ok(()) } - fn get_storage(&mut self, key: &FixSizedKey) -> Option> { + fn get_storage(&mut self, key: &Key) -> Option> { self.storage.get(&key.to_vec()).cloned() } - fn get_storage_transparent(&mut self, key: &VarSizedKey) -> Option> { - self.storage.get(&key.to_vec()).cloned() - } - fn get_storage_size(&mut self, key: &FixSizedKey) -> Option { - self.storage.get(&key.to_vec()).map(|val| val.len() as u32) - } - fn get_storage_size_transparent(&mut self, key: &VarSizedKey) -> Option { + fn get_storage_size(&mut self, key: &Key) -> Option { self.storage.get(&key.to_vec()).map(|val| val.len() as u32) } fn 
set_storage( &mut self, - key: &FixSizedKey, - value: Option>, - take_old: bool, - ) -> Result { - let key = key.to_vec(); - let entry = self.storage.entry(key.clone()); - let result = match (entry, take_old) { - (Entry::Vacant(_), _) => WriteOutcome::New, - (Entry::Occupied(entry), false) => - WriteOutcome::Overwritten(entry.remove().len() as u32), - (Entry::Occupied(entry), true) => WriteOutcome::Taken(entry.remove()), - }; - if let Some(value) = value { - self.storage.insert(key, value); - } - Ok(result) - } - fn set_storage_transparent( - &mut self, - key: &VarSizedKey, + key: &Key, value: Option>, take_old: bool, ) -> Result { @@ -615,7 +589,11 @@ mod tests { 16_384 } fn get_weight_price(&self, weight: Weight) -> BalanceOf { - BalanceOf::::from(1312_u32).saturating_mul(weight.ref_time().into()) + BalanceOf::::from(1312_u32) + .saturating_mul(weight.ref_time().into()) + .saturating_add( + BalanceOf::::from(103_u32).saturating_mul(weight.proof_size()), + ) } fn schedule(&self) -> &Schedule { &self.schedule @@ -642,6 +620,10 @@ mod tests { self.ecdsa_recover.borrow_mut().push((*signature, *message_hash)); Ok([3; 33]) } + fn sr25519_verify(&self, signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> bool { + self.sr25519_verify.borrow_mut().push((*signature, message.to_vec(), *pub_key)); + true + } fn contract_info(&mut self) -> &mut crate::ContractInfo { unimplemented!() } @@ -681,7 +663,7 @@ mod tests { wasm, &schedule, ALICE, - Determinism::Deterministic, + Determinism::Enforced, TryInstantiate::Instantiate, ) .map_err(|err| err.0)? @@ -689,7 +671,7 @@ mod tests { executable.execute(ext.borrow_mut(), entry_point, input_data) } - /// Execute the suppplied code. + /// Execute the supplied code. fn execute>(wat: &str, input_data: Vec, ext: E) -> ExecResult { execute_internal(wat, input_data, ext, &ExportedFunction::Call, true, false) } @@ -1071,14 +1053,14 @@ mod tests { "#; let mut ext = MockExt::default(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([1u8; 64].to_vec()).unwrap(), Some(vec![42u8]), false, ) .unwrap(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([2u8; 19].to_vec()).unwrap(), Some(vec![]), false, ) @@ -1349,6 +1331,49 @@ mod tests { ); } + #[test] + fn contract_sr25519() { + const CODE_SR25519: &str = r#" +(module + (import "seal0" "sr25519_verify" (func $sr25519_verify (param i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $sr25519_verify + (i32.const 0) ;; Pointer to signature. + (i32.const 64) ;; Pointer to public key. + (i32.const 16) ;; message length. + (i32.const 96) ;; Pointer to message. + ) + ) + ) + (func (export "deploy")) + + ;; Signature (64 bytes) + (data (i32.const 0) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; public key (32 bytes) + (data (i32.const 64) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; message. 
(16 bytes) + (data (i32.const 96) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) +) +"#; + let mut mock_ext = MockExt::default(); + assert_ok!(execute(&CODE_SR25519, vec![], &mut mock_ext)); + assert_eq!(mock_ext.sr25519_verify.into_inner(), [([1; 64], [1; 16].to_vec(), [1; 32])]); + } + const CODE_GET_STORAGE: &str = r#" (module (import "seal0" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32) (result i32))) @@ -1570,7 +1595,7 @@ mod tests { const CODE_GAS_PRICE: &str = r#" (module - (import "seal0" "seal_weight_to_fee" (func $seal_weight_to_fee (param i64 i32 i32))) + (import "seal1" "weight_to_fee" (func $seal_weight_to_fee (param i64 i64 i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1587,7 +1612,7 @@ mod tests { (func (export "call") ;; This stores the gas price in the buffer - (call $seal_weight_to_fee (i64.const 2) (i32.const 0) (i32.const 32)) + (call $seal_weight_to_fee (i64.const 2) (i64.const 1) (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1597,11 +1622,11 @@ mod tests { ) ) - ;; assert that contents of the buffer is equal to the i64 value of 2 * 1312. + ;; assert that contents of the buffer is equal to the i64 value of 2 * 1312 + 103 = 2727. (call $assert (i64.eq (i64.load (i32.const 0)) - (i64.const 2624) + (i64.const 2727) ) ) ) @@ -1616,12 +1641,12 @@ mod tests { const CODE_GAS_LEFT: &str = r#" (module - (import "seal0" "seal_gas_left" (func $seal_gas_left (param i32 i32))) + (import "seal1" "gas_left" (func $seal_gas_left (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; size of our buffer is 32 bytes - (data (i32.const 32) "\20") + ;; Make output buffer size 20 bytes + (data (i32.const 20) "\14") (func $assert (param i32) (block $ok @@ -1633,19 +1658,19 @@ mod tests { ) (func (export "call") - ;; This stores the gas left in the buffer - (call $seal_gas_left (i32.const 0) (i32.const 32)) + ;; This stores the weight left to the buffer + (call $seal_gas_left (i32.const 0) (i32.const 20)) - ;; assert len == 8 + ;; Assert len <= 16 (max encoded Weight len) (call $assert - (i32.eq - (i32.load (i32.const 32)) - (i32.const 8) + (i32.le_u + (i32.load (i32.const 20)) + (i32.const 16) ) ) - ;; return gas left - (call $seal_return (i32.const 0) (i32.const 0) (i32.const 8)) + ;; Return weight left and its encoded value len + (call $seal_return (i32.const 0) (i32.const 0) (i32.load (i32.const 20))) (unreachable) ) @@ -1660,11 +1685,22 @@ mod tests { let output = execute(CODE_GAS_LEFT, vec![], &mut ext).unwrap(); - let OldWeight(gas_left) = OldWeight::decode(&mut &*output.data).unwrap(); + let weight_left = Weight::decode(&mut &*output.data).unwrap(); let actual_left = ext.gas_meter.gas_left(); - // TODO: account for proof size weight - assert!(gas_left < gas_limit.ref_time(), "gas_left must be less than initial"); - assert!(gas_left > actual_left.ref_time(), "gas_left must be greater than final"); + + assert!(weight_left.all_lt(gas_limit), "gas_left must be less than initial"); + assert!(weight_left.all_gt(actual_left), "gas_left must be greater than final"); + } + + /// Test that [`frame_support::weights::OldWeight`] en/decodes the same as our + /// [`crate::OldWeight`]. 
+ #[test] + fn old_weight_decode() { + #![allow(deprecated)] + let sp = frame_support::weights::OldWeight(42).encode(); + let our = crate::OldWeight::decode(&mut &*sp).unwrap(); + + assert_eq!(our, 42); } const CODE_VALUE_TRANSFERRED: &str = r#" @@ -2547,15 +2583,15 @@ mod tests { let mut ext = MockExt::default(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([1u8; 64].to_vec()).unwrap(), Some(vec![42u8]), false, ) .unwrap(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([2u8; 19].to_vec()).unwrap(), Some(vec![]), false, ) @@ -2631,14 +2667,14 @@ mod tests { let mut ext = MockExt::default(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([1u8; 64].to_vec()).unwrap(), Some(vec![42u8]), false, ) .unwrap(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([2u8; 19].to_vec()).unwrap(), Some(vec![]), false, ) @@ -2728,15 +2764,15 @@ mod tests { let mut ext = MockExt::default(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([1u8; 64].to_vec()).unwrap(), Some(vec![42u8]), false, ) .unwrap(); - ext.set_storage_transparent( - &VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(), + ext.set_storage( + &Key::::try_from_var([2u8; 19].to_vec()).unwrap(), Some(vec![]), false, ) @@ -3086,12 +3122,8 @@ mod tests { let schedule = crate::Schedule::::default(); #[cfg(not(feature = "runtime-benchmarks"))] assert_err!(execute(CODE_RANDOM, vec![], MockExt::default()), >::CodeRejected); - self::prepare::reinstrument::( - &wasm, - &schedule, - Determinism::Deterministic, - ) - .unwrap(); + self::prepare::reinstrument::(&wasm, &schedule, Determinism::Enforced) + .unwrap(); } /// This test check that an unstable interface cannot be deployed. In case of runtime diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 3c69fc2a7940f..14fec834733eb 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -25,7 +25,7 @@ use crate::{ wasm::{ runtime::AllowDeprecatedInterface, Determinism, Environment, OwnerInfo, PrefabWasmModule, }, - AccountIdOf, CodeVec, Config, Error, Schedule, + AccountIdOf, CodeVec, Config, Error, Schedule, LOG_TARGET, }; use codec::{Encode, MaxEncodedLen}; use sp_runtime::{traits::Hash, DispatchError}; @@ -50,7 +50,7 @@ pub enum TryInstantiate { Instantiate, /// Skip the instantiation during preparation. /// - /// This makes sense when the preparation takes place as part of an instantation. Then + /// This makes sense when the preparation takes place as part of an instantiation. Then /// this instantiation would fail the whole transaction and an extra check is not /// necessary. Skip, @@ -155,52 +155,6 @@ impl<'a, T: Config> ContractModule<'a, T> { Ok(()) } - /// Ensures that no floating point types are in use. 
- fn ensure_no_floating_types(&self) -> Result<(), &'static str> { - if let Some(global_section) = self.module.global_section() { - for global in global_section.entries() { - match global.global_type().content_type() { - ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in globals is forbidden"), - _ => {}, - } - } - } - - if let Some(code_section) = self.module.code_section() { - for func_body in code_section.bodies() { - for local in func_body.locals() { - match local.value_type() { - ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in locals is forbidden"), - _ => {}, - } - } - } - } - - if let Some(type_section) = self.module.type_section() { - for wasm_type in type_section.types() { - match wasm_type { - Type::Function(func_type) => { - let return_type = func_type.results().get(0); - for value_type in func_type.params().iter().chain(return_type) { - match value_type { - ValueType::F32 | ValueType::F64 => - return Err( - "use of floating point type in function types is forbidden", - ), - _ => {}, - } - } - }, - } - } - } - - Ok(()) - } - /// Ensure that no function exists that has more parameters than allowed. fn ensure_parameter_limit(&self, limit: u32) -> Result<(), &'static str> { let type_section = if let Some(type_section) = self.module.type_section() { @@ -219,7 +173,7 @@ impl<'a, T: Config> ContractModule<'a, T> { } fn inject_gas_metering(self, determinism: Determinism) -> Result { - let gas_rules = self.schedule.rules(&self.module, determinism); + let gas_rules = self.schedule.rules(determinism); let backend = gas_metering::host_function::Injector::new("seal0", "gas"); let contract_module = gas_metering::inject(self.module, backend, &gas_rules) .map_err(|_| "gas instrumentation failed")?; @@ -376,7 +330,7 @@ fn get_memory_limits( }, } } else { - // If none memory imported then just crate an empty placeholder. + // If none memory imported then just create an empty placeholder. // Any access to it will lead to out of bounds trap. Ok((0, 0)) } @@ -411,10 +365,9 @@ where memory64: false, extended_const: false, component_model: false, - // This is not our only defense: We check for float types later in the preparation - // process. Additionally, all instructions explictily need to have weights assigned + // This is not our only defense: All instructions explicitly need to have weights assigned // or the deployment will fail. We have none assigned for float instructions. - deterministic_only: matches!(determinism, Determinism::Deterministic), + floats: matches!(determinism, Determinism::Relaxed), mutable_global: false, saturating_float_to_int: false, sign_extension: false, @@ -422,10 +375,11 @@ where multi_value: false, reference_types: false, simd: false, + memory_control: false, }) .validate_all(original_code) .map_err(|err| { - log::debug!(target: "runtime::contracts", "{}", err); + log::debug!(target: LOG_TARGET, "{}", err); (Error::::CodeRejected.into(), "validation of new code failed") })?; @@ -439,10 +393,6 @@ where contract_module.ensure_parameter_limit(schedule.limits.parameters)?; contract_module.ensure_br_table_size_limit(schedule.limits.br_table_size)?; - if matches!(determinism, Determinism::Deterministic) { - contract_module.ensure_no_floating_types()?; - } - // We disallow importing `gas` function here since it is treated as implementation detail. 
let disallowed_imports = [b"gas".as_ref()]; let memory_limits = @@ -453,7 +403,7 @@ where Ok((code, memory_limits)) })() .map_err(|msg: &str| { - log::debug!(target: "runtime::contracts", "new code rejected: {}", msg); + log::debug!(target: LOG_TARGET, "new code rejected: {}", msg); (Error::::CodeRejected.into(), msg) })?; @@ -476,7 +426,7 @@ where }, ) .map_err(|err| { - log::debug!(target: "runtime::contracts", "{}", err); + log::debug!(target: LOG_TARGET, "{}", err); (Error::::CodeRejected.into(), "new code rejected after instrumentation") })?; } @@ -568,7 +518,7 @@ where InstrumentReason::Reinstrument, ) .map_err(|(err, msg)| { - log::error!(target: "runtime::contracts", "CodeRejected during reinstrument: {}", msg); + log::error!(target: LOG_TARGET, "CodeRejected during reinstrument: {}", msg); err }) .map(|(code, _)| code) @@ -608,7 +558,7 @@ pub mod benchmarking { deposit: Default::default(), refcount: 0, }), - determinism: Determinism::Deterministic, + determinism: Determinism::Enforced, }) } } @@ -682,7 +632,7 @@ mod tests { wasm, &schedule, ALICE, - Determinism::Deterministic, + Determinism::Enforced, TryInstantiate::Instantiate, ); assert_matches::assert_matches!(r.map_err(|(_, msg)| msg), $($expected)*); @@ -704,7 +654,7 @@ mod tests { ) (func (export "deploy")) )"#, - Err("gas instrumentation failed") + Err("validation of new code failed") ); mod functions { @@ -1214,7 +1164,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("use of floating point type in globals is forbidden") + Err("validation of new code failed") ); prepare_test!( @@ -1226,7 +1176,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("use of floating point type in locals is forbidden") + Err("validation of new code failed") ); prepare_test!( @@ -1238,7 +1188,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("use of floating point type in function types is forbidden") + Err("validation of new code failed") ); prepare_test!( @@ -1250,7 +1200,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("use of floating point type in function types is forbidden") + Err("validation of new code failed") ); } } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index e80e2cde302fe..92ae2e3e6ea57 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -18,7 +18,7 @@ //! Environment definition of the wasm smart-contract runtime. use crate::{ - exec::{ExecError, ExecResult, Ext, FixSizedKey, TopicOf, VarSizedKey}, + exec::{ExecError, ExecResult, Ext, Key, TopicOf}, gas::{ChargedAmount, Token}, schedule::HostFnWeights, BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL, @@ -73,19 +73,7 @@ enum KeyType { Fix, /// Variable sized key used in transparent hashing, /// cannot be larger than MaxStorageKeyLen. - Variable(u32), -} - -impl KeyType { - fn len(&self) -> Result { - match self { - KeyType::Fix => Ok(32u32), - KeyType::Variable(len) => { - ensure!(len <= &::MaxStorageKeyLen::get(), Error::::DecodingFailed); - Ok(*len) - }, - } - } + Var(u32), } /// Every error that can be returned to a contract when it calls any of the host functions. @@ -96,6 +84,7 @@ impl KeyType { /// will not be changed or removed. This means that any contract **must not** exhaustively /// match return codes. Instead, contracts should prepare for unknown variants and deal with /// those errors gracefully in order to be forward compatible. +#[derive(Debug)] #[repr(u32)] pub enum ReturnCode { /// API call successful. 
@@ -120,6 +109,8 @@ pub enum ReturnCode { /// ECDSA compressed pubkey conversion into Ethereum address failed (most probably /// wrong pubkey provided). EcdsaRecoverFailed = 11, + /// sr25519 signature verification failed. + Sr25519VerifyFailed = 12, } impl From for ReturnCode { @@ -262,6 +253,8 @@ pub enum RuntimeCosts { HashBlake128(u32), /// Weight of calling `seal_ecdsa_recover`. EcdsaRecovery, + /// Weight of calling `seal_sr25519_verify` for the given input size. + Sr25519Verify(u32), /// Weight charged by a chain extension through `seal_call_chain_extension`. ChainExtension(Weight), /// Weight charged for calling into the runtime. @@ -347,6 +340,9 @@ impl RuntimeCosts { .hash_blake2_128 .saturating_add(s.hash_blake2_128_per_byte.saturating_mul(len.into())), EcdsaRecovery => s.ecdsa_recover, + Sr25519Verify(len) => s + .sr25519_verify + .saturating_add(s.sr25519_verify_per_byte.saturating_mul(len.into())), ChainExtension(weight) => weight, CallRuntime(weight) => weight, SetCodeHash => s.set_code_hash, @@ -437,7 +433,7 @@ bitflags! { /// The kind of call that should be performed. enum CallType { /// Execute another instantiated contract - Call { callee_ptr: u32, value_ptr: u32, gas: u64 }, + Call { callee_ptr: u32, value_ptr: u32, deposit_ptr: u32, weight: Weight }, /// Execute deployed code in the context (storage, account ID, value) of the caller contract DelegateCall { code_hash_ptr: u32 }, } @@ -494,11 +490,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { Err(wasmi::Error::Trap(trap)) => { // If we encoded a reason then it is some abort generated by a host function. // Otherwise the trap came from the contract. - let reason: TrapReason = *trap - .into_host() - .ok_or(Error::::ContractTrapped)? - .downcast() - .expect("`TrapReason` is the only type we use to encode host errors; qed"); + let reason: TrapReason = trap.downcast().ok_or(Error::::ContractTrapped)?; match reason { Return(ReturnData { flags, data }) => { let flags = @@ -752,6 +744,29 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { (err, _) => Self::err_into_return_code(err), } } + fn decode_key( + &self, + memory: &[u8], + key_type: KeyType, + key_ptr: u32, + ) -> Result, TrapReason> { + let res = match key_type { + KeyType::Fix => { + let key = self.read_sandbox_memory(memory, key_ptr, 32u32)?; + Key::try_from_fix(key) + }, + KeyType::Var(len) => { + ensure!( + len <= <::T as Config>::MaxStorageKeyLen::get(), + Error::::DecodingFailed + ); + let key = self.read_sandbox_memory(memory, key_ptr, len)?; + Key::try_from_var(key) + }, + }; + + res.map_err(|_| Error::::DecodingFailed.into()) + } fn set_storage( &mut self, @@ -767,20 +782,9 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { if value_len > max_size { return Err(Error::::ValueTooLarge.into()) } - let key = self.read_sandbox_memory(memory, key_ptr, key_type.len::()?)?; + let key = self.decode_key(memory, key_type, key_ptr)?; let value = Some(self.read_sandbox_memory(memory, value_ptr, value_len)?); - let write_outcome = match key_type { - KeyType::Fix => self.ext.set_storage( - &FixSizedKey::try_from(key).map_err(|_| Error::::DecodingFailed)?, - value, - false, - )?, - KeyType::Variable(_) => self.ext.set_storage_transparent( - &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, - value, - false, - )?, - }; + let write_outcome = self.ext.set_storage(&key, value, false)?; self.adjust_gas( charged, @@ -796,19 +800,8 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { key_ptr: u32, ) -> Result { let charged = self.charge_gas(RuntimeCosts::ClearStorage(self.ext.max_value_size()))?; 
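// Illustrative sketch (not part of the patch): the new `decode_key` helper above replaces
// the per-call-site `KeyType::len()` plus `try_from` dance with a single place that reads
// the key bytes out of sandbox memory and builds the unified `Key`. The self-contained
// model below keeps only that control flow; the real code reads from wasm linear memory,
// is generic over `T`, and returns `TrapReason`/`DecodingFailed`, so every type here is
// a stand-in.

const MAX_STORAGE_KEY_LEN: u32 = 128; // stand-in for `T::MaxStorageKeyLen`

enum KeyType {
    Fix,
    Var(u32),
}

enum Key {
    Fix([u8; 32]),
    Var(Vec<u8>),
}

fn decode_key(memory: &[u8], key_type: KeyType, key_ptr: u32) -> Result<Key, &'static str> {
    let read = |ptr: u32, len: u32| -> Result<Vec<u8>, &'static str> {
        memory
            .get(ptr as usize..ptr as usize + len as usize)
            .map(|s| s.to_vec())
            .ok_or("out of bounds read")
    };
    match key_type {
        // Fixed keys are always exactly 32 bytes long.
        KeyType::Fix => {
            let bytes: [u8; 32] =
                read(key_ptr, 32)?.try_into().map_err(|_| "decoding failed")?;
            Ok(Key::Fix(bytes))
        },
        // Variable keys carry their length, which must respect the configured bound.
        KeyType::Var(len) => {
            if len > MAX_STORAGE_KEY_LEN {
                return Err("decoding failed")
            }
            Ok(Key::Var(read(key_ptr, len)?))
        },
    }
}

fn main() {
    let memory = vec![0xAAu8; 64];
    assert!(matches!(decode_key(&memory, KeyType::Fix, 0), Ok(Key::Fix(_))));
    assert!(matches!(decode_key(&memory, KeyType::Var(40), 8), Ok(Key::Var(_))));
    assert!(decode_key(&memory, KeyType::Var(4096), 0).is_err());
}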
- let key = self.read_sandbox_memory(memory, key_ptr, key_type.len::()?)?; - let outcome = match key_type { - KeyType::Fix => self.ext.set_storage( - &FixSizedKey::try_from(key).map_err(|_| Error::::DecodingFailed)?, - None, - false, - )?, - KeyType::Variable(_) => self.ext.set_storage_transparent( - &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, - None, - false, - )?, - }; + let key = self.decode_key(memory, key_type, key_ptr)?; + let outcome = self.ext.set_storage(&key, None, false)?; self.adjust_gas(charged, RuntimeCosts::ClearStorage(outcome.old_len())); Ok(outcome.old_len_with_sentinel()) @@ -823,15 +816,8 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { out_len_ptr: u32, ) -> Result { let charged = self.charge_gas(RuntimeCosts::GetStorage(self.ext.max_value_size()))?; - let key = self.read_sandbox_memory(memory, key_ptr, key_type.len::()?)?; - let outcome = match key_type { - KeyType::Fix => self.ext.get_storage( - &FixSizedKey::try_from(key).map_err(|_| Error::::DecodingFailed)?, - ), - KeyType::Variable(_) => self.ext.get_storage_transparent( - &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, - ), - }; + let key = self.decode_key(memory, key_type, key_ptr)?; + let outcome = self.ext.get_storage(&key); if let Some(value) = outcome { self.adjust_gas(charged, RuntimeCosts::GetStorage(value.len() as u32)); @@ -857,15 +843,8 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { key_ptr: u32, ) -> Result { let charged = self.charge_gas(RuntimeCosts::ContainsStorage(self.ext.max_value_size()))?; - let key = self.read_sandbox_memory(memory, key_ptr, key_type.len::()?)?; - let outcome = match key_type { - KeyType::Fix => self.ext.get_storage_size( - &FixSizedKey::try_from(key).map_err(|_| Error::::DecodingFailed)?, - ), - KeyType::Variable(_) => self.ext.get_storage_size_transparent( - &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, - ), - }; + let key = self.decode_key(memory, key_type, key_ptr)?; + let outcome = self.ext.get_storage_size(&key); self.adjust_gas(charged, RuntimeCosts::ClearStorage(outcome.unwrap_or(0))); Ok(outcome.unwrap_or(SENTINEL)) @@ -894,16 +873,22 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { }; let call_outcome = match call_type { - CallType::Call { callee_ptr, value_ptr, gas } => { + CallType::Call { callee_ptr, value_ptr, deposit_ptr, weight } => { let callee: <::T as frame_system::Config>::AccountId = self.read_sandbox_memory_as(memory, callee_ptr)?; + let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { + BalanceOf::<::T>::zero() + } else { + self.read_sandbox_memory_as(memory, deposit_ptr)? + }; let value: BalanceOf<::T> = self.read_sandbox_memory_as(memory, value_ptr)?; if value > 0u32.into() { self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; } self.ext.call( - Weight::from_parts(gas, 0), + weight, + deposit_limit, callee, value, input_data, @@ -947,7 +932,8 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { &mut self, memory: &mut [u8], code_hash_ptr: u32, - gas: u64, + weight: Weight, + deposit_ptr: u32, value_ptr: u32, input_data_ptr: u32, input_data_len: u32, @@ -958,8 +944,12 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { salt_ptr: u32, salt_len: u32, ) -> Result { - let gas = Weight::from_parts(gas, 0); self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; + let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { + BalanceOf::<::T>::zero() + } else { + self.read_sandbox_memory_as(memory, deposit_ptr)? 
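// Illustrative sketch (not part of the patch): both the `call` and `instantiate` paths
// above resolve the new storage-deposit limit the same way: a `deposit_ptr` equal to
// `SENTINEL` means "no specific limit, bounded by the parent call" (per the new docs),
// anything else is read from sandbox memory and decoded as a balance. Below is a minimal
// model of that resolution; `SENTINEL` is modelled as `u32::MAX` and `BalanceOf<T>` as a
// little-endian `u128`, both simplifications of the SCALE-decoded pallet types.

const SENTINEL: u32 = u32::MAX;

fn resolve_deposit_limit(memory: &[u8], deposit_ptr: u32) -> Result<u128, &'static str> {
    if deposit_ptr == SENTINEL {
        // Zero is what the hunks above pass on, meaning "no dedicated limit for this frame".
        return Ok(0)
    }
    let bytes = memory
        .get(deposit_ptr as usize..deposit_ptr as usize + 16)
        .ok_or("out of bounds read")?;
    Ok(u128::from_le_bytes(bytes.try_into().map_err(|_| "decoding failed")?))
}

fn main() {
    let mut memory = vec![0u8; 32];
    memory[..16].copy_from_slice(&500u128.to_le_bytes());
    assert_eq!(resolve_deposit_limit(&memory, SENTINEL), Ok(0));
    assert_eq!(resolve_deposit_limit(&memory, 0), Ok(500));
}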
+ }; let value: BalanceOf<::T> = self.read_sandbox_memory_as(memory, value_ptr)?; if value > 0u32.into() { self.charge_gas(RuntimeCosts::InstantiateSurchargeTransfer)?; @@ -968,7 +958,8 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { self.read_sandbox_memory_as(memory, code_hash_ptr)?; let input_data = self.read_sandbox_memory(memory, input_data_ptr, input_data_len)?; let salt = self.read_sandbox_memory(memory, salt_ptr, salt_len)?; - let instantiate_outcome = self.ext.instantiate(gas, code_hash, value, input_data, &salt); + let instantiate_outcome = + self.ext.instantiate(weight, deposit_limit, code_hash, value, input_data, &salt); if let Ok((address, output)) = &instantiate_outcome { if !output.flags.contains(ReturnFlags::REVERT) { self.write_sandbox_output( @@ -1014,6 +1005,7 @@ pub mod env { /// /// NOTE: This is a implementation defined call and is NOT a part of the public API. /// This call is supposed to be called only by instrumentation injected code. + /// It deals only with the *ref_time* Weight. /// /// - `amount`: How much gas is used. fn gas(ctx: _, _memory: _, amount: u64) -> Result<(), TrapReason> { @@ -1023,8 +1015,9 @@ pub mod env { /// Set the value at the given key in the contract storage. /// - /// Equivalent to the newer version [`super::seal1::Api::set_storage`] with the exception of the - /// return type. Still a valid thing to call when not interested in the return value. + /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::set_storage`] version with the + /// exception of the return type. Still a valid thing to call when not interested in the return + /// value. #[prefixed_alias] fn set_storage( ctx: _, @@ -1092,13 +1085,14 @@ pub mod env { value_ptr: u32, value_len: u32, ) -> Result { - ctx.set_storage(memory, KeyType::Variable(key_len), key_ptr, value_ptr, value_len) + ctx.set_storage(memory, KeyType::Var(key_len), key_ptr, value_ptr, value_len) } /// Clear the value at the given key in the contract storage. /// - /// Equivalent to the newer version [`super::seal1::Api::clear_storage`] with the exception of - /// the return type. Still a valid thing to call when not interested in the return value. + /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::clear_storage`] version with + /// the exception of the return type. Still a valid thing to call when not interested in the + /// return value. #[prefixed_alias] fn clear_storage(ctx: _, memory: _, key_ptr: u32) -> Result<(), TrapReason> { ctx.clear_storage(memory, KeyType::Fix, key_ptr).map(|_| ()) @@ -1118,7 +1112,7 @@ pub mod env { #[version(1)] #[prefixed_alias] fn clear_storage(ctx: _, memory: _, key_ptr: u32, key_len: u32) -> Result { - ctx.clear_storage(memory, KeyType::Variable(key_len), key_ptr) + ctx.clear_storage(memory, KeyType::Var(key_len), key_ptr) } /// Retrieve the value under the given key from storage. @@ -1175,7 +1169,7 @@ pub mod env { out_ptr: u32, out_len_ptr: u32, ) -> Result { - ctx.get_storage(memory, KeyType::Variable(key_len), key_ptr, out_ptr, out_len_ptr) + ctx.get_storage(memory, KeyType::Var(key_len), key_ptr, out_ptr, out_len_ptr) } /// Checks whether there is a value stored under the given key. @@ -1212,7 +1206,7 @@ pub mod env { #[version(1)] #[prefixed_alias] fn contains_storage(ctx: _, memory: _, key_ptr: u32, key_len: u32) -> Result { - ctx.contains_storage(memory, KeyType::Variable(key_len), key_ptr) + ctx.contains_storage(memory, KeyType::Var(key_len), key_ptr) } /// Retrieve and remove the value under the given key from storage. 
@@ -1239,8 +1233,8 @@ pub mod env { ) -> Result { let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?; let key = ctx.read_sandbox_memory(memory, key_ptr, key_len)?; - if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, + if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage( + &Key::::try_from_var(key).map_err(|_| Error::::DecodingFailed)?, None, true, )? { @@ -1320,7 +1314,47 @@ pub mod env { ctx.call( memory, CallFlags::ALLOW_REENTRY, - CallType::Call { callee_ptr, value_ptr, gas }, + CallType::Call { + callee_ptr, + value_ptr, + deposit_ptr: SENTINEL, + weight: Weight::from_parts(gas, 0), + }, + input_data_ptr, + input_data_len, + output_ptr, + output_len_ptr, + ) + } + + /// Make a call to another contract. + /// + /// Equivalent to the newer [`seal2`][`super::api_doc::Version2::call`] version but works with + /// *ref_time* Weight only. It is recommended to switch to the latest version, once it's + /// stabilized. + #[version(1)] + #[prefixed_alias] + fn call( + ctx: _, + memory: _, + flags: u32, + callee_ptr: u32, + gas: u64, + value_ptr: u32, + input_data_ptr: u32, + input_data_len: u32, + output_ptr: u32, + output_len_ptr: u32, + ) -> Result { + ctx.call( + memory, + CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, + CallType::Call { + callee_ptr, + value_ptr, + deposit_ptr: SENTINEL, + weight: Weight::from_parts(gas, 0), + }, input_data_ptr, input_data_len, output_ptr, @@ -1336,10 +1370,15 @@ pub mod env { /// /// # Parameters /// - /// - `flags`: See `crate::wasm::runtime::CallFlags` for a documenation of the supported flags. + /// - `flags`: See `crate::wasm::runtime::CallFlags` for a documentation of the supported flags. /// - `callee_ptr`: a pointer to the address of the callee contract. Should be decodable as an /// `T::AccountId`. Traps otherwise. - /// - `gas`: how much gas to devote to the execution. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit_ptr`: a pointer to the buffer with value of the storage deposit limit for the + /// call. Should be decodable as a `T::Balance`. Traps otherwise. Passing `SENTINEL` means + /// setting no specific limit for the call, which implies storage usage up to the limit of the + /// parent call. /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be /// decodable as a `T::Balance`. Traps otherwise. /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the callee. 
@@ -1357,14 +1396,16 @@ pub mod env { /// - `ReturnCode::CalleeTrapped` /// - `ReturnCode::TransferFailed` /// - `ReturnCode::NotCallable` - #[version(1)] - #[prefixed_alias] + #[version(2)] + #[unstable] fn call( ctx: _, memory: _, flags: u32, callee_ptr: u32, - gas: u64, + ref_time_limit: u64, + proof_size_limit: u64, + deposit_ptr: u32, value_ptr: u32, input_data_ptr: u32, input_data_len: u32, @@ -1374,7 +1415,12 @@ pub mod env { ctx.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::Call { callee_ptr, value_ptr, gas }, + CallType::Call { + callee_ptr, + value_ptr, + deposit_ptr, + weight: Weight::from_parts(ref_time_limit, proof_size_limit), + }, input_data_ptr, input_data_len, output_ptr, @@ -1438,8 +1484,7 @@ pub mod env { /// # Note /// /// The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes - /// of those types are fixed through - /// [`codec::MaxEncodedLen`]. The fields exist + /// of those types are fixed through [`codec::MaxEncodedLen`]. The fields exist /// for backwards compatibility. Consider switching to the newest version of this function. #[prefixed_alias] fn instantiate( @@ -1462,7 +1507,47 @@ pub mod env { ctx.instantiate( memory, code_hash_ptr, - gas, + Weight::from_parts(gas, 0), + SENTINEL, + value_ptr, + input_data_ptr, + input_data_len, + address_ptr, + address_len_ptr, + output_ptr, + output_len_ptr, + salt_ptr, + salt_len, + ) + } + + /// Instantiate a contract with the specified code hash. + /// + /// Equivalent to the newer [`seal2`][`super::api_doc::Version2::instantiate`] version but works + /// with *ref_time* Weight only. It is recommended to switch to the latest version, once it's + /// stabilized. + #[version(1)] + #[prefixed_alias] + fn instantiate( + ctx: _, + memory: _, + code_hash_ptr: u32, + gas: u64, + value_ptr: u32, + input_data_ptr: u32, + input_data_len: u32, + address_ptr: u32, + address_len_ptr: u32, + output_ptr: u32, + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32, + ) -> Result { + ctx.instantiate( + memory, + code_hash_ptr, + Weight::from_parts(gas, 0), + SENTINEL, value_ptr, input_data_ptr, input_data_len, @@ -1489,15 +1574,21 @@ pub mod env { /// # Parameters /// /// - `code_hash_ptr`: a pointer to the buffer that contains the initializer code. - /// - `gas`: how much gas to devote to the execution of the initializer code. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit_ptr`: a pointer to the buffer with value of the storage deposit limit for + /// instantiation. Should be decodable as a `T::Balance`. Traps otherwise. Passing `SENTINEL` + /// means setting no specific limit for the call, which implies storage usage up to the limit + /// of the parent call. /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be /// decodable as a `T::Balance`. Traps otherwise. /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the initializer code. /// - `input_data_len`: length of the input data buffer. - /// - `address_ptr`: a pointer where the new account's address is copied to. - /// - `address_len_ptr`: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - `output_ptr`: a pointer where the output buffer is copied to. + /// - `address_ptr`: a pointer where the new account's address is copied to. `SENTINEL` means + /// not to copy. 
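// Illustrative sketch (not part of the patch): seen from the contract side, the new
// `#[version(2)]` host function is a wasm import that takes both Weight dimensions and a
// storage-deposit-limit pointer explicitly. The declaration below is how a bare no_std
// contract might bind it; the import module and name ("seal2"/"call") are assumed from
// the versioning scheme referenced in the docs above, and the function is still marked
// `#[unstable]`, so this is illustrative rather than a stability guarantee.

#[link(wasm_import_module = "seal2")]
extern "C" {
    /// Returns a `ReturnCode` as `u32` (0 = success).
    fn call(
        flags: u32,
        callee_ptr: u32,
        ref_time_limit: u64,
        proof_size_limit: u64,
        deposit_ptr: u32, // SENTINEL => no dedicated storage deposit limit
        value_ptr: u32,
        input_data_ptr: u32,
        input_data_len: u32,
        output_ptr: u32,
        output_len_ptr: u32,
    ) -> u32;
}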
+ /// - `address_len_ptr`: pointer to where put the length of the address. + /// - `output_ptr`: a pointer where the output buffer is copied to. `SENTINEL` means not to + /// copy. /// - `output_len_ptr`: in-out pointer to where the length of the buffer is read from and the /// actual length is written to. /// - `salt_ptr`: Pointer to raw bytes used for address derivation. See `fn contract_address`. @@ -1515,13 +1606,15 @@ pub mod env { /// - `ReturnCode::CalleeTrapped` /// - `ReturnCode::TransferFailed` /// - `ReturnCode::CodeNotFound` - #[version(1)] - #[prefixed_alias] + #[version(2)] + #[unstable] fn instantiate( ctx: _, memory: _, code_hash_ptr: u32, - gas: u64, + ref_time_limit: u64, + proof_size_limit: u64, + deposit_ptr: u32, value_ptr: u32, input_data_ptr: u32, input_data_len: u32, @@ -1535,7 +1628,8 @@ pub mod env { ctx.instantiate( memory, code_hash_ptr, - gas, + Weight::from_parts(ref_time_limit, proof_size_limit), + deposit_ptr, value_ptr, input_data_ptr, input_data_len, @@ -1783,49 +1877,99 @@ pub mod env { /// Stores the price for the specified amount of gas into the supplied buffer. /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// Equivalent to the newer [`seal1`][`super::api_doc::Version2::weight_to_fee`] version but + /// works with *ref_time* Weight only. It is recommended to switch to the latest version, once + /// it's stabilized. + #[prefixed_alias] + fn weight_to_fee( + ctx: _, + memory: _, + gas: u64, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { + let gas = Weight::from_parts(gas, 0); + ctx.charge_gas(RuntimeCosts::WeightToFee)?; + Ok(ctx.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + &ctx.ext.get_weight_price(gas).encode(), + false, + already_charged, + )?) + } + + /// Stores the price for the specified amount of weight into the supplied buffer. + /// + /// # Parameters + /// + /// - `out_ptr`: pointer to the linear memory where the returning value is written to. If the + /// available space at `out_ptr` is less than the size of the value a trap is triggered. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. /// /// The data is encoded as `T::Balance`. /// /// # Note /// - /// It is recommended to avoid specifying very small values for `gas` as the prices for a single - /// gas can be smaller than one. - #[prefixed_alias] + /// It is recommended to avoid specifying very small values for `ref_time_limit` and + /// `proof_size_limit` as the prices for a single gas can be smaller than the basic balance + /// unit. + #[version(1)] + #[unstable] fn weight_to_fee( ctx: _, memory: _, - gas: u64, + ref_time_limit: u64, + proof_size_limit: u64, out_ptr: u32, out_len_ptr: u32, ) -> Result<(), TrapReason> { - let gas = Weight::from_parts(gas, 0); + let weight = Weight::from_parts(ref_time_limit, proof_size_limit); ctx.charge_gas(RuntimeCosts::WeightToFee)?; Ok(ctx.write_sandbox_output( memory, out_ptr, out_len_ptr, - &ctx.ext.get_weight_price(gas).encode(), + &ctx.ext.get_weight_price(weight).encode(), false, already_charged, )?) } - /// Stores the amount of gas left into the supplied buffer. + /// Stores the weight left into the supplied buffer. 
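// Illustrative sketch (not part of the patch): the legacy `seal0::weight_to_fee` keeps
// accepting a single `gas: u64` and widens it to `Weight::from_parts(gas, 0)`, while the
// new `#[version(1)]` entry point takes both Weight dimensions explicitly (the `gas_left`
// split below follows the same pattern). A minimal model of that widening, with `Weight`
// stubbed as a plain struct instead of `sp_weights::Weight`:

#[derive(Debug, PartialEq, Clone, Copy)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
}

/// Legacy path: only ref_time is meaningful, proof_size is pinned to zero.
fn weight_arg_v0(gas: u64) -> Weight {
    Weight::from_parts(gas, 0)
}

/// New path: the contract passes both dimensions.
fn weight_arg_v1(ref_time_limit: u64, proof_size_limit: u64) -> Weight {
    Weight::from_parts(ref_time_limit, proof_size_limit)
}

fn main() {
    assert_eq!(weight_arg_v0(1_000), Weight::from_parts(1_000, 0));
    assert_eq!(weight_arg_v1(1_000, 64), Weight::from_parts(1_000, 64));
}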
+ /// + /// Equivalent to the newer [`seal1`][`super::api_doc::Version2::gas_left`] version but + /// works with *ref_time* Weight only. It is recommended to switch to the latest version, once + /// it's stabilized. + #[prefixed_alias] + fn gas_left(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + ctx.charge_gas(RuntimeCosts::GasLeft)?; + let gas_left = &ctx.ext.gas_meter().gas_left().ref_time().encode(); + Ok(ctx.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + gas_left, + false, + already_charged, + )?) + } + + /// Stores the amount of weight left into the supplied buffer. /// /// The value is stored to linear memory at the address pointed to by `out_ptr`. /// `out_len_ptr` must point to a u32 value that describes the available space at /// `out_ptr`. This call overwrites it with the size of the value. If the available /// space at `out_ptr` is less than the size of the value a trap is triggered. /// - /// The data is encoded as Gas. - #[prefixed_alias] + /// The data is encoded as Weight. + #[version(1)] + #[unstable] fn gas_left(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::GasLeft)?; - let gas_left = &ctx.ext.gas_meter().gas_left().ref_time().encode(); + let gas_left = &ctx.ext.gas_meter().gas_left().encode(); Ok(ctx.write_sandbox_output( memory, out_ptr, @@ -2038,7 +2182,7 @@ pub mod env { /// # Note /// /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity + /// backwards compatibility #[prefixed_alias] #[deprecated] fn restore_to( @@ -2062,7 +2206,7 @@ pub mod env { /// # Note /// /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity + /// backwards compatibility #[version(1)] #[prefixed_alias] #[deprecated] @@ -2084,7 +2228,7 @@ pub mod env { /// # Note /// /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity. + /// backwards compatibility. #[prefixed_alias] #[deprecated] fn set_rent_allowance( @@ -2102,7 +2246,7 @@ pub mod env { /// # Note /// /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity. + /// backwards compatibility. #[version(1)] #[prefixed_alias] #[deprecated] @@ -2116,7 +2260,7 @@ pub mod env { /// # Note /// /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity. + /// backwards compatibility. #[prefixed_alias] #[deprecated] fn rent_allowance(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { @@ -2409,7 +2553,7 @@ pub mod env { /// /// # Return Value /// - /// Returns `ReturnCode::Success` when the dispatchable was succesfully executed and + /// Returns `ReturnCode::Success` when the dispatchable was successfully executed and /// returned `Ok`. When the dispatchable was exeuted but returned an error /// `ReturnCode::CallRuntimeFailed` is returned. The full error is not /// provided because it is not guaranteed to be stable. @@ -2425,8 +2569,6 @@ pub mod env { /// - Provide functionality **exclusively** to contracts. /// - Provide custom weights. /// - Avoid the need to keep the `Call` data structure stable. - #[unstable] - #[prefixed_alias] fn call_runtime( ctx: _, memory: _, @@ -2494,6 +2636,46 @@ pub mod env { } } + /// Verify a sr25519 signature + /// + /// # Parameters + /// + /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. 
Should + /// be a value of 64 bytes. + /// - `pub_key_ptr`: the pointer into the linear memory where the public key is placed. Should + /// be a value of 32 bytes. + /// - `message_len`: the length of the message payload. + /// - `message_ptr`: the pointer into the linear memory where the message is placed. + /// + /// # Errors + /// + /// - `ReturnCode::Sr25519VerifyFailed + #[unstable] + fn sr25519_verify( + ctx: _, + memory: _, + signature_ptr: u32, + pub_key_ptr: u32, + message_len: u32, + message_ptr: u32, + ) -> Result { + ctx.charge_gas(RuntimeCosts::Sr25519Verify(message_len))?; + + let mut signature: [u8; 64] = [0; 64]; + ctx.read_sandbox_memory_into_buf(memory, signature_ptr, &mut signature)?; + + let mut pub_key: [u8; 32] = [0; 32]; + ctx.read_sandbox_memory_into_buf(memory, pub_key_ptr, &mut pub_key)?; + + let message: Vec = ctx.read_sandbox_memory(memory, message_ptr, message_len)?; + + if ctx.ext.sr25519_verify(&signature, &message, &pub_key) { + Ok(ReturnCode::Success) + } else { + Ok(ReturnCode::Sr25519VerifyFailed) + } + } + /// Replace the contract code at the specified address with new code. /// /// # Note @@ -2603,7 +2785,7 @@ pub mod env { /// Returns a nonce that is unique per contract instantiation. /// - /// The nonce is incremented for each succesful contract instantiation. This is a + /// The nonce is incremented for each successful contract instantiation. This is a /// sensible default salt for contract instantiations. #[unstable] fn instantiation_nonce(ctx: _, _memory: _) -> Result { diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 14e680fb6a0ed..cdaba7f59b72f 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-02-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_contracts // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_contracts -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/contracts/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -45,13 +44,12 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_contracts. 
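// Illustrative sketch (not part of the patch): the new `sr25519_verify` host function
// above charges per-byte weight for the message, copies a fixed 64-byte signature and a
// 32-byte public key out of sandbox memory, and maps the boolean result onto the new
// `Sr25519VerifyFailed` return code. Below is a self-contained model of that shape; the
// verifier is injected as a closure here, whereas the pallet delegates to
// `Ext::sr25519_verify` (backed by the runtime's crypto host functions).

const RETURN_SUCCESS: u32 = 0;
const RETURN_SR25519_VERIFY_FAILED: u32 = 12;

fn sr25519_verify_host(
    memory: &[u8],
    signature_ptr: u32,
    pub_key_ptr: u32,
    message_ptr: u32,
    message_len: u32,
    verify: impl Fn(&[u8; 64], &[u8], &[u8; 32]) -> bool,
) -> Result<u32, &'static str> {
    let read = |ptr: u32, len: u32| {
        memory
            .get(ptr as usize..ptr as usize + len as usize)
            .ok_or("out of bounds read")
    };
    // Lengths are fixed by the API, so the conversions below cannot fail.
    let signature: [u8; 64] = read(signature_ptr, 64)?.try_into().unwrap();
    let pub_key: [u8; 32] = read(pub_key_ptr, 32)?.try_into().unwrap();
    let message = read(message_ptr, message_len)?;
    Ok(if verify(&signature, message, &pub_key) {
        RETURN_SUCCESS
    } else {
        RETURN_SR25519_VERIFY_FAILED
    })
}

fn main() {
    let memory = vec![0u8; 256];
    // A dummy verifier that always fails, standing in for real sr25519 verification.
    let rc = sr25519_verify_host(&memory, 0, 64, 96, 11, |_, _, _| false).unwrap();
    assert_eq!(rc, RETURN_SR25519_VERIFY_FAILED);
}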
pub trait WeightInfo { fn on_process_deletion_queue_batch() -> Weight; fn on_initialize_per_trie_key(k: u32, ) -> Weight; - fn on_initialize_per_queue_item(q: u32, ) -> Weight; fn reinstrument(c: u32, ) -> Weight; fn call_with_code_per_byte(c: u32, ) -> Weight; fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight; @@ -75,40 +73,42 @@ pub trait WeightInfo { fn seal_weight_to_fee(r: u32, ) -> Weight; fn seal_gas(r: u32, ) -> Weight; fn seal_input(r: u32, ) -> Weight; - fn seal_input_per_kb(n: u32, ) -> Weight; + fn seal_input_per_byte(n: u32, ) -> Weight; fn seal_return(r: u32, ) -> Weight; - fn seal_return_per_kb(n: u32, ) -> Weight; + fn seal_return_per_byte(n: u32, ) -> Weight; fn seal_terminate(r: u32, ) -> Weight; fn seal_random(r: u32, ) -> Weight; fn seal_deposit_event(r: u32, ) -> Weight; - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight; + fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight; fn seal_debug_message(r: u32, ) -> Weight; - fn seal_debug_message_per_kb(i: u32, ) -> Weight; + fn seal_debug_message_per_byte(i: u32, ) -> Weight; fn seal_set_storage(r: u32, ) -> Weight; - fn seal_set_storage_per_new_kb(n: u32, ) -> Weight; - fn seal_set_storage_per_old_kb(n: u32, ) -> Weight; + fn seal_set_storage_per_new_byte(n: u32, ) -> Weight; + fn seal_set_storage_per_old_byte(n: u32, ) -> Weight; fn seal_clear_storage(r: u32, ) -> Weight; - fn seal_clear_storage_per_kb(n: u32, ) -> Weight; + fn seal_clear_storage_per_byte(n: u32, ) -> Weight; fn seal_get_storage(r: u32, ) -> Weight; - fn seal_get_storage_per_kb(n: u32, ) -> Weight; + fn seal_get_storage_per_byte(n: u32, ) -> Weight; fn seal_contains_storage(r: u32, ) -> Weight; - fn seal_contains_storage_per_kb(n: u32, ) -> Weight; + fn seal_contains_storage_per_byte(n: u32, ) -> Weight; fn seal_take_storage(r: u32, ) -> Weight; - fn seal_take_storage_per_kb(n: u32, ) -> Weight; + fn seal_take_storage_per_byte(n: u32, ) -> Weight; fn seal_transfer(r: u32, ) -> Weight; fn seal_call(r: u32, ) -> Weight; fn seal_delegate_call(r: u32, ) -> Weight; - fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight; + fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight; fn seal_instantiate(r: u32, ) -> Weight; - fn seal_instantiate_per_transfer_input_salt_kb(t: u32, i: u32, s: u32, ) -> Weight; + fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight; fn seal_hash_sha2_256(r: u32, ) -> Weight; - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight; fn seal_hash_keccak_256(r: u32, ) -> Weight; - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight; fn seal_hash_blake2_256(r: u32, ) -> Weight; - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight; fn seal_hash_blake2_128(r: u32, ) -> Weight; - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight; + fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight; + fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight; + fn seal_sr25519_verify(r: u32, ) -> Weight; fn seal_ecdsa_recover(r: u32, ) -> Weight; fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight; fn seal_set_code_hash(r: u32, ) -> Weight; @@ -126,7 +126,6 @@ pub trait WeightInfo { fn instr_br_table_per_entry(e: u32, ) -> Weight; fn instr_call(r: u32, ) -> Weight; fn instr_call_indirect(r: u32, ) -> Weight; - fn instr_call_indirect_per_param(p: u32, ) -> Weight; fn instr_call_per_local(l: u32, ) -> Weight; fn 
instr_local_get(r: u32, ) -> Weight; fn instr_local_set(r: u32, ) -> Weight; @@ -172,15 +171,14 @@ pub trait WeightInfo { /// Weights for pallet_contracts using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Contracts DeletionQueue (r:1 w:0) - /// Proof: Contracts DeletionQueue (max_values: Some(1), max_size: Some(16642), added: 17137, mode: Measured) + /// Storage: Contracts DeletionQueueCounter (r:1 w:0) + /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `604` - // Minimum execution time: 2_591 nanoseconds. - Weight::from_parts(2_817_000, 0) - .saturating_add(Weight::from_parts(0, 604)) + // Estimated: `1594` + // Minimum execution time: 2_565_000 picoseconds. + Weight::from_parts(2_759_000, 1594) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Skipped Metadata (r:0 w:0) @@ -188,34 +186,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `481 + k * (69 ±0)` - // Estimated: `471 + k * (70 ±0)` - // Minimum execution time: 10_190 nanoseconds. - Weight::from_parts(6_642_117, 0) - .saturating_add(Weight::from_parts(0, 471)) - // Standard Error: 992 - .saturating_add(Weight::from_parts(919_828, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Measured: `488 + k * (69 ±0)` + // Estimated: `478 + k * (70 ±0)` + // Minimum execution time: 13_480_000 picoseconds. + Weight::from_parts(10_153_869, 478) + // Standard Error: 427 + .saturating_add(Weight::from_parts(958_726, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: Contracts DeletionQueue (r:1 w:1) - /// Proof: Contracts DeletionQueue (max_values: Some(1), max_size: Some(16642), added: 17137, mode: Measured) - /// The range of component `q` is `[0, 128]`. - fn on_initialize_per_queue_item(q: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `281 + q * (33 ±0)` - // Estimated: `763 + q * (33 ±0)` - // Minimum execution time: 2_598 nanoseconds. - Weight::from_parts(10_288_252, 0) - .saturating_add(Weight::from_parts(0, 763)) - // Standard Error: 2_886 - .saturating_add(Weight::from_parts(1_092_420, 0).saturating_mul(q.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(q.into())) - } /// Storage: Contracts PristineCode (r:1 w:0) /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) /// Storage: Contracts CodeStorage (r:0 w:1) @@ -223,16 +205,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 61717]`. fn reinstrument(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270 + c * (1 ±0)` - // Estimated: `3025 + c * (2 ±0)` - // Minimum execution time: 34_338 nanoseconds. 
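// Illustrative sketch (not part of the patch): the regenerated weight functions all follow
// the same pattern: a base Weight (ref_time now reported in picoseconds, plus a proof-size
// component) plus a per-component slope multiplied by the benchmarked parameter, plus DB
// read/write weights. A quick, self-contained evaluation of the new
// `on_initialize_per_trie_key` ref_time formula from the hunk above (the DB weight terms
// are left out since they depend on `T::DbWeight`):

fn on_initialize_per_trie_key_ref_time(k: u64) -> u64 {
    const BASE_PS: u64 = 10_153_869; // picoseconds, from the regenerated file
    const PER_KEY_PS: u64 = 958_726; // per deleted trie key
    BASE_PS.saturating_add(PER_KEY_PS.saturating_mul(k))
}

fn main() {
    // For k = 1024 (the top of the benchmarked range) this is roughly 0.99 ms of
    // ref_time before accounting for the per-key DB writes.
    assert_eq!(on_initialize_per_trie_key_ref_time(1024), 991_889_293);
}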
- Weight::from_parts(32_159_677, 0) - .saturating_add(Weight::from_parts(0, 3025)) - // Standard Error: 53 - .saturating_add(Weight::from_parts(51_034, 0).saturating_mul(c.into())) + // Measured: `238 + c * (1 ±0)` + // Estimated: `3708 + c * (1 ±0)` + // Minimum execution time: 30_406_000 picoseconds. + Weight::from_parts(28_467_370, 3708) + // Standard Error: 46 + .saturating_add(Weight::from_parts(53_724, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 2).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: Contracts ContractInfoOf (r:1 w:1) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) @@ -247,16 +228,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `803` - // Estimated: `16930 + c * (5 ±0)` - // Minimum execution time: 385_587 nanoseconds. - Weight::from_parts(395_545_811, 0) - .saturating_add(Weight::from_parts(0, 16930)) - // Standard Error: 27 - .saturating_add(Weight::from_parts(31_342, 0).saturating_mul(c.into())) + // Measured: `707` + // Estimated: `6656 + c * (1 ±0)` + // Minimum execution time: 263_198_000 picoseconds. + Weight::from_parts(276_162_279, 6656) + // Standard Error: 18 + .saturating_add(Weight::from_parts(37_378, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: Contracts OwnerInfoOf (r:1 w:1) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) @@ -280,16 +260,15 @@ impl WeightInfo for SubstrateWeight { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `270` - // Estimated: `20267` - // Minimum execution time: 3_799_742 nanoseconds. - Weight::from_parts(670_115_588, 0) - .saturating_add(Weight::from_parts(0, 20267)) - // Standard Error: 287 - .saturating_add(Weight::from_parts(93_885, 0).saturating_mul(c.into())) + // Estimated: `8659` + // Minimum execution time: 3_132_259_000 picoseconds. + Weight::from_parts(513_284_834, 8659) + // Standard Error: 280 + .saturating_add(Weight::from_parts(106_723, 0).saturating_mul(c.into())) // Standard Error: 16 - .saturating_add(Weight::from_parts(1_367, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_166, 0).saturating_mul(i.into())) // Standard Error: 16 - .saturating_add(Weight::from_parts(1_781, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_436, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -311,15 +290,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `546` - // Estimated: `22039` - // Minimum execution time: 1_949_008 nanoseconds. 
- Weight::from_parts(214_033_418, 0) - .saturating_add(Weight::from_parts(0, 22039)) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_666, 0).saturating_mul(i.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_801, 0).saturating_mul(s.into())) + // Measured: `482` + // Estimated: `6408` + // Minimum execution time: 1_646_604_000 picoseconds. + Weight::from_parts(271_369_256, 6408) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_426, 0).saturating_mul(i.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_438, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -335,11 +313,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `855` - // Estimated: `17145` - // Minimum execution time: 146_654 nanoseconds. - Weight::from_parts(147_528_000, 0) - .saturating_add(Weight::from_parts(0, 17145)) + // Measured: `759` + // Estimated: `6699` + // Minimum execution time: 191_360_000 picoseconds. + Weight::from_parts(192_625_000, 6699) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -355,12 +332,11 @@ impl WeightInfo for SubstrateWeight { fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `5386` - // Minimum execution time: 387_889 nanoseconds. - Weight::from_parts(391_379_335, 0) - .saturating_add(Weight::from_parts(0, 5386)) - // Standard Error: 89 - .saturating_add(Weight::from_parts(94_810, 0).saturating_mul(c.into())) + // Estimated: `3574` + // Minimum execution time: 245_207_000 picoseconds. + Weight::from_parts(244_703_457, 3574) + // Standard Error: 61 + .saturating_add(Weight::from_parts(106_850, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -374,11 +350,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `287` - // Estimated: `6098` - // Minimum execution time: 26_014 nanoseconds. - Weight::from_parts(26_510_000, 0) - .saturating_add(Weight::from_parts(0, 6098)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 33_560_000 picoseconds. + Weight::from_parts(33_833_000, 3720) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -390,11 +365,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `666` - // Estimated: `16848` - // Minimum execution time: 30_177 nanoseconds. - Weight::from_parts(30_639_000, 0) - .saturating_add(Weight::from_parts(0, 16848)) + // Measured: `570` + // Estimated: `8985` + // Minimum execution time: 33_288_000 picoseconds. 
+ Weight::from_parts(33_775_000, 8985) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -408,19 +382,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (480 ±0)` - // Estimated: `17295 + r * (2400 ±0)` - // Minimum execution time: 373_786 nanoseconds. - Weight::from_parts(377_332_691, 0) - .saturating_add(Weight::from_parts(0, 17295)) - // Standard Error: 51_211 - .saturating_add(Weight::from_parts(17_715_615, 0).saturating_mul(r.into())) + // Measured: `781 + r * (6 ±0)` + // Estimated: `6722 + r * (6 ±0)` + // Minimum execution time: 234_292_000 picoseconds. + Weight::from_parts(235_941_911, 6722) + // Standard Error: 413 + .saturating_add(Weight::from_parts(339_913, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -432,20 +405,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `917 + r * (21778 ±0)` - // Estimated: `17295 + r * (306895 ±0)` - // Minimum execution time: 374_009 nanoseconds. - Weight::from_parts(238_991_986, 0) - .saturating_add(Weight::from_parts(0, 17295)) - // Standard Error: 464_711 - .saturating_add(Weight::from_parts(249_099_538, 0).saturating_mul(r.into())) + // Measured: `839 + r * (240 ±0)` + // Estimated: `6743 + r * (2715 ±0)` + // Minimum execution time: 236_273_000 picoseconds. + Weight::from_parts(74_380_906, 6743) + // Standard Error: 5_745 + .saturating_add(Weight::from_parts(3_331_781, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 306895).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2715).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -457,20 +429,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. 
fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (22099 ±0)` - // Estimated: `17340 + r * (308500 ±0)` - // Minimum execution time: 375_058 nanoseconds. - Weight::from_parts(238_765_068, 0) - .saturating_add(Weight::from_parts(0, 17340)) - // Standard Error: 662_617 - .saturating_add(Weight::from_parts(302_175_089, 0).saturating_mul(r.into())) + // Measured: `831 + r * (244 ±0)` + // Estimated: `6747 + r * (2719 ±0)` + // Minimum execution time: 236_573_000 picoseconds. + Weight::from_parts(82_473_906, 6747) + // Standard Error: 5_510 + .saturating_add(Weight::from_parts(4_131_820, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 308500).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2719).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -482,19 +453,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `884 + r * (480 ±0)` - // Estimated: `17330 + r * (2400 ±0)` - // Minimum execution time: 374_747 nanoseconds. - Weight::from_parts(376_482_380, 0) - .saturating_add(Weight::from_parts(0, 17330)) - // Standard Error: 61_919 - .saturating_add(Weight::from_parts(22_376_795, 0).saturating_mul(r.into())) + // Measured: `788 + r * (6 ±0)` + // Estimated: `6730 + r * (6 ±0)` + // Minimum execution time: 235_878_000 picoseconds. + Weight::from_parts(238_387_359, 6730) + // Standard Error: 318 + .saturating_add(Weight::from_parts(409_923, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -506,19 +476,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `874 + r * (240 ±0)` - // Estimated: `17265 + r * (1200 ±0)` - // Minimum execution time: 372_287 nanoseconds. - Weight::from_parts(376_250_858, 0) - .saturating_add(Weight::from_parts(0, 17265)) - // Standard Error: 40_119 - .saturating_add(Weight::from_parts(11_359_647, 0).saturating_mul(r.into())) + // Measured: `778 + r * (3 ±0)` + // Estimated: `6723 + r * (3 ±0)` + // Minimum execution time: 233_476_000 picoseconds. 
+ Weight::from_parts(238_014_452, 6723) + // Standard Error: 145 + .saturating_add(Weight::from_parts(165_823, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 1200).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -530,19 +499,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `878 + r * (480 ±0)` - // Estimated: `17260 + r * (2400 ±0)` - // Minimum execution time: 374_445 nanoseconds. - Weight::from_parts(377_243_521, 0) - .saturating_add(Weight::from_parts(0, 17260)) - // Standard Error: 53_032 - .saturating_add(Weight::from_parts(17_684_246, 0).saturating_mul(r.into())) + // Measured: `782 + r * (6 ±0)` + // Estimated: `6724 + r * (6 ±0)` + // Minimum execution time: 235_490_000 picoseconds. + Weight::from_parts(240_039_685, 6724) + // Standard Error: 330 + .saturating_add(Weight::from_parts(332_291, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -554,19 +522,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `879 + r * (480 ±0)` - // Estimated: `17250 + r * (2405 ±0)` - // Minimum execution time: 374_029 nanoseconds. - Weight::from_parts(380_415_186, 0) - .saturating_add(Weight::from_parts(0, 17250)) - // Standard Error: 60_562 - .saturating_add(Weight::from_parts(17_152_599, 0).saturating_mul(r.into())) + // Measured: `783 + r * (6 ±0)` + // Estimated: `6721 + r * (6 ±0)` + // Minimum execution time: 236_093_000 picoseconds. 
+ Weight::from_parts(238_513_328, 6721) + // Standard Error: 206 + .saturating_add(Weight::from_parts(328_899, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2405).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:2 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -578,19 +545,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1049 + r * (480 ±0)` - // Estimated: `19849 + r * (2456 ±0)` - // Minimum execution time: 373_999 nanoseconds. - Weight::from_parts(381_757_033, 0) - .saturating_add(Weight::from_parts(0, 19849)) - // Standard Error: 97_983 - .saturating_add(Weight::from_parts(98_290_984, 0).saturating_mul(r.into())) + // Measured: `922 + r * (6 ±0)` + // Estimated: `6846 + r * (6 ±0)` + // Minimum execution time: 234_801_000 picoseconds. + Weight::from_parts(243_519_159, 6846) + // Standard Error: 1_367 + .saturating_add(Weight::from_parts(1_449_599, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2456).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -602,19 +568,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `888 + r * (480 ±0)` - // Estimated: `17360 + r * (2400 ±0)` - // Minimum execution time: 374_197 nanoseconds. - Weight::from_parts(377_755_896, 0) - .saturating_add(Weight::from_parts(0, 17360)) - // Standard Error: 60_542 - .saturating_add(Weight::from_parts(17_442_065, 0).saturating_mul(r.into())) + // Measured: `792 + r * (6 ±0)` + // Estimated: `6741 + r * (6 ±0)` + // Minimum execution time: 236_765_000 picoseconds. 
+ Weight::from_parts(237_843_244, 6741) + // Standard Error: 308 + .saturating_add(Weight::from_parts(329_911, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -626,19 +591,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `886 + r * (480 ±0)` - // Estimated: `17290 + r * (2400 ±0)` - // Minimum execution time: 373_888 nanoseconds. - Weight::from_parts(377_825_771, 0) - .saturating_add(Weight::from_parts(0, 17290)) - // Standard Error: 38_026 - .saturating_add(Weight::from_parts(17_147_903, 0).saturating_mul(r.into())) + // Measured: `790 + r * (6 ±0)` + // Estimated: `6739 + r * (6 ±0)` + // Minimum execution time: 236_690_000 picoseconds. + Weight::from_parts(241_743_164, 6739) + // Standard Error: 333 + .saturating_add(Weight::from_parts(324_693, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -650,19 +614,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `883 + r * (480 ±0)` - // Estimated: `17315 + r * (2400 ±0)` - // Minimum execution time: 373_904 nanoseconds. - Weight::from_parts(378_652_372, 0) - .saturating_add(Weight::from_parts(0, 17315)) - // Standard Error: 43_833 - .saturating_add(Weight::from_parts(16_936_781, 0).saturating_mul(r.into())) + // Measured: `787 + r * (6 ±0)` + // Estimated: `6737 + r * (6 ±0)` + // Minimum execution time: 236_149_000 picoseconds. 
+ Weight::from_parts(239_090_707, 6737) + // Standard Error: 246 + .saturating_add(Weight::from_parts(344_488, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -674,19 +637,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `874 + r * (480 ±0)` - // Estimated: `17245 + r * (2400 ±0)` - // Minimum execution time: 373_473 nanoseconds. - Weight::from_parts(376_386_312, 0) - .saturating_add(Weight::from_parts(0, 17245)) - // Standard Error: 46_945 - .saturating_add(Weight::from_parts(17_336_462, 0).saturating_mul(r.into())) + // Measured: `778 + r * (6 ±0)` + // Estimated: `6723 + r * (6 ±0)` + // Minimum execution time: 235_057_000 picoseconds. + Weight::from_parts(237_752_870, 6723) + // Standard Error: 236 + .saturating_add(Weight::from_parts(328_235, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -700,19 +662,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `951 + r * (800 ±0)` - // Estimated: `19046 + r * (4805 ±0)` - // Minimum execution time: 373_661 nanoseconds. - Weight::from_parts(385_824_015, 0) - .saturating_add(Weight::from_parts(0, 19046)) - // Standard Error: 75_964 - .saturating_add(Weight::from_parts(88_530_074, 0).saturating_mul(r.into())) + // Measured: `856 + r * (10 ±0)` + // Estimated: `6796 + r * (10 ±0)` + // Minimum execution time: 234_995_000 picoseconds. 
+ Weight::from_parts(246_473_554, 6796) + // Standard Error: 1_015 + .saturating_add(Weight::from_parts(1_337_653, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4805).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -724,19 +685,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_gas(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `841 + r * (320 ±0)` - // Estimated: `17120 + r * (1600 ±0)` - // Minimum execution time: 133_849 nanoseconds. - Weight::from_parts(137_283_391, 0) - .saturating_add(Weight::from_parts(0, 17120)) - // Standard Error: 13_312 - .saturating_add(Weight::from_parts(8_055_328, 0).saturating_mul(r.into())) + // Measured: `745 + r * (4 ±0)` + // Estimated: `6687 + r * (4 ±0)` + // Minimum execution time: 160_445_000 picoseconds. + Weight::from_parts(165_558_135, 6687) + // Standard Error: 234 + .saturating_add(Weight::from_parts(133_607, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 1600).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -748,19 +708,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `876 + r * (480 ±0)` - // Estimated: `17245 + r * (2400 ±0)` - // Minimum execution time: 373_468 nanoseconds. - Weight::from_parts(376_121_093, 0) - .saturating_add(Weight::from_parts(0, 17245)) - // Standard Error: 61_857 - .saturating_add(Weight::from_parts(15_868_414, 0).saturating_mul(r.into())) + // Measured: `780 + r * (6 ±0)` + // Estimated: `6724 + r * (6 ±0)` + // Minimum execution time: 235_065_000 picoseconds. 
+ Weight::from_parts(237_797_177, 6724) + // Standard Error: 336 + .saturating_add(Weight::from_parts(267_302, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -772,16 +731,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_input_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1356` - // Estimated: `19650` - // Minimum execution time: 390_668 nanoseconds. - Weight::from_parts(419_608_449, 0) - .saturating_add(Weight::from_parts(0, 19650)) - // Standard Error: 4_890 - .saturating_add(Weight::from_parts(9_672_288, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_input_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `784` + // Estimated: `6724` + // Minimum execution time: 236_215_000 picoseconds. + Weight::from_parts(239_347_313, 6724) + // Standard Error: 0 + .saturating_add(Weight::from_parts(587, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -798,16 +756,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (45 ±0)` - // Estimated: `17190 + r * (225 ±0)` - // Minimum execution time: 371_309 nanoseconds. - Weight::from_parts(373_625_402, 0) - .saturating_add(Weight::from_parts(0, 17190)) - // Standard Error: 419_605 - .saturating_add(Weight::from_parts(1_737_397, 0).saturating_mul(r.into())) + // Measured: `768 + r * (45 ±0)` + // Estimated: `6708 + r * (45 ±0)` + // Minimum execution time: 231_571_000 picoseconds. + Weight::from_parts(233_477_918, 6708) + // Standard Error: 95_776 + .saturating_add(Weight::from_parts(1_733_181, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 225).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -819,16 +776,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_return_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `874` - // Estimated: `17285` - // Minimum execution time: 374_094 nanoseconds. 
- Weight::from_parts(375_965_200, 0) - .saturating_add(Weight::from_parts(0, 17285)) - // Standard Error: 1_127 - .saturating_add(Weight::from_parts(232_645, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_return_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `778` + // Estimated: `6731` + // Minimum execution time: 234_956_000 picoseconds. + Weight::from_parts(236_785_051, 6731) + // Standard Error: 0 + .saturating_add(Weight::from_parts(177, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -840,27 +796,28 @@ impl WeightInfo for SubstrateWeight { /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts DeletionQueue (r:1 w:1) - /// Proof: Contracts DeletionQueue (max_values: Some(1), max_size: Some(16642), added: 17137, mode: Measured) + /// Storage: Contracts DeletionQueueCounter (r:1 w:1) + /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: Contracts OwnerInfoOf (r:1 w:1) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) /// Storage: System EventTopics (r:3 w:3) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: Contracts DeletionQueue (r:0 w:1) + /// Proof: Contracts DeletionQueue (max_values: None, max_size: Some(142), added: 2617, mode: Measured) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `906 + r * (452 ±0)` - // Estimated: `20242 + r * (15004 ±0)` - // Minimum execution time: 373_123 nanoseconds. - Weight::from_parts(374_924_634, 0) - .saturating_add(Weight::from_parts(0, 20242)) - // Standard Error: 378_010 - .saturating_add(Weight::from_parts(70_441_665, 0).saturating_mul(r.into())) + // Measured: `810 + r * (356 ±0)` + // Estimated: `6750 + r * (7781 ±0)` + // Minimum execution time: 234_275_000 picoseconds. + Weight::from_parts(236_776_769, 6750) + // Standard Error: 137_203 + .saturating_add(Weight::from_parts(110_758_930, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((7_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 15004).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((8_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 7781).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -874,19 +831,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. 
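// Illustrative sketch, not part of the generated file: every host-function weight in this
// file has the same linear shape, namely a measured base `Weight::from_parts(ref_time, proof_size)`,
// a per-call slope multiplied by the benchmark component `r`, the configured DB read/write
// weights, and a per-call proof-size term. The helper below evaluates the `seal_balance`
// figures above by hand; the helper name is hypothetical, and RocksDB-backed `DbWeight`
// constants from `frame_support` stand in for `T::DbWeight`.
fn approx_seal_balance_weight(r: u64) -> frame_support::weights::Weight {
	use frame_support::weights::{constants::RocksDbWeight, Weight};
	Weight::from_parts(243_519_159, 6_846)                                  // measured base (ref_time, proof_size)
		.saturating_add(Weight::from_parts(1_449_599, 0).saturating_mul(r)) // ref_time slope per call
		.saturating_add(RocksDbWeight::get().reads(7_u64))                  // 7 fixed storage reads
		.saturating_add(RocksDbWeight::get().writes(3_u64))                 // 3 fixed storage writes
		.saturating_add(Weight::from_parts(0, 6).saturating_mul(r))         // proof-size slope per call
}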
fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (800 ±0)` - // Estimated: `18835 + r * (4805 ±0)` - // Minimum execution time: 373_291 nanoseconds. - Weight::from_parts(385_684_344, 0) - .saturating_add(Weight::from_parts(0, 18835)) - // Standard Error: 99_025 - .saturating_add(Weight::from_parts(111_308_793, 0).saturating_mul(r.into())) + // Measured: `825 + r * (10 ±0)` + // Estimated: `6769 + r * (10 ±0)` + // Minimum execution time: 235_593_000 picoseconds. + Weight::from_parts(253_731_242, 6769) + // Standard Error: 2_129 + .saturating_add(Weight::from_parts(1_771_297, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4805).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -898,19 +854,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `874 + r * (800 ±0)` - // Estimated: `17250 + r * (4000 ±0)` - // Minimum execution time: 371_900 nanoseconds. - Weight::from_parts(384_166_626, 0) - .saturating_add(Weight::from_parts(0, 17250)) - // Standard Error: 205_255 - .saturating_add(Weight::from_parts(229_214_157, 0).saturating_mul(r.into())) + // Measured: `778 + r * (10 ±0)` + // Estimated: `6723 + r * (10 ±0)` + // Minimum execution time: 232_124_000 picoseconds. + Weight::from_parts(245_904_447, 6723) + // Standard Error: 2_185 + .saturating_add(Weight::from_parts(3_470_410, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4000).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -920,27 +875,25 @@ impl WeightInfo for SubstrateWeight { /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:322 w:322) + /// Storage: System EventTopics (r:6 w:6) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) /// The range of component `t` is `[0, 4]`. - /// The range of component `n` is `[0, 16]`. - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1821 + t * (2608 ±0) + n * (7 ±0)` - // Estimated: `21870 + t * (211030 ±0) + n * (50 ±0)` - // Minimum execution time: 1_289_873 nanoseconds. 
- Weight::from_parts(581_702_206, 0) - .saturating_add(Weight::from_parts(0, 21870)) - // Standard Error: 665_638 - .saturating_add(Weight::from_parts(181_470_553, 0).saturating_mul(t.into())) - // Standard Error: 182_816 - .saturating_add(Weight::from_parts(71_635_250, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `797 + t * (32 ±0)` + // Estimated: `6744 + t * (2508 ±0)` + // Minimum execution time: 250_301_000 picoseconds. + Weight::from_parts(245_292_258, 6744) + // Standard Error: 29_864 + .saturating_add(Weight::from_parts(2_163_531, 0).saturating_mul(t.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(583, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((80_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 211030).saturating_mul(t.into())) - .saturating_add(Weight::from_parts(0, 50).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2508).saturating_mul(t.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -952,19 +905,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (560 ±0)` - // Estimated: `17240 + r * (2800 ±0)` - // Minimum execution time: 148_635 nanoseconds. - Weight::from_parts(154_095_712, 0) - .saturating_add(Weight::from_parts(0, 17240)) - // Standard Error: 77_790 - .saturating_add(Weight::from_parts(14_837_085, 0).saturating_mul(r.into())) + // Measured: `777 + r * (7 ±0)` + // Estimated: `6721 + r * (7 ±0)` + // Minimum execution time: 165_711_000 picoseconds. + Weight::from_parts(168_792_571, 6721) + // Standard Error: 216 + .saturating_add(Weight::from_parts(230_285, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2800).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) @@ -976,212 +928,189 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `i` is `[0, 1024]`. - fn seal_debug_message_per_kb(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `125824` - // Estimated: `265128` - // Minimum execution time: 501_014 nanoseconds. 
- Weight::from_parts(505_634_218, 0) - .saturating_add(Weight::from_parts(0, 265128)) - // Standard Error: 2_441 - .saturating_add(Weight::from_parts(819_257, 0).saturating_mul(i.into())) + /// The range of component `i` is `[0, 1048576]`. + fn seal_debug_message_per_byte(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `125728` + // Estimated: `131670` + // Minimum execution time: 348_928_000 picoseconds. + Weight::from_parts(352_224_793, 131670) + // Standard Error: 0 + .saturating_add(Weight::from_parts(731, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `911 + r * (23420 ±0)` - // Estimated: `911 + r * (23418 ±0)` - // Minimum execution time: 375_301 nanoseconds. - Weight::from_parts(291_498_841, 0) - .saturating_add(Weight::from_parts(0, 911)) - // Standard Error: 809_989 - .saturating_add(Weight::from_parts(464_550_291, 0).saturating_mul(r.into())) + // Measured: `845 + r * (292 ±0)` + // Estimated: `843 + r * (293 ±0)` + // Minimum execution time: 236_418_000 picoseconds. + Weight::from_parts(129_862_840, 843) + // Standard Error: 9_733 + .saturating_add(Weight::from_parts(6_005_187, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 23418).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 293).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12672 + n * (11945 ±0)` - // Estimated: `8529 + n * (12814 ±61)` - // Minimum execution time: 506_318 nanoseconds. - Weight::from_parts(676_935_313, 0) - .saturating_add(Weight::from_parts(0, 8529)) - // Standard Error: 1_589_291 - .saturating_add(Weight::from_parts(97_839_399, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(52_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(50_u64)) - .saturating_add(T::DbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 12814).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1304` + // Estimated: `1280` + // Minimum execution time: 251_599_000 picoseconds. 
+ Weight::from_parts(285_284_665, 1280) + // Standard Error: 46 + .saturating_add(Weight::from_parts(410, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `15170 + n * (175775 ±0)` - // Estimated: `9914 + n * (176858 ±74)` - // Minimum execution time: 506_148 nanoseconds. - Weight::from_parts(648_278_778, 0) - .saturating_add(Weight::from_parts(0, 9914)) - // Standard Error: 1_343_586 - .saturating_add(Weight::from_parts(65_789_595, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(51_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(49_u64)) - .saturating_add(T::DbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 176858).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1167 + n * (1 ±0)` + // Estimated: `1167 + n * (1 ±0)` + // Minimum execution time: 251_309_000 picoseconds. + Weight::from_parts(253_555_552, 1167) + // Standard Error: 9 + .saturating_add(Weight::from_parts(27, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `903 + r * (23099 ±0)` - // Estimated: `908 + r * (23099 ±0)` - // Minimum execution time: 374_344 nanoseconds. - Weight::from_parts(293_272_061, 0) - .saturating_add(Weight::from_parts(0, 908)) - // Standard Error: 810_412 - .saturating_add(Weight::from_parts(453_315_956, 0).saturating_mul(r.into())) + // Measured: `841 + r * (288 ±0)` + // Estimated: `845 + r * (289 ±0)` + // Minimum execution time: 235_441_000 picoseconds. + Weight::from_parts(132_980_942, 845) + // Standard Error: 9_421 + .saturating_add(Weight::from_parts(5_854_896, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 23099).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. 
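// Illustrative sketch, not part of the generated file: these storage benchmarks are
// two-dimensional, with `ref_time` in picoseconds and `proof_size` in bytes, and the per-byte
// variants add a small slope in `n` to both parts. The hypothetical helper below reads the two
// dimensions off the `seal_set_storage_per_old_byte` figures above (the fixed DB read/write
// components are omitted for brevity); the standard `frame_support::weights::Weight` API is
// assumed.
fn set_storage_per_old_byte_parts(n: u64) -> (u64, u64) {
	use frame_support::weights::Weight;
	let w = Weight::from_parts(253_555_552, 1_167)                    // measured base
		.saturating_add(Weight::from_parts(27, 0).saturating_mul(n))  // ref_time per pre-existing byte
		.saturating_add(Weight::from_parts(0, 1).saturating_mul(n));  // proof size per pre-existing byte
	(w.ref_time(), w.proof_size())
}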
- fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `14895 + n * (175768 ±0)` - // Estimated: `9551 + n * (176867 ±75)` - // Minimum execution time: 478_564 nanoseconds. - Weight::from_parts(630_839_142, 0) - .saturating_add(Weight::from_parts(0, 9551)) - // Standard Error: 1_427_520 - .saturating_add(Weight::from_parts(66_813_592, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(51_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(48_u64)) - .saturating_add(T::DbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 176867).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_clear_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1163 + n * (1 ±0)` + // Estimated: `1163 + n * (1 ±0)` + // Minimum execution time: 249_967_000 picoseconds. + Weight::from_parts(252_122_186, 1163) + // Standard Error: 10 + .saturating_add(Weight::from_parts(74, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `896 + r * (23744 ±0)` - // Estimated: `909 + r * (23740 ±0)` - // Minimum execution time: 374_479 nanoseconds. - Weight::from_parts(311_839_315, 0) - .saturating_add(Weight::from_parts(0, 909)) - // Standard Error: 666_553 - .saturating_add(Weight::from_parts(371_213_042, 0).saturating_mul(r.into())) + // Measured: `835 + r * (296 ±0)` + // Estimated: `840 + r * (297 ±0)` + // Minimum execution time: 235_647_000 picoseconds. + Weight::from_parts(145_525_169, 840) + // Standard Error: 8_553 + .saturating_add(Weight::from_parts(4_948_021, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 23740).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_get_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `15501 + n * (175775 ±0)` - // Estimated: `10042 + n * (176900 ±76)` - // Minimum execution time: 460_639 nanoseconds. - Weight::from_parts(591_187_094, 0) - .saturating_add(Weight::from_parts(0, 10042)) - // Standard Error: 1_233_792 - .saturating_add(Weight::from_parts(160_874_477, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(51_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(n.into()))) + /// The range of component `n` is `[0, 16384]`. 
+ fn seal_get_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1179 + n * (1 ±0)` + // Estimated: `1179 + n * (1 ±0)` + // Minimum execution time: 249_576_000 picoseconds. + Weight::from_parts(250_747_191, 1179) + // Standard Error: 8 + .saturating_add(Weight::from_parts(717, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 176900).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (23098 ±0)` - // Estimated: `920 + r * (23098 ±0)` - // Minimum execution time: 374_272 nanoseconds. - Weight::from_parts(311_446_269, 0) - .saturating_add(Weight::from_parts(0, 920)) - // Standard Error: 630_307 - .saturating_add(Weight::from_parts(357_134_931, 0).saturating_mul(r.into())) + // Measured: `856 + r * (288 ±0)` + // Estimated: `857 + r * (289 ±0)` + // Minimum execution time: 236_110_000 picoseconds. + Weight::from_parts(148_420_625, 857) + // Standard Error: 8_175 + .saturating_add(Weight::from_parts(4_684_126, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 23098).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `14839 + n * (175789 ±0)` - // Estimated: `9532 + n * (176874 ±75)` - // Minimum execution time: 456_013 nanoseconds. - Weight::from_parts(575_116_352, 0) - .saturating_add(Weight::from_parts(0, 9532)) - // Standard Error: 1_122_298 - .saturating_add(Weight::from_parts(61_786_107, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(51_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(n.into()))) + /// The range of component `n` is `[0, 16384]`. + fn seal_contains_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1166 + n * (1 ±0)` + // Estimated: `1166 + n * (1 ±0)` + // Minimum execution time: 247_800_000 picoseconds. + Weight::from_parts(249_410_575, 1166) + // Standard Error: 6 + .saturating_add(Weight::from_parts(99, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 176874).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. 
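// Illustrative sketch, not part of the generated file: in contrast to the host functions
// earlier in this file, which perform a fixed number of DB operations, the storage host
// functions add one DB read (and, for mutating ones such as `seal_take_storage` below, one DB
// write) per iteration of `r`. The hypothetical helper below evaluates the `seal_get_storage`
// figures above, with RocksDB-backed `DbWeight` constants standing in for `T::DbWeight`.
fn approx_seal_get_storage_weight(r: u64) -> frame_support::weights::Weight {
	use frame_support::weights::{constants::RocksDbWeight, Weight};
	Weight::from_parts(145_525_169, 840)                                      // measured base
		.saturating_add(Weight::from_parts(4_948_021, 0).saturating_mul(r))   // ref_time slope per call
		.saturating_add(RocksDbWeight::get().reads(6_u64))                    // fixed reads
		.saturating_add(RocksDbWeight::get().reads(1_u64).saturating_mul(r))  // one extra read per call
		.saturating_add(RocksDbWeight::get().writes(3_u64))                   // fixed writes
		.saturating_add(Weight::from_parts(0, 297).saturating_mul(r))         // proof-size slope per call
}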
fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `911 + r * (23740 ±0)` - // Estimated: `913 + r * (23739 ±0)` - // Minimum execution time: 374_621 nanoseconds. - Weight::from_parts(299_689_489, 0) - .saturating_add(Weight::from_parts(0, 913)) - // Standard Error: 757_735 - .saturating_add(Weight::from_parts(465_213_246, 0).saturating_mul(r.into())) + // Measured: `829 + r * (296 ±0)` + // Estimated: `836 + r * (297 ±0)` + // Minimum execution time: 235_251_000 picoseconds. + Weight::from_parts(128_816_707, 836) + // Standard Error: 9_887 + .saturating_add(Weight::from_parts(6_167_176, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 23739).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_take_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `15502 + n * (175775 ±0)` - // Estimated: `10042 + n * (176898 ±76)` - // Minimum execution time: 481_980 nanoseconds. - Weight::from_parts(647_289_053, 0) - .saturating_add(Weight::from_parts(0, 10042)) - // Standard Error: 1_556_155 - .saturating_add(Weight::from_parts(166_592_657, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(51_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(48_u64)) - .saturating_add(T::DbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 176898).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_take_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1180 + n * (1 ±0)` + // Estimated: `1180 + n * (1 ±0)` + // Minimum execution time: 250_401_000 picoseconds. + Weight::from_parts(253_298_243, 1180) + // Standard Error: 9 + .saturating_add(Weight::from_parts(667, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: System Account (r:1602 w:1601) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1193,136 +1122,131 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1457 + r * (3604 ±0)` - // Estimated: `21583 + r * (216101 ±0)` - // Minimum execution time: 374_962 nanoseconds. 
- Weight::from_parts(313_416_386, 0) - .saturating_add(Weight::from_parts(0, 21583)) - // Standard Error: 710_675 - .saturating_add(Weight::from_parts(1_396_551_156, 0).saturating_mul(r.into())) + // Measured: `1373 + r * (45 ±0)` + // Estimated: `7270 + r * (2520 ±0)` + // Minimum execution time: 236_470_000 picoseconds. + Weight::from_parts(98_898_727, 7270) + // Standard Error: 33_316 + .saturating_add(Weight::from_parts(35_149_946, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(T::DbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 216101).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2520).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1601) + /// Storage: Contracts ContractInfoOf (r:801 w:801) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) /// Storage: Contracts CodeStorage (r:2 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:1602 w:1602) + /// Storage: System EventTopics (r:802 w:802) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1609 + r * (23073 ±0)` - // Estimated: `22098 + r * (511456 ±1)` - // Minimum execution time: 375_916 nanoseconds. - Weight::from_parts(376_468_000, 0) - .saturating_add(Weight::from_parts(0, 22098)) - // Standard Error: 7_246_855 - .saturating_add(Weight::from_parts(28_982_425_139, 0).saturating_mul(r.into())) + // Measured: `1237 + r * (256 ±0)` + // Estimated: `7125 + r * (2732 ±0)` + // Minimum execution time: 238_303_000 picoseconds. 
+ Weight::from_parts(239_024_000, 7125) + // Standard Error: 65_907 + .saturating_add(Weight::from_parts(209_419_071, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().reads((160_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((160_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 511456).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2732).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) /// Storage: Contracts ContractInfoOf (r:1 w:1) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1536 w:0) + /// Storage: Contracts CodeStorage (r:736 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:1537 w:1537) + /// Storage: System EventTopics (r:737 w:737) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (71670 ±0)` - // Estimated: `17285 + r * (659930 ±563)` - // Minimum execution time: 375_412 nanoseconds. - Weight::from_parts(376_493_000, 0) - .saturating_add(Weight::from_parts(0, 17285)) - // Standard Error: 8_239_575 - .saturating_add(Weight::from_parts(28_716_347_183, 0).saturating_mul(r.into())) + // Measured: `0 + r * (502 ±0)` + // Estimated: `6727 + r * (2572 ±10)` + // Minimum execution time: 235_961_000 picoseconds. 
+ Weight::from_parts(236_939_000, 6727) + // Standard Error: 83_087 + .saturating_add(Weight::from_parts(205_646_517, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((150_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((75_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 659930).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2572).saturating_mul(r.into())) } - /// Storage: System Account (r:82 w:81) + /// Storage: System Account (r:3 w:2) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:81 w:81) + /// Storage: Contracts ContractInfoOf (r:2 w:2) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) /// Storage: Contracts CodeStorage (r:2 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:82 w:82) + /// Storage: System EventTopics (r:3 w:3) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) /// The range of component `t` is `[0, 1]`. - /// The range of component `c` is `[0, 1024]`. - fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `24269 + t * (16910 ±0)` - // Estimated: `532690 + t * (285025 ±0)` - // Minimum execution time: 10_443_315 nanoseconds. - Weight::from_parts(9_342_574_069, 0) - .saturating_add(Weight::from_parts(0, 532690)) - // Standard Error: 7_237_279 - .saturating_add(Weight::from_parts(1_390_221_936, 0).saturating_mul(t.into())) - // Standard Error: 10_851 - .saturating_add(Weight::from_parts(9_842_151, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(167_u64)) - .saturating_add(T::DbWeight::get().reads((81_u64).saturating_mul(t.into()))) - .saturating_add(T::DbWeight::get().writes(163_u64)) - .saturating_add(T::DbWeight::get().writes((81_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 285025).saturating_mul(t.into())) - } - /// Storage: System Account (r:3202 w:3202) + /// The range of component `c` is `[0, 1048576]`. + fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1154 + t * (204 ±0)` + // Estimated: `9569 + t * (5154 ±0)` + // Minimum execution time: 410_156_000 picoseconds. 
+ Weight::from_parts(378_378_143, 9569) + // Standard Error: 285_172 + .saturating_add(Weight::from_parts(34_736_740, 0).saturating_mul(t.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(591, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().writes(5_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 5154).saturating_mul(t.into())) + } + /// Storage: System Account (r:1602 w:1602) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1601) + /// Storage: Contracts ContractInfoOf (r:801 w:801) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1601 w:0) + /// Storage: Contracts CodeStorage (r:801 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: Contracts Nonce (r:1 w:1) /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1600 w:1600) + /// Storage: Contracts OwnerInfoOf (r:800 w:800) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1602 w:1602) + /// Storage: System EventTopics (r:802 w:802) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1775 + r * (25568 ±0)` - // Estimated: `26563 + r * (1367114 ±2)` - // Minimum execution time: 376_418 nanoseconds. - Weight::from_parts(377_292_000, 0) - .saturating_add(Weight::from_parts(0, 26563)) - // Standard Error: 32_312_545 - .saturating_add(Weight::from_parts(35_904_826_312, 0).saturating_mul(r.into())) + // Measured: `1301 + r * (254 ±0)` + // Estimated: `7131 + r * (5205 ±0)` + // Minimum execution time: 236_748_000 picoseconds. 
+ Weight::from_parts(237_129_000, 7131) + // Standard Error: 280_059 + .saturating_add(Weight::from_parts(341_428_013, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().reads((480_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(5_u64)) - .saturating_add(T::DbWeight::get().writes((400_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 1367114).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 5205).saturating_mul(r.into())) } - /// Storage: System Account (r:162 w:162) + /// Storage: System Account (r:4 w:4) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:81 w:81) + /// Storage: Contracts ContractInfoOf (r:2 w:2) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) /// Storage: Contracts CodeStorage (r:2 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) @@ -1332,29 +1256,28 @@ impl WeightInfo for SubstrateWeight { /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: Contracts OwnerInfoOf (r:1 w:1) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:82 w:82) + /// Storage: System EventTopics (r:3 w:3) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) /// The range of component `t` is `[0, 1]`. - /// The range of component `i` is `[0, 960]`. - /// The range of component `s` is `[0, 960]`. - fn seal_instantiate_per_transfer_input_salt_kb(t: u32, i: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `5785 + t * (33 ±0)` - // Estimated: `850985 + t * (2671 ±3)` - // Minimum execution time: 132_157_340 nanoseconds. - Weight::from_parts(11_329_968_948, 0) - .saturating_add(Weight::from_parts(0, 850985)) - // Standard Error: 99_102_968 - .saturating_add(Weight::from_parts(84_719_458, 0).saturating_mul(t.into())) - // Standard Error: 161_609 - .saturating_add(Weight::from_parts(126_156_627, 0).saturating_mul(i.into())) - // Standard Error: 161_609 - .saturating_add(Weight::from_parts(126_628_313, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(329_u64)) + /// The range of component `i` is `[0, 983040]`. + /// The range of component `s` is `[0, 983040]`. + fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1071 + t * (187 ±0)` + // Estimated: `9492 + t * (2634 ±2)` + // Minimum execution time: 1_613_796_000 picoseconds. 
+ Weight::from_parts(340_002_206, 9492) + // Standard Error: 4_296_381 + .saturating_add(Weight::from_parts(115_239_834, 0).saturating_mul(t.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(i.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_315, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(T::DbWeight::get().writes(326_u64)) + .saturating_add(T::DbWeight::get().writes(10_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2671).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 2634).saturating_mul(t.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1366,19 +1289,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `871 + r * (642 ±0)` - // Estimated: `17225 + r * (3210 ±0)` - // Minimum execution time: 373_559 nanoseconds. - Weight::from_parts(375_166_904, 0) - .saturating_add(Weight::from_parts(0, 17225)) - // Standard Error: 125_024 - .saturating_add(Weight::from_parts(42_291_595, 0).saturating_mul(r.into())) + // Measured: `777 + r * (8 ±0)` + // Estimated: `6718 + r * (8 ±0)` + // Minimum execution time: 233_111_000 picoseconds. + Weight::from_parts(238_643_933, 6718) + // Standard Error: 184 + .saturating_add(Weight::from_parts(572_296, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3210).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1390,16 +1312,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1673` - // Estimated: `21160` - // Minimum execution time: 416_233 nanoseconds. - Weight::from_parts(416_785_000, 0) - .saturating_add(Weight::from_parts(0, 21160)) - // Standard Error: 56_223 - .saturating_add(Weight::from_parts(324_513_835, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `785` + // Estimated: `6725` + // Minimum execution time: 234_746_000 picoseconds. 
+ Weight::from_parts(229_815_552, 6725) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_892, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1413,19 +1334,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (642 ±0)` - // Estimated: `17235 + r * (3210 ±0)` - // Minimum execution time: 371_735 nanoseconds. - Weight::from_parts(375_979_430, 0) - .saturating_add(Weight::from_parts(0, 17235)) - // Standard Error: 968_037 - .saturating_add(Weight::from_parts(57_780_769, 0).saturating_mul(r.into())) + // Measured: `779 + r * (8 ±0)` + // Estimated: `6721 + r * (8 ±0)` + // Minimum execution time: 232_732_000 picoseconds. + Weight::from_parts(239_007_209, 6721) + // Standard Error: 256 + .saturating_add(Weight::from_parts(733_879, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3210).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1437,16 +1357,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1675` - // Estimated: `21205` - // Minimum execution time: 428_196 nanoseconds. - Weight::from_parts(429_438_000, 0) - .saturating_add(Weight::from_parts(0, 21205)) - // Standard Error: 57_860 - .saturating_add(Weight::from_parts(260_917_896, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `787` + // Estimated: `6729` + // Minimum execution time: 234_184_000 picoseconds. + Weight::from_parts(227_603_375, 6729) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_127, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1460,19 +1379,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (642 ±0)` - // Estimated: `17235 + r * (3210 ±0)` - // Minimum execution time: 371_412 nanoseconds. 
- Weight::from_parts(373_635_818, 0) - .saturating_add(Weight::from_parts(0, 17235)) - // Standard Error: 222_973 - .saturating_add(Weight::from_parts(33_347_181, 0).saturating_mul(r.into())) + // Measured: `779 + r * (8 ±0)` + // Estimated: `6724 + r * (8 ±0)` + // Minimum execution time: 233_038_000 picoseconds. + Weight::from_parts(238_515_817, 6724) + // Standard Error: 255 + .saturating_add(Weight::from_parts(413_343, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3210).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1484,16 +1402,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1675` - // Estimated: `21180` - // Minimum execution time: 405_858 nanoseconds. - Weight::from_parts(406_498_000, 0) - .saturating_add(Weight::from_parts(0, 21180)) - // Standard Error: 48_388 - .saturating_add(Weight::from_parts(103_283_157, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `787` + // Estimated: `6733` + // Minimum execution time: 232_996_000 picoseconds. + Weight::from_parts(226_706_997, 6733) + // Standard Error: 1 + .saturating_add(Weight::from_parts(908, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1507,19 +1424,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (679 ±0)` - // Estimated: `17235 + r * (3395 ±0)` - // Minimum execution time: 371_746 nanoseconds. - Weight::from_parts(373_538_171, 0) - .saturating_add(Weight::from_parts(0, 17235)) - // Standard Error: 387_332 - .saturating_add(Weight::from_parts(35_933_528, 0).saturating_mul(r.into())) + // Measured: `779 + r * (8 ±0)` + // Estimated: `6725 + r * (8 ±0)` + // Minimum execution time: 232_292_000 picoseconds. 
+ Weight::from_parts(237_997_001, 6725) + // Standard Error: 219 + .saturating_add(Weight::from_parts(410_177, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3395).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1531,16 +1447,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1675` - // Estimated: `21225` - // Minimum execution time: 405_752 nanoseconds. - Weight::from_parts(406_417_000, 0) - .saturating_add(Weight::from_parts(0, 21225)) - // Standard Error: 47_051 - .saturating_add(Weight::from_parts(103_325_027, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `787` + // Estimated: `6727` + // Minimum execution time: 234_815_000 picoseconds. + Weight::from_parts(226_317_150, 6727) + // Standard Error: 1 + .saturating_add(Weight::from_parts(911, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1554,19 +1469,64 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `n` is `[0, 125697]`. + fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `912 + n * (1 ±0)` + // Estimated: `6848 + n * (1 ±0)` + // Minimum execution time: 286_323_000 picoseconds. + Weight::from_parts(290_287_955, 6848) + // Standard Error: 1 + .saturating_add(Weight::from_parts(4_693, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) + /// Storage: Contracts ContractInfoOf (r:1 w:1) + /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) + /// Storage: Contracts CodeStorage (r:1 w:0) + /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) + /// Storage: System EventTopics (r:2 w:2) + /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// The range of component `r` is `[0, 160]`. 
+ fn seal_sr25519_verify(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `727 + r * (112 ±0)` + // Estimated: `6666 + r * (112 ±0)` + // Minimum execution time: 235_938_000 picoseconds. + Weight::from_parts(242_728_358, 6666) + // Standard Error: 9_725 + .saturating_add(Weight::from_parts(47_527_740, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) + } + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) + /// Storage: Contracts ContractInfoOf (r:1 w:1) + /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) + /// Storage: Contracts CodeStorage (r:1 w:0) + /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) + /// Storage: System EventTopics (r:2 w:2) + /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `917 + r * (6083 ±0)` - // Estimated: `17455 + r * (30415 ±0)` - // Minimum execution time: 373_882 nanoseconds. - Weight::from_parts(376_553_787, 0) - .saturating_add(Weight::from_parts(0, 17455)) - // Standard Error: 912_833 - .saturating_add(Weight::from_parts(3_021_100_412, 0).saturating_mul(r.into())) + // Measured: `822 + r * (76 ±0)` + // Estimated: `6716 + r * (77 ±0)` + // Minimum execution time: 236_108_000 picoseconds. + Weight::from_parts(248_577_226, 6716) + // Standard Error: 9_565 + .saturating_add(Weight::from_parts(36_733_552, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 30415).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1578,19 +1538,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `886 + r * (3362 ±0)` - // Estimated: `17300 + r * (16810 ±0)` - // Minimum execution time: 373_673 nanoseconds. - Weight::from_parts(375_712_961, 0) - .saturating_add(Weight::from_parts(0, 17300)) - // Standard Error: 596_297 - .saturating_add(Weight::from_parts(738_257_638, 0).saturating_mul(r.into())) + // Measured: `792 + r * (42 ±0)` + // Estimated: `6731 + r * (42 ±0)` + // Minimum execution time: 236_440_000 picoseconds. 
+ Weight::from_parts(240_771_418, 6731) + // Standard Error: 1_849 + .saturating_add(Weight::from_parts(9_185_896, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 16810).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1604,21 +1563,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) /// Storage: System EventTopics (r:1538 w:1538) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (79300 ±0)` - // Estimated: `64844 + r * (942952 ±833)` - // Minimum execution time: 374_097 nanoseconds. - Weight::from_parts(374_985_000, 0) - .saturating_add(Weight::from_parts(0, 64844)) - // Standard Error: 3_772_336 - .saturating_add(Weight::from_parts(1_546_402_854, 0).saturating_mul(r.into())) + // Measured: `0 + r * (964 ±0)` + // Estimated: `8190 + r * (3090 ±10)` + // Minimum execution time: 235_056_000 picoseconds. + Weight::from_parts(235_743_000, 8190) + // Standard Error: 46_122 + .saturating_add(Weight::from_parts(21_447_984, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((225_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((150_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 942952).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3090).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1630,19 +1588,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869 + r * (240 ±0)` - // Estimated: `17215 + r * (1200 ±0)` - // Minimum execution time: 374_249 nanoseconds. - Weight::from_parts(377_990_998, 0) - .saturating_add(Weight::from_parts(0, 17215)) - // Standard Error: 38_133 - .saturating_add(Weight::from_parts(11_483_273, 0).saturating_mul(r.into())) + // Measured: `773 + r * (3 ±0)` + // Estimated: `6723 + r * (3 ±0)` + // Minimum execution time: 235_213_000 picoseconds. 
+ Weight::from_parts(239_456_464, 6723) + // Standard Error: 130 + .saturating_add(Weight::from_parts(159_851, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 1200).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1654,19 +1611,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2102 + r * (3154 ±0)` - // Estimated: `21980 + r * (15875 ±2)` - // Minimum execution time: 375_552 nanoseconds. - Weight::from_parts(400_624_032, 0) - .saturating_add(Weight::from_parts(0, 21980)) - // Standard Error: 82_523 - .saturating_add(Weight::from_parts(18_057_327, 0).saturating_mul(r.into())) + // Measured: `1975 + r * (39 ±0)` + // Estimated: `7805 + r * (40 ±0)` + // Minimum execution time: 237_886_000 picoseconds. + Weight::from_parts(262_430_157, 7805) + // Standard Error: 939 + .saturating_add(Weight::from_parts(260_005, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 15875).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -1680,605 +1636,541 @@ impl WeightInfo for SubstrateWeight { /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (240 ±0)` - // Estimated: `18598 + r * (1440 ±0)` - // Minimum execution time: 373_899 nanoseconds. - Weight::from_parts(379_733_943, 0) - .saturating_add(Weight::from_parts(0, 18598)) - // Standard Error: 32_022 - .saturating_add(Weight::from_parts(9_381_180, 0).saturating_mul(r.into())) + // Measured: `776 + r * (3 ±0)` + // Estimated: `6723 + r * (3 ±0)` + // Minimum execution time: 234_014_000 picoseconds. + Weight::from_parts(240_042_671, 6723) + // Standard Error: 152 + .saturating_add(Weight::from_parts(138_382, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 1440).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64const(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 834 nanoseconds. 
- Weight::from_parts(1_009_646, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 388 - .saturating_add(Weight::from_parts(411_979, 0).saturating_mul(r.into())) + // Minimum execution time: 1_533_000 picoseconds. + Weight::from_parts(1_846_015, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_935, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64load(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 882 nanoseconds. - Weight::from_parts(1_416_377, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_133 - .saturating_add(Weight::from_parts(1_075_838, 0).saturating_mul(r.into())) + // Minimum execution time: 1_671_000 picoseconds. + Weight::from_parts(2_197_197, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(6_335, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64store(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 878 nanoseconds. - Weight::from_parts(1_343_056, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 426 - .saturating_add(Weight::from_parts(1_001_214, 0).saturating_mul(r.into())) + // Minimum execution time: 1_665_000 picoseconds. + Weight::from_parts(2_200_545, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(6_011, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_select(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 796 nanoseconds. - Weight::from_parts(1_079_086, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 409 - .saturating_add(Weight::from_parts(1_149_188, 0).saturating_mul(r.into())) + // Minimum execution time: 1_545_000 picoseconds. + Weight::from_parts(1_977_462, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(7_901, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_if(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 800 nanoseconds. - Weight::from_parts(1_044_184, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 707 - .saturating_add(Weight::from_parts(1_315_686, 0).saturating_mul(r.into())) + // Minimum execution time: 1_515_000 picoseconds. + Weight::from_parts(1_866_184, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(10_514, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_br(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 807 nanoseconds. - Weight::from_parts(1_049_633, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 361 - .saturating_add(Weight::from_parts(640_530, 0).saturating_mul(r.into())) + // Minimum execution time: 1_618_000 picoseconds. + Weight::from_parts(1_895_104, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(4_523, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. 
fn instr_br_if(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 774 nanoseconds. - Weight::from_parts(1_124_053, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 784 - .saturating_add(Weight::from_parts(949_398, 0).saturating_mul(r.into())) + // Minimum execution time: 1_510_000 picoseconds. + Weight::from_parts(1_779_998, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(6_832, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_br_table(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810 nanoseconds. - Weight::from_parts(676_581, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_356 - .saturating_add(Weight::from_parts(1_163_465, 0).saturating_mul(r.into())) + // Minimum execution time: 1_529_000 picoseconds. + Weight::from_parts(1_726_996, 0) + // Standard Error: 26 + .saturating_add(Weight::from_parts(9_199, 0).saturating_mul(r.into())) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_580 nanoseconds. - Weight::from_parts(2_835_656, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 71 - .saturating_add(Weight::from_parts(4_686, 0).saturating_mul(e.into())) + // Minimum execution time: 1_682_000 picoseconds. + Weight::from_parts(1_789_910, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(42, 0).saturating_mul(e.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 826 nanoseconds. - Weight::from_parts(1_625_698, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_740 - .saturating_add(Weight::from_parts(2_332_187, 0).saturating_mul(r.into())) + // Minimum execution time: 1_539_000 picoseconds. + Weight::from_parts(2_093_056, 0) + // Standard Error: 27 + .saturating_add(Weight::from_parts(18_917, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_call_indirect(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 901 nanoseconds. - Weight::from_parts(2_338_620, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_642 - .saturating_add(Weight::from_parts(2_924_090, 0).saturating_mul(r.into())) - } - /// The range of component `p` is `[0, 128]`. - fn instr_call_indirect_per_param(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_670 nanoseconds. - Weight::from_parts(5_556_246, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_491 - .saturating_add(Weight::from_parts(228_965, 0).saturating_mul(p.into())) + // Minimum execution time: 1_851_000 picoseconds. + Weight::from_parts(3_134_610, 0) + // Standard Error: 34 + .saturating_add(Weight::from_parts(24_714, 0).saturating_mul(r.into())) } /// The range of component `l` is `[0, 1024]`. 
fn instr_call_per_local(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_099 nanoseconds. - Weight::from_parts(3_896_177, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 99 - .saturating_add(Weight::from_parts(91_304, 0).saturating_mul(l.into())) + // Minimum execution time: 1_654_000 picoseconds. + Weight::from_parts(1_885_921, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_243, 0).saturating_mul(l.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_local_get(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_042 nanoseconds. - Weight::from_parts(3_334_621, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 793 - .saturating_add(Weight::from_parts(459_346, 0).saturating_mul(r.into())) + // Minimum execution time: 2_744_000 picoseconds. + Weight::from_parts(3_014_725, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(2_447, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_local_set(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_968 nanoseconds. - Weight::from_parts(3_235_286, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 427 - .saturating_add(Weight::from_parts(485_454, 0).saturating_mul(r.into())) + // Minimum execution time: 2_881_000 picoseconds. + Weight::from_parts(3_137_711, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(3_608, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_local_tee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_012 nanoseconds. - Weight::from_parts(3_303_555, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 371 - .saturating_add(Weight::from_parts(657_811, 0).saturating_mul(r.into())) + // Minimum execution time: 2_809_000 picoseconds. + Weight::from_parts(3_142_066, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(3_841, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_global_get(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 865 nanoseconds. - Weight::from_parts(1_249_987, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 417 - .saturating_add(Weight::from_parts(896_704, 0).saturating_mul(r.into())) + // Minimum execution time: 1_704_000 picoseconds. + Weight::from_parts(2_083_619, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(8_366, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_global_set(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 866 nanoseconds. - Weight::from_parts(1_216_218, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 503 - .saturating_add(Weight::from_parts(919_719, 0).saturating_mul(r.into())) + // Minimum execution time: 1_652_000 picoseconds. 
+ Weight::from_parts(2_048_256, 0) + // Standard Error: 5 + .saturating_add(Weight::from_parts(8_826, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_memory_current(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 921 nanoseconds. - Weight::from_parts(1_228_408, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 309 - .saturating_add(Weight::from_parts(813_007, 0).saturating_mul(r.into())) + // Minimum execution time: 1_671_000 picoseconds. + Weight::from_parts(1_924_626, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_746, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 16]`. fn instr_memory_grow(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 820 nanoseconds. - Weight::from_parts(914_830, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_018 - .saturating_add(Weight::from_parts(237_062_769, 0).saturating_mul(r.into())) + // Minimum execution time: 1_585_000 picoseconds. + Weight::from_parts(490_856, 0) + // Standard Error: 133_673 + .saturating_add(Weight::from_parts(13_182_582, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64clz(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 812 nanoseconds. - Weight::from_parts(1_554_406, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 9_979 - .saturating_add(Weight::from_parts(625_434, 0).saturating_mul(r.into())) + // Minimum execution time: 1_533_000 picoseconds. + Weight::from_parts(1_851_563, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_820, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ctz(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798 nanoseconds. - Weight::from_parts(1_095_113, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 243 - .saturating_add(Weight::from_parts(634_204, 0).saturating_mul(r.into())) + // Minimum execution time: 1_564_000 picoseconds. + Weight::from_parts(1_914_178, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_732, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64popcnt(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 792 nanoseconds. - Weight::from_parts(1_109_845, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 14_944 - .saturating_add(Weight::from_parts(658_834, 0).saturating_mul(r.into())) + // Minimum execution time: 1_559_000 picoseconds. + Weight::from_parts(1_886_992, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_731, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64eqz(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 815 nanoseconds. 
- Weight::from_parts(1_068_916, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 327 - .saturating_add(Weight::from_parts(652_897, 0).saturating_mul(r.into())) + // Minimum execution time: 1_553_000 picoseconds. + Weight::from_parts(1_886_545, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_658, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64extendsi32(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798 nanoseconds. - Weight::from_parts(1_069_745, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 306 - .saturating_add(Weight::from_parts(618_481, 0).saturating_mul(r.into())) + // Minimum execution time: 1_507_000 picoseconds. + Weight::from_parts(1_853_647, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_852, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64extendui32(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 799 nanoseconds. - Weight::from_parts(1_398_001, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_234 - .saturating_add(Weight::from_parts(611_399, 0).saturating_mul(r.into())) + // Minimum execution time: 1_554_000 picoseconds. + Weight::from_parts(1_868_877, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_806, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i32wrapi64(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 811 nanoseconds. - Weight::from_parts(1_098_083, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 297 - .saturating_add(Weight::from_parts(617_692, 0).saturating_mul(r.into())) + // Minimum execution time: 1_514_000 picoseconds. + Weight::from_parts(1_882_233, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_700, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64eq(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 815 nanoseconds. - Weight::from_parts(1_046_922, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 335 - .saturating_add(Weight::from_parts(909_196, 0).saturating_mul(r.into())) + // Minimum execution time: 1_529_000 picoseconds. + Weight::from_parts(1_897_247, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_955, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ne(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 805 nanoseconds. - Weight::from_parts(1_093_667, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 233 - .saturating_add(Weight::from_parts(907_378, 0).saturating_mul(r.into())) + // Minimum execution time: 1_513_000 picoseconds. + Weight::from_parts(1_922_333, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_933, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. 
fn instr_i64lts(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 805 nanoseconds. - Weight::from_parts(1_290_591, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_201 - .saturating_add(Weight::from_parts(902_006, 0).saturating_mul(r.into())) + // Minimum execution time: 1_512_000 picoseconds. + Weight::from_parts(1_848_668, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_966, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ltu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 783 nanoseconds. - Weight::from_parts(1_159_977, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_310 - .saturating_add(Weight::from_parts(906_489, 0).saturating_mul(r.into())) + // Minimum execution time: 1_522_000 picoseconds. + Weight::from_parts(1_875_257, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_965, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64gts(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 790 nanoseconds. - Weight::from_parts(1_109_719, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 261 - .saturating_add(Weight::from_parts(906_614, 0).saturating_mul(r.into())) + // Minimum execution time: 1_546_000 picoseconds. + Weight::from_parts(1_836_691, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(5_842, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64gtu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 776 nanoseconds. - Weight::from_parts(1_076_567, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 348 - .saturating_add(Weight::from_parts(919_374, 0).saturating_mul(r.into())) + // Minimum execution time: 1_505_000 picoseconds. + Weight::from_parts(1_907_551, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_075, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64les(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 780 nanoseconds. - Weight::from_parts(1_069_663, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 265 - .saturating_add(Weight::from_parts(908_037, 0).saturating_mul(r.into())) + // Minimum execution time: 1_527_000 picoseconds. + Weight::from_parts(1_891_008, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_971, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64leu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 832 nanoseconds. - Weight::from_parts(930_920, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_170 - .saturating_add(Weight::from_parts(929_811, 0).saturating_mul(r.into())) + // Minimum execution time: 1_556_000 picoseconds. 
+ Weight::from_parts(1_910_864, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_059, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ges(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 787 nanoseconds. - Weight::from_parts(1_087_325, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 315 - .saturating_add(Weight::from_parts(908_321, 0).saturating_mul(r.into())) + // Minimum execution time: 1_544_000 picoseconds. + Weight::from_parts(1_912_650, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_943, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64geu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798 nanoseconds. - Weight::from_parts(1_029_132, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_095 - .saturating_add(Weight::from_parts(913_553, 0).saturating_mul(r.into())) + // Minimum execution time: 1_513_000 picoseconds. + Weight::from_parts(1_855_260, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_975, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64add(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 791 nanoseconds. - Weight::from_parts(1_086_314, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 197 - .saturating_add(Weight::from_parts(896_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_521_000 picoseconds. + Weight::from_parts(1_867_259, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_846, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64sub(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 793 nanoseconds. - Weight::from_parts(1_078_172, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 404 - .saturating_add(Weight::from_parts(886_329, 0).saturating_mul(r.into())) + // Minimum execution time: 1_509_000 picoseconds. + Weight::from_parts(1_893_018, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_096, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64mul(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 799 nanoseconds. - Weight::from_parts(1_095_010, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 431 - .saturating_add(Weight::from_parts(886_513, 0).saturating_mul(r.into())) + // Minimum execution time: 1_496_000 picoseconds. + Weight::from_parts(1_886_659, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_754, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64divs(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810 nanoseconds. 
- Weight::from_parts(1_114_325, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(1_521_849, 0).saturating_mul(r.into())) + // Minimum execution time: 1_527_000 picoseconds. + Weight::from_parts(1_890_548, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(11_842, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64divu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 784 nanoseconds. - Weight::from_parts(1_123_153, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 475 - .saturating_add(Weight::from_parts(1_457_746, 0).saturating_mul(r.into())) + // Minimum execution time: 1_518_000 picoseconds. + Weight::from_parts(1_891_903, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(10_613, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64rems(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 809 nanoseconds. - Weight::from_parts(1_145_906, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 718 - .saturating_add(Weight::from_parts(1_549_964, 0).saturating_mul(r.into())) + // Minimum execution time: 1_504_000 picoseconds. + Weight::from_parts(1_632_694, 0) + // Standard Error: 7 + .saturating_add(Weight::from_parts(12_281, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64remu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 803 nanoseconds. - Weight::from_parts(1_110_328, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 627 - .saturating_add(Weight::from_parts(1_453_013, 0).saturating_mul(r.into())) + // Minimum execution time: 1_507_000 picoseconds. + Weight::from_parts(1_878_413, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(10_737, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64and(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 786 nanoseconds. - Weight::from_parts(1_108_792, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 286 - .saturating_add(Weight::from_parts(897_035, 0).saturating_mul(r.into())) + // Minimum execution time: 1_534_000 picoseconds. + Weight::from_parts(1_898_519, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_645, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64or(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 787 nanoseconds. - Weight::from_parts(830_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 15_995 - .saturating_add(Weight::from_parts(963_344, 0).saturating_mul(r.into())) + // Minimum execution time: 1_503_000 picoseconds. + Weight::from_parts(1_895_532, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_745, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. 
fn instr_i64xor(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 773 nanoseconds. - Weight::from_parts(1_082_459, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 330 - .saturating_add(Weight::from_parts(897_077, 0).saturating_mul(r.into())) + // Minimum execution time: 1_507_000 picoseconds. + Weight::from_parts(1_868_720, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_873, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64shl(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810 nanoseconds. - Weight::from_parts(1_325_815, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_352 - .saturating_add(Weight::from_parts(899_555, 0).saturating_mul(r.into())) + // Minimum execution time: 1_513_000 picoseconds. + Weight::from_parts(1_894_207, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_843, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64shrs(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 808 nanoseconds. - Weight::from_parts(1_085_903, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 430 - .saturating_add(Weight::from_parts(903_249, 0).saturating_mul(r.into())) + // Minimum execution time: 1_473_000 picoseconds. + Weight::from_parts(1_880_224, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_107, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64shru(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 792 nanoseconds. - Weight::from_parts(1_091_261, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 312 - .saturating_add(Weight::from_parts(902_245, 0).saturating_mul(r.into())) + // Minimum execution time: 1_447_000 picoseconds. + Weight::from_parts(1_884_551, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_849, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64rotl(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 807 nanoseconds. - Weight::from_parts(1_121_052, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 506 - .saturating_add(Weight::from_parts(902_772, 0).saturating_mul(r.into())) + // Minimum execution time: 1_538_000 picoseconds. + Weight::from_parts(1_908_813, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_987, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64rotr(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 823 nanoseconds. - Weight::from_parts(1_317_597, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_219 - .saturating_add(Weight::from_parts(896_692, 0).saturating_mul(r.into())) + // Minimum execution time: 1_538_000 picoseconds. 
+ Weight::from_parts(1_878_015, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_848, 0).saturating_mul(r.into())) } } // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Contracts DeletionQueue (r:1 w:0) - /// Proof: Contracts DeletionQueue (max_values: Some(1), max_size: Some(16642), added: 17137, mode: Measured) + /// Storage: Contracts DeletionQueueCounter (r:1 w:0) + /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `604` - // Minimum execution time: 2_591 nanoseconds. - Weight::from_parts(2_817_000, 0) - .saturating_add(Weight::from_parts(0, 604)) + // Estimated: `1594` + // Minimum execution time: 2_565_000 picoseconds. + Weight::from_parts(2_759_000, 1594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Skipped Metadata (r:0 w:0) @@ -2286,34 +2178,18 @@ impl WeightInfo for () { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `481 + k * (69 ±0)` - // Estimated: `471 + k * (70 ±0)` - // Minimum execution time: 10_190 nanoseconds. - Weight::from_parts(6_642_117, 0) - .saturating_add(Weight::from_parts(0, 471)) - // Standard Error: 992 - .saturating_add(Weight::from_parts(919_828, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Measured: `488 + k * (69 ±0)` + // Estimated: `478 + k * (70 ±0)` + // Minimum execution time: 13_480_000 picoseconds. + Weight::from_parts(10_153_869, 478) + // Standard Error: 427 + .saturating_add(Weight::from_parts(958_726, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: Contracts DeletionQueue (r:1 w:1) - /// Proof: Contracts DeletionQueue (max_values: Some(1), max_size: Some(16642), added: 17137, mode: Measured) - /// The range of component `q` is `[0, 128]`. - fn on_initialize_per_queue_item(q: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `281 + q * (33 ±0)` - // Estimated: `763 + q * (33 ±0)` - // Minimum execution time: 2_598 nanoseconds. - Weight::from_parts(10_288_252, 0) - .saturating_add(Weight::from_parts(0, 763)) - // Standard Error: 2_886 - .saturating_add(Weight::from_parts(1_092_420, 0).saturating_mul(q.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(q.into())) - } /// Storage: Contracts PristineCode (r:1 w:0) /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) /// Storage: Contracts CodeStorage (r:0 w:1) @@ -2321,16 +2197,15 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 61717]`. fn reinstrument(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270 + c * (1 ±0)` - // Estimated: `3025 + c * (2 ±0)` - // Minimum execution time: 34_338 nanoseconds. 
- Weight::from_parts(32_159_677, 0) - .saturating_add(Weight::from_parts(0, 3025)) - // Standard Error: 53 - .saturating_add(Weight::from_parts(51_034, 0).saturating_mul(c.into())) + // Measured: `238 + c * (1 ±0)` + // Estimated: `3708 + c * (1 ±0)` + // Minimum execution time: 30_406_000 picoseconds. + Weight::from_parts(28_467_370, 3708) + // Standard Error: 46 + .saturating_add(Weight::from_parts(53_724, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 2).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: Contracts ContractInfoOf (r:1 w:1) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) @@ -2345,16 +2220,15 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `803` - // Estimated: `16930 + c * (5 ±0)` - // Minimum execution time: 385_587 nanoseconds. - Weight::from_parts(395_545_811, 0) - .saturating_add(Weight::from_parts(0, 16930)) - // Standard Error: 27 - .saturating_add(Weight::from_parts(31_342, 0).saturating_mul(c.into())) + // Measured: `707` + // Estimated: `6656 + c * (1 ±0)` + // Minimum execution time: 263_198_000 picoseconds. + Weight::from_parts(276_162_279, 6656) + // Standard Error: 18 + .saturating_add(Weight::from_parts(37_378, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: Contracts OwnerInfoOf (r:1 w:1) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) @@ -2378,16 +2252,15 @@ impl WeightInfo for () { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `270` - // Estimated: `20267` - // Minimum execution time: 3_799_742 nanoseconds. - Weight::from_parts(670_115_588, 0) - .saturating_add(Weight::from_parts(0, 20267)) - // Standard Error: 287 - .saturating_add(Weight::from_parts(93_885, 0).saturating_mul(c.into())) + // Estimated: `8659` + // Minimum execution time: 3_132_259_000 picoseconds. + Weight::from_parts(513_284_834, 8659) + // Standard Error: 280 + .saturating_add(Weight::from_parts(106_723, 0).saturating_mul(c.into())) // Standard Error: 16 - .saturating_add(Weight::from_parts(1_367, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_166, 0).saturating_mul(i.into())) // Standard Error: 16 - .saturating_add(Weight::from_parts(1_781, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_436, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -2409,15 +2282,14 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `546` - // Estimated: `22039` - // Minimum execution time: 1_949_008 nanoseconds. 
- Weight::from_parts(214_033_418, 0) - .saturating_add(Weight::from_parts(0, 22039)) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_666, 0).saturating_mul(i.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_801, 0).saturating_mul(s.into())) + // Measured: `482` + // Estimated: `6408` + // Minimum execution time: 1_646_604_000 picoseconds. + Weight::from_parts(271_369_256, 6408) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_426, 0).saturating_mul(i.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_438, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -2433,11 +2305,10 @@ impl WeightInfo for () { /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `855` - // Estimated: `17145` - // Minimum execution time: 146_654 nanoseconds. - Weight::from_parts(147_528_000, 0) - .saturating_add(Weight::from_parts(0, 17145)) + // Measured: `759` + // Estimated: `6699` + // Minimum execution time: 191_360_000 picoseconds. + Weight::from_parts(192_625_000, 6699) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2453,12 +2324,11 @@ impl WeightInfo for () { fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `5386` - // Minimum execution time: 387_889 nanoseconds. - Weight::from_parts(391_379_335, 0) - .saturating_add(Weight::from_parts(0, 5386)) - // Standard Error: 89 - .saturating_add(Weight::from_parts(94_810, 0).saturating_mul(c.into())) + // Estimated: `3574` + // Minimum execution time: 245_207_000 picoseconds. + Weight::from_parts(244_703_457, 3574) + // Standard Error: 61 + .saturating_add(Weight::from_parts(106_850, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2472,11 +2342,10 @@ impl WeightInfo for () { /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `287` - // Estimated: `6098` - // Minimum execution time: 26_014 nanoseconds. - Weight::from_parts(26_510_000, 0) - .saturating_add(Weight::from_parts(0, 6098)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 33_560_000 picoseconds. + Weight::from_parts(33_833_000, 3720) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2488,11 +2357,10 @@ impl WeightInfo for () { /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `666` - // Estimated: `16848` - // Minimum execution time: 30_177 nanoseconds. - Weight::from_parts(30_639_000, 0) - .saturating_add(Weight::from_parts(0, 16848)) + // Measured: `570` + // Estimated: `8985` + // Minimum execution time: 33_288_000 picoseconds. 
+ Weight::from_parts(33_775_000, 8985) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -2506,19 +2374,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (480 ±0)` - // Estimated: `17295 + r * (2400 ±0)` - // Minimum execution time: 373_786 nanoseconds. - Weight::from_parts(377_332_691, 0) - .saturating_add(Weight::from_parts(0, 17295)) - // Standard Error: 51_211 - .saturating_add(Weight::from_parts(17_715_615, 0).saturating_mul(r.into())) + // Measured: `781 + r * (6 ±0)` + // Estimated: `6722 + r * (6 ±0)` + // Minimum execution time: 234_292_000 picoseconds. + Weight::from_parts(235_941_911, 6722) + // Standard Error: 413 + .saturating_add(Weight::from_parts(339_913, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2530,20 +2397,19 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `917 + r * (21778 ±0)` - // Estimated: `17295 + r * (306895 ±0)` - // Minimum execution time: 374_009 nanoseconds. - Weight::from_parts(238_991_986, 0) - .saturating_add(Weight::from_parts(0, 17295)) - // Standard Error: 464_711 - .saturating_add(Weight::from_parts(249_099_538, 0).saturating_mul(r.into())) + // Measured: `839 + r * (240 ±0)` + // Estimated: `6743 + r * (2715 ±0)` + // Minimum execution time: 236_273_000 picoseconds. + Weight::from_parts(74_380_906, 6743) + // Standard Error: 5_745 + .saturating_add(Weight::from_parts(3_331_781, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 306895).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2715).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2555,20 +2421,19 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. 
fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (22099 ±0)` - // Estimated: `17340 + r * (308500 ±0)` - // Minimum execution time: 375_058 nanoseconds. - Weight::from_parts(238_765_068, 0) - .saturating_add(Weight::from_parts(0, 17340)) - // Standard Error: 662_617 - .saturating_add(Weight::from_parts(302_175_089, 0).saturating_mul(r.into())) + // Measured: `831 + r * (244 ±0)` + // Estimated: `6747 + r * (2719 ±0)` + // Minimum execution time: 236_573_000 picoseconds. + Weight::from_parts(82_473_906, 6747) + // Standard Error: 5_510 + .saturating_add(Weight::from_parts(4_131_820, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 308500).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2719).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2580,19 +2445,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `884 + r * (480 ±0)` - // Estimated: `17330 + r * (2400 ±0)` - // Minimum execution time: 374_747 nanoseconds. - Weight::from_parts(376_482_380, 0) - .saturating_add(Weight::from_parts(0, 17330)) - // Standard Error: 61_919 - .saturating_add(Weight::from_parts(22_376_795, 0).saturating_mul(r.into())) + // Measured: `788 + r * (6 ±0)` + // Estimated: `6730 + r * (6 ±0)` + // Minimum execution time: 235_878_000 picoseconds. + Weight::from_parts(238_387_359, 6730) + // Standard Error: 318 + .saturating_add(Weight::from_parts(409_923, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2604,19 +2468,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `874 + r * (240 ±0)` - // Estimated: `17265 + r * (1200 ±0)` - // Minimum execution time: 372_287 nanoseconds. - Weight::from_parts(376_250_858, 0) - .saturating_add(Weight::from_parts(0, 17265)) - // Standard Error: 40_119 - .saturating_add(Weight::from_parts(11_359_647, 0).saturating_mul(r.into())) + // Measured: `778 + r * (3 ±0)` + // Estimated: `6723 + r * (3 ±0)` + // Minimum execution time: 233_476_000 picoseconds. 
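// A worked reading of the linear formula that follows (informal, using the figures emitted for
// `seal_caller_is_origin`; the DB costs are whatever `RocksDbWeight` resolves to at runtime):
// at the top of the component range, r = 1_600,
//   ref_time   = 238_014_452 + 165_823 * 1_600 = 503_331_252 ps  (~0.50 ms)
//   proof_size = 6_723 + 3 * 1_600 = 11_523 bytes
// plus the fixed 6 reads and 3 writes priced via `RocksDbWeight::get()`.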
+ Weight::from_parts(238_014_452, 6723) + // Standard Error: 145 + .saturating_add(Weight::from_parts(165_823, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 1200).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2628,19 +2491,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `878 + r * (480 ±0)` - // Estimated: `17260 + r * (2400 ±0)` - // Minimum execution time: 374_445 nanoseconds. - Weight::from_parts(377_243_521, 0) - .saturating_add(Weight::from_parts(0, 17260)) - // Standard Error: 53_032 - .saturating_add(Weight::from_parts(17_684_246, 0).saturating_mul(r.into())) + // Measured: `782 + r * (6 ±0)` + // Estimated: `6724 + r * (6 ±0)` + // Minimum execution time: 235_490_000 picoseconds. + Weight::from_parts(240_039_685, 6724) + // Standard Error: 330 + .saturating_add(Weight::from_parts(332_291, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2652,19 +2514,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `879 + r * (480 ±0)` - // Estimated: `17250 + r * (2405 ±0)` - // Minimum execution time: 374_029 nanoseconds. - Weight::from_parts(380_415_186, 0) - .saturating_add(Weight::from_parts(0, 17250)) - // Standard Error: 60_562 - .saturating_add(Weight::from_parts(17_152_599, 0).saturating_mul(r.into())) + // Measured: `783 + r * (6 ±0)` + // Estimated: `6721 + r * (6 ±0)` + // Minimum execution time: 236_093_000 picoseconds. 
+ Weight::from_parts(238_513_328, 6721) + // Standard Error: 206 + .saturating_add(Weight::from_parts(328_899, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2405).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:2 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2676,19 +2537,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1049 + r * (480 ±0)` - // Estimated: `19849 + r * (2456 ±0)` - // Minimum execution time: 373_999 nanoseconds. - Weight::from_parts(381_757_033, 0) - .saturating_add(Weight::from_parts(0, 19849)) - // Standard Error: 97_983 - .saturating_add(Weight::from_parts(98_290_984, 0).saturating_mul(r.into())) + // Measured: `922 + r * (6 ±0)` + // Estimated: `6846 + r * (6 ±0)` + // Minimum execution time: 234_801_000 picoseconds. + Weight::from_parts(243_519_159, 6846) + // Standard Error: 1_367 + .saturating_add(Weight::from_parts(1_449_599, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2456).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2700,19 +2560,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `888 + r * (480 ±0)` - // Estimated: `17360 + r * (2400 ±0)` - // Minimum execution time: 374_197 nanoseconds. - Weight::from_parts(377_755_896, 0) - .saturating_add(Weight::from_parts(0, 17360)) - // Standard Error: 60_542 - .saturating_add(Weight::from_parts(17_442_065, 0).saturating_mul(r.into())) + // Measured: `792 + r * (6 ±0)` + // Estimated: `6741 + r * (6 ±0)` + // Minimum execution time: 236_765_000 picoseconds. 
+ Weight::from_parts(237_843_244, 6741) + // Standard Error: 308 + .saturating_add(Weight::from_parts(329_911, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2724,19 +2583,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `886 + r * (480 ±0)` - // Estimated: `17290 + r * (2400 ±0)` - // Minimum execution time: 373_888 nanoseconds. - Weight::from_parts(377_825_771, 0) - .saturating_add(Weight::from_parts(0, 17290)) - // Standard Error: 38_026 - .saturating_add(Weight::from_parts(17_147_903, 0).saturating_mul(r.into())) + // Measured: `790 + r * (6 ±0)` + // Estimated: `6739 + r * (6 ±0)` + // Minimum execution time: 236_690_000 picoseconds. + Weight::from_parts(241_743_164, 6739) + // Standard Error: 333 + .saturating_add(Weight::from_parts(324_693, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2748,19 +2606,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `883 + r * (480 ±0)` - // Estimated: `17315 + r * (2400 ±0)` - // Minimum execution time: 373_904 nanoseconds. - Weight::from_parts(378_652_372, 0) - .saturating_add(Weight::from_parts(0, 17315)) - // Standard Error: 43_833 - .saturating_add(Weight::from_parts(16_936_781, 0).saturating_mul(r.into())) + // Measured: `787 + r * (6 ±0)` + // Estimated: `6737 + r * (6 ±0)` + // Minimum execution time: 236_149_000 picoseconds. 
+ Weight::from_parts(239_090_707, 6737) + // Standard Error: 246 + .saturating_add(Weight::from_parts(344_488, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2772,19 +2629,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `874 + r * (480 ±0)` - // Estimated: `17245 + r * (2400 ±0)` - // Minimum execution time: 373_473 nanoseconds. - Weight::from_parts(376_386_312, 0) - .saturating_add(Weight::from_parts(0, 17245)) - // Standard Error: 46_945 - .saturating_add(Weight::from_parts(17_336_462, 0).saturating_mul(r.into())) + // Measured: `778 + r * (6 ±0)` + // Estimated: `6723 + r * (6 ±0)` + // Minimum execution time: 235_057_000 picoseconds. + Weight::from_parts(237_752_870, 6723) + // Standard Error: 236 + .saturating_add(Weight::from_parts(328_235, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2798,19 +2654,18 @@ impl WeightInfo for () { /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `951 + r * (800 ±0)` - // Estimated: `19046 + r * (4805 ±0)` - // Minimum execution time: 373_661 nanoseconds. - Weight::from_parts(385_824_015, 0) - .saturating_add(Weight::from_parts(0, 19046)) - // Standard Error: 75_964 - .saturating_add(Weight::from_parts(88_530_074, 0).saturating_mul(r.into())) + // Measured: `856 + r * (10 ±0)` + // Estimated: `6796 + r * (10 ±0)` + // Minimum execution time: 234_995_000 picoseconds. 
+ Weight::from_parts(246_473_554, 6796) + // Standard Error: 1_015 + .saturating_add(Weight::from_parts(1_337_653, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4805).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2822,19 +2677,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_gas(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `841 + r * (320 ±0)` - // Estimated: `17120 + r * (1600 ±0)` - // Minimum execution time: 133_849 nanoseconds. - Weight::from_parts(137_283_391, 0) - .saturating_add(Weight::from_parts(0, 17120)) - // Standard Error: 13_312 - .saturating_add(Weight::from_parts(8_055_328, 0).saturating_mul(r.into())) + // Measured: `745 + r * (4 ±0)` + // Estimated: `6687 + r * (4 ±0)` + // Minimum execution time: 160_445_000 picoseconds. + Weight::from_parts(165_558_135, 6687) + // Standard Error: 234 + .saturating_add(Weight::from_parts(133_607, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 1600).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2846,19 +2700,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `876 + r * (480 ±0)` - // Estimated: `17245 + r * (2400 ±0)` - // Minimum execution time: 373_468 nanoseconds. - Weight::from_parts(376_121_093, 0) - .saturating_add(Weight::from_parts(0, 17245)) - // Standard Error: 61_857 - .saturating_add(Weight::from_parts(15_868_414, 0).saturating_mul(r.into())) + // Measured: `780 + r * (6 ±0)` + // Estimated: `6724 + r * (6 ±0)` + // Minimum execution time: 235_065_000 picoseconds. 
+ Weight::from_parts(237_797_177, 6724) + // Standard Error: 336 + .saturating_add(Weight::from_parts(267_302, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2400).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2870,16 +2723,15 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_input_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1356` - // Estimated: `19650` - // Minimum execution time: 390_668 nanoseconds. - Weight::from_parts(419_608_449, 0) - .saturating_add(Weight::from_parts(0, 19650)) - // Standard Error: 4_890 - .saturating_add(Weight::from_parts(9_672_288, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_input_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `784` + // Estimated: `6724` + // Minimum execution time: 236_215_000 picoseconds. + Weight::from_parts(239_347_313, 6724) + // Standard Error: 0 + .saturating_add(Weight::from_parts(587, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -2896,16 +2748,15 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (45 ±0)` - // Estimated: `17190 + r * (225 ±0)` - // Minimum execution time: 371_309 nanoseconds. - Weight::from_parts(373_625_402, 0) - .saturating_add(Weight::from_parts(0, 17190)) - // Standard Error: 419_605 - .saturating_add(Weight::from_parts(1_737_397, 0).saturating_mul(r.into())) + // Measured: `768 + r * (45 ±0)` + // Estimated: `6708 + r * (45 ±0)` + // Minimum execution time: 231_571_000 picoseconds. + Weight::from_parts(233_477_918, 6708) + // Standard Error: 95_776 + .saturating_add(Weight::from_parts(1_733_181, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 225).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2917,16 +2768,15 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_return_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `874` - // Estimated: `17285` - // Minimum execution time: 374_094 nanoseconds. - Weight::from_parts(375_965_200, 0) - .saturating_add(Weight::from_parts(0, 17285)) - // Standard Error: 1_127 - .saturating_add(Weight::from_parts(232_645, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. 
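// The byte-length benchmarks here are now parameterised per byte rather than per KiB, so the
// component ranges grow by a factor of 1_024 (1_048_576 = 1_024 * 1_024). As an informal check
// using the `seal_input_per_byte` figures above: at the maximum n = 1_048_576 bytes the length
// term contributes 587 * 1_048_576 = 615_514_112 ps (~0.62 ms) on top of the base weight.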
+ fn seal_return_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `778` + // Estimated: `6731` + // Minimum execution time: 234_956_000 picoseconds. + Weight::from_parts(236_785_051, 6731) + // Standard Error: 0 + .saturating_add(Weight::from_parts(177, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -2938,27 +2788,28 @@ impl WeightInfo for () { /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts DeletionQueue (r:1 w:1) - /// Proof: Contracts DeletionQueue (max_values: Some(1), max_size: Some(16642), added: 17137, mode: Measured) + /// Storage: Contracts DeletionQueueCounter (r:1 w:1) + /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: Contracts OwnerInfoOf (r:1 w:1) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) /// Storage: System EventTopics (r:3 w:3) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: Contracts DeletionQueue (r:0 w:1) + /// Proof: Contracts DeletionQueue (max_values: None, max_size: Some(142), added: 2617, mode: Measured) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `906 + r * (452 ±0)` - // Estimated: `20242 + r * (15004 ±0)` - // Minimum execution time: 373_123 nanoseconds. - Weight::from_parts(374_924_634, 0) - .saturating_add(Weight::from_parts(0, 20242)) - // Standard Error: 378_010 - .saturating_add(Weight::from_parts(70_441_665, 0).saturating_mul(r.into())) + // Measured: `810 + r * (356 ±0)` + // Estimated: `6750 + r * (7781 ±0)` + // Minimum execution time: 234_275_000 picoseconds. + Weight::from_parts(236_776_769, 6750) + // Standard Error: 137_203 + .saturating_add(Weight::from_parts(110_758_930, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((7_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 15004).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((8_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 7781).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2972,19 +2823,18 @@ impl WeightInfo for () { /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (800 ±0)` - // Estimated: `18835 + r * (4805 ±0)` - // Minimum execution time: 373_291 nanoseconds. 
- Weight::from_parts(385_684_344, 0) - .saturating_add(Weight::from_parts(0, 18835)) - // Standard Error: 99_025 - .saturating_add(Weight::from_parts(111_308_793, 0).saturating_mul(r.into())) + // Measured: `825 + r * (10 ±0)` + // Estimated: `6769 + r * (10 ±0)` + // Minimum execution time: 235_593_000 picoseconds. + Weight::from_parts(253_731_242, 6769) + // Standard Error: 2_129 + .saturating_add(Weight::from_parts(1_771_297, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4805).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -2996,19 +2846,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `874 + r * (800 ±0)` - // Estimated: `17250 + r * (4000 ±0)` - // Minimum execution time: 371_900 nanoseconds. - Weight::from_parts(384_166_626, 0) - .saturating_add(Weight::from_parts(0, 17250)) - // Standard Error: 205_255 - .saturating_add(Weight::from_parts(229_214_157, 0).saturating_mul(r.into())) + // Measured: `778 + r * (10 ±0)` + // Estimated: `6723 + r * (10 ±0)` + // Minimum execution time: 232_124_000 picoseconds. + Weight::from_parts(245_904_447, 6723) + // Standard Error: 2_185 + .saturating_add(Weight::from_parts(3_470_410, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4000).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3018,27 +2867,25 @@ impl WeightInfo for () { /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:322 w:322) + /// Storage: System EventTopics (r:6 w:6) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) /// The range of component `t` is `[0, 4]`. - /// The range of component `n` is `[0, 16]`. - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1821 + t * (2608 ±0) + n * (7 ±0)` - // Estimated: `21870 + t * (211030 ±0) + n * (50 ±0)` - // Minimum execution time: 1_289_873 nanoseconds. - Weight::from_parts(581_702_206, 0) - .saturating_add(Weight::from_parts(0, 21870)) - // Standard Error: 665_638 - .saturating_add(Weight::from_parts(181_470_553, 0).saturating_mul(t.into())) - // Standard Error: 182_816 - .saturating_add(Weight::from_parts(71_635_250, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. 
+ fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `797 + t * (32 ±0)` + // Estimated: `6744 + t * (2508 ±0)` + // Minimum execution time: 250_301_000 picoseconds. + Weight::from_parts(245_292_258, 6744) + // Standard Error: 29_864 + .saturating_add(Weight::from_parts(2_163_531, 0).saturating_mul(t.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(583, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(t.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((80_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 211030).saturating_mul(t.into())) - .saturating_add(Weight::from_parts(0, 50).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2508).saturating_mul(t.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3050,19 +2897,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (560 ±0)` - // Estimated: `17240 + r * (2800 ±0)` - // Minimum execution time: 148_635 nanoseconds. - Weight::from_parts(154_095_712, 0) - .saturating_add(Weight::from_parts(0, 17240)) - // Standard Error: 77_790 - .saturating_add(Weight::from_parts(14_837_085, 0).saturating_mul(r.into())) + // Measured: `777 + r * (7 ±0)` + // Estimated: `6721 + r * (7 ±0)` + // Minimum execution time: 165_711_000 picoseconds. + Weight::from_parts(168_792_571, 6721) + // Standard Error: 216 + .saturating_add(Weight::from_parts(230_285, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2800).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) @@ -3074,212 +2920,189 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `i` is `[0, 1024]`. - fn seal_debug_message_per_kb(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `125824` - // Estimated: `265128` - // Minimum execution time: 501_014 nanoseconds. - Weight::from_parts(505_634_218, 0) - .saturating_add(Weight::from_parts(0, 265128)) - // Standard Error: 2_441 - .saturating_add(Weight::from_parts(819_257, 0).saturating_mul(i.into())) + /// The range of component `i` is `[0, 1048576]`. 
+ fn seal_debug_message_per_byte(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `125728` + // Estimated: `131670` + // Minimum execution time: 348_928_000 picoseconds. + Weight::from_parts(352_224_793, 131670) + // Standard Error: 0 + .saturating_add(Weight::from_parts(731, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `911 + r * (23420 ±0)` - // Estimated: `911 + r * (23418 ±0)` - // Minimum execution time: 375_301 nanoseconds. - Weight::from_parts(291_498_841, 0) - .saturating_add(Weight::from_parts(0, 911)) - // Standard Error: 809_989 - .saturating_add(Weight::from_parts(464_550_291, 0).saturating_mul(r.into())) + // Measured: `845 + r * (292 ±0)` + // Estimated: `843 + r * (293 ±0)` + // Minimum execution time: 236_418_000 picoseconds. + Weight::from_parts(129_862_840, 843) + // Standard Error: 9_733 + .saturating_add(Weight::from_parts(6_005_187, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 23418).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 293).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12672 + n * (11945 ±0)` - // Estimated: `8529 + n * (12814 ±61)` - // Minimum execution time: 506_318 nanoseconds. - Weight::from_parts(676_935_313, 0) - .saturating_add(Weight::from_parts(0, 8529)) - // Standard Error: 1_589_291 - .saturating_add(Weight::from_parts(97_839_399, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(52_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(50_u64)) - .saturating_add(RocksDbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 12814).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1304` + // Estimated: `1280` + // Minimum execution time: 251_599_000 picoseconds. + Weight::from_parts(285_284_665, 1280) + // Standard Error: 46 + .saturating_add(Weight::from_parts(410, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. 
- fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `15170 + n * (175775 ±0)` - // Estimated: `9914 + n * (176858 ±74)` - // Minimum execution time: 506_148 nanoseconds. - Weight::from_parts(648_278_778, 0) - .saturating_add(Weight::from_parts(0, 9914)) - // Standard Error: 1_343_586 - .saturating_add(Weight::from_parts(65_789_595, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(51_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(49_u64)) - .saturating_add(RocksDbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 176858).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1167 + n * (1 ±0)` + // Estimated: `1167 + n * (1 ±0)` + // Minimum execution time: 251_309_000 picoseconds. + Weight::from_parts(253_555_552, 1167) + // Standard Error: 9 + .saturating_add(Weight::from_parts(27, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `903 + r * (23099 ±0)` - // Estimated: `908 + r * (23099 ±0)` - // Minimum execution time: 374_344 nanoseconds. - Weight::from_parts(293_272_061, 0) - .saturating_add(Weight::from_parts(0, 908)) - // Standard Error: 810_412 - .saturating_add(Weight::from_parts(453_315_956, 0).saturating_mul(r.into())) + // Measured: `841 + r * (288 ±0)` + // Estimated: `845 + r * (289 ±0)` + // Minimum execution time: 235_441_000 picoseconds. + Weight::from_parts(132_980_942, 845) + // Standard Error: 9_421 + .saturating_add(Weight::from_parts(5_854_896, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 23099).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `14895 + n * (175768 ±0)` - // Estimated: `9551 + n * (176867 ±75)` - // Minimum execution time: 478_564 nanoseconds. 
- Weight::from_parts(630_839_142, 0) - .saturating_add(Weight::from_parts(0, 9551)) - // Standard Error: 1_427_520 - .saturating_add(Weight::from_parts(66_813_592, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(51_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(48_u64)) - .saturating_add(RocksDbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 176867).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_clear_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1163 + n * (1 ±0)` + // Estimated: `1163 + n * (1 ±0)` + // Minimum execution time: 249_967_000 picoseconds. + Weight::from_parts(252_122_186, 1163) + // Standard Error: 10 + .saturating_add(Weight::from_parts(74, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `896 + r * (23744 ±0)` - // Estimated: `909 + r * (23740 ±0)` - // Minimum execution time: 374_479 nanoseconds. - Weight::from_parts(311_839_315, 0) - .saturating_add(Weight::from_parts(0, 909)) - // Standard Error: 666_553 - .saturating_add(Weight::from_parts(371_213_042, 0).saturating_mul(r.into())) + // Measured: `835 + r * (296 ±0)` + // Estimated: `840 + r * (297 ±0)` + // Minimum execution time: 235_647_000 picoseconds. + Weight::from_parts(145_525_169, 840) + // Standard Error: 8_553 + .saturating_add(Weight::from_parts(4_948_021, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 23740).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_get_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `15501 + n * (175775 ±0)` - // Estimated: `10042 + n * (176900 ±76)` - // Minimum execution time: 460_639 nanoseconds. - Weight::from_parts(591_187_094, 0) - .saturating_add(Weight::from_parts(0, 10042)) - // Standard Error: 1_233_792 - .saturating_add(Weight::from_parts(160_874_477, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(51_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(n.into()))) + /// The range of component `n` is `[0, 16384]`. + fn seal_get_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1179 + n * (1 ±0)` + // Estimated: `1179 + n * (1 ±0)` + // Minimum execution time: 249_576_000 picoseconds. 
+ Weight::from_parts(250_747_191, 1179) + // Standard Error: 8 + .saturating_add(Weight::from_parts(717, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 176900).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (23098 ±0)` - // Estimated: `920 + r * (23098 ±0)` - // Minimum execution time: 374_272 nanoseconds. - Weight::from_parts(311_446_269, 0) - .saturating_add(Weight::from_parts(0, 920)) - // Standard Error: 630_307 - .saturating_add(Weight::from_parts(357_134_931, 0).saturating_mul(r.into())) + // Measured: `856 + r * (288 ±0)` + // Estimated: `857 + r * (289 ±0)` + // Minimum execution time: 236_110_000 picoseconds. + Weight::from_parts(148_420_625, 857) + // Standard Error: 8_175 + .saturating_add(Weight::from_parts(4_684_126, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 23098).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `14839 + n * (175789 ±0)` - // Estimated: `9532 + n * (176874 ±75)` - // Minimum execution time: 456_013 nanoseconds. - Weight::from_parts(575_116_352, 0) - .saturating_add(Weight::from_parts(0, 9532)) - // Standard Error: 1_122_298 - .saturating_add(Weight::from_parts(61_786_107, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(51_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(n.into()))) + /// The range of component `n` is `[0, 16384]`. + fn seal_contains_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1166 + n * (1 ±0)` + // Estimated: `1166 + n * (1 ±0)` + // Minimum execution time: 247_800_000 picoseconds. + Weight::from_parts(249_410_575, 1166) + // Standard Error: 6 + .saturating_add(Weight::from_parts(99, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 176874).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `911 + r * (23740 ±0)` - // Estimated: `913 + r * (23739 ±0)` - // Minimum execution time: 374_621 nanoseconds. 
- Weight::from_parts(299_689_489, 0) - .saturating_add(Weight::from_parts(0, 913)) - // Standard Error: 757_735 - .saturating_add(Weight::from_parts(465_213_246, 0).saturating_mul(r.into())) + // Measured: `829 + r * (296 ±0)` + // Estimated: `836 + r * (297 ±0)` + // Minimum execution time: 235_251_000 picoseconds. + Weight::from_parts(128_816_707, 836) + // Standard Error: 9_887 + .saturating_add(Weight::from_parts(6_167_176, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 23739).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } /// Storage: Skipped Metadata (r:0 w:0) /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 8]`. - fn seal_take_storage_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `15502 + n * (175775 ±0)` - // Estimated: `10042 + n * (176898 ±76)` - // Minimum execution time: 481_980 nanoseconds. - Weight::from_parts(647_289_053, 0) - .saturating_add(Weight::from_parts(0, 10042)) - // Standard Error: 1_556_155 - .saturating_add(Weight::from_parts(166_592_657, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(51_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(48_u64)) - .saturating_add(RocksDbWeight::get().writes((7_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 176898).saturating_mul(n.into())) + /// The range of component `n` is `[0, 16384]`. + fn seal_take_storage_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1180 + n * (1 ±0)` + // Estimated: `1180 + n * (1 ±0)` + // Minimum execution time: 250_401_000 picoseconds. + Weight::from_parts(253_298_243, 1180) + // Standard Error: 9 + .saturating_add(Weight::from_parts(667, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: System Account (r:1602 w:1601) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3291,136 +3114,131 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1457 + r * (3604 ±0)` - // Estimated: `21583 + r * (216101 ±0)` - // Minimum execution time: 374_962 nanoseconds. 
- Weight::from_parts(313_416_386, 0) - .saturating_add(Weight::from_parts(0, 21583)) - // Standard Error: 710_675 - .saturating_add(Weight::from_parts(1_396_551_156, 0).saturating_mul(r.into())) + // Measured: `1373 + r * (45 ±0)` + // Estimated: `7270 + r * (2520 ±0)` + // Minimum execution time: 236_470_000 picoseconds. + Weight::from_parts(98_898_727, 7270) + // Standard Error: 33_316 + .saturating_add(Weight::from_parts(35_149_946, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().reads((80_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(RocksDbWeight::get().writes((80_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 216101).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2520).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1601) + /// Storage: Contracts ContractInfoOf (r:801 w:801) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) /// Storage: Contracts CodeStorage (r:2 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:1602 w:1602) + /// Storage: System EventTopics (r:802 w:802) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1609 + r * (23073 ±0)` - // Estimated: `22098 + r * (511456 ±1)` - // Minimum execution time: 375_916 nanoseconds. - Weight::from_parts(376_468_000, 0) - .saturating_add(Weight::from_parts(0, 22098)) - // Standard Error: 7_246_855 - .saturating_add(Weight::from_parts(28_982_425_139, 0).saturating_mul(r.into())) + // Measured: `1237 + r * (256 ±0)` + // Estimated: `7125 + r * (2732 ±0)` + // Minimum execution time: 238_303_000 picoseconds. 
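// Informal note on the storage-access terms in the formula below: the fixed reads/writes cover
// the accesses listed once in the annotations above, while the `saturating_mul(r.into())` terms
// add the per-iteration accesses. For `seal_call` at the top of the range, r = 800, that is
// 7 + 2 * 800 = 1_607 reads and 3 + 2 * 800 = 1_603 writes (matching ContractInfoOf r:801 w:801
// plus EventTopics r:802 w:802), each priced via `RocksDbWeight::get()`.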
+ Weight::from_parts(239_024_000, 7125) + // Standard Error: 65_907 + .saturating_add(Weight::from_parts(209_419_071, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().reads((160_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((160_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 511456).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2732).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) /// Storage: Contracts ContractInfoOf (r:1 w:1) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1536 w:0) + /// Storage: Contracts CodeStorage (r:736 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:1537 w:1537) + /// Storage: System EventTopics (r:737 w:737) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (71670 ±0)` - // Estimated: `17285 + r * (659930 ±563)` - // Minimum execution time: 375_412 nanoseconds. - Weight::from_parts(376_493_000, 0) - .saturating_add(Weight::from_parts(0, 17285)) - // Standard Error: 8_239_575 - .saturating_add(Weight::from_parts(28_716_347_183, 0).saturating_mul(r.into())) + // Measured: `0 + r * (502 ±0)` + // Estimated: `6727 + r * (2572 ±10)` + // Minimum execution time: 235_961_000 picoseconds. 
+ Weight::from_parts(236_939_000, 6727) + // Standard Error: 83_087 + .saturating_add(Weight::from_parts(205_646_517, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((150_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((75_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 659930).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2572).saturating_mul(r.into())) } - /// Storage: System Account (r:82 w:81) + /// Storage: System Account (r:3 w:2) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:81 w:81) + /// Storage: Contracts ContractInfoOf (r:2 w:2) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) /// Storage: Contracts CodeStorage (r:2 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:82 w:82) + /// Storage: System EventTopics (r:3 w:3) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) /// The range of component `t` is `[0, 1]`. - /// The range of component `c` is `[0, 1024]`. - fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `24269 + t * (16910 ±0)` - // Estimated: `532690 + t * (285025 ±0)` - // Minimum execution time: 10_443_315 nanoseconds. - Weight::from_parts(9_342_574_069, 0) - .saturating_add(Weight::from_parts(0, 532690)) - // Standard Error: 7_237_279 - .saturating_add(Weight::from_parts(1_390_221_936, 0).saturating_mul(t.into())) - // Standard Error: 10_851 - .saturating_add(Weight::from_parts(9_842_151, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(167_u64)) - .saturating_add(RocksDbWeight::get().reads((81_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes(163_u64)) - .saturating_add(RocksDbWeight::get().writes((81_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 285025).saturating_mul(t.into())) - } - /// Storage: System Account (r:3202 w:3202) + /// The range of component `c` is `[0, 1048576]`. + fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1154 + t * (204 ±0)` + // Estimated: `9569 + t * (5154 ±0)` + // Minimum execution time: 410_156_000 picoseconds. 
+ Weight::from_parts(378_378_143, 9569) + // Standard Error: 285_172 + .saturating_add(Weight::from_parts(34_736_740, 0).saturating_mul(t.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(591, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 5154).saturating_mul(t.into())) + } + /// Storage: System Account (r:1602 w:1602) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1601) + /// Storage: Contracts ContractInfoOf (r:801 w:801) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1601 w:0) + /// Storage: Contracts CodeStorage (r:801 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) /// Storage: Timestamp Now (r:1 w:0) /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: Contracts Nonce (r:1 w:1) /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1600 w:1600) + /// Storage: Contracts OwnerInfoOf (r:800 w:800) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1602 w:1602) + /// Storage: System EventTopics (r:802 w:802) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1775 + r * (25568 ±0)` - // Estimated: `26563 + r * (1367114 ±2)` - // Minimum execution time: 376_418 nanoseconds. - Weight::from_parts(377_292_000, 0) - .saturating_add(Weight::from_parts(0, 26563)) - // Standard Error: 32_312_545 - .saturating_add(Weight::from_parts(35_904_826_312, 0).saturating_mul(r.into())) + // Measured: `1301 + r * (254 ±0)` + // Estimated: `7131 + r * (5205 ±0)` + // Minimum execution time: 236_748_000 picoseconds. 
+ Weight::from_parts(237_129_000, 7131) + // Standard Error: 280_059 + .saturating_add(Weight::from_parts(341_428_013, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().reads((480_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(5_u64)) - .saturating_add(RocksDbWeight::get().writes((400_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 1367114).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 5205).saturating_mul(r.into())) } - /// Storage: System Account (r:162 w:162) + /// Storage: System Account (r:4 w:4) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:81 w:81) + /// Storage: Contracts ContractInfoOf (r:2 w:2) /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) /// Storage: Contracts CodeStorage (r:2 w:0) /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) @@ -3430,29 +3248,28 @@ impl WeightInfo for () { /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: Contracts OwnerInfoOf (r:1 w:1) /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:82 w:82) + /// Storage: System EventTopics (r:3 w:3) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) /// The range of component `t` is `[0, 1]`. - /// The range of component `i` is `[0, 960]`. - /// The range of component `s` is `[0, 960]`. - fn seal_instantiate_per_transfer_input_salt_kb(t: u32, i: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `5785 + t * (33 ±0)` - // Estimated: `850985 + t * (2671 ±3)` - // Minimum execution time: 132_157_340 nanoseconds. - Weight::from_parts(11_329_968_948, 0) - .saturating_add(Weight::from_parts(0, 850985)) - // Standard Error: 99_102_968 - .saturating_add(Weight::from_parts(84_719_458, 0).saturating_mul(t.into())) - // Standard Error: 161_609 - .saturating_add(Weight::from_parts(126_156_627, 0).saturating_mul(i.into())) - // Standard Error: 161_609 - .saturating_add(Weight::from_parts(126_628_313, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(329_u64)) + /// The range of component `i` is `[0, 983040]`. + /// The range of component `s` is `[0, 983040]`. + fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1071 + t * (187 ±0)` + // Estimated: `9492 + t * (2634 ±2)` + // Minimum execution time: 1_613_796_000 picoseconds. 
+ Weight::from_parts(340_002_206, 9492) + // Standard Error: 4_296_381 + .saturating_add(Weight::from_parts(115_239_834, 0).saturating_mul(t.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(i.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_315, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes(326_u64)) + .saturating_add(RocksDbWeight::get().writes(10_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2671).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 2634).saturating_mul(t.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3464,19 +3281,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `871 + r * (642 ±0)` - // Estimated: `17225 + r * (3210 ±0)` - // Minimum execution time: 373_559 nanoseconds. - Weight::from_parts(375_166_904, 0) - .saturating_add(Weight::from_parts(0, 17225)) - // Standard Error: 125_024 - .saturating_add(Weight::from_parts(42_291_595, 0).saturating_mul(r.into())) + // Measured: `777 + r * (8 ±0)` + // Estimated: `6718 + r * (8 ±0)` + // Minimum execution time: 233_111_000 picoseconds. + Weight::from_parts(238_643_933, 6718) + // Standard Error: 184 + .saturating_add(Weight::from_parts(572_296, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3210).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3488,16 +3304,15 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1673` - // Estimated: `21160` - // Minimum execution time: 416_233 nanoseconds. - Weight::from_parts(416_785_000, 0) - .saturating_add(Weight::from_parts(0, 21160)) - // Standard Error: 56_223 - .saturating_add(Weight::from_parts(324_513_835, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `785` + // Estimated: `6725` + // Minimum execution time: 234_746_000 picoseconds. 
+ Weight::from_parts(229_815_552, 6725) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_892, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3511,19 +3326,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (642 ±0)` - // Estimated: `17235 + r * (3210 ±0)` - // Minimum execution time: 371_735 nanoseconds. - Weight::from_parts(375_979_430, 0) - .saturating_add(Weight::from_parts(0, 17235)) - // Standard Error: 968_037 - .saturating_add(Weight::from_parts(57_780_769, 0).saturating_mul(r.into())) + // Measured: `779 + r * (8 ±0)` + // Estimated: `6721 + r * (8 ±0)` + // Minimum execution time: 232_732_000 picoseconds. + Weight::from_parts(239_007_209, 6721) + // Standard Error: 256 + .saturating_add(Weight::from_parts(733_879, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3210).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3535,16 +3349,15 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1675` - // Estimated: `21205` - // Minimum execution time: 428_196 nanoseconds. - Weight::from_parts(429_438_000, 0) - .saturating_add(Weight::from_parts(0, 21205)) - // Standard Error: 57_860 - .saturating_add(Weight::from_parts(260_917_896, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `787` + // Estimated: `6729` + // Minimum execution time: 234_184_000 picoseconds. + Weight::from_parts(227_603_375, 6729) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_127, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3558,19 +3371,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (642 ±0)` - // Estimated: `17235 + r * (3210 ±0)` - // Minimum execution time: 371_412 nanoseconds. 
- Weight::from_parts(373_635_818, 0) - .saturating_add(Weight::from_parts(0, 17235)) - // Standard Error: 222_973 - .saturating_add(Weight::from_parts(33_347_181, 0).saturating_mul(r.into())) + // Measured: `779 + r * (8 ±0)` + // Estimated: `6724 + r * (8 ±0)` + // Minimum execution time: 233_038_000 picoseconds. + Weight::from_parts(238_515_817, 6724) + // Standard Error: 255 + .saturating_add(Weight::from_parts(413_343, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3210).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3582,16 +3394,15 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1675` - // Estimated: `21180` - // Minimum execution time: 405_858 nanoseconds. - Weight::from_parts(406_498_000, 0) - .saturating_add(Weight::from_parts(0, 21180)) - // Standard Error: 48_388 - .saturating_add(Weight::from_parts(103_283_157, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `787` + // Estimated: `6733` + // Minimum execution time: 232_996_000 picoseconds. + Weight::from_parts(226_706_997, 6733) + // Standard Error: 1 + .saturating_add(Weight::from_parts(908, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3605,19 +3416,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (679 ±0)` - // Estimated: `17235 + r * (3395 ±0)` - // Minimum execution time: 371_746 nanoseconds. - Weight::from_parts(373_538_171, 0) - .saturating_add(Weight::from_parts(0, 17235)) - // Standard Error: 387_332 - .saturating_add(Weight::from_parts(35_933_528, 0).saturating_mul(r.into())) + // Measured: `779 + r * (8 ±0)` + // Estimated: `6725 + r * (8 ±0)` + // Minimum execution time: 232_292_000 picoseconds. 
+ Weight::from_parts(237_997_001, 6725) + // Standard Error: 219 + .saturating_add(Weight::from_parts(410_177, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3395).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3629,16 +3439,15 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `n` is `[0, 1024]`. - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1675` - // Estimated: `21225` - // Minimum execution time: 405_752 nanoseconds. - Weight::from_parts(406_417_000, 0) - .saturating_add(Weight::from_parts(0, 21225)) - // Standard Error: 47_051 - .saturating_add(Weight::from_parts(103_325_027, 0).saturating_mul(n.into())) + /// The range of component `n` is `[0, 1048576]`. + fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `787` + // Estimated: `6727` + // Minimum execution time: 234_815_000 picoseconds. + Weight::from_parts(226_317_150, 6727) + // Standard Error: 1 + .saturating_add(Weight::from_parts(911, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3652,19 +3461,64 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `n` is `[0, 125697]`. + fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `912 + n * (1 ±0)` + // Estimated: `6848 + n * (1 ±0)` + // Minimum execution time: 286_323_000 picoseconds. + Weight::from_parts(290_287_955, 6848) + // Standard Error: 1 + .saturating_add(Weight::from_parts(4_693, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) + /// Storage: Contracts ContractInfoOf (r:1 w:1) + /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) + /// Storage: Contracts CodeStorage (r:1 w:0) + /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) + /// Storage: System EventTopics (r:2 w:2) + /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// The range of component `r` is `[0, 160]`. 
+ fn seal_sr25519_verify(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `727 + r * (112 ±0)` + // Estimated: `6666 + r * (112 ±0)` + // Minimum execution time: 235_938_000 picoseconds. + Weight::from_parts(242_728_358, 6666) + // Standard Error: 9_725 + .saturating_add(Weight::from_parts(47_527_740, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) + } + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) + /// Storage: Contracts ContractInfoOf (r:1 w:1) + /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) + /// Storage: Contracts CodeStorage (r:1 w:0) + /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) + /// Storage: System EventTopics (r:2 w:2) + /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `917 + r * (6083 ±0)` - // Estimated: `17455 + r * (30415 ±0)` - // Minimum execution time: 373_882 nanoseconds. - Weight::from_parts(376_553_787, 0) - .saturating_add(Weight::from_parts(0, 17455)) - // Standard Error: 912_833 - .saturating_add(Weight::from_parts(3_021_100_412, 0).saturating_mul(r.into())) + // Measured: `822 + r * (76 ±0)` + // Estimated: `6716 + r * (77 ±0)` + // Minimum execution time: 236_108_000 picoseconds. + Weight::from_parts(248_577_226, 6716) + // Standard Error: 9_565 + .saturating_add(Weight::from_parts(36_733_552, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 30415).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3676,19 +3530,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `886 + r * (3362 ±0)` - // Estimated: `17300 + r * (16810 ±0)` - // Minimum execution time: 373_673 nanoseconds. - Weight::from_parts(375_712_961, 0) - .saturating_add(Weight::from_parts(0, 17300)) - // Standard Error: 596_297 - .saturating_add(Weight::from_parts(738_257_638, 0).saturating_mul(r.into())) + // Measured: `792 + r * (42 ±0)` + // Estimated: `6731 + r * (42 ±0)` + // Minimum execution time: 236_440_000 picoseconds. 
+ Weight::from_parts(240_771_418, 6731) + // Standard Error: 1_849 + .saturating_add(Weight::from_parts(9_185_896, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 16810).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3702,21 +3555,20 @@ impl WeightInfo for () { /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) /// Storage: System EventTopics (r:1538 w:1538) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (79300 ±0)` - // Estimated: `64844 + r * (942952 ±833)` - // Minimum execution time: 374_097 nanoseconds. - Weight::from_parts(374_985_000, 0) - .saturating_add(Weight::from_parts(0, 64844)) - // Standard Error: 3_772_336 - .saturating_add(Weight::from_parts(1_546_402_854, 0).saturating_mul(r.into())) + // Measured: `0 + r * (964 ±0)` + // Estimated: `8190 + r * (3090 ±10)` + // Minimum execution time: 235_056_000 picoseconds. + Weight::from_parts(235_743_000, 8190) + // Standard Error: 46_122 + .saturating_add(Weight::from_parts(21_447_984, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((225_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((150_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 942952).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3090).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3728,19 +3580,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869 + r * (240 ±0)` - // Estimated: `17215 + r * (1200 ±0)` - // Minimum execution time: 374_249 nanoseconds. - Weight::from_parts(377_990_998, 0) - .saturating_add(Weight::from_parts(0, 17215)) - // Standard Error: 38_133 - .saturating_add(Weight::from_parts(11_483_273, 0).saturating_mul(r.into())) + // Measured: `773 + r * (3 ±0)` + // Estimated: `6723 + r * (3 ±0)` + // Minimum execution time: 235_213_000 picoseconds. 
+ Weight::from_parts(239_456_464, 6723) + // Standard Error: 130 + .saturating_add(Weight::from_parts(159_851, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 1200).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3752,19 +3603,18 @@ impl WeightInfo for () { /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2102 + r * (3154 ±0)` - // Estimated: `21980 + r * (15875 ±2)` - // Minimum execution time: 375_552 nanoseconds. - Weight::from_parts(400_624_032, 0) - .saturating_add(Weight::from_parts(0, 21980)) - // Standard Error: 82_523 - .saturating_add(Weight::from_parts(18_057_327, 0).saturating_mul(r.into())) + // Measured: `1975 + r * (39 ±0)` + // Estimated: `7805 + r * (40 ±0)` + // Minimum execution time: 237_886_000 picoseconds. + Weight::from_parts(262_430_157, 7805) + // Standard Error: 939 + .saturating_add(Weight::from_parts(260_005, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 15875).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) @@ -3778,590 +3628,527 @@ impl WeightInfo for () { /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) /// Storage: System EventTopics (r:2 w:2) /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (240 ±0)` - // Estimated: `18598 + r * (1440 ±0)` - // Minimum execution time: 373_899 nanoseconds. - Weight::from_parts(379_733_943, 0) - .saturating_add(Weight::from_parts(0, 18598)) - // Standard Error: 32_022 - .saturating_add(Weight::from_parts(9_381_180, 0).saturating_mul(r.into())) + // Measured: `776 + r * (3 ±0)` + // Estimated: `6723 + r * (3 ±0)` + // Minimum execution time: 234_014_000 picoseconds. + Weight::from_parts(240_042_671, 6723) + // Standard Error: 152 + .saturating_add(Weight::from_parts(138_382, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 1440).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64const(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 834 nanoseconds. 
- Weight::from_parts(1_009_646, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 388 - .saturating_add(Weight::from_parts(411_979, 0).saturating_mul(r.into())) + // Minimum execution time: 1_533_000 picoseconds. + Weight::from_parts(1_846_015, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_935, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64load(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 882 nanoseconds. - Weight::from_parts(1_416_377, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_133 - .saturating_add(Weight::from_parts(1_075_838, 0).saturating_mul(r.into())) + // Minimum execution time: 1_671_000 picoseconds. + Weight::from_parts(2_197_197, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(6_335, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64store(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 878 nanoseconds. - Weight::from_parts(1_343_056, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 426 - .saturating_add(Weight::from_parts(1_001_214, 0).saturating_mul(r.into())) + // Minimum execution time: 1_665_000 picoseconds. + Weight::from_parts(2_200_545, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(6_011, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_select(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 796 nanoseconds. - Weight::from_parts(1_079_086, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 409 - .saturating_add(Weight::from_parts(1_149_188, 0).saturating_mul(r.into())) + // Minimum execution time: 1_545_000 picoseconds. + Weight::from_parts(1_977_462, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(7_901, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_if(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 800 nanoseconds. - Weight::from_parts(1_044_184, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 707 - .saturating_add(Weight::from_parts(1_315_686, 0).saturating_mul(r.into())) + // Minimum execution time: 1_515_000 picoseconds. + Weight::from_parts(1_866_184, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(10_514, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_br(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 807 nanoseconds. - Weight::from_parts(1_049_633, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 361 - .saturating_add(Weight::from_parts(640_530, 0).saturating_mul(r.into())) + // Minimum execution time: 1_618_000 picoseconds. + Weight::from_parts(1_895_104, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(4_523, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. 
fn instr_br_if(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 774 nanoseconds. - Weight::from_parts(1_124_053, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 784 - .saturating_add(Weight::from_parts(949_398, 0).saturating_mul(r.into())) + // Minimum execution time: 1_510_000 picoseconds. + Weight::from_parts(1_779_998, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(6_832, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_br_table(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810 nanoseconds. - Weight::from_parts(676_581, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_356 - .saturating_add(Weight::from_parts(1_163_465, 0).saturating_mul(r.into())) + // Minimum execution time: 1_529_000 picoseconds. + Weight::from_parts(1_726_996, 0) + // Standard Error: 26 + .saturating_add(Weight::from_parts(9_199, 0).saturating_mul(r.into())) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_580 nanoseconds. - Weight::from_parts(2_835_656, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 71 - .saturating_add(Weight::from_parts(4_686, 0).saturating_mul(e.into())) + // Minimum execution time: 1_682_000 picoseconds. + Weight::from_parts(1_789_910, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(42, 0).saturating_mul(e.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 826 nanoseconds. - Weight::from_parts(1_625_698, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_740 - .saturating_add(Weight::from_parts(2_332_187, 0).saturating_mul(r.into())) + // Minimum execution time: 1_539_000 picoseconds. + Weight::from_parts(2_093_056, 0) + // Standard Error: 27 + .saturating_add(Weight::from_parts(18_917, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_call_indirect(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 901 nanoseconds. - Weight::from_parts(2_338_620, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_642 - .saturating_add(Weight::from_parts(2_924_090, 0).saturating_mul(r.into())) - } - /// The range of component `p` is `[0, 128]`. - fn instr_call_indirect_per_param(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_670 nanoseconds. - Weight::from_parts(5_556_246, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_491 - .saturating_add(Weight::from_parts(228_965, 0).saturating_mul(p.into())) + // Minimum execution time: 1_851_000 picoseconds. + Weight::from_parts(3_134_610, 0) + // Standard Error: 34 + .saturating_add(Weight::from_parts(24_714, 0).saturating_mul(r.into())) } /// The range of component `l` is `[0, 1024]`. 
fn instr_call_per_local(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_099 nanoseconds. - Weight::from_parts(3_896_177, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 99 - .saturating_add(Weight::from_parts(91_304, 0).saturating_mul(l.into())) + // Minimum execution time: 1_654_000 picoseconds. + Weight::from_parts(1_885_921, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_243, 0).saturating_mul(l.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_local_get(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_042 nanoseconds. - Weight::from_parts(3_334_621, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 793 - .saturating_add(Weight::from_parts(459_346, 0).saturating_mul(r.into())) + // Minimum execution time: 2_744_000 picoseconds. + Weight::from_parts(3_014_725, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(2_447, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_local_set(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_968 nanoseconds. - Weight::from_parts(3_235_286, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 427 - .saturating_add(Weight::from_parts(485_454, 0).saturating_mul(r.into())) + // Minimum execution time: 2_881_000 picoseconds. + Weight::from_parts(3_137_711, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(3_608, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_local_tee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_012 nanoseconds. - Weight::from_parts(3_303_555, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 371 - .saturating_add(Weight::from_parts(657_811, 0).saturating_mul(r.into())) + // Minimum execution time: 2_809_000 picoseconds. + Weight::from_parts(3_142_066, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(3_841, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_global_get(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 865 nanoseconds. - Weight::from_parts(1_249_987, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 417 - .saturating_add(Weight::from_parts(896_704, 0).saturating_mul(r.into())) + // Minimum execution time: 1_704_000 picoseconds. + Weight::from_parts(2_083_619, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(8_366, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_global_set(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 866 nanoseconds. - Weight::from_parts(1_216_218, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 503 - .saturating_add(Weight::from_parts(919_719, 0).saturating_mul(r.into())) + // Minimum execution time: 1_652_000 picoseconds. 
+ Weight::from_parts(2_048_256, 0) + // Standard Error: 5 + .saturating_add(Weight::from_parts(8_826, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_memory_current(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 921 nanoseconds. - Weight::from_parts(1_228_408, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 309 - .saturating_add(Weight::from_parts(813_007, 0).saturating_mul(r.into())) + // Minimum execution time: 1_671_000 picoseconds. + Weight::from_parts(1_924_626, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_746, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 16]`. fn instr_memory_grow(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 820 nanoseconds. - Weight::from_parts(914_830, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_018 - .saturating_add(Weight::from_parts(237_062_769, 0).saturating_mul(r.into())) + // Minimum execution time: 1_585_000 picoseconds. + Weight::from_parts(490_856, 0) + // Standard Error: 133_673 + .saturating_add(Weight::from_parts(13_182_582, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64clz(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 812 nanoseconds. - Weight::from_parts(1_554_406, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 9_979 - .saturating_add(Weight::from_parts(625_434, 0).saturating_mul(r.into())) + // Minimum execution time: 1_533_000 picoseconds. + Weight::from_parts(1_851_563, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_820, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ctz(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798 nanoseconds. - Weight::from_parts(1_095_113, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 243 - .saturating_add(Weight::from_parts(634_204, 0).saturating_mul(r.into())) + // Minimum execution time: 1_564_000 picoseconds. + Weight::from_parts(1_914_178, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_732, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64popcnt(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 792 nanoseconds. - Weight::from_parts(1_109_845, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 14_944 - .saturating_add(Weight::from_parts(658_834, 0).saturating_mul(r.into())) + // Minimum execution time: 1_559_000 picoseconds. + Weight::from_parts(1_886_992, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_731, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64eqz(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 815 nanoseconds. 
- Weight::from_parts(1_068_916, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 327 - .saturating_add(Weight::from_parts(652_897, 0).saturating_mul(r.into())) + // Minimum execution time: 1_553_000 picoseconds. + Weight::from_parts(1_886_545, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_658, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64extendsi32(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798 nanoseconds. - Weight::from_parts(1_069_745, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 306 - .saturating_add(Weight::from_parts(618_481, 0).saturating_mul(r.into())) + // Minimum execution time: 1_507_000 picoseconds. + Weight::from_parts(1_853_647, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_852, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64extendui32(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 799 nanoseconds. - Weight::from_parts(1_398_001, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_234 - .saturating_add(Weight::from_parts(611_399, 0).saturating_mul(r.into())) + // Minimum execution time: 1_554_000 picoseconds. + Weight::from_parts(1_868_877, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_806, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i32wrapi64(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 811 nanoseconds. - Weight::from_parts(1_098_083, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 297 - .saturating_add(Weight::from_parts(617_692, 0).saturating_mul(r.into())) + // Minimum execution time: 1_514_000 picoseconds. + Weight::from_parts(1_882_233, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_700, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64eq(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 815 nanoseconds. - Weight::from_parts(1_046_922, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 335 - .saturating_add(Weight::from_parts(909_196, 0).saturating_mul(r.into())) + // Minimum execution time: 1_529_000 picoseconds. + Weight::from_parts(1_897_247, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_955, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ne(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 805 nanoseconds. - Weight::from_parts(1_093_667, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 233 - .saturating_add(Weight::from_parts(907_378, 0).saturating_mul(r.into())) + // Minimum execution time: 1_513_000 picoseconds. + Weight::from_parts(1_922_333, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_933, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. 
fn instr_i64lts(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 805 nanoseconds. - Weight::from_parts(1_290_591, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_201 - .saturating_add(Weight::from_parts(902_006, 0).saturating_mul(r.into())) + // Minimum execution time: 1_512_000 picoseconds. + Weight::from_parts(1_848_668, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_966, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ltu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 783 nanoseconds. - Weight::from_parts(1_159_977, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_310 - .saturating_add(Weight::from_parts(906_489, 0).saturating_mul(r.into())) + // Minimum execution time: 1_522_000 picoseconds. + Weight::from_parts(1_875_257, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_965, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64gts(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 790 nanoseconds. - Weight::from_parts(1_109_719, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 261 - .saturating_add(Weight::from_parts(906_614, 0).saturating_mul(r.into())) + // Minimum execution time: 1_546_000 picoseconds. + Weight::from_parts(1_836_691, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(5_842, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64gtu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 776 nanoseconds. - Weight::from_parts(1_076_567, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 348 - .saturating_add(Weight::from_parts(919_374, 0).saturating_mul(r.into())) + // Minimum execution time: 1_505_000 picoseconds. + Weight::from_parts(1_907_551, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_075, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64les(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 780 nanoseconds. - Weight::from_parts(1_069_663, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 265 - .saturating_add(Weight::from_parts(908_037, 0).saturating_mul(r.into())) + // Minimum execution time: 1_527_000 picoseconds. + Weight::from_parts(1_891_008, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_971, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64leu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 832 nanoseconds. - Weight::from_parts(930_920, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_170 - .saturating_add(Weight::from_parts(929_811, 0).saturating_mul(r.into())) + // Minimum execution time: 1_556_000 picoseconds. 
+ Weight::from_parts(1_910_864, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_059, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64ges(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 787 nanoseconds. - Weight::from_parts(1_087_325, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 315 - .saturating_add(Weight::from_parts(908_321, 0).saturating_mul(r.into())) + // Minimum execution time: 1_544_000 picoseconds. + Weight::from_parts(1_912_650, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_943, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64geu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798 nanoseconds. - Weight::from_parts(1_029_132, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_095 - .saturating_add(Weight::from_parts(913_553, 0).saturating_mul(r.into())) + // Minimum execution time: 1_513_000 picoseconds. + Weight::from_parts(1_855_260, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_975, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64add(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 791 nanoseconds. - Weight::from_parts(1_086_314, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 197 - .saturating_add(Weight::from_parts(896_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_521_000 picoseconds. + Weight::from_parts(1_867_259, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_846, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64sub(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 793 nanoseconds. - Weight::from_parts(1_078_172, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 404 - .saturating_add(Weight::from_parts(886_329, 0).saturating_mul(r.into())) + // Minimum execution time: 1_509_000 picoseconds. + Weight::from_parts(1_893_018, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_096, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64mul(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 799 nanoseconds. - Weight::from_parts(1_095_010, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 431 - .saturating_add(Weight::from_parts(886_513, 0).saturating_mul(r.into())) + // Minimum execution time: 1_496_000 picoseconds. + Weight::from_parts(1_886_659, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_754, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64divs(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810 nanoseconds. 
- Weight::from_parts(1_114_325, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(1_521_849, 0).saturating_mul(r.into())) + // Minimum execution time: 1_527_000 picoseconds. + Weight::from_parts(1_890_548, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(11_842, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64divu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 784 nanoseconds. - Weight::from_parts(1_123_153, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 475 - .saturating_add(Weight::from_parts(1_457_746, 0).saturating_mul(r.into())) + // Minimum execution time: 1_518_000 picoseconds. + Weight::from_parts(1_891_903, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(10_613, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64rems(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 809 nanoseconds. - Weight::from_parts(1_145_906, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 718 - .saturating_add(Weight::from_parts(1_549_964, 0).saturating_mul(r.into())) + // Minimum execution time: 1_504_000 picoseconds. + Weight::from_parts(1_632_694, 0) + // Standard Error: 7 + .saturating_add(Weight::from_parts(12_281, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64remu(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 803 nanoseconds. - Weight::from_parts(1_110_328, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 627 - .saturating_add(Weight::from_parts(1_453_013, 0).saturating_mul(r.into())) + // Minimum execution time: 1_507_000 picoseconds. + Weight::from_parts(1_878_413, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(10_737, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64and(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 786 nanoseconds. - Weight::from_parts(1_108_792, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 286 - .saturating_add(Weight::from_parts(897_035, 0).saturating_mul(r.into())) + // Minimum execution time: 1_534_000 picoseconds. + Weight::from_parts(1_898_519, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_645, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64or(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 787 nanoseconds. - Weight::from_parts(830_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 15_995 - .saturating_add(Weight::from_parts(963_344, 0).saturating_mul(r.into())) + // Minimum execution time: 1_503_000 picoseconds. + Weight::from_parts(1_895_532, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_745, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. 
fn instr_i64xor(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 773 nanoseconds. - Weight::from_parts(1_082_459, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 330 - .saturating_add(Weight::from_parts(897_077, 0).saturating_mul(r.into())) + // Minimum execution time: 1_507_000 picoseconds. + Weight::from_parts(1_868_720, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_873, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64shl(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810 nanoseconds. - Weight::from_parts(1_325_815, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_352 - .saturating_add(Weight::from_parts(899_555, 0).saturating_mul(r.into())) + // Minimum execution time: 1_513_000 picoseconds. + Weight::from_parts(1_894_207, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_843, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64shrs(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 808 nanoseconds. - Weight::from_parts(1_085_903, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 430 - .saturating_add(Weight::from_parts(903_249, 0).saturating_mul(r.into())) + // Minimum execution time: 1_473_000 picoseconds. + Weight::from_parts(1_880_224, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(6_107, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64shru(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 792 nanoseconds. - Weight::from_parts(1_091_261, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 312 - .saturating_add(Weight::from_parts(902_245, 0).saturating_mul(r.into())) + // Minimum execution time: 1_447_000 picoseconds. + Weight::from_parts(1_884_551, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_849, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64rotl(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 807 nanoseconds. - Weight::from_parts(1_121_052, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 506 - .saturating_add(Weight::from_parts(902_772, 0).saturating_mul(r.into())) + // Minimum execution time: 1_538_000 picoseconds. + Weight::from_parts(1_908_813, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_987, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 50]`. + /// The range of component `r` is `[0, 5000]`. fn instr_i64rotr(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 823 nanoseconds. - Weight::from_parts(1_317_597, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_219 - .saturating_add(Weight::from_parts(896_692, 0).saturating_mul(r.into())) + // Minimum execution time: 1_538_000 picoseconds. 
+ Weight::from_parts(1_878_015, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(5_848, 0).saturating_mul(r.into())) } } diff --git a/frame/conviction-voting/Cargo.toml b/frame/conviction-voting/Cargo.toml index d112c54fb64a7..f9390d6b6efe1 100644 --- a/frame/conviction-voting/Cargo.toml +++ b/frame/conviction-voting/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/conviction-voting/src/benchmarking.rs b/frame/conviction-voting/src/benchmarking.rs index 27f46507d6078..8701ed7ebb074 100644 --- a/frame/conviction-voting/src/benchmarking.rs +++ b/frame/conviction-voting/src/benchmarking.rs @@ -23,7 +23,11 @@ use assert_matches::assert_matches; use frame_benchmarking::v1::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ dispatch::RawOrigin, - traits::{fungible, Currency, Get}, + traits::{ + fungible, + tokens::{Fortitude::Polite, Preservation::Expendable}, + Currency, Get, + }, }; use sp_runtime::traits::Bounded; use sp_std::collections::btree_map::BTreeMap; @@ -257,13 +261,13 @@ benchmarks_instance_pallet! { } } - let orig_usable = >::reducible_balance(&caller, false); + let orig_usable = >::reducible_balance(&caller, Expendable, Polite); let polls = &all_polls[&class]; // Vote big on the class with the most ongoing votes of them to bump the lock and make it // hard to recompute when removed. ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; - let now_usable = >::reducible_balance(&caller, false); + let now_usable = >::reducible_balance(&caller, Expendable, Polite); assert_eq!(orig_usable - now_usable, 100u32.into()); // Remove the vote @@ -272,7 +276,7 @@ benchmarks_instance_pallet! { // We can now unlock on `class` from 200 to 100... 
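// The conviction-voting benchmark above now reads spendable balance through the reworked
// fungible traits: the old `reducible_balance(&caller, false)` keep-alive flag becomes an
// explicit `Preservation` plus `Fortitude`. A minimal, self-contained sketch (an
// illustration, not code from this diff) of that call shape:
use frame_support::traits::{
    fungible::Inspect,
    tokens::{Fortitude::Polite, Preservation::Expendable},
};

fn spendable_balance<F, AccountId>(who: &AccountId) -> F::Balance
where
    F: Inspect<AccountId>,
{
    // `Expendable`: the account is allowed to drop below the existential deposit;
    // `Polite`: count only what can be withdrawn without any privileged force.
    F::reducible_balance(who, Expendable, Polite)
}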
}: _(RawOrigin::Signed(caller.clone()), class, caller_lookup) verify { - assert_eq!(orig_usable, >::reducible_balance(&caller, false)); + assert_eq!(orig_usable, >::reducible_balance(&caller, Expendable, Polite)); } impl_benchmark_test_suite!( diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index f28953c4a35b9..3ad81486ed26d 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -388,7 +388,10 @@ impl, I: 'static> Pallet { poll_index: PollIndexOf, vote: AccountVote>, ) -> DispatchResult { - ensure!(vote.balance() <= T::Currency::free_balance(who), Error::::InsufficientFunds); + ensure!( + vote.balance() <= T::Currency::total_balance(who), + Error::::InsufficientFunds + ); T::Polls::try_access_poll(poll_index, |poll_status| { let (tally, class) = poll_status.ensure_ongoing().ok_or(Error::::NotOngoing)?; VotingFor::::try_mutate(who, &class, |voting| { @@ -548,7 +551,7 @@ impl, I: 'static> Pallet { ) -> Result { ensure!(who != target, Error::::Nonsense); T::Polls::classes().binary_search(&class).map_err(|_| Error::::BadClass)?; - ensure!(balance <= T::Currency::free_balance(&who), Error::::InsufficientFunds); + ensure!(balance <= T::Currency::total_balance(&who), Error::::InsufficientFunds); let votes = VotingFor::::try_mutate(&who, &class, |voting| -> Result { let old = sp_std::mem::replace( @@ -636,7 +639,12 @@ impl, I: 'static> Pallet { }, } }); - T::Currency::extend_lock(CONVICTION_VOTING_ID, who, amount, WithdrawReasons::TRANSFER); + T::Currency::extend_lock( + CONVICTION_VOTING_ID, + who, + amount, + WithdrawReasons::except(WithdrawReasons::RESERVE), + ); } /// Rejig the lock on an account. It will never get more stringent (since that would indicate @@ -666,7 +674,7 @@ impl, I: 'static> Pallet { CONVICTION_VOTING_ID, who, lock_needed, - WithdrawReasons::TRANSFER, + WithdrawReasons::except(WithdrawReasons::RESERVE), ); } } diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index e01931d7ad24a..f33e511a164f6 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -51,7 +51,7 @@ frame_support::construct_runtime!( pub struct BaseFilter; impl Contains for BaseFilter { fn contains(call: &RuntimeCall) -> bool { - !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) + !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::force_set_balance { .. })) } } @@ -92,6 +92,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } #[derive(Clone, PartialEq, Eq, Debug)] diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs index 91c4c013051db..bc53855b6b18e 100644 --- a/frame/conviction-voting/src/weights.rs +++ b/frame/conviction-voting/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_conviction_voting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -68,15 +68,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Scheduler Agenda (r:2 w:2) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13168` - // Estimated: `257859` - // Minimum execution time: 85_569 nanoseconds. - Weight::from_parts(86_492_000, 257859) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `13074` + // Estimated: `219984` + // Minimum execution time: 99_130_000 picoseconds. + Weight::from_parts(100_355_000, 219984) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) @@ -87,15 +89,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Scheduler Agenda (r:2 w:2) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `20342` - // Estimated: `257859` - // Minimum execution time: 212_727 nanoseconds. - Weight::from_parts(213_741_000, 257859) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `20216` + // Estimated: `219984` + // Minimum execution time: 276_420_000 picoseconds. + Weight::from_parts(277_433_000, 219984) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -106,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn remove_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `20062` - // Estimated: `251551` - // Minimum execution time: 200_513 nanoseconds. - Weight::from_parts(201_589_000, 251551) + // Measured: `19968` + // Estimated: `219984` + // Minimum execution time: 241_058_000 picoseconds. + Weight::from_parts(242_235_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -119,10 +123,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `12738` - // Estimated: `32557` - // Minimum execution time: 43_083 nanoseconds. - Weight::from_parts(43_763_000, 32557) + // Measured: `12675` + // Estimated: `30706` + // Minimum execution time: 46_385_000 picoseconds. 
+ Weight::from_parts(46_709_000, 30706) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -136,20 +140,22 @@ impl WeightInfo for SubstrateWeight { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + r * (1689 ±0)` - // Estimated: `176657 + r * (110917 ±0)` - // Minimum execution time: 33_246 nanoseconds. - Weight::from_parts(34_560_391, 176657) - // Standard Error: 63_925 - .saturating_add(Weight::from_parts(34_500_408, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `240 + r * (1627 ±0)` + // Estimated: `109992 + r * (109992 ±0)` + // Minimum execution time: 48_947_000 picoseconds. + Weight::from_parts(50_219_593, 109992) + // Standard Error: 70_238 + .saturating_add(Weight::from_parts(40_509_706, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 110917).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } /// Storage: ConvictionVoting VotingFor (r:2 w:2) /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) @@ -160,17 +166,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `470 + r * (1407 ±0)` - // Estimated: `170349 + r * (110917 ±0)` - // Minimum execution time: 20_508 nanoseconds. - Weight::from_parts(21_240_024, 170349) - // Standard Error: 37_314 - .saturating_add(Weight::from_parts(30_890_875, 0).saturating_mul(r.into())) + // Measured: `406 + r * (1376 ±0)` + // Estimated: `109992 + r * (109992 ±0)` + // Minimum execution time: 23_408_000 picoseconds. 
+ Weight::from_parts(24_087_793, 109992) + // Standard Error: 31_776 + .saturating_add(Weight::from_parts(36_594_606, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 110917).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } /// Storage: ConvictionVoting VotingFor (r:1 w:1) /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) @@ -178,13 +184,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `11797` - // Estimated: `36024` - // Minimum execution time: 50_305 nanoseconds. - Weight::from_parts(51_226_000, 36024) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `11734` + // Estimated: `30706` + // Minimum execution time: 65_903_000 picoseconds. + Weight::from_parts(66_460_000, 30706) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } } @@ -199,15 +207,17 @@ impl WeightInfo for () { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Scheduler Agenda (r:2 w:2) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13168` - // Estimated: `257859` - // Minimum execution time: 85_569 nanoseconds. - Weight::from_parts(86_492_000, 257859) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `13074` + // Estimated: `219984` + // Minimum execution time: 99_130_000 picoseconds. 
+ Weight::from_parts(100_355_000, 219984) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) @@ -218,15 +228,17 @@ impl WeightInfo for () { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Scheduler Agenda (r:2 w:2) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `20342` - // Estimated: `257859` - // Minimum execution time: 212_727 nanoseconds. - Weight::from_parts(213_741_000, 257859) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `20216` + // Estimated: `219984` + // Minimum execution time: 276_420_000 picoseconds. + Weight::from_parts(277_433_000, 219984) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -237,10 +249,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn remove_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `20062` - // Estimated: `251551` - // Minimum execution time: 200_513 nanoseconds. - Weight::from_parts(201_589_000, 251551) + // Measured: `19968` + // Estimated: `219984` + // Minimum execution time: 241_058_000 picoseconds. + Weight::from_parts(242_235_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -250,10 +262,10 @@ impl WeightInfo for () { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `12738` - // Estimated: `32557` - // Minimum execution time: 43_083 nanoseconds. - Weight::from_parts(43_763_000, 32557) + // Measured: `12675` + // Estimated: `30706` + // Minimum execution time: 46_385_000 picoseconds. + Weight::from_parts(46_709_000, 30706) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -267,20 +279,22 @@ impl WeightInfo for () { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + r * (1689 ±0)` - // Estimated: `176657 + r * (110917 ±0)` - // Minimum execution time: 33_246 nanoseconds. 
- Weight::from_parts(34_560_391, 176657) - // Standard Error: 63_925 - .saturating_add(Weight::from_parts(34_500_408, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `240 + r * (1627 ±0)` + // Estimated: `109992 + r * (109992 ±0)` + // Minimum execution time: 48_947_000 picoseconds. + Weight::from_parts(50_219_593, 109992) + // Standard Error: 70_238 + .saturating_add(Weight::from_parts(40_509_706, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 110917).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } /// Storage: ConvictionVoting VotingFor (r:2 w:2) /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) @@ -291,17 +305,17 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `470 + r * (1407 ±0)` - // Estimated: `170349 + r * (110917 ±0)` - // Minimum execution time: 20_508 nanoseconds. - Weight::from_parts(21_240_024, 170349) - // Standard Error: 37_314 - .saturating_add(Weight::from_parts(30_890_875, 0).saturating_mul(r.into())) + // Measured: `406 + r * (1376 ±0)` + // Estimated: `109992 + r * (109992 ±0)` + // Minimum execution time: 23_408_000 picoseconds. + Weight::from_parts(24_087_793, 109992) + // Standard Error: 31_776 + .saturating_add(Weight::from_parts(36_594_606, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 110917).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } /// Storage: ConvictionVoting VotingFor (r:1 w:1) /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) @@ -309,13 +323,15 @@ impl WeightInfo for () { /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `11797` - // Estimated: `36024` - // Minimum execution time: 50_305 nanoseconds. - Weight::from_parts(51_226_000, 36024) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `11734` + // Estimated: `30706` + // Minimum execution time: 65_903_000 picoseconds. 
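// Throughout these generated functions the two arguments to `Weight::from_parts` are the
// two weight dimensions: computational ref-time in picoseconds and PoV proof size in bytes
// (the `Estimated` figure in the comments), with database accesses added on top. A hedged
// sketch of the pattern, using figures matching the surrounding `unlock()` entry:
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn example_two_dimensional_weight() -> Weight {
    // 66_460_000 ps of execution and 30_706 bytes of proof size, plus the benchmarked
    // cost of 4 database reads and 3 database writes.
    Weight::from_parts(66_460_000, 30_706)
        .saturating_add(RocksDbWeight::get().reads(4_u64))
        .saturating_add(RocksDbWeight::get().writes(3_u64))
}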
+ Weight::from_parts(66_460_000, 30706) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } } diff --git a/frame/core-fellowship/Cargo.toml b/frame/core-fellowship/Cargo.toml index 2785346c7341f..16b673fb41b98 100644 --- a/frame/core-fellowship/Cargo.toml +++ b/frame/core-fellowship/Cargo.toml @@ -25,10 +25,6 @@ sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -[dev-dependencies] -pallet-ranked-collective = { version = "4.0.0-dev", default-features = false, path = "../ranked-collective" } -pallet-salary = { version = "4.0.0-dev", default-features = false, path = "../salary" } - [features] default = ["std"] std = [ @@ -49,7 +45,5 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-ranked-collective/runtime-benchmarks", - "pallet-salary/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/core-fellowship/src/weights.rs b/frame/core-fellowship/src/weights.rs index 9f290635ec1cf..28ebeae64733e 100644 --- a/frame/core-fellowship/src/weights.rs +++ b/frame/core-fellowship/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_core_fellowship //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_core_fellowship // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_core_fellowship -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/core-fellowship/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_489_000 picoseconds. - Weight::from_parts(10_873_000, 0) + // Minimum execution time: 10_390_000 picoseconds. + Weight::from_parts(10_847_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: CoreFellowship Member (r:1 w:1) @@ -88,10 +87,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16886` - // Estimated: `35762` - // Minimum execution time: 61_794_000 picoseconds. - Weight::from_parts(62_455_000, 35762) + // Measured: `16854` + // Estimated: `19894` + // Minimum execution time: 61_737_000 picoseconds. 
+ Weight::from_parts(62_207_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -109,10 +108,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16996` - // Estimated: `35762` - // Minimum execution time: 63_921_000 picoseconds. - Weight::from_parts(64_871_000, 35762) + // Measured: `16964` + // Estimated: `19894` + // Minimum execution time: 64_226_000 picoseconds. + Weight::from_parts(64_678_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -123,9 +122,9 @@ impl WeightInfo for SubstrateWeight { fn set_active() -> Weight { // Proof Size summary in bytes: // Measured: `355` - // Estimated: `7021` - // Minimum execution time: 19_023_000 picoseconds. - Weight::from_parts(19_270_000, 7021) + // Estimated: `3514` + // Minimum execution time: 18_977_000 picoseconds. + Weight::from_parts(19_157_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -142,9 +141,9 @@ impl WeightInfo for SubstrateWeight { fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `10500` - // Minimum execution time: 29_089_000 picoseconds. - Weight::from_parts(29_718_000, 10500) + // Estimated: `3514` + // Minimum execution time: 28_633_000 picoseconds. + Weight::from_parts(29_074_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -164,10 +163,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `16864` - // Estimated: `32243` - // Minimum execution time: 59_529_000 picoseconds. - Weight::from_parts(60_057_000, 32243) + // Measured: `16832` + // Estimated: `19894` + // Minimum execution time: 59_008_000 picoseconds. + Weight::from_parts(59_690_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -175,14 +174,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) /// Storage: CoreFellowship Member (r:1 w:1) /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: CoreFellowship MemberEvidence (r:0 w:1) + /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `292` - // Estimated: `7021` - // Minimum execution time: 17_221_000 picoseconds. - Weight::from_parts(17_585_000, 7021) + // Measured: `326` + // Estimated: `3514` + // Minimum execution time: 18_892_000 picoseconds. 
+ Weight::from_parts(19_095_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: CoreFellowship Member (r:1 w:1) /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) @@ -191,9 +192,9 @@ impl WeightInfo for SubstrateWeight { fn import() -> Weight { // Proof Size summary in bytes: // Measured: `280` - // Estimated: `7021` - // Minimum execution time: 18_602_000 picoseconds. - Weight::from_parts(18_813_000, 7021) + // Estimated: `3514` + // Minimum execution time: 18_107_000 picoseconds. + Weight::from_parts(18_371_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -205,10 +206,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn approve() -> Weight { // Proof Size summary in bytes: - // Measured: `16842` - // Estimated: `26915` - // Minimum execution time: 43_525_000 picoseconds. - Weight::from_parts(43_994_000, 26915) + // Measured: `16810` + // Estimated: `19894` + // Minimum execution time: 43_243_000 picoseconds. + Weight::from_parts(43_965_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -219,9 +220,9 @@ impl WeightInfo for SubstrateWeight { fn submit_evidence() -> Weight { // Proof Size summary in bytes: // Measured: `79` - // Estimated: `23408` - // Minimum execution time: 27_960_000 picoseconds. - Weight::from_parts(28_331_000, 23408) + // Estimated: `19894` + // Minimum execution time: 27_881_000 picoseconds. + Weight::from_parts(28_208_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -235,8 +236,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_489_000 picoseconds. - Weight::from_parts(10_873_000, 0) + // Minimum execution time: 10_390_000 picoseconds. + Weight::from_parts(10_847_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: CoreFellowship Member (r:1 w:1) @@ -253,10 +254,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16886` - // Estimated: `35762` - // Minimum execution time: 61_794_000 picoseconds. - Weight::from_parts(62_455_000, 35762) + // Measured: `16854` + // Estimated: `19894` + // Minimum execution time: 61_737_000 picoseconds. + Weight::from_parts(62_207_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -274,10 +275,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16996` - // Estimated: `35762` - // Minimum execution time: 63_921_000 picoseconds. - Weight::from_parts(64_871_000, 35762) + // Measured: `16964` + // Estimated: `19894` + // Minimum execution time: 64_226_000 picoseconds. 
+ Weight::from_parts(64_678_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -288,9 +289,9 @@ impl WeightInfo for () { fn set_active() -> Weight { // Proof Size summary in bytes: // Measured: `355` - // Estimated: `7021` - // Minimum execution time: 19_023_000 picoseconds. - Weight::from_parts(19_270_000, 7021) + // Estimated: `3514` + // Minimum execution time: 18_977_000 picoseconds. + Weight::from_parts(19_157_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -307,9 +308,9 @@ impl WeightInfo for () { fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `10500` - // Minimum execution time: 29_089_000 picoseconds. - Weight::from_parts(29_718_000, 10500) + // Estimated: `3514` + // Minimum execution time: 28_633_000 picoseconds. + Weight::from_parts(29_074_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -329,10 +330,10 @@ impl WeightInfo for () { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `16864` - // Estimated: `32243` - // Minimum execution time: 59_529_000 picoseconds. - Weight::from_parts(60_057_000, 32243) + // Measured: `16832` + // Estimated: `19894` + // Minimum execution time: 59_008_000 picoseconds. + Weight::from_parts(59_690_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -340,14 +341,16 @@ impl WeightInfo for () { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) /// Storage: CoreFellowship Member (r:1 w:1) /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: CoreFellowship MemberEvidence (r:0 w:1) + /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `292` - // Estimated: `7021` - // Minimum execution time: 17_221_000 picoseconds. - Weight::from_parts(17_585_000, 7021) + // Measured: `326` + // Estimated: `3514` + // Minimum execution time: 18_892_000 picoseconds. + Weight::from_parts(19_095_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: CoreFellowship Member (r:1 w:1) /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) @@ -356,9 +359,9 @@ impl WeightInfo for () { fn import() -> Weight { // Proof Size summary in bytes: // Measured: `280` - // Estimated: `7021` - // Minimum execution time: 18_602_000 picoseconds. - Weight::from_parts(18_813_000, 7021) + // Estimated: `3514` + // Minimum execution time: 18_107_000 picoseconds. 
+ Weight::from_parts(18_371_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -370,10 +373,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn approve() -> Weight { // Proof Size summary in bytes: - // Measured: `16842` - // Estimated: `26915` - // Minimum execution time: 43_525_000 picoseconds. - Weight::from_parts(43_994_000, 26915) + // Measured: `16810` + // Estimated: `19894` + // Minimum execution time: 43_243_000 picoseconds. + Weight::from_parts(43_965_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -384,9 +387,9 @@ impl WeightInfo for () { fn submit_evidence() -> Weight { // Proof Size summary in bytes: // Measured: `79` - // Estimated: `23408` - // Minimum execution time: 27_960_000 picoseconds. - Weight::from_parts(28_331_000, 23408) + // Estimated: `19894` + // Minimum execution time: 27_881_000 picoseconds. + Weight::from_parts(28_208_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index fa107218cf698..1f46bfaed8c0b 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index a3d7f103a98f3..69438eba91d8e 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -1308,7 +1308,12 @@ impl Pallet { })?; // Extend the lock to `balance` (rather than setting it) since we don't know what other // votes are in place. - T::Currency::extend_lock(DEMOCRACY_ID, who, vote.balance(), WithdrawReasons::TRANSFER); + T::Currency::extend_lock( + DEMOCRACY_ID, + who, + vote.balance(), + WithdrawReasons::except(WithdrawReasons::RESERVE), + ); ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); Ok(()) } @@ -1454,7 +1459,12 @@ impl Pallet { let votes = Self::increase_upstream_delegation(&target, conviction.votes(balance)); // Extend the lock to `balance` (rather than setting it) since we don't know what other // votes are in place. 
- T::Currency::extend_lock(DEMOCRACY_ID, &who, balance, WithdrawReasons::TRANSFER); + T::Currency::extend_lock( + DEMOCRACY_ID, + &who, + balance, + WithdrawReasons::except(WithdrawReasons::RESERVE), + ); Ok(votes) })?; Self::deposit_event(Event::::Delegated { who, target }); @@ -1499,7 +1509,12 @@ impl Pallet { if lock_needed.is_zero() { T::Currency::remove_lock(DEMOCRACY_ID, who); } else { - T::Currency::set_lock(DEMOCRACY_ID, who, lock_needed, WithdrawReasons::TRANSFER); + T::Currency::set_lock( + DEMOCRACY_ID, + who, + lock_needed, + WithdrawReasons::except(WithdrawReasons::RESERVE), + ); } } @@ -1591,11 +1606,6 @@ impl Pallet { if approved { Self::deposit_event(Event::::Passed { ref_index: index }); - // Actually `hold` the proposal now since we didn't hold it when it came in via the - // submit extrinsic and we now know that it will be needed. This will be reversed by - // Scheduler pallet once it is executed which assumes that we will already have placed - // a `hold` on it. - T::Preimages::hold(&status.proposal); // Earliest it can be scheduled for is next block. let when = now.saturating_add(status.delay.max(One::one())); diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 9e735765905d6..06fde5129c6d0 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -72,7 +72,7 @@ frame_support::construct_runtime!( pub struct BaseFilter; impl Contains for BaseFilter { fn contains(call: &RuntimeCall) -> bool { - !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) + !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::force_set_balance { .. })) } } @@ -144,6 +144,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { pub static PreimageByteDeposit: u64 = 0; @@ -223,7 +227,7 @@ fn params_should_work() { } fn set_balance_proposal(value: u64) -> BoundedCallOf { - let inner = pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }; + let inner = pallet_balances::Call::force_set_balance { who: 42, new_free: value }; let outer = RuntimeCall::Balances(inner); Preimage::bound(outer).unwrap() } diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index 9b9172ff02b45..31f2e3f3dcc26 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -34,7 +34,7 @@ fn nay(x: u8, balance: u64) -> AccountVote { } fn the_lock(amount: u64) -> BalanceLock { - BalanceLock { id: DEMOCRACY_ID, amount, reasons: pallet_balances::Reasons::Misc } + BalanceLock { id: DEMOCRACY_ID, amount, reasons: pallet_balances::Reasons::All } } #[test] diff --git a/frame/democracy/src/tests/metadata.rs b/frame/democracy/src/tests/metadata.rs index 59229abfe4aab..5a36d80b72637 100644 --- a/frame/democracy/src/tests/metadata.rs +++ b/frame/democracy/src/tests/metadata.rs @@ -33,7 +33,7 @@ fn set_external_metadata_works() { Error::::NoProposal, ); // create an external proposal. - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); assert!(>::exists()); // fails to set metadata with non external origin. assert_noop!( @@ -47,7 +47,7 @@ fn set_external_metadata_works() { ); // set metadata successful. 
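// Both the conviction-voting and democracy hunks above replace the vote lock's
// `WithdrawReasons::TRANSFER` with `WithdrawReasons::except(WithdrawReasons::RESERVE)`.
// A minimal illustration (not code from this diff) of what that bitflag helper produces:
use frame_support::traits::WithdrawReasons;

fn vote_lock_reasons() -> WithdrawReasons {
    // `except` takes the complement of the given flag, so the lock now blocks every
    // withdrawal reason (transfers, fees, tips, ...) other than reserving.
    let reasons = WithdrawReasons::except(WithdrawReasons::RESERVE);
    assert!(reasons.contains(WithdrawReasons::TRANSFER));
    assert!(!reasons.contains(WithdrawReasons::RESERVE));
    reasons
}
// That widening is also why the `lock_voting` test above now expects
// `pallet_balances::Reasons::All` on the resulting `BalanceLock` rather than `Reasons::Misc`.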
let hash = note_preimage(1); - assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(2), owner.clone(), Some(hash),),); + assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(2), owner.clone(), Some(hash))); System::assert_last_event(RuntimeEvent::Democracy(crate::Event::MetadataSet { owner, hash, @@ -61,11 +61,11 @@ fn clear_metadata_works() { // metadata owner is an external proposal. let owner = MetadataOwner::External; // create an external proposal. - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); assert!(>::exists()); // set metadata. let hash = note_preimage(1); - assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(2), owner.clone(), Some(hash),)); + assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(2), owner.clone(), Some(hash))); // fails to clear metadata with a wrong origin. assert_noop!( Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), None), @@ -92,18 +92,18 @@ fn set_proposal_metadata_works() { let owner = MetadataOwner::Proposal(Democracy::public_prop_count() - 1); // fails to set non-existing preimage. assert_noop!( - Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(invalid_hash),), + Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(invalid_hash)), Error::::PreimageNotExist, ); // note preimage. let hash = note_preimage(1); // fails to set a preimage if an origin is not a proposer. assert_noop!( - Democracy::set_metadata(RuntimeOrigin::signed(3), owner.clone(), Some(hash),), + Democracy::set_metadata(RuntimeOrigin::signed(3), owner.clone(), Some(hash)), Error::::NoPermission, ); // set metadata successful. - assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(hash),),); + assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(hash))); System::assert_last_event(RuntimeEvent::Democracy(crate::Event::MetadataSet { owner, hash, @@ -120,7 +120,7 @@ fn clear_proposal_metadata_works() { let owner = MetadataOwner::Proposal(Democracy::public_prop_count() - 1); // set metadata. let hash = note_preimage(1); - assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(hash),)); + assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(hash))); // fails to clear metadata with a wrong origin. assert_noop!( Democracy::set_metadata(RuntimeOrigin::signed(3), owner.clone(), None), @@ -150,16 +150,16 @@ fn set_referendum_metadata_by_root() { let hash = note_preimage(1); // fails to set if not a root. assert_noop!( - Democracy::set_metadata(RuntimeOrigin::signed(3), owner.clone(), Some(hash),), + Democracy::set_metadata(RuntimeOrigin::signed(3), owner.clone(), Some(hash)), Error::::NoPermission, ); // fails to clear if not a root. assert_noop!( - Democracy::set_metadata(RuntimeOrigin::signed(3), owner.clone(), None,), + Democracy::set_metadata(RuntimeOrigin::signed(3), owner.clone(), None), Error::::NoPermission, ); // succeed to set metadata by a root for an ongoing referendum. 
- assert_ok!(Democracy::set_metadata(RuntimeOrigin::root(), owner.clone(), Some(hash),)); + assert_ok!(Democracy::set_metadata(RuntimeOrigin::root(), owner.clone(), Some(hash))); System::assert_last_event(RuntimeEvent::Democracy(crate::Event::MetadataSet { owner: owner.clone(), hash, diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index 9241b27bbe1fb..a263f2982d862 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-b3zmxxc-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_democracy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_democracy -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/democracy/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -92,10 +91,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) fn propose() -> Weight { // Proof Size summary in bytes: - // Measured: `4864` - // Estimated: `23409` - // Minimum execution time: 42_939 nanoseconds. - Weight::from_parts(43_543_000, 23409) + // Measured: `4801` + // Estimated: `18187` + // Minimum execution time: 43_810_000 picoseconds. + Weight::from_parts(44_439_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -103,10 +102,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) fn second() -> Weight { // Proof Size summary in bytes: - // Measured: `3620` - // Estimated: `5705` - // Minimum execution time: 36_475 nanoseconds. - Weight::from_parts(37_863_000, 5705) + // Measured: `3556` + // Estimated: `6695` + // Minimum execution time: 40_003_000 picoseconds. + Weight::from_parts(40_448_000, 6695) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -116,13 +115,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `3565` - // Estimated: `12720` - // Minimum execution time: 56_372 nanoseconds. 
- Weight::from_parts(57_483_000, 12720) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `3470` + // Estimated: `7260` + // Minimum execution time: 54_737_000 picoseconds. + Weight::from_parts(55_154_000, 7260) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Democracy ReferendumInfoOf (r:1 w:1) @@ -131,13 +132,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `3587` - // Estimated: `12720` - // Minimum execution time: 56_789 nanoseconds. - Weight::from_parts(57_737_000, 12720) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `3492` + // Estimated: `7260` + // Minimum execution time: 59_545_000 picoseconds. + Weight::from_parts(59_955_000, 7260) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Democracy ReferendumInfoOf (r:1 w:1) @@ -148,10 +151,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn emergency_cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `7712` - // Minimum execution time: 24_379 nanoseconds. - Weight::from_parts(25_302_000, 7712) + // Measured: `366` + // Estimated: `3666` + // Minimum execution time: 27_886_000 picoseconds. + Weight::from_parts(28_372_000, 3666) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -171,10 +174,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) fn blacklist() -> Weight { // Proof Size summary in bytes: - // Measured: `6036` - // Estimated: `36392` - // Minimum execution time: 100_345 nanoseconds. - Weight::from_parts(102_233_000, 36392) + // Measured: `5910` + // Estimated: `18187` + // Minimum execution time: 99_273_000 picoseconds. + Weight::from_parts(100_398_000, 18187) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -184,10 +187,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) fn external_propose() -> Weight { // Proof Size summary in bytes: - // Measured: `3448` - // Estimated: `6340` - // Minimum execution time: 13_155 nanoseconds. - Weight::from_parts(14_158_000, 6340) + // Measured: `3416` + // Estimated: `6703` + // Minimum execution time: 14_946_000 picoseconds. + Weight::from_parts(15_114_000, 6703) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -197,8 +200,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_961 nanoseconds. - Weight::from_parts(3_139_000, 0) + // Minimum execution time: 3_870_000 picoseconds. 
+ Weight::from_parts(4_083_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:0 w:1) @@ -207,8 +210,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040 nanoseconds. - Weight::from_parts(3_261_000, 0) + // Minimum execution time: 3_989_000 picoseconds. + Weight::from_parts(4_166_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:1 w:1) @@ -222,9 +225,9 @@ impl WeightInfo for SubstrateWeight { fn fast_track() -> Weight { // Proof Size summary in bytes: // Measured: `286` - // Estimated: `3654` - // Minimum execution time: 26_666 nanoseconds. - Weight::from_parts(28_014_000, 3654) + // Estimated: `3518` + // Minimum execution time: 29_776_000 picoseconds. + Weight::from_parts(30_186_000, 3518) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -236,10 +239,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn veto_external() -> Weight { // Proof Size summary in bytes: - // Measured: `3551` - // Estimated: `8868` - // Minimum execution time: 30_180 nanoseconds. - Weight::from_parts(31_593_000, 8868) + // Measured: `3519` + // Estimated: `6703` + // Minimum execution time: 33_891_000 picoseconds. + Weight::from_parts(34_265_000, 6703) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -253,10 +256,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn cancel_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `5915` - // Estimated: `28033` - // Minimum execution time: 80_780 nanoseconds. - Weight::from_parts(82_070_000, 28033) + // Measured: `5821` + // Estimated: `18187` + // Minimum execution time: 81_510_000 picoseconds. + Weight::from_parts(82_483_000, 18187) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -267,9 +270,9 @@ impl WeightInfo for SubstrateWeight { fn cancel_referendum() -> Weight { // Proof Size summary in bytes: // Measured: `271` - // Estimated: `2528` - // Minimum execution time: 18_117 nanoseconds. - Weight::from_parts(19_027_000, 2528) + // Estimated: `3518` + // Minimum execution time: 21_164_000 picoseconds. + Weight::from_parts(21_624_000, 3518) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -282,12 +285,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (117 ±0)` - // Estimated: `998 + r * (2676 ±0)` - // Minimum execution time: 7_093 nanoseconds. - Weight::from_parts(8_792_955, 998) - // Standard Error: 6_630 - .saturating_add(Weight::from_parts(3_091_565, 0).saturating_mul(r.into())) + // Measured: `244 + r * (86 ±0)` + // Estimated: `1489 + r * (2676 ±0)` + // Minimum execution time: 6_925_000 picoseconds. 
+ Weight::from_parts(10_624_198, 1489) + // Standard Error: 5_780 + .saturating_add(Weight::from_parts(2_934_169, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -308,12 +311,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (117 ±0)` - // Estimated: `19318 + r * (2676 ±0)` - // Minimum execution time: 10_372 nanoseconds. - Weight::from_parts(10_961_165, 19318) - // Standard Error: 7_284 - .saturating_add(Weight::from_parts(3_112_573, 0).saturating_mul(r.into())) + // Measured: `244 + r * (86 ±0)` + // Estimated: `18187 + r * (2676 ±0)` + // Minimum execution time: 10_551_000 picoseconds. + Weight::from_parts(13_126_123, 18187) + // Standard Error: 6_391 + .saturating_add(Weight::from_parts(2_952_789, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -325,16 +328,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `958 + r * (139 ±0)` - // Estimated: `22584 + r * (2676 ±0)` - // Minimum execution time: 36_618 nanoseconds. - Weight::from_parts(42_803_184, 22584) - // Standard Error: 7_268 - .saturating_add(Weight::from_parts(4_537_902, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `830 + r * (108 ±0)` + // Estimated: `19800 + r * (2676 ±0)` + // Minimum execution time: 47_172_000 picoseconds. + Weight::from_parts(49_667_954, 19800) + // Standard Error: 6_129 + .saturating_add(Weight::from_parts(4_230_402, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -347,12 +352,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `557 + r * (139 ±0)` - // Estimated: `12540 + r * (2676 ±0)` - // Minimum execution time: 19_758 nanoseconds. - Weight::from_parts(21_641_793, 12540) - // Standard Error: 6_889 - .saturating_add(Weight::from_parts(4_478_884, 0).saturating_mul(r.into())) + // Measured: `493 + r * (108 ±0)` + // Estimated: `13530 + r * (2676 ±0)` + // Minimum execution time: 22_360_000 picoseconds. 
+ Weight::from_parts(25_063_237, 13530) + // Standard Error: 5_326 + .saturating_add(Weight::from_parts(4_163_683, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -365,44 +370,48 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_844 nanoseconds. - Weight::from_parts(3_017_000, 0) + // Minimum execution time: 4_047_000 picoseconds. + Weight::from_parts(4_139_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Democracy VotingOf (r:1 w:1) /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `627` - // Estimated: `12647` - // Minimum execution time: 20_380 nanoseconds. - Weight::from_parts(28_295_875, 12647) - // Standard Error: 2_331 - .saturating_add(Weight::from_parts(67_348, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `563` + // Estimated: `7260` + // Minimum execution time: 27_322_000 picoseconds. + Weight::from_parts(39_909_589, 7260) + // Standard Error: 2_758 + .saturating_add(Weight::from_parts(29_497, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Democracy VotingOf (r:1 w:1) /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `628 + r * (22 ±0)` - // Estimated: `12647` - // Minimum execution time: 24_475 nanoseconds. - Weight::from_parts(27_102_576, 12647) - // Standard Error: 1_464 - .saturating_add(Weight::from_parts(128_921, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `564 + r * (22 ±0)` + // Estimated: `7260` + // Minimum execution time: 37_082_000 picoseconds. + Weight::from_parts(38_580_061, 7260) + // Standard Error: 664 + .saturating_add(Weight::from_parts(62_401, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Democracy ReferendumInfoOf (r:1 w:1) @@ -412,12 +421,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 100]`. 
fn remove_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `791 + r * (26 ±0)` - // Estimated: `8946` - // Minimum execution time: 15_039 nanoseconds. - Weight::from_parts(19_252_498, 8946) - // Standard Error: 1_798 - .saturating_add(Weight::from_parts(131_855, 0).saturating_mul(r.into())) + // Measured: `728 + r * (26 ±0)` + // Estimated: `7260` + // Minimum execution time: 17_528_000 picoseconds. + Weight::from_parts(20_075_412, 7260) + // Standard Error: 1_072 + .saturating_add(Weight::from_parts(81_734, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -428,12 +437,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `791 + r * (26 ±0)` - // Estimated: `8946` - // Minimum execution time: 14_837 nanoseconds. - Weight::from_parts(19_144_929, 8946) - // Standard Error: 1_875 - .saturating_add(Weight::from_parts(136_819, 0).saturating_mul(r.into())) + // Measured: `728 + r * (26 ±0)` + // Estimated: `7260` + // Minimum execution time: 17_517_000 picoseconds. + Weight::from_parts(20_090_718, 7260) + // Standard Error: 1_105 + .saturating_add(Weight::from_parts(82_651, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -446,9 +455,9 @@ impl WeightInfo for SubstrateWeight { fn set_external_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `356` - // Estimated: `3193` - // Minimum execution time: 17_338 nanoseconds. - Weight::from_parts(17_946_000, 3193) + // Estimated: `3556` + // Minimum execution time: 19_234_000 picoseconds. + Weight::from_parts(19_755_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -459,9 +468,9 @@ impl WeightInfo for SubstrateWeight { fn clear_external_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `286` - // Estimated: `3155` - // Minimum execution time: 15_364 nanoseconds. - Weight::from_parts(15_990_000, 3155) + // Estimated: `3518` + // Minimum execution time: 17_621_000 picoseconds. + Weight::from_parts(17_861_000, 3518) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -473,10 +482,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4919` - // Estimated: `19763` - // Minimum execution time: 37_147 nanoseconds. - Weight::from_parts(37_778_000, 19763) + // Measured: `4888` + // Estimated: `18187` + // Minimum execution time: 35_785_000 picoseconds. + Weight::from_parts(36_102_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -486,10 +495,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn clear_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4853` - // Estimated: `19725` - // Minimum execution time: 34_118 nanoseconds. - Weight::from_parts(34_737_000, 19725) + // Measured: `4822` + // Estimated: `18187` + // Minimum execution time: 33_493_000 picoseconds. 
+ Weight::from_parts(33_747_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -500,9 +509,9 @@ impl WeightInfo for SubstrateWeight { fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 12_787 nanoseconds. - Weight::from_parts(13_463_000, 2566) + // Estimated: `3556` + // Minimum execution time: 15_557_000 picoseconds. + Weight::from_parts(15_844_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -513,9 +522,9 @@ impl WeightInfo for SubstrateWeight { fn clear_referendum_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `302` - // Estimated: `5204` - // Minimum execution time: 17_636 nanoseconds. - Weight::from_parts(18_399_000, 5204) + // Estimated: `3666` + // Minimum execution time: 19_940_000 picoseconds. + Weight::from_parts(20_301_000, 3666) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -533,10 +542,10 @@ impl WeightInfo for () { /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) fn propose() -> Weight { // Proof Size summary in bytes: - // Measured: `4864` - // Estimated: `23409` - // Minimum execution time: 42_939 nanoseconds. - Weight::from_parts(43_543_000, 23409) + // Measured: `4801` + // Estimated: `18187` + // Minimum execution time: 43_810_000 picoseconds. + Weight::from_parts(44_439_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -544,10 +553,10 @@ impl WeightInfo for () { /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) fn second() -> Weight { // Proof Size summary in bytes: - // Measured: `3620` - // Estimated: `5705` - // Minimum execution time: 36_475 nanoseconds. - Weight::from_parts(37_863_000, 5705) + // Measured: `3556` + // Estimated: `6695` + // Minimum execution time: 40_003_000 picoseconds. + Weight::from_parts(40_448_000, 6695) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -557,13 +566,15 @@ impl WeightInfo for () { /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `3565` - // Estimated: `12720` - // Minimum execution time: 56_372 nanoseconds. - Weight::from_parts(57_483_000, 12720) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `3470` + // Estimated: `7260` + // Minimum execution time: 54_737_000 picoseconds. 
+ Weight::from_parts(55_154_000, 7260) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Democracy ReferendumInfoOf (r:1 w:1) @@ -572,13 +583,15 @@ impl WeightInfo for () { /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `3587` - // Estimated: `12720` - // Minimum execution time: 56_789 nanoseconds. - Weight::from_parts(57_737_000, 12720) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `3492` + // Estimated: `7260` + // Minimum execution time: 59_545_000 picoseconds. + Weight::from_parts(59_955_000, 7260) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Democracy ReferendumInfoOf (r:1 w:1) @@ -589,10 +602,10 @@ impl WeightInfo for () { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn emergency_cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `7712` - // Minimum execution time: 24_379 nanoseconds. - Weight::from_parts(25_302_000, 7712) + // Measured: `366` + // Estimated: `3666` + // Minimum execution time: 27_886_000 picoseconds. + Weight::from_parts(28_372_000, 3666) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -612,10 +625,10 @@ impl WeightInfo for () { /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) fn blacklist() -> Weight { // Proof Size summary in bytes: - // Measured: `6036` - // Estimated: `36392` - // Minimum execution time: 100_345 nanoseconds. - Weight::from_parts(102_233_000, 36392) + // Measured: `5910` + // Estimated: `18187` + // Minimum execution time: 99_273_000 picoseconds. + Weight::from_parts(100_398_000, 18187) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -625,10 +638,10 @@ impl WeightInfo for () { /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) fn external_propose() -> Weight { // Proof Size summary in bytes: - // Measured: `3448` - // Estimated: `6340` - // Minimum execution time: 13_155 nanoseconds. - Weight::from_parts(14_158_000, 6340) + // Measured: `3416` + // Estimated: `6703` + // Minimum execution time: 14_946_000 picoseconds. + Weight::from_parts(15_114_000, 6703) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -638,8 +651,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_961 nanoseconds. - Weight::from_parts(3_139_000, 0) + // Minimum execution time: 3_870_000 picoseconds. + Weight::from_parts(4_083_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:0 w:1) @@ -648,8 +661,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040 nanoseconds. 
- Weight::from_parts(3_261_000, 0) + // Minimum execution time: 3_989_000 picoseconds. + Weight::from_parts(4_166_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:1 w:1) @@ -663,9 +676,9 @@ impl WeightInfo for () { fn fast_track() -> Weight { // Proof Size summary in bytes: // Measured: `286` - // Estimated: `3654` - // Minimum execution time: 26_666 nanoseconds. - Weight::from_parts(28_014_000, 3654) + // Estimated: `3518` + // Minimum execution time: 29_776_000 picoseconds. + Weight::from_parts(30_186_000, 3518) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -677,10 +690,10 @@ impl WeightInfo for () { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn veto_external() -> Weight { // Proof Size summary in bytes: - // Measured: `3551` - // Estimated: `8868` - // Minimum execution time: 30_180 nanoseconds. - Weight::from_parts(31_593_000, 8868) + // Measured: `3519` + // Estimated: `6703` + // Minimum execution time: 33_891_000 picoseconds. + Weight::from_parts(34_265_000, 6703) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -694,10 +707,10 @@ impl WeightInfo for () { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn cancel_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `5915` - // Estimated: `28033` - // Minimum execution time: 80_780 nanoseconds. - Weight::from_parts(82_070_000, 28033) + // Measured: `5821` + // Estimated: `18187` + // Minimum execution time: 81_510_000 picoseconds. + Weight::from_parts(82_483_000, 18187) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -708,9 +721,9 @@ impl WeightInfo for () { fn cancel_referendum() -> Weight { // Proof Size summary in bytes: // Measured: `271` - // Estimated: `2528` - // Minimum execution time: 18_117 nanoseconds. - Weight::from_parts(19_027_000, 2528) + // Estimated: `3518` + // Minimum execution time: 21_164_000 picoseconds. + Weight::from_parts(21_624_000, 3518) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -723,12 +736,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (117 ±0)` - // Estimated: `998 + r * (2676 ±0)` - // Minimum execution time: 7_093 nanoseconds. - Weight::from_parts(8_792_955, 998) - // Standard Error: 6_630 - .saturating_add(Weight::from_parts(3_091_565, 0).saturating_mul(r.into())) + // Measured: `244 + r * (86 ±0)` + // Estimated: `1489 + r * (2676 ±0)` + // Minimum execution time: 6_925_000 picoseconds. + Weight::from_parts(10_624_198, 1489) + // Standard Error: 5_780 + .saturating_add(Weight::from_parts(2_934_169, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -749,12 +762,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (117 ±0)` - // Estimated: `19318 + r * (2676 ±0)` - // Minimum execution time: 10_372 nanoseconds. 
- Weight::from_parts(10_961_165, 19318) - // Standard Error: 7_284 - .saturating_add(Weight::from_parts(3_112_573, 0).saturating_mul(r.into())) + // Measured: `244 + r * (86 ±0)` + // Estimated: `18187 + r * (2676 ±0)` + // Minimum execution time: 10_551_000 picoseconds. + Weight::from_parts(13_126_123, 18187) + // Standard Error: 6_391 + .saturating_add(Weight::from_parts(2_952_789, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -766,16 +779,18 @@ impl WeightInfo for () { /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `958 + r * (139 ±0)` - // Estimated: `22584 + r * (2676 ±0)` - // Minimum execution time: 36_618 nanoseconds. - Weight::from_parts(42_803_184, 22584) - // Standard Error: 7_268 - .saturating_add(Weight::from_parts(4_537_902, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `830 + r * (108 ±0)` + // Estimated: `19800 + r * (2676 ±0)` + // Minimum execution time: 47_172_000 picoseconds. + Weight::from_parts(49_667_954, 19800) + // Standard Error: 6_129 + .saturating_add(Weight::from_parts(4_230_402, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -788,12 +803,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `557 + r * (139 ±0)` - // Estimated: `12540 + r * (2676 ±0)` - // Minimum execution time: 19_758 nanoseconds. - Weight::from_parts(21_641_793, 12540) - // Standard Error: 6_889 - .saturating_add(Weight::from_parts(4_478_884, 0).saturating_mul(r.into())) + // Measured: `493 + r * (108 ±0)` + // Estimated: `13530 + r * (2676 ±0)` + // Minimum execution time: 22_360_000 picoseconds. + Weight::from_parts(25_063_237, 13530) + // Standard Error: 5_326 + .saturating_add(Weight::from_parts(4_163_683, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -806,44 +821,48 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_844 nanoseconds. - Weight::from_parts(3_017_000, 0) + // Minimum execution time: 4_047_000 picoseconds. 
+ Weight::from_parts(4_139_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Democracy VotingOf (r:1 w:1) /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `627` - // Estimated: `12647` - // Minimum execution time: 20_380 nanoseconds. - Weight::from_parts(28_295_875, 12647) - // Standard Error: 2_331 - .saturating_add(Weight::from_parts(67_348, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `563` + // Estimated: `7260` + // Minimum execution time: 27_322_000 picoseconds. + Weight::from_parts(39_909_589, 7260) + // Standard Error: 2_758 + .saturating_add(Weight::from_parts(29_497, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Democracy VotingOf (r:1 w:1) /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `628 + r * (22 ±0)` - // Estimated: `12647` - // Minimum execution time: 24_475 nanoseconds. - Weight::from_parts(27_102_576, 12647) - // Standard Error: 1_464 - .saturating_add(Weight::from_parts(128_921, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `564 + r * (22 ±0)` + // Estimated: `7260` + // Minimum execution time: 37_082_000 picoseconds. + Weight::from_parts(38_580_061, 7260) + // Standard Error: 664 + .saturating_add(Weight::from_parts(62_401, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Democracy ReferendumInfoOf (r:1 w:1) @@ -853,12 +872,12 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `791 + r * (26 ±0)` - // Estimated: `8946` - // Minimum execution time: 15_039 nanoseconds. - Weight::from_parts(19_252_498, 8946) - // Standard Error: 1_798 - .saturating_add(Weight::from_parts(131_855, 0).saturating_mul(r.into())) + // Measured: `728 + r * (26 ±0)` + // Estimated: `7260` + // Minimum execution time: 17_528_000 picoseconds. 
+ Weight::from_parts(20_075_412, 7260) + // Standard Error: 1_072 + .saturating_add(Weight::from_parts(81_734, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -869,12 +888,12 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `791 + r * (26 ±0)` - // Estimated: `8946` - // Minimum execution time: 14_837 nanoseconds. - Weight::from_parts(19_144_929, 8946) - // Standard Error: 1_875 - .saturating_add(Weight::from_parts(136_819, 0).saturating_mul(r.into())) + // Measured: `728 + r * (26 ±0)` + // Estimated: `7260` + // Minimum execution time: 17_517_000 picoseconds. + Weight::from_parts(20_090_718, 7260) + // Standard Error: 1_105 + .saturating_add(Weight::from_parts(82_651, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -887,9 +906,9 @@ impl WeightInfo for () { fn set_external_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `356` - // Estimated: `3193` - // Minimum execution time: 17_338 nanoseconds. - Weight::from_parts(17_946_000, 3193) + // Estimated: `3556` + // Minimum execution time: 19_234_000 picoseconds. + Weight::from_parts(19_755_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -900,9 +919,9 @@ impl WeightInfo for () { fn clear_external_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `286` - // Estimated: `3155` - // Minimum execution time: 15_364 nanoseconds. - Weight::from_parts(15_990_000, 3155) + // Estimated: `3518` + // Minimum execution time: 17_621_000 picoseconds. + Weight::from_parts(17_861_000, 3518) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -914,10 +933,10 @@ impl WeightInfo for () { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4919` - // Estimated: `19763` - // Minimum execution time: 37_147 nanoseconds. - Weight::from_parts(37_778_000, 19763) + // Measured: `4888` + // Estimated: `18187` + // Minimum execution time: 35_785_000 picoseconds. + Weight::from_parts(36_102_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -927,10 +946,10 @@ impl WeightInfo for () { /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) fn clear_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4853` - // Estimated: `19725` - // Minimum execution time: 34_118 nanoseconds. - Weight::from_parts(34_737_000, 19725) + // Measured: `4822` + // Estimated: `18187` + // Minimum execution time: 33_493_000 picoseconds. + Weight::from_parts(33_747_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -941,9 +960,9 @@ impl WeightInfo for () { fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 12_787 nanoseconds. - Weight::from_parts(13_463_000, 2566) + // Estimated: `3556` + // Minimum execution time: 15_557_000 picoseconds. 
+ Weight::from_parts(15_844_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -954,9 +973,9 @@ impl WeightInfo for () { fn clear_referendum_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `302` - // Estimated: `5204` - // Minimum execution time: 17_636 nanoseconds. - Weight::from_parts(18_399_000, 5204) + // Estimated: `3666` + // Minimum execution time: 19_940_000 picoseconds. + Weight::from_parts(20_301_000, 3666) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index aa734850aae43..c88ed0120fe59 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = [ +scale-info = { version = "2.5.0", default-features = false, features = [ "derive", ] } log = { version = "0.4.17", default-features = false } diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index 4d48f17909c0e..a5946c6a1d3c1 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -219,7 +219,7 @@ frame_benchmarking::benchmarks! { finalize_signed_phase_accept_solution { let receiver = account("receiver", 0, SEED); - let initial_balance = T::Currency::minimum_balance() * 10u32.into(); + let initial_balance = T::Currency::minimum_balance() + 10u32.into(); T::Currency::make_free_balance_be(&receiver, initial_balance); let ready = Default::default(); let deposit: BalanceOf = 10u32.into(); @@ -228,7 +228,7 @@ frame_benchmarking::benchmarks! { let call_fee: BalanceOf = 30u32.into(); assert_ok!(T::Currency::reserve(&receiver, deposit)); - assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + assert_eq!(T::Currency::free_balance(&receiver), T::Currency::minimum_balance()); }: { >::finalize_signed_phase_accept_solution( ready, @@ -246,17 +246,17 @@ frame_benchmarking::benchmarks! 
{ finalize_signed_phase_reject_solution { let receiver = account("receiver", 0, SEED); - let initial_balance = T::Currency::minimum_balance().max(One::one()) * 10u32.into(); + let initial_balance = T::Currency::minimum_balance() + 10u32.into(); let deposit: BalanceOf = 10u32.into(); T::Currency::make_free_balance_be(&receiver, initial_balance); assert_ok!(T::Currency::reserve(&receiver, deposit)); - assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + assert_eq!(T::Currency::free_balance(&receiver), T::Currency::minimum_balance()); assert_eq!(T::Currency::reserved_balance(&receiver), 10u32.into()); }: { >::finalize_signed_phase_reject_solution(&receiver, deposit) } verify { - assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + assert_eq!(T::Currency::free_balance(&receiver), T::Currency::minimum_balance()); assert_eq!(T::Currency::reserved_balance(&receiver), 0u32.into()); } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 80bab68074c87..3599037104d18 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -502,10 +502,10 @@ where fn eq(&self, other: &Self) -> bool { use ElectionError::*; match (self, other) { - (&Feasibility(ref x), &Feasibility(ref y)) if x == y => true, - (&Miner(ref x), &Miner(ref y)) if x == y => true, - (&DataProvider(ref x), &DataProvider(ref y)) if x == y => true, - (&Fallback(ref x), &Fallback(ref y)) if x == y => true, + (Feasibility(x), Feasibility(y)) if x == y => true, + (Miner(x), Miner(y)) if x == y => true, + (DataProvider(x), DataProvider(y)) if x == y => true, + (Fallback(x), Fallback(y)) if x == y => true, _ => false, } } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index da7a0cf1dd263..8c18777606048 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -254,6 +254,10 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } #[derive(Default, Eq, PartialEq, Debug, Clone, Copy)] diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 90be98ece489a..a1ba3514ca469 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -82,9 +82,9 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: // Measured: `994` - // Estimated: `6983` - // Minimum execution time: 17_801 nanoseconds. - Weight::from_parts(18_364_000, 6983) + // Estimated: `3481` + // Minimum execution time: 21_239_000 picoseconds. 
+ Weight::from_parts(21_970_000, 3481) .saturating_add(T::DbWeight::get().reads(8_u64)) } /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -94,9 +94,9 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: // Measured: `114` - // Estimated: `1218` - // Minimum execution time: 12_814 nanoseconds. - Weight::from_parts(13_154_000, 1218) + // Estimated: `1599` + // Minimum execution time: 13_913_000 picoseconds. + Weight::from_parts(14_329_000, 1599) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,9 +107,9 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `114` - // Estimated: `1218` - // Minimum execution time: 14_565 nanoseconds. - Weight::from_parts(15_097_000, 1218) + // Estimated: `1599` + // Minimum execution time: 15_377_000 picoseconds. + Weight::from_parts(15_714_000, 1599) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -119,10 +119,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) fn finalize_signed_phase_accept_solution() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `2809` - // Minimum execution time: 23_341 nanoseconds. - Weight::from_parts(23_770_000, 2809) + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 32_899_000 picoseconds. + Weight::from_parts(33_455_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -130,10 +130,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn finalize_signed_phase_reject_solution() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `2603` - // Minimum execution time: 16_662 nanoseconds. - Weight::from_parts(16_898_000, 2603) + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 22_532_000 picoseconds. + Weight::from_parts(23_039_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 215_168 nanoseconds. - Weight::from_parts(219_887_000, 0) - // Standard Error: 1_444 - .saturating_add(Weight::from_parts(146_388, 0).saturating_mul(v.into())) + // Minimum execution time: 253_511_000 picoseconds. + Weight::from_parts(261_190_000, 0) + // Standard Error: 1_621 + .saturating_add(Weight::from_parts(157_608, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -177,18 +177,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `368 + a * (768 ±0) + d * (48 ±0)` - // Estimated: `9540 + a * (6912 ±0) + d * (441 ±0)` - // Minimum execution time: 268_021 nanoseconds. 
- Weight::from_parts(72_491_937, 9540) - // Standard Error: 2_910 - .saturating_add(Weight::from_parts(303_955, 0).saturating_mul(a.into())) - // Standard Error: 4_363 - .saturating_add(Weight::from_parts(167_369, 0).saturating_mul(d.into())) + // Measured: `337 + a * (768 ±0) + d * (48 ±0)` + // Estimated: `3889 + a * (768 ±0) + d * (49 ±0)` + // Minimum execution time: 284_994_000 picoseconds. + Weight::from_parts(97_696_734, 3889) + // Standard Error: 4_172 + .saturating_add(Weight::from_parts(331_569, 0).saturating_mul(a.into())) + // Standard Error: 6_254 + .saturating_add(Weight::from_parts(92_198, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) - .saturating_add(Weight::from_parts(0, 6912).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 441).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(d.into())) } /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) @@ -204,10 +204,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `924` - // Estimated: `7111` - // Minimum execution time: 44_177 nanoseconds. - Weight::from_parts(44_663_000, 7111) + // Measured: `893` + // Estimated: `2378` + // Minimum execution time: 52_194_000 picoseconds. + Weight::from_parts(53_062_000, 2378) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -221,54 +221,54 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn submit_unsigned(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `251 + v * (553 ±0) + t * (32 ±0)` - // Estimated: `5222 + v * (3871 ±0) + t * (224 ±0)` - // Minimum execution time: 4_425_457 nanoseconds. 
- Weight::from_parts(4_445_889_000, 5222) - // Standard Error: 13_250 - .saturating_add(Weight::from_parts(48_844, 0).saturating_mul(v.into())) - // Standard Error: 39_266 - .saturating_add(Weight::from_parts(4_144_034, 0).saturating_mul(a.into())) + // Measured: `219 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1704 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 4_843_067_000 picoseconds. + Weight::from_parts(4_860_833_000, 1704) + // Standard Error: 14_594 + .saturating_add(Weight::from_parts(76_611, 0).saturating_mul(v.into())) + // Standard Error: 43_249 + .saturating_add(Weight::from_parts(4_347_887, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 3871).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 224).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn feasibility_check(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `226 + v * (553 ±0) + t * (32 ±0)` - // Estimated: `2884 + v * (2212 ±0) + t * (128 ±0)` - // Minimum execution time: 3_812_071 nanoseconds. - Weight::from_parts(3_826_375_000, 2884) - // Standard Error: 11_601 - .saturating_add(Weight::from_parts(145_309, 0).saturating_mul(v.into())) - // Standard Error: 34_378 - .saturating_add(Weight::from_parts(3_223_977, 0).saturating_mul(a.into())) + // Measured: `194 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1679 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 4_190_524_000 picoseconds. 
+ Weight::from_parts(4_200_207_000, 1679) + // Standard Error: 12_454 + .saturating_add(Weight::from_parts(166_342, 0).saturating_mul(v.into())) + // Standard Error: 36_906 + .saturating_add(Weight::from_parts(3_493_372, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(Weight::from_parts(0, 2212).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 128).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } } @@ -293,9 +293,9 @@ impl WeightInfo for () { fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: // Measured: `994` - // Estimated: `6983` - // Minimum execution time: 17_801 nanoseconds. - Weight::from_parts(18_364_000, 6983) + // Estimated: `3481` + // Minimum execution time: 21_239_000 picoseconds. + Weight::from_parts(21_970_000, 3481) .saturating_add(RocksDbWeight::get().reads(8_u64)) } /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -305,9 +305,9 @@ impl WeightInfo for () { fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: // Measured: `114` - // Estimated: `1218` - // Minimum execution time: 12_814 nanoseconds. - Weight::from_parts(13_154_000, 1218) + // Estimated: `1599` + // Minimum execution time: 13_913_000 picoseconds. + Weight::from_parts(14_329_000, 1599) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -318,9 +318,9 @@ impl WeightInfo for () { fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `114` - // Estimated: `1218` - // Minimum execution time: 14_565 nanoseconds. - Weight::from_parts(15_097_000, 1218) + // Estimated: `1599` + // Minimum execution time: 15_377_000 picoseconds. + Weight::from_parts(15_714_000, 1599) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -330,10 +330,10 @@ impl WeightInfo for () { /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) fn finalize_signed_phase_accept_solution() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `2809` - // Minimum execution time: 23_341 nanoseconds. - Weight::from_parts(23_770_000, 2809) + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 32_899_000 picoseconds. + Weight::from_parts(33_455_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -341,10 +341,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn finalize_signed_phase_reject_solution() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `2603` - // Minimum execution time: 16_662 nanoseconds. - Weight::from_parts(16_898_000, 2603) + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 22_532_000 picoseconds. + Weight::from_parts(23_039_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -360,10 +360,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 215_168 nanoseconds. 
- Weight::from_parts(219_887_000, 0) - // Standard Error: 1_444 - .saturating_add(Weight::from_parts(146_388, 0).saturating_mul(v.into())) + // Minimum execution time: 253_511_000 picoseconds. + Weight::from_parts(261_190_000, 0) + // Standard Error: 1_621 + .saturating_add(Weight::from_parts(157_608, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -388,18 +388,18 @@ impl WeightInfo for () { /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `368 + a * (768 ±0) + d * (48 ±0)` - // Estimated: `9540 + a * (6912 ±0) + d * (441 ±0)` - // Minimum execution time: 268_021 nanoseconds. - Weight::from_parts(72_491_937, 9540) - // Standard Error: 2_910 - .saturating_add(Weight::from_parts(303_955, 0).saturating_mul(a.into())) - // Standard Error: 4_363 - .saturating_add(Weight::from_parts(167_369, 0).saturating_mul(d.into())) + // Measured: `337 + a * (768 ±0) + d * (48 ±0)` + // Estimated: `3889 + a * (768 ±0) + d * (49 ±0)` + // Minimum execution time: 284_994_000 picoseconds. + Weight::from_parts(97_696_734, 3889) + // Standard Error: 4_172 + .saturating_add(Weight::from_parts(331_569, 0).saturating_mul(a.into())) + // Standard Error: 6_254 + .saturating_add(Weight::from_parts(92_198, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) - .saturating_add(Weight::from_parts(0, 6912).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 441).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(d.into())) } /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) @@ -415,10 +415,10 @@ impl WeightInfo for () { /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `924` - // Estimated: `7111` - // Minimum execution time: 44_177 nanoseconds. - Weight::from_parts(44_663_000, 7111) + // Measured: `893` + // Estimated: `2378` + // Minimum execution time: 52_194_000 picoseconds. + Weight::from_parts(53_062_000, 2378) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -432,53 +432,53 @@ impl WeightInfo for () { /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `v` is `[1000, 2000]`. 
/// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn submit_unsigned(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `251 + v * (553 ±0) + t * (32 ±0)` - // Estimated: `5222 + v * (3871 ±0) + t * (224 ±0)` - // Minimum execution time: 4_425_457 nanoseconds. - Weight::from_parts(4_445_889_000, 5222) - // Standard Error: 13_250 - .saturating_add(Weight::from_parts(48_844, 0).saturating_mul(v.into())) - // Standard Error: 39_266 - .saturating_add(Weight::from_parts(4_144_034, 0).saturating_mul(a.into())) + // Measured: `219 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1704 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 4_843_067_000 picoseconds. + Weight::from_parts(4_860_833_000, 1704) + // Standard Error: 14_594 + .saturating_add(Weight::from_parts(76_611, 0).saturating_mul(v.into())) + // Standard Error: 43_249 + .saturating_add(Weight::from_parts(4_347_887, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 3871).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 224).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn feasibility_check(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `226 + v * (553 ±0) + t * (32 ±0)` - // Estimated: `2884 + v * (2212 ±0) + t * (128 ±0)` - // Minimum execution time: 3_812_071 nanoseconds. - Weight::from_parts(3_826_375_000, 2884) - // Standard Error: 11_601 - .saturating_add(Weight::from_parts(145_309, 0).saturating_mul(v.into())) - // Standard Error: 34_378 - .saturating_add(Weight::from_parts(3_223_977, 0).saturating_mul(a.into())) + // Measured: `194 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1679 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 4_190_524_000 picoseconds. 
+ Weight::from_parts(4_200_207_000, 1679) + // Standard Error: 12_454 + .saturating_add(Weight::from_parts(166_342, 0).saturating_mul(v.into())) + // Standard Error: 36_906 + .saturating_add(Weight::from_parts(3_493_372, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(Weight::from_parts(0, 2212).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 128).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } } diff --git a/frame/election-provider-multi-phase/test-staking-e2e/Cargo.lock b/frame/election-provider-multi-phase/test-staking-e2e/Cargo.lock new file mode 100644 index 0000000000000..b179ab7059a48 --- /dev/null +++ b/frame/election-provider-multi-phase/test-staking-e2e/Cargo.lock @@ -0,0 +1,3609 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "addr2line" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom 0.2.8", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", + "getrandom 0.2.8", + "once_cell", + "version_check", +] + +[[package]] +name = "aho-corasick" +version = "0.7.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +dependencies = [ + "memchr", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" + +[[package]] +name = "approx" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] + +[[package]] +name = "array-bytes" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + +[[package]] +name = "async-trait" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + +[[package]] +name = "base58" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64ct" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" +dependencies = [ + "digest 0.10.6", +] + +[[package]] +name = "blake2b_simd" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" +dependencies = [ + "arrayref", + "arrayvec 0.7.2", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array 0.12.4", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.6", +] + +[[package]] +name = "block-buffer" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +dependencies = [ + "generic-array 0.14.6", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "bounded-collections" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a071c348a5ef6da1d3a87166b408170b46002382b1dda83992b5c2208cefb370" +dependencies = [ + "log", + "parity-scale-codec", + "scale-info", + "serde", +] + +[[package]] +name = "bumpalo" +version = "3.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "bytemuck" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytes" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" + +[[package]] +name = "cc" +version = "1.0.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" + +[[package]] +name = "cfg-expr" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aacacf4d96c24b2ad6eb8ee6df040e4f27b0d0b39a5710c30091baa830485db" +dependencies = [ + "smallvec", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +dependencies = [ + "iana-time-zone", + "num-integer", + "num-traits", + "winapi", +] + +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + 
"unicode-width", +] + +[[package]] +name = "const-oid" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" + +[[package]] +name = "constant_time_eq" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + +[[package]] +name = "cpp_demangle" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "cpufeatures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +dependencies = [ + "libc", +] + +[[package]] +name = "cranelift-entity" +version = "0.93.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cf583f7b093f291005f9fb1323e2c37f6ee4c7909e39ce016b2e8360d461705" +dependencies = [ + "serde", +] + +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array 0.14.6", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array 0.14.6", + "typenum", +] + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.6", + "subtle", +] + +[[package]] +name = "crypto-mac" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +dependencies = [ + "generic-array 0.14.6", + "subtle", +] + +[[package]] +name = "curve25519-dalek" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" +dependencies = [ + "byteorder", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle", + "zeroize", +] + +[[package]] +name = "cxx" +version = "1.0.83" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdf07d07d6531bfcdbe9b8b739b104610c6508dcc4d63b410585faf338241daf" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2eb5b96ecdc99f72657332953d4d9c50135af1bac34277801cc3937906ebd39" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac040a39517fd1674e0f32177648334b0f4074625b5588a64519804ba0553b12" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1362b0ddcfc4eb0a1f57b68bd77dd99f0e826958a96abd0ae9bd092e114ffed6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "derive-syn-parse" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79116f119dd1dba1abf1f3405f03b9b0e79a27a3883864bfebded8a3dc768cd" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.4", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.6", +] + +[[package]] +name = "digest" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +dependencies = [ + "block-buffer 0.10.3", + "crypto-common", + "subtle", +] + +[[package]] +name = "downcast-rs" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" + +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dyn-clone" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" + +[[package]] +name = "ecdsa" +version = "0.14.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der", + "elliptic-curve", + "rfc6979", + "signature", +] + +[[package]] +name = "ed25519" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.2.0", + "ed25519", + "sha2 0.9.9", + "zeroize", +] + +[[package]] +name = "ed25519-zebra" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c24f403d068ad0b359e577a77f92392118be3f3c927538f2bb544a5ecd828c6" +dependencies = [ + "curve25519-dalek 3.2.0", + "hashbrown 0.12.3", + "hex", + "rand_core 0.6.4", + "sha2 0.9.9", + "zeroize", +] + +[[package]] +name = "either" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" + +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct", + "crypto-bigint", + "der", + "digest 0.10.6", + "ff", + "generic-array 0.14.6", + "group", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "environmental" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48c92028aaa870e83d51c64e5d4e0b6981b360c522198c23959f219a4e1b15b" + +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "expander" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f360349150728553f92e4c997a16af8915f418d3a0f21b440d34c5632f16ed84" +dependencies = [ + "blake2", + "fs-err", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + 
"rustc-hex", + "static_assertions", +] + +[[package]] +name = "form_urlencoded" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "frame-benchmarking" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-support-procedural", + "frame-system", + "linregress", + "log", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-io", + "sp-runtime", + "sp-runtime-interface", + "sp-std", + "sp-storage", + "static_assertions", +] + +[[package]] +name = "frame-election-provider-solution-type" +version = "4.0.0-dev" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-election-provider-support" +version = "4.0.0-dev" +dependencies = [ + "frame-election-provider-solution-type", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-npos-elections", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "frame-metadata" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", + "serde", +] + +[[package]] +name = "frame-support" +version = "4.0.0-dev" +dependencies = [ + "bitflags", + "environmental", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "k256", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "smallvec", + "sp-api", + "sp-arithmetic", + "sp-core", + "sp-core-hashing-proc-macro", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-state-machine", + "sp-std", + "sp-tracing", + "sp-weights", + "tt-call", +] + +[[package]] +name = "frame-support-procedural" +version = "4.0.0-dev" +dependencies = [ + "Inflector", + "cfg-expr", + "derive-syn-parse", + "frame-support-procedural-tools", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "4.0.0-dev" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "3.0.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-system" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-version", + "sp-weights", +] + +[[package]] +name = "fs-err" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = 
"futures-channel" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" + +[[package]] +name = "futures-executor" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" + +[[package]] +name = "futures-macro" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" + +[[package]] +name = "futures-task" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" + +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + +[[package]] +name = "futures-util" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "gimli" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +dependencies = [ + "fallible-iterator", + "stable_deref_trait", +] + +[[package]] +name = 
"group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "hash-db" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e7d7786361d7425ae2fe4f9e407eb0efaa0840f5212d109cc018c40c35c6ab4" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.3", +] + +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +dependencies = [ + "crypto-mac 0.11.1", + "digest 0.9.0", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.6", +] + +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.6", + "hmac 0.8.1", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "indexmap" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "integer-sqrt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" + +[[package]] +name = "js-sys" +version = "0.3.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "k256" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "sha2 0.10.6", +] + +[[package]] +name = "keccak" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.138" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" + +[[package]] +name = "libm" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" + +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "link-cplusplus" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +dependencies = [ + "cc", +] + +[[package]] +name = "linregress" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "475015a7f8f017edb28d2e69813be23500ad4b32cfe3421c4148efc97324ee52" +dependencies = [ + "nalgebra", +] + +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + +[[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "mach" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +dependencies = [ + "libc", +] + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matrixmultiply" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" +dependencies = [ + "rawpointer", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "memfd" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" +dependencies = [ + "rustix", +] + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" 
+dependencies = [ + "autocfg", +] + +[[package]] +name = "memory-db" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808b50db46293432a45e63bc15ea51e0ab4c0a1647b8eb114e31a3e698dd6fbe" +dependencies = [ + "hash-db", +] + +[[package]] +name = "memory_units" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" + +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "miniz_oxide" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" +dependencies = [ + "adler", +] + +[[package]] +name = "nalgebra" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d68d47bba83f9e2006d117a9a33af1524e655516b8919caac694427a6fb1e511" +dependencies = [ + "approx", + "matrixmultiply", + "nalgebra-macros", + "num-complex", + "num-rational", + "num-traits", + "simba", + "typenum", +] + +[[package]] +name = "nalgebra-macros" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d232c68884c0c99810a5a4d333ef7e47689cfd0edc85efc9e54e1e6bf5212766" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-format" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" +dependencies = [ + "arrayvec 0.7.2", + "itoa", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +dependencies = [ + 
"hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +dependencies = [ + "crc32fast", + "hashbrown 0.12.3", + "indexmap", + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "pallet-authorship" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-bags-list" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-tracing", +] + +[[package]] +name = "pallet-balances" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-election-provider-e2e-test" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-bags-list", + "pallet-balances", + "pallet-election-provider-multi-phase", + "pallet-session", + "pallet-staking", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-npos-elections", + "sp-runtime", + "sp-staking", + "sp-std", + "sp-tracing", +] + +[[package]] +name = "pallet-election-provider-multi-phase" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-election-provider-support-benchmarking", + "parity-scale-codec", + "rand 0.8.5", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-npos-elections", + "sp-runtime", + "sp-std", + "strum", +] + +[[package]] +name = "pallet-election-provider-support-benchmarking" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-system", + "parity-scale-codec", + "sp-npos-elections", + "sp-runtime", +] + +[[package]] +name = "pallet-session" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std", + "sp-trie", +] + +[[package]] +name = "pallet-staking" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-authorship", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-application-crypto", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + +[[package]] +name = 
"pallet-timestamp" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + +[[package]] +name = "parity-scale-codec" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" +dependencies = [ + "arrayvec 0.7.2", + "bitvec", + "byte-slice-cast", + "bytes", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-wasm" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys 0.42.0", +] + +[[package]] +name = "paste" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1c2c742266c2f1041c914ba65355a83ae8747b05f208319784083583494b4b" + +[[package]] +name = "pbkdf2" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" +dependencies = [ + "crypto-mac 0.11.1", +] + +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.6", +] + +[[package]] +name = "percent-encoding" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" + +[[package]] +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "primitive-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "scale-info", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +dependencies = [ + "once_cell", + "thiserror", + "toml", +] + +[[package]] +name = "proc-macro2" +version = "1.0.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "psm" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" +dependencies = [ + "cc", +] + +[[package]] +name = "quote" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.8", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies 
= [ + "bitflags", +] + +[[package]] +name = "ref-cast" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53b15debb4f9d60d767cd8ca9ef7abb2452922f3214671ff052defc7f3502c44" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abfa8511e9e94fd3de6585a3d3cd00e01ed556dc9814829280af0e8dc72a8f36" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regex" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" + +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint", + "hmac 0.12.1", + "zeroize", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustix" +version = "0.36.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", +] + +[[package]] +name = "rustversion" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" + +[[package]] +name = "ryu" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" + +[[package]] +name = "safe_arch" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "scale-info" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" +dependencies = [ + "bitvec", + "cfg-if", + "derive_more", + "parity-scale-codec", + "scale-info-derive", + "serde", +] + +[[package]] +name = "scale-info-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "schnellru" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" +dependencies = [ + "ahash 0.8.3", + "cfg-if", + "hashbrown 0.13.2", +] + +[[package]] +name = "schnorrkel" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.3", + "getrandom 0.1.16", + "merlin", + "rand 0.7.3", + "rand_core 0.5.1", + "sha2 0.8.2", + "subtle", + "zeroize", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "scratch" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" + +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.6", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9512ffd81e3a3503ed401f79c33168b9148c75038956039166cd750eaa037c3" +dependencies = [ + "secp256k1-sys", +] + +[[package]] +name = "secp256k1-sys" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" +dependencies = [ + "cc", +] + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "zeroize", +] + +[[package]] +name = "serde" +version = "1.0.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + 
"opaque-debug 0.3.0", +] + +[[package]] +name = "sha2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.6", +] + +[[package]] +name = "sha3" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +dependencies = [ + "digest 0.10.6", + "keccak", +] + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.6", + "rand_core 0.6.4", +] + +[[package]] +name = "simba" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50582927ed6f77e4ac020c057f37a268fc6aebc29225050365aacbb9deeeddc4" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste", + "wide", +] + +[[package]] +name = "slab" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + +[[package]] +name = "sp-api" +version = "4.0.0-dev" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", + "sp-version", + "thiserror", +] + +[[package]] +name = "sp-api-proc-macro" +version = "4.0.0-dev" +dependencies = [ + "Inflector", + "blake2", + "expander", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-application-crypto" +version = "7.0.0" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-arithmetic" +version = "6.0.0" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "scale-info", + "serde", + "sp-std", + "static_assertions", +] + +[[package]] +name = "sp-core" +version = "7.0.0" +dependencies = [ + "array-bytes", + "base58", + "bitflags", + "blake2", + "bounded-collections", + "dyn-clonable", + "ed25519-zebra", + "futures", + "hash-db", + "hash256-std-hasher", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "parity-scale-codec", + "parking_lot", + "primitive-types", + "rand 0.8.5", + "regex", + "scale-info", + "schnorrkel", + "secp256k1", + "secrecy", + "serde", + "sp-core-hashing", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", + "ss58-registry", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "zeroize", +] + +[[package]] +name = "sp-core-hashing" +version = "5.0.0" +dependencies = [ + "blake2b_simd", + "byteorder", + "digest 0.10.6", + "sha2 0.10.6", + "sha3", + "sp-std", + "twox-hash", +] + +[[package]] +name = "sp-core-hashing-proc-macro" +version = "5.0.0" +dependencies = [ + "proc-macro2", + "quote", + 
"sp-core-hashing", + "syn", +] + +[[package]] +name = "sp-debug-derive" +version = "5.0.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.13.0" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std", + "sp-storage", +] + +[[package]] +name = "sp-inherents" +version = "4.0.0-dev" +dependencies = [ + "async-trait", + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-runtime", + "sp-std", + "thiserror", +] + +[[package]] +name = "sp-io" +version = "7.0.0" +dependencies = [ + "bytes", + "ed25519", + "ed25519-dalek", + "futures", + "libsecp256k1", + "log", + "parity-scale-codec", + "secp256k1", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime-interface", + "sp-state-machine", + "sp-std", + "sp-tracing", + "sp-trie", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-keystore" +version = "0.13.0" +dependencies = [ + "async-trait", + "futures", + "merlin", + "parity-scale-codec", + "parking_lot", + "schnorrkel", + "sp-core", + "sp-externalities", + "thiserror", +] + +[[package]] +name = "sp-npos-elections" +version = "4.0.0-dev" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-arithmetic", + "sp-core", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-panic-handler" +version = "5.0.0" +dependencies = [ + "backtrace", + "lazy_static", + "regex", +] + +[[package]] +name = "sp-runtime" +version = "7.0.0" +dependencies = [ + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "paste", + "rand 0.8.5", + "scale-info", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-std", + "sp-weights", +] + +[[package]] +name = "sp-runtime-interface" +version = "7.0.0" +dependencies = [ + "bytes", + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "6.0.0" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-session" +version = "4.0.0-dev" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-core", + "sp-runtime", + "sp-staking", + "sp-std", +] + +[[package]] +name = "sp-staking" +version = "4.0.0-dev" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-state-machine" +version = "0.13.0" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec", + "parking_lot", + "rand 0.8.5", + "smallvec", + "sp-core", + "sp-externalities", + "sp-panic-handler", + "sp-std", + "sp-trie", + "thiserror", + "tracing", +] + +[[package]] +name = "sp-std" +version = "5.0.0" + +[[package]] +name = "sp-storage" +version = "7.0.0" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-timestamp" +version = "4.0.0-dev" +dependencies = [ + "async-trait", + "futures-timer", + "log", + "parity-scale-codec", + "sp-inherents", + "sp-runtime", + "sp-std", + "thiserror", +] + +[[package]] +name = "sp-tracing" +version = "6.0.0" +dependencies = [ + "parity-scale-codec", + "sp-std", + "tracing", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sp-trie" +version = "7.0.0" 
+dependencies = [ + "ahash 0.8.3", + "hash-db", + "hashbrown 0.12.3", + "lazy_static", + "memory-db", + "nohash-hasher", + "parity-scale-codec", + "parking_lot", + "scale-info", + "schnellru", + "sp-core", + "sp-std", + "thiserror", + "tracing", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-version" +version = "5.0.0" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "parity-wasm", + "scale-info", + "serde", + "sp-core-hashing-proc-macro", + "sp-runtime", + "sp-std", + "sp-version-proc-macro", + "thiserror", +] + +[[package]] +name = "sp-version-proc-macro" +version = "4.0.0-dev" +dependencies = [ + "parity-scale-codec", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-wasm-interface" +version = "7.0.0" +dependencies = [ + "anyhow", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-std", + "wasmi", + "wasmtime", +] + +[[package]] +name = "sp-weights" +version = "4.0.0" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "smallvec", + "sp-arithmetic", + "sp-core", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "ss58-registry" +version = "1.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d92659e7d18d82b803824a9ba5a6022cff101c3491d027c1c1d8d30e749284" +dependencies = [ + "Inflector", + "num-format", + "proc-macro2", + "quote", + "serde", + "serde_json", + "unicode-xid", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + +[[package]] +name = "substrate-bip39" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" +dependencies = [ + "hmac 0.11.0", + "pbkdf2 0.8.0", + "schnorrkel", + "sha2 0.9.9", + "zeroize", +] + +[[package]] +name = "subtle" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" + +[[package]] +name = "syn" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "target-lexicon" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", +] + +[[package]] +name = "tiny-bip39" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" +dependencies = [ + "anyhow", + "hmac 0.12.1", + "once_cell", + "pbkdf2 0.11.0", + "rand 0.8.5", + "rustc-hash", + "sha2 0.10.6", + "thiserror", + "unicode-normalization", + "wasm-bindgen", + "zeroize", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "toml" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +dependencies = [ + "serde", +] + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "trie-db" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d75c77ea43f2ad8ea9d9c58de49dfc9c3995bdef32b503df7883ff054e7f1" +dependencies = [ + "hash-db", + "hashbrown 0.13.2", + "log", + "rustc-hex", + "smallvec", +] + +[[package]] +name = "trie-root" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4ed310ef5ab98f5fa467900ed906cb9232dd5376597e00fd4cba2a449d06c0b" +dependencies = [ + "hash-db", +] + +[[package]] +name = "tt-call" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" + +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "digest 0.10.6", + "rand 0.8.5", + "static_assertions", +] + +[[package]] +name = "typenum" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" + +[[package]] +name = "unicode-ident" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-width" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" + +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] +name = "url" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" + +[[package]] +name = "wasmi" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" +dependencies = [ + "parity-wasm", + "wasmi-validation", + "wasmi_core", +] + +[[package]] +name = "wasmi-validation" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "wasmi_core" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" +dependencies = [ + "downcast-rs", + "libm", + "memory_units", + "num-rational", + "num-traits", +] + +[[package]] +name = "wasmparser" +version = "0.100.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64b20236ab624147dfbb62cf12a19aaf66af0e41b8398838b66e997d07d269d4" +dependencies = [ + "indexmap", + "url", +] + +[[package]] +name = "wasmtime" +version = "6.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e89f9819523447330ffd70367ef4a18d8c832e24e8150fe054d1d912841632" +dependencies = [ + "anyhow", + "bincode", + "cfg-if", + "indexmap", + "libc", + "log", + "object", + "once_cell", + "paste", + "psm", + "serde", + "target-lexicon", + "wasmparser", + "wasmtime-environ", + "wasmtime-jit", + "wasmtime-runtime", + "windows-sys 0.42.0", +] + +[[package]] +name = "wasmtime-asm-macros" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd3a5e46c198032da934469f3a6e48649d1f9142438e4fd4617b68a35644b8a" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "wasmtime-environ" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a6db9fc52985ba06ca601f2ff0ff1f526c5d724c7ac267b47326304b0c97883" +dependencies = [ + "anyhow", + "cranelift-entity", + "gimli", + "indexmap", + "log", + "object", + "serde", + "target-lexicon", + "thiserror", + "wasmparser", + "wasmtime-types", +] + +[[package]] +name = "wasmtime-jit" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b77e3a52cd84d0f7f18554afa8060cfe564ccac61e3b0802d3fd4084772fa5f6" +dependencies = [ + "addr2line", + "anyhow", + "bincode", + "cfg-if", + "cpp_demangle", + "gimli", + "log", + "object", + "rustc-demangle", + "serde", + "target-lexicon", + "wasmtime-environ", + "wasmtime-jit-icache-coherence", + "wasmtime-runtime", + "windows-sys 0.42.0", +] + +[[package]] +name = "wasmtime-jit-debug" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0245e8a9347017c7185a72e215218a802ff561545c242953c11ba00fccc930f" +dependencies = [ + "once_cell", +] + +[[package]] +name = "wasmtime-jit-icache-coherence" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67d412e9340ab1c83867051d8d1d7c90aa8c9afc91da086088068e2734e25064" +dependencies = [ + "cfg-if", + "libc", + "windows-sys 0.42.0", +] + +[[package]] +name = "wasmtime-runtime" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d594e791b5fdd4dbaf8cf7ae62f2e4ff85018ce90f483ca6f42947688e48827d" +dependencies = [ + "anyhow", + "cc", + "cfg-if", + "indexmap", + "libc", + "log", + "mach", + "memfd", + "memoffset", + "paste", + "rand 0.8.5", + "rustix", + "wasmtime-asm-macros", + "wasmtime-environ", + "wasmtime-jit-debug", + "windows-sys 0.42.0", +] + +[[package]] +name = "wasmtime-types" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6688d6f96d4dbc1f89fab626c56c1778936d122b5f4ae7a57c2eb42b8d982e2" +dependencies = [ + "cranelift-entity", + "serde", + "thiserror", + "wasmparser", +] + +[[package]] +name = "wide" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b689b6c49d6549434bf944e6b0f39238cf63693cb7a147e9d887507fffa3b223" +dependencies = [ + "bytemuck", + "safe_arch", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] 
+name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "zeroize" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] diff --git a/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml new file mode 100644 index 0000000000000..432b732b48e9d --- /dev/null +++ b/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "pallet-election-provider-e2e-test" +version = "1.0.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME election provider multi phase pallet tests with staking pallet, bags-list and session pallets" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[workspace] + +[dev-dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +scale-info = { version = "2.0.1", features = ["derive"] } +log = { version = "0.4.17", default-features = false } + +sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-io = { version = "7.0.0", path = "../../../primitives/io" } +sp-std = { version = "5.0.0", path = "../../../primitives/std" } +sp-staking = { version = "4.0.0-dev", path = "../../../primitives/staking" } +sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/npos-elections" } +sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } + +frame-system = { version = "4.0.0-dev", path = "../../system" } +frame-support = { version = "4.0.0-dev", path = "../../support" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../../election-provider-support" } + +pallet-election-provider-multi-phase = { version = "4.0.0-dev", path = "../../election-provider-multi-phase" } +pallet-staking = { version = "4.0.0-dev", path = "../../staking" } +pallet-bags-list = { version = "4.0.0-dev", path = "../../bags-list" } +pallet-balances = { version = "4.0.0-dev", path = "../../balances" } +pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } +pallet-session = { version = "4.0.0-dev", path = "../../session" } + diff --git a/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs new file mode 100644 index 0000000000000..c8f7d48a89ecf --- /dev/null +++ b/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -0,0 +1,206 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg(test)] +mod mock; + +pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; + +use mock::*; +use sp_core::Get; +use sp_npos_elections::{to_supports, StakedAssignment}; +use sp_runtime::Perbill; + +use crate::mock::RuntimeOrigin; + +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: crate::LOG_TARGET, + concat!("🛠️ ", $patter) $(, $values)* + ) + }; +} + +fn log_current_time() { + log!( + trace, + "block: {:?}, session: {:?}, era: {:?}, EPM phase: {:?} ts: {:?}", + System::block_number(), + Session::current_index(), + Staking::current_era(), + ElectionProviderMultiPhase::current_phase(), + Timestamp::now() + ); +} + +#[test] +fn block_progression_works() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + assert!(ElectionProviderMultiPhase::current_phase().is_off()); + + assert!(start_next_active_era().is_ok()); + assert_eq!(active_era(), 1); + assert_eq!(Session::current_index(), >::get()); + + assert!(ElectionProviderMultiPhase::current_phase().is_off()); + + roll_to_epm_signed(); + assert!(ElectionProviderMultiPhase::current_phase().is_signed()); + }); + + ExtBuilder::default().build_and_execute(|| { + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + assert!(ElectionProviderMultiPhase::current_phase().is_off()); + + assert!(start_next_active_era_delayed_solution().is_ok()); + // if the solution is delayed, EPM will end up in emergency mode.. + assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + // .. era won't progress.. + assert_eq!(active_era(), 0); + // .. but session does. + assert_eq!(Session::current_index(), 2); + }) +} + +#[test] +/// Replicates the Kusama incident of 8th Dec 2022 and its resolution through the governance +/// fallback. +/// +/// After enough slashes exceeded the `Staking::OffendingValidatorsThreshold`, the staking pallet +/// set `Forcing::ForceNew`. When a new session starts, staking will start to force a new era and +/// calls ::elect(). If at this point EPM and the staking miners did not +/// have enough time to queue a new solution (snapshot + solution submission), the election request +/// fails. If there is no election fallback mechanism in place, EPM enters in emergency mode. +/// Recovery: Once EPM is in emergency mode, subsequent calls to `elect()` will fail until a new +/// solution is added to EPM's `QueuedSolution` queue. This can be achieved through +/// `Call::set_emergency_election_result` or `Call::governance_fallback` dispatchables. Once a new +/// solution is added to the queue, EPM phase transitions to `Phase::Off` and the election flow +/// restarts. Note that in this test case, the emergency throttling is disabled. +fn enters_emergency_phase_after_forcing_before_elect() { + let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); + + ExtBuilder::default().epm(epm_builder).build_and_execute(|| { + log!( + trace, + "current validators (staking): {:?}", + >::validators() + ); + let session_validators_before = Session::validators(); + + roll_to_epm_off(); + assert!(ElectionProviderMultiPhase::current_phase().is_off()); + + assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); + // slashes so that staking goes into `Forcing::ForceNew`. 
+ slash_through_offending_threshold(); + + assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::ForceNew); + + advance_session_delayed_solution(); + assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + log_current_time(); + + let era_before_delayed_next = Staking::current_era(); + // try to advance 2 eras. + assert!(start_next_active_era_delayed_solution().is_ok()); + assert_eq!(Staking::current_era(), era_before_delayed_next); + assert!(start_next_active_era().is_err()); + assert_eq!(Staking::current_era(), era_before_delayed_next); + + // EPM is still in emergency phase. + assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + + // session validator set remains the same. + assert_eq!(Session::validators(), session_validators_before); + + // performs recovery through the set emergency result. + let supports = to_supports(&vec![ + StakedAssignment { who: 21, distribution: vec![(21, 10)] }, + StakedAssignment { who: 31, distribution: vec![(21, 10), (31, 10)] }, + StakedAssignment { who: 41, distribution: vec![(41, 10)] }, + ]); + assert!(ElectionProviderMultiPhase::set_emergency_election_result( + RuntimeOrigin::root(), + supports + ) + .is_ok()); + + // EPM can now roll to signed phase to proceed with elections. The validator set is the + // expected one (i.e. set through `set_emergency_election_result`). + roll_to_epm_signed(); + //assert!(ElectionProviderMultiPhase::current_phase().is_signed()); + assert_eq!(Session::validators(), vec![21, 31, 41]); + assert_eq!(Staking::current_era(), era_before_delayed_next.map(|e| e + 1)); + }); +} + +#[test] +/// Continuously slash 10% of the active validators per era. +/// +/// Since the `OffendingValidatorsThreshold` is only checked per era, staking does not force a new +/// era even as the number of active validators is decreasing across eras. When processing a new +/// slash, staking calculates the offending threshold based on the length of the current list of +/// active validators. Thus, slashing a percentage of the current validators that is lower than +/// `OffendingValidatorsThreshold` will never force a new era. However, as the slashes progress, if +/// the subsequent elections do not meet the minimum election untrusted score, the election will +/// fail and enter emergency mode. +fn continous_slashes_below_offending_threshold() { + let staking_builder = StakingExtBuilder::default().validator_count(10); + let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); + + ExtBuilder::default() + .staking(staking_builder) + .epm(epm_builder) + .build_and_execute(|| { + assert_eq!(Session::validators().len(), 10); + let mut active_validator_set = Session::validators(); + + roll_to_epm_signed(); + + // set a minimum election score. + assert!(set_minimum_election_score(500, 1000, 500).is_ok()); + + // slash 10% of the active validators and progress era until the minimum trusted score + // is reached. + while active_validator_set.len() > 0 { + let slashed = slash_percentage(Perbill::from_percent(10)); + assert_eq!(slashed.len(), 1); + + // break loop when era does not progress; EPM is in emergency phase as election + // failed due to election minimum score. + if start_next_active_era().is_err() { + assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + break + } + + active_validator_set = Session::validators(); + + log!( + trace, + "slashed 10% of active validators ({:?}). 
After slash: {:?}", + slashed, + active_validator_set + ); + } + }); +} diff --git a/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs new file mode 100644 index 0000000000000..19f568737c852 --- /dev/null +++ b/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -0,0 +1,779 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(dead_code)] + +use _feps::ExtendedBalance; +use frame_support::{ + parameter_types, traits, + traits::{GenesisBuild, Hooks}, + weights::constants, +}; +use frame_system::EnsureRoot; +use sp_core::{ConstU32, Get, H256}; +use sp_npos_elections::{ElectionScore, VoteWeight}; +use sp_runtime::{ + testing, + traits::{IdentityLookup, Zero}, + transaction_validity, PerU16, Perbill, +}; +use sp_staking::{ + offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + EraIndex, SessionIndex, +}; +use sp_std::prelude::*; +use std::collections::BTreeMap; + +use frame_election_provider_support::{onchain, ElectionDataProvider, SequentialPhragmen, Weight}; +use pallet_election_provider_multi_phase::{ + unsigned::MinerConfig, ElectionCompute, QueuedSolution, SolutionAccuracyOf, +}; +use pallet_staking::StakerStatus; + +use crate::{log, log_current_time}; + +pub const INIT_TIMESTAMP: u64 = 30_000; +pub const BLOCK_TIME: u64 = 1000; + +type Block = frame_system::mocking::MockBlock; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Extrinsic = testing::TestXt; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system, + ElectionProviderMultiPhase: pallet_election_provider_multi_phase, + Staking: pallet_staking, + Balances: pallet_balances, + BagsList: pallet_bags_list, + Session: pallet_session, + Historical: pallet_session::historical, + Timestamp: pallet_timestamp, + } +); + +pub(crate) type AccountId = u128; +pub(crate) type AccountIndex = u32; +pub(crate) type BlockNumber = u64; +pub(crate) type Balance = u64; +pub(crate) type VoterIndex = u32; +pub(crate) type TargetIndex = u16; +pub(crate) type Moment = u64; + +impl frame_system::Config for Runtime { + type BaseCallFilter = traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = AccountIndex; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = sp_runtime::testing::Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = 
(); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = traits::ConstU32<16>; +} + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +parameter_types! { + pub static ExistentialDeposit: Balance = 1; + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults( + Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + NORMAL_DISPATCH_RATIO, + ); +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = traits::ConstU32<1024>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +impl pallet_timestamp::Config for Runtime { + type Moment = Moment; + type OnTimestampSet = (); + type MinimumPeriod = traits::ConstU64<5>; + type WeightInfo = (); +} + +parameter_types! { + pub static Period: BlockNumber = 30; + pub static Offset: BlockNumber = 0; +} + +sp_runtime::impl_opaque_keys! { + pub struct SessionKeys { + pub other: OtherSessionHandler, + } +} + +impl pallet_session::Config for Runtime { + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionHandler = (OtherSessionHandler,); + type RuntimeEvent = RuntimeEvent; + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type WeightInfo = (); +} +impl pallet_session::historical::Config for Runtime { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +frame_election_provider_support::generate_solution_type!( + #[compact] + pub struct MockNposSolution::< + VoterIndex = VoterIndex, + TargetIndex = TargetIndex, + Accuracy = PerU16, + MaxVoters = ConstU32::<2_000> + >(16) +); + +parameter_types! { + pub static SignedPhase: BlockNumber = 10; + pub static UnsignedPhase: BlockNumber = 10; + // we expect a minimum of 3 blocks in the signed and unsigned phases before trying + // to enter the emergency phase after the election failed. 
+ pub static MinBlocksBeforeEmergency: BlockNumber = 3; + pub static MaxElectingVoters: VoterIndex = 1000; + pub static MaxElectableTargets: TargetIndex = 1000; + pub static MaxActiveValidators: u32 = 1000; + pub static OffchainRepeat: u32 = 5; + pub static MinerMaxLength: u32 = 256; + pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; + pub static TransactionPriority: transaction_validity::TransactionPriority = 1; + pub static MaxWinners: u32 = 100; + pub static MaxVotesPerVoter: u32 = 16; + pub static MaxNominations: u32 = 16; +} + +impl pallet_election_provider_multi_phase::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type EstimateCallFee = frame_support::traits::ConstU32<8>; + type SignedPhase = SignedPhase; + type UnsignedPhase = UnsignedPhase; + type BetterSignedThreshold = (); + type BetterUnsignedThreshold = (); + type OffchainRepeat = OffchainRepeat; + type MinerTxPriority = TransactionPriority; + type MinerConfig = Self; + type SignedMaxSubmissions = ConstU32<10>; + type SignedRewardBase = (); + type SignedDepositBase = (); + type SignedDepositByte = (); + type SignedMaxRefunds = ConstU32<3>; + type SignedDepositWeight = (); + type SignedMaxWeight = (); + type SlashHandler = (); + type RewardHandler = (); + type DataProvider = Staking; + type Fallback = + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, MaxWinners)>; + type GovernanceFallback = onchain::OnChainExecution; + type Solver = SequentialPhragmen, ()>; + type ForceOrigin = EnsureRoot; + type MaxElectableTargets = MaxElectableTargets; + type MaxElectingVoters = MaxElectingVoters; + type MaxWinners = MaxWinners; + type BenchmarkingConfig = NoopElectionProviderBenchmarkConfig; + type WeightInfo = (); +} + +impl MinerConfig for Runtime { + type AccountId = AccountId; + type Solution = MockNposSolution; + type MaxVotesPerVoter = + <::DataProvider as ElectionDataProvider>::MaxVotesPerVoter; + type MaxLength = MinerMaxLength; + type MaxWeight = MinerMaxWeight; + type MaxWinners = MaxWinners; + + fn solution_weight(_v: u32, _t: u32, _a: u32, _d: u32) -> Weight { + Weight::zero() + } +} + +const THRESHOLDS: [VoteWeight; 9] = [10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; + +parameter_types! { + pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; + pub const SessionsPerEra: sp_staking::SessionIndex = 2; + pub const BondingDuration: sp_staking::EraIndex = 28; + pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. 
+ pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(40); + pub HistoryDepth: u32 = 84; +} + +impl pallet_bags_list::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type ScoreProvider = Staking; + type BagThresholds = BagThresholds; + type Score = VoteWeight; +} + +impl pallet_staking::Config for Runtime { + type MaxNominations = MaxNominations; + type Currency = Balances; + type CurrencyBalance = Balance; + type UnixTime = Timestamp; + type CurrencyToVote = traits::SaturatingCurrencyToVote; + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); // burn slashes + type Reward = (); // rewards are minted from the void + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = SlashDeferDuration; + type AdminOrigin = EnsureRoot; // root can cancel slashes + type SessionInterface = Self; + type EraPayout = (); + type NextNewSession = Session; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; + type ElectionProvider = ElectionProviderMultiPhase; + type GenesisElectionProvider = onchain::OnChainExecution; + type VoterList = BagsList; + type TargetList = pallet_staking::UseValidatorsMap; + type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = HistoryDepth; + type OnStakerSlash = (); + type WeightInfo = pallet_staking::weights::SubstrateWeight; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; +} + +impl frame_system::offchain::SendTransactionTypes for Runtime +where + RuntimeCall: From, +{ + type OverarchingCall = RuntimeCall; + type Extrinsic = Extrinsic; +} + +pub struct OnChainSeqPhragmen; + +parameter_types! 
{ + pub static VotersBound: u32 = 600; + pub static TargetsBound: u32 = 400; +} + +impl onchain::Config for OnChainSeqPhragmen { + type System = Runtime; + type Solver = SequentialPhragmen< + AccountId, + pallet_election_provider_multi_phase::SolutionAccuracyOf, + >; + type DataProvider = Staking; + type WeightInfo = (); + type MaxWinners = MaxWinners; + type VotersBound = VotersBound; + type TargetsBound = TargetsBound; +} + +pub struct NoopElectionProviderBenchmarkConfig; + +impl pallet_election_provider_multi_phase::BenchmarkingConfig + for NoopElectionProviderBenchmarkConfig +{ + const VOTERS: [u32; 2] = [0, 0]; + const TARGETS: [u32; 2] = [0, 0]; + const ACTIVE_VOTERS: [u32; 2] = [0, 0]; + const DESIRED_TARGETS: [u32; 2] = [0, 0]; + const SNAPSHOT_MAXIMUM_VOTERS: u32 = 0; + const MINER_MAXIMUM_VOTERS: u32 = 0; + const MAXIMUM_TARGETS: u32 = 0; +} + +pub struct OtherSessionHandler; +impl traits::OneSessionHandler for OtherSessionHandler { + type Key = testing::UintAuthorityId; + + fn on_genesis_session<'a, I: 'a>(_: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_disabled(_validator_index: u32) {} +} + +impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { + type Public = testing::UintAuthorityId; +} + +pub struct StakingExtBuilder { + validator_count: u32, + minimum_validator_count: u32, + min_nominator_bond: Balance, + min_validator_bond: Balance, + status: BTreeMap>, + stakes: BTreeMap, + stakers: Vec<(AccountId, AccountId, Balance, StakerStatus)>, +} + +impl Default for StakingExtBuilder { + fn default() -> Self { + let stakers = vec![ + // (stash, ctrl, stake, status) + // these two will be elected in the default test where we elect 2. + (11, 10, 1000, StakerStatus::::Validator), + (21, 20, 1000, StakerStatus::::Validator), + // loser validators if validator_count() is default. 
+ (31, 30, 500, StakerStatus::::Validator), + (41, 40, 500, StakerStatus::::Validator), + (51, 50, 500, StakerStatus::::Validator), + (61, 60, 500, StakerStatus::::Validator), + (71, 70, 500, StakerStatus::::Validator), + (81, 80, 500, StakerStatus::::Validator), + (91, 90, 500, StakerStatus::::Validator), + (101, 100, 500, StakerStatus::::Validator), + // an idle validator + (201, 200, 1000, StakerStatus::::Idle), + ]; + + Self { + validator_count: 2, + minimum_validator_count: 0, + min_nominator_bond: ExistentialDeposit::get(), + min_validator_bond: ExistentialDeposit::get(), + status: Default::default(), + stakes: Default::default(), + stakers, + } + } +} + +impl StakingExtBuilder { + pub fn validator_count(mut self, n: u32) -> Self { + self.validator_count = n; + self + } +} + +pub struct EpmExtBuilder {} + +impl Default for EpmExtBuilder { + fn default() -> Self { + EpmExtBuilder {} + } +} + +impl EpmExtBuilder { + pub fn disable_emergency_throttling(self) -> Self { + ::set(0); + self + } + + pub fn phases(self, signed: BlockNumber, unsigned: BlockNumber) -> Self { + ::set(signed); + ::set(unsigned); + self + } +} + +pub struct BalancesExtBuilder { + balances: Vec<(AccountId, Balance)>, +} + +impl Default for BalancesExtBuilder { + fn default() -> Self { + let balances = vec![ + // (account_id, balance) + (1, 10), + (2, 20), + (3, 300), + (4, 400), + // controllers + (10, 100), + (20, 100), + (30, 100), + (40, 100), + (50, 100), + (60, 100), + (70, 100), + (80, 100), + (90, 100), + (100, 100), + (200, 100), + // stashes + (11, 1000), + (21, 2000), + (31, 3000), + (41, 4000), + (51, 5000), + (61, 6000), + (71, 7000), + (81, 8000), + (91, 9000), + (101, 10000), + (201, 20000), + // This allows us to have a total_payout different from 0. + (999, 1_000_000_000_000), + ]; + Self { balances } + } +} + +pub struct ExtBuilder { + staking_builder: StakingExtBuilder, + epm_builder: EpmExtBuilder, + balances_builder: BalancesExtBuilder, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { + staking_builder: StakingExtBuilder::default(), + epm_builder: EpmExtBuilder::default(), + balances_builder: BalancesExtBuilder::default(), + } + } +} + +impl ExtBuilder { + pub fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = + frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let _ = + pallet_balances::GenesisConfig:: { balances: self.balances_builder.balances } + .assimilate_storage(&mut storage); + + let mut stakers = self.staking_builder.stakers.clone(); + self.staking_builder.status.into_iter().for_each(|(stash, status)| { + let (_, _, _, ref mut prev_status) = stakers + .iter_mut() + .find(|s| s.0 == stash) + .expect("set_status staker should exist; qed"); + *prev_status = status; + }); + // replaced any of the stakes if needed. 
+ self.staking_builder.stakes.into_iter().for_each(|(stash, stake)| { + let (_, _, ref mut prev_stake, _) = stakers + .iter_mut() + .find(|s| s.0 == stash) + .expect("set_stake staker should exist; qed."); + *prev_stake = stake; + }); + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + validator_count: self.staking_builder.validator_count, + minimum_validator_count: self.staking_builder.minimum_validator_count, + slash_reward_fraction: Perbill::from_percent(10), + min_nominator_bond: self.staking_builder.min_nominator_bond, + min_validator_bond: self.staking_builder.min_validator_bond, + ..Default::default() + } + .assimilate_storage(&mut storage); + + let _ = pallet_session::GenesisConfig:: { + // set the keys for the first session. + keys: stakers + .into_iter() + .map(|(id, ..)| (id, id, SessionKeys { other: (id as u64).into() })) + .collect(), + } + .assimilate_storage(&mut storage); + + let mut ext = sp_io::TestExternalities::from(storage); + + // We consider all tests to start after the timestamp is initialized. This must be ensured by + // having `timestamp::on_initialize` called before `staking::on_initialize`. + ext.execute_with(|| { + System::set_block_number(1); + Session::on_initialize(1); + >::on_initialize(1); + Timestamp::set_timestamp(INIT_TIMESTAMP); + }); + + ext + } + pub fn staking(mut self, builder: StakingExtBuilder) -> Self { + self.staking_builder = builder; + self + } + + pub fn epm(mut self, builder: EpmExtBuilder) -> Self { + self.epm_builder = builder; + self + } + + pub fn balances(mut self, builder: BalancesExtBuilder) -> Self { + self.balances_builder = builder; + self + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(test) + } +} + +// Progress to the given block, triggering session and era changes as we progress and ensuring that +// there is a solution queued when expected. +pub fn roll_to(n: BlockNumber, delay_solution: bool) { + for b in (System::block_number()) + 1..=n { + System::set_block_number(b); + Session::on_initialize(b); + Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); + + // TODO(gpestana): implement a realistic OCW worker instead of simulating it + // https://github.com/paritytech/substrate/issues/13589 + // if there's no solution queued and the solution should not be delayed, try mining and + // queue a solution. + if ElectionProviderMultiPhase::current_phase().is_signed() && !delay_solution { + let _ = try_queue_solution(ElectionCompute::Signed).map_err(|e| { + log!(info, "failed to mine/queue solution: {:?}", e); + }); + } + ElectionProviderMultiPhase::on_initialize(b); + + Staking::on_initialize(b); + if b != n { + Staking::on_finalize(System::block_number()); + } + + log_current_time(); + } +} + +/// Progresses from the current block number (whatever that may be) to the block where the session +/// `session_index` starts. +pub(crate) fn start_session(session_index: SessionIndex, delay_solution: bool) { + let end: u64 = if Offset::get().is_zero() { + Period::get() * (session_index as u64) + } else { + Offset::get() * (session_index as u64) + Period::get() * (session_index as u64) + }; + + assert!(end >= System::block_number()); + + roll_to(end, delay_solution); + + // session must have progressed properly. + assert_eq!( + Session::current_index(), + session_index, + "current session index = {}, expected = {}", + Session::current_index(), + session_index, + ); +} + +/// Go one session forward. 
+pub(crate) fn advance_session() { + let current_index = Session::current_index(); + start_session(current_index + 1, false); +} + +pub(crate) fn advance_session_delayed_solution() { + let current_index = Session::current_index(); + start_session(current_index + 1, true); +} + +pub(crate) fn start_next_active_era() -> Result<(), ()> { + start_active_era(active_era() + 1, false) +} + +pub(crate) fn start_next_active_era_delayed_solution() -> Result<(), ()> { + start_active_era(active_era() + 1, true) +} + +/// Progress until the given era. +pub(crate) fn start_active_era(era_index: EraIndex, delay_solution: bool) -> Result<(), ()> { + let era_before = current_era(); + + start_session((era_index * >::get()).into(), delay_solution); + + log!( + info, + "start_active_era - era_before: {}, current era: {} -> progress to: {} -> after era: {}", + era_before, + active_era(), + era_index, + current_era(), + ); + + // if the solution was not delayed, era should have progressed. + if !delay_solution && (active_era() != era_index || current_era() != active_era()) { + Err(()) + } else { + Ok(()) + } +} + +pub(crate) fn active_era() -> EraIndex { + Staking::active_era().unwrap().index +} + +pub(crate) fn current_era() -> EraIndex { + Staking::current_era().unwrap() +} + +// Fast forward until EPM signed phase. +pub fn roll_to_epm_signed() { + while !matches!( + ElectionProviderMultiPhase::current_phase(), + pallet_election_provider_multi_phase::Phase::Signed + ) { + roll_to(System::block_number() + 1, false); + } +} + +// Fast forward until EPM unsigned phase. +pub fn roll_to_epm_unsigned() { + while !matches!( + ElectionProviderMultiPhase::current_phase(), + pallet_election_provider_multi_phase::Phase::Unsigned(_) + ) { + roll_to(System::block_number() + 1, false); + } +} + +// Fast forward until EPM off. +pub fn roll_to_epm_off() { + while !matches!( + ElectionProviderMultiPhase::current_phase(), + pallet_election_provider_multi_phase::Phase::Off + ) { + roll_to(System::block_number() + 1, false); + } +} + +// Queue a solution based on the current snapshot. +pub(crate) fn try_queue_solution(when: ElectionCompute) -> Result<(), String> { + let raw_solution = ElectionProviderMultiPhase::mine_solution() + .map_err(|e| format!("error mining solution: {:?}", e))?; + + ElectionProviderMultiPhase::feasibility_check(raw_solution.0, when) + .map(|ready| { + QueuedSolution::::put(ready); + }) + .map_err(|e| format!("error in solution feasibility: {:?}", e)) +} + +pub(crate) fn on_offence_now( + offenders: &[OffenceDetails< + AccountId, + pallet_session::historical::IdentificationTuple, + >], + slash_fraction: &[Perbill], +) { + let now = Staking::active_era().unwrap().index; + let _ = Staking::on_offence( + offenders, + slash_fraction, + Staking::eras_start_session_index(now).unwrap(), + DisableStrategy::WhenSlashed, + ); +} + +// Add offence to validator, slash it. +pub(crate) fn add_slash(who: &AccountId) { + on_offence_now( + &[OffenceDetails { + offender: (*who, Staking::eras_stakers(active_era(), *who)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); +} + +// Slashes enough validators to cross the `Staking::OffendingValidatorsThreshold`. 
+pub(crate) fn slash_through_offending_threshold() { + let validators = Session::validators(); + let mut remaining_slashes = + ::OffendingValidatorsThreshold::get() * + validators.len() as u32; + + for v in validators.into_iter() { + if remaining_slashes != 0 { + add_slash(&v); + remaining_slashes -= 1; + } + } +} + +// Slashes a percentage of the active nominators that haven't been slashed yet, with +// a minimum of 1 validator slash. +pub(crate) fn slash_percentage(percentage: Perbill) -> Vec { + let validators = Session::validators(); + let mut remaining_slashes = (percentage * validators.len() as u32).max(1); + let mut slashed = vec![]; + + for v in validators.into_iter() { + if remaining_slashes != 0 { + add_slash(&v); + slashed.push(v); + remaining_slashes -= 1; + } + } + slashed +} + +pub(crate) fn set_minimum_election_score( + minimal_stake: ExtendedBalance, + sum_stake: ExtendedBalance, + sum_stake_squared: ExtendedBalance, +) -> Result<(), ()> { + let election_score = ElectionScore { minimal_stake, sum_stake, sum_stake_squared }; + ElectionProviderMultiPhase::set_minimum_untrusted_score( + RuntimeOrigin::root(), + Some(election_score), + ) + .map(|_| ()) + .map_err(|_| ()) +} diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml index 114caee793f1a..a519d964c60dc 100644 --- a/frame/election-provider-support/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { version = "4.0.0-dev", path = "solution-type" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/election-provider-support/solution-type/Cargo.toml b/frame/election-provider-support/solution-type/Cargo.toml index eb9598dca20b8..95ad6f226663b 100644 --- a/frame/election-provider-support/solution-type/Cargo.toml +++ b/frame/election-provider-support/solution-type/Cargo.toml @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.98", features = ["full", "visit"] } -quote = "1.0" -proc-macro2 = "1.0.37" +syn = { version = "2.0.14", features = ["full", "visit"] } +quote = "1.0.26" +proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" [dev-dependencies] diff --git a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index 9275692788d48..580e5eba9247d 100644 --- a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -18,7 +18,7 @@ honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { version = "4.0.0-dev", path = ".." } frame-election-provider-support = { version = "4.0.0-dev", path = "../.." 
} sp-arithmetic = { version = "6.0.0", path = "../../../../primitives/arithmetic" } diff --git a/frame/election-provider-support/solution-type/src/lib.rs b/frame/election-provider-support/solution-type/src/lib.rs index 884ed66c9d4a0..791ef64a7ee0b 100644 --- a/frame/election-provider-support/solution-type/src/lib.rs +++ b/frame/election-provider-support/solution-type/src/lib.rs @@ -161,7 +161,7 @@ fn check_attributes(input: ParseStream) -> syn::Result { return Ok(false) } let attr = attrs.pop().expect("attributes vec with len 1 can be popped."); - if attr.path.is_ident("compact") { + if attr.path().is_ident("compact") { Ok(true) } else { Err(syn::Error::new_spanned(attr, "compact solution can accept only #[compact]")) @@ -200,7 +200,7 @@ impl Parse for SolutionDef { format!("Expected binding: `{} = ...`", expected), )) }, - syn::GenericArgument::Binding(syn::Binding { ident, ty, .. }) => { + syn::GenericArgument::AssocType(syn::AssocType { ident, ty, .. }) => { // check that we have the right keyword for this position in the argument list if ident == expected { Ok(ty.clone()) diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index d8e1a0cf13846..3ad143e9cbb7c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1252,6 +1252,10 @@ mod tests { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } frame_support::parameter_types! { @@ -2125,7 +2129,8 @@ mod tests { assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); // User has 100 free and 50 reserved. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 100, 50)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 150)); + assert_ok!(Balances::reserve(&2, 50)); // User tries to vote with 150 tokens. assert_ok!(vote(RuntimeOrigin::signed(2), vec![4, 5], 150)); // We truncate to only their free balance, after reserving additional for voting. diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index bbe66d52981c5..f0ebb8639e259 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-02-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-b3zmxxc-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_elections_phragmen // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_elections_phragmen -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/elections-phragmen/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -76,18 +75,20 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `499 + v * (80 ±0)` - // Estimated: `9726 + v * (320 ±0)` - // Minimum execution time: 27_362 nanoseconds. - Weight::from_parts(28_497_963, 9726) - // Standard Error: 3_968 - .saturating_add(Weight::from_parts(176_840, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `403 + v * (80 ±0)` + // Estimated: `4764 + v * (80 ±0)` + // Minimum execution time: 33_623_000 picoseconds. + Weight::from_parts(34_531_239, 4764) + // Standard Error: 1_913 + .saturating_add(Weight::from_parts(131_360, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 320).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } /// Storage: Elections Candidates (r:1 w:0) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) @@ -99,18 +100,20 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `467 + v * (80 ±0)` - // Estimated: `9598 + v * (320 ±0)` - // Minimum execution time: 37_120 nanoseconds. - Weight::from_parts(38_455_302, 9598) - // Standard Error: 5_478 - .saturating_add(Weight::from_parts(219_678, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `371 + v * (80 ±0)` + // Estimated: `4764 + v * (80 ±0)` + // Minimum execution time: 46_106_000 picoseconds. 
+ Weight::from_parts(47_067_453, 4764) + // Standard Error: 2_441 + .saturating_add(Weight::from_parts(130_306, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 320).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } /// Storage: Elections Candidates (r:1 w:0) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) @@ -122,30 +125,34 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `499 + v * (80 ±0)` - // Estimated: `9726 + v * (320 ±0)` - // Minimum execution time: 36_928 nanoseconds. - Weight::from_parts(38_334_669, 9726) - // Standard Error: 5_271 - .saturating_add(Weight::from_parts(232_355, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `403 + v * (80 ±0)` + // Estimated: `4764 + v * (80 ±0)` + // Minimum execution time: 46_094_000 picoseconds. + Weight::from_parts(47_054_638, 4764) + // Standard Error: 2_651 + .saturating_add(Weight::from_parts(137_251, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 320).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } /// Storage: Elections Voting (r:1 w:1) /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn remove_voter() -> Weight { // Proof Size summary in bytes: - // Measured: `989` - // Estimated: `7238` - // Minimum execution time: 34_338 nanoseconds. - Weight::from_parts(35_672_000, 7238) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `925` + // Estimated: `4764` + // Minimum execution time: 49_652_000 picoseconds. + Weight::from_parts(50_217_000, 4764) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: Elections Candidates (r:1 w:1) @@ -157,27 +164,27 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1697 + c * (48 ±0)` - // Estimated: `6576 + c * (144 ±0)` - // Minimum execution time: 31_864 nanoseconds. - Weight::from_parts(33_490_161, 6576) - // Standard Error: 2_643 - .saturating_add(Weight::from_parts(158_386, 0).saturating_mul(c.into())) + // Measured: `1570 + c * (48 ±0)` + // Estimated: `3055 + c * (48 ±0)` + // Minimum execution time: 37_797_000 picoseconds. 
+ Weight::from_parts(38_384_713, 3055) + // Standard Error: 1_008 + .saturating_add(Weight::from_parts(71_486, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } /// Storage: Elections Candidates (r:1 w:1) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `c` is `[1, 64]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `349 + c * (48 ±0)` - // Estimated: `844 + c * (48 ±0)` - // Minimum execution time: 27_292 nanoseconds. - Weight::from_parts(28_364_955, 844) - // Standard Error: 1_335 - .saturating_add(Weight::from_parts(78_086, 0).saturating_mul(c.into())) + // Measured: `285 + c * (48 ±0)` + // Estimated: `1770 + c * (48 ±0)` + // Minimum execution time: 31_112_000 picoseconds. + Weight::from_parts(31_660_924, 1770) + // Standard Error: 754 + .saturating_add(Weight::from_parts(48_689, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -194,10 +201,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `2027` - // Estimated: `12115` - // Minimum execution time: 45_975 nanoseconds. - Weight::from_parts(47_103_000, 12115) + // Measured: `1900` + // Estimated: `3385` + // Minimum execution time: 47_487_000 picoseconds. + Weight::from_parts(47_795_000, 3385) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -205,10 +212,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: - // Measured: `975` - // Estimated: `1470` - // Minimum execution time: 29_243 nanoseconds. - Weight::from_parts(30_582_000, 1470) + // Measured: `880` + // Estimated: `2365` + // Minimum execution time: 31_479_000 picoseconds. + Weight::from_parts(32_093_000, 2365) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -218,7 +225,7 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000_000 nanoseconds. + // Minimum execution time: 2_000_000_000_000 picoseconds. Weight::from_parts(2_000_000_000_000, 0) } /// Storage: Elections Members (r:1 w:1) @@ -235,10 +242,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `2027` - // Estimated: `14718` - // Minimum execution time: 52_527 nanoseconds. - Weight::from_parts(53_538_000, 14718) + // Measured: `1900` + // Estimated: `3593` + // Minimum execution time: 53_395_000 picoseconds. 
+ Weight::from_parts(53_952_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -252,22 +259,24 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) /// Storage: Balances Locks (r:512 w:512) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:512 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:512 w:512) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `v` is `[256, 512]`. /// The range of component `d` is `[0, 256]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115 + v * (875 ±0)` - // Estimated: `8448 + v * (12352 ±0)` - // Minimum execution time: 14_934_185 nanoseconds. - Weight::from_parts(15_014_057_000, 8448) - // Standard Error: 245_588 - .saturating_add(Weight::from_parts(35_586_946, 0).saturating_mul(v.into())) + // Measured: `1115 + v * (811 ±0)` + // Estimated: `4587 + v * (3774 ±0)` + // Minimum execution time: 18_089_406_000 picoseconds. + Weight::from_parts(18_125_024_000, 4587) + // Standard Error: 296_666 + .saturating_add(Weight::from_parts(42_527_045, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 12352).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) } /// Storage: Elections Candidates (r:1 w:1) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) @@ -292,22 +301,22 @@ impl WeightInfo for SubstrateWeight { /// The range of component `e` is `[512, 8192]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + v * (638 ±0) + e * (28 ±0)` - // Estimated: `330033 + v * (5229 ±6) + e * (89 ±0) + c * (2135 ±7)` - // Minimum execution time: 1_273_671 nanoseconds. - Weight::from_parts(1_279_716_000, 330033) - // Standard Error: 543_277 - .saturating_add(Weight::from_parts(20_613_753, 0).saturating_mul(v.into())) - // Standard Error: 34_857 - .saturating_add(Weight::from_parts(688_354, 0).saturating_mul(e.into())) + // Measured: `0 + e * (28 ±0) + v * (606 ±0)` + // Estimated: `178887 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_193_774_000 picoseconds. 
+ Weight::from_parts(1_196_649_000, 178887) + // Standard Error: 617_531 + .saturating_add(Weight::from_parts(17_672_923, 0).saturating_mul(v.into())) + // Standard Error: 39_622 + .saturating_add(Weight::from_parts(846_866, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 5229).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 89).saturating_mul(e.into())) .saturating_add(Weight::from_parts(0, 2135).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 12).saturating_mul(e.into())) + .saturating_add(Weight::from_parts(0, 2653).saturating_mul(v.into())) } } @@ -323,18 +332,20 @@ impl WeightInfo for () { /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `499 + v * (80 ±0)` - // Estimated: `9726 + v * (320 ±0)` - // Minimum execution time: 27_362 nanoseconds. - Weight::from_parts(28_497_963, 9726) - // Standard Error: 3_968 - .saturating_add(Weight::from_parts(176_840, 0).saturating_mul(v.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `403 + v * (80 ±0)` + // Estimated: `4764 + v * (80 ±0)` + // Minimum execution time: 33_623_000 picoseconds. + Weight::from_parts(34_531_239, 4764) + // Standard Error: 1_913 + .saturating_add(Weight::from_parts(131_360, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 320).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } /// Storage: Elections Candidates (r:1 w:0) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) @@ -346,18 +357,20 @@ impl WeightInfo for () { /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `467 + v * (80 ±0)` - // Estimated: `9598 + v * (320 ±0)` - // Minimum execution time: 37_120 nanoseconds. - Weight::from_parts(38_455_302, 9598) - // Standard Error: 5_478 - .saturating_add(Weight::from_parts(219_678, 0).saturating_mul(v.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `371 + v * (80 ±0)` + // Estimated: `4764 + v * (80 ±0)` + // Minimum execution time: 46_106_000 picoseconds. 
+ Weight::from_parts(47_067_453, 4764) + // Standard Error: 2_441 + .saturating_add(Weight::from_parts(130_306, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 320).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } /// Storage: Elections Candidates (r:1 w:0) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) @@ -369,30 +382,34 @@ impl WeightInfo for () { /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `499 + v * (80 ±0)` - // Estimated: `9726 + v * (320 ±0)` - // Minimum execution time: 36_928 nanoseconds. - Weight::from_parts(38_334_669, 9726) - // Standard Error: 5_271 - .saturating_add(Weight::from_parts(232_355, 0).saturating_mul(v.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `403 + v * (80 ±0)` + // Estimated: `4764 + v * (80 ±0)` + // Minimum execution time: 46_094_000 picoseconds. + Weight::from_parts(47_054_638, 4764) + // Standard Error: 2_651 + .saturating_add(Weight::from_parts(137_251, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 320).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } /// Storage: Elections Voting (r:1 w:1) /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn remove_voter() -> Weight { // Proof Size summary in bytes: - // Measured: `989` - // Estimated: `7238` - // Minimum execution time: 34_338 nanoseconds. - Weight::from_parts(35_672_000, 7238) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `925` + // Estimated: `4764` + // Minimum execution time: 49_652_000 picoseconds. + Weight::from_parts(50_217_000, 4764) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: Elections Candidates (r:1 w:1) @@ -404,27 +421,27 @@ impl WeightInfo for () { /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1697 + c * (48 ±0)` - // Estimated: `6576 + c * (144 ±0)` - // Minimum execution time: 31_864 nanoseconds. - Weight::from_parts(33_490_161, 6576) - // Standard Error: 2_643 - .saturating_add(Weight::from_parts(158_386, 0).saturating_mul(c.into())) + // Measured: `1570 + c * (48 ±0)` + // Estimated: `3055 + c * (48 ±0)` + // Minimum execution time: 37_797_000 picoseconds. 
+ Weight::from_parts(38_384_713, 3055) + // Standard Error: 1_008 + .saturating_add(Weight::from_parts(71_486, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } /// Storage: Elections Candidates (r:1 w:1) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `c` is `[1, 64]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `349 + c * (48 ±0)` - // Estimated: `844 + c * (48 ±0)` - // Minimum execution time: 27_292 nanoseconds. - Weight::from_parts(28_364_955, 844) - // Standard Error: 1_335 - .saturating_add(Weight::from_parts(78_086, 0).saturating_mul(c.into())) + // Measured: `285 + c * (48 ±0)` + // Estimated: `1770 + c * (48 ±0)` + // Minimum execution time: 31_112_000 picoseconds. + Weight::from_parts(31_660_924, 1770) + // Standard Error: 754 + .saturating_add(Weight::from_parts(48_689, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -441,10 +458,10 @@ impl WeightInfo for () { /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `2027` - // Estimated: `12115` - // Minimum execution time: 45_975 nanoseconds. - Weight::from_parts(47_103_000, 12115) + // Measured: `1900` + // Estimated: `3385` + // Minimum execution time: 47_487_000 picoseconds. + Weight::from_parts(47_795_000, 3385) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -452,10 +469,10 @@ impl WeightInfo for () { /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: - // Measured: `975` - // Estimated: `1470` - // Minimum execution time: 29_243 nanoseconds. - Weight::from_parts(30_582_000, 1470) + // Measured: `880` + // Estimated: `2365` + // Minimum execution time: 31_479_000 picoseconds. + Weight::from_parts(32_093_000, 2365) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -465,7 +482,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000_000 nanoseconds. + // Minimum execution time: 2_000_000_000_000 picoseconds. Weight::from_parts(2_000_000_000_000, 0) } /// Storage: Elections Members (r:1 w:1) @@ -482,10 +499,10 @@ impl WeightInfo for () { /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `2027` - // Estimated: `14718` - // Minimum execution time: 52_527 nanoseconds. - Weight::from_parts(53_538_000, 14718) + // Measured: `1900` + // Estimated: `3593` + // Minimum execution time: 53_395_000 picoseconds. 
+ Weight::from_parts(53_952_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -499,22 +516,24 @@ impl WeightInfo for () { /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) /// Storage: Balances Locks (r:512 w:512) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:512 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:512 w:512) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `v` is `[256, 512]`. /// The range of component `d` is `[0, 256]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115 + v * (875 ±0)` - // Estimated: `8448 + v * (12352 ±0)` - // Minimum execution time: 14_934_185 nanoseconds. - Weight::from_parts(15_014_057_000, 8448) - // Standard Error: 245_588 - .saturating_add(Weight::from_parts(35_586_946, 0).saturating_mul(v.into())) + // Measured: `1115 + v * (811 ±0)` + // Estimated: `4587 + v * (3774 ±0)` + // Minimum execution time: 18_089_406_000 picoseconds. + Weight::from_parts(18_125_024_000, 4587) + // Standard Error: 296_666 + .saturating_add(Weight::from_parts(42_527_045, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 12352).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) } /// Storage: Elections Candidates (r:1 w:1) /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) @@ -539,21 +558,21 @@ impl WeightInfo for () { /// The range of component `e` is `[512, 8192]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + v * (638 ±0) + e * (28 ±0)` - // Estimated: `330033 + v * (5229 ±6) + e * (89 ±0) + c * (2135 ±7)` - // Minimum execution time: 1_273_671 nanoseconds. - Weight::from_parts(1_279_716_000, 330033) - // Standard Error: 543_277 - .saturating_add(Weight::from_parts(20_613_753, 0).saturating_mul(v.into())) - // Standard Error: 34_857 - .saturating_add(Weight::from_parts(688_354, 0).saturating_mul(e.into())) + // Measured: `0 + e * (28 ±0) + v * (606 ±0)` + // Estimated: `178887 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_193_774_000 picoseconds. 
+ Weight::from_parts(1_196_649_000, 178887) + // Standard Error: 617_531 + .saturating_add(Weight::from_parts(17_672_923, 0).saturating_mul(v.into())) + // Standard Error: 39_622 + .saturating_add(Weight::from_parts(846_866, 0).saturating_mul(e.into())) .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 5229).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 89).saturating_mul(e.into())) .saturating_add(Weight::from_parts(0, 2135).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 12).saturating_mul(e.into())) + .saturating_add(Weight::from_parts(0, 2653).saturating_mul(v.into())) } } diff --git a/frame/examples/basic/Cargo.toml b/frame/examples/basic/Cargo.toml index 47f623f45381c..848d4bbc94d72 100644 --- a/frame/examples/basic/Cargo.toml +++ b/frame/examples/basic/Cargo.toml @@ -3,7 +3,7 @@ name = "pallet-example-basic" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" -license = "Unlicense" +license = "MIT-0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } diff --git a/frame/examples/basic/README.md b/frame/examples/basic/README.md index 358829192f11d..2af50573f8075 100644 --- a/frame/examples/basic/README.md +++ b/frame/examples/basic/README.md @@ -237,4 +237,4 @@ pub trait Config: ::Config { } // that the implementation is based on.

-License: Unlicense +License: MIT-0 diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index 6665c3b78b024..bbeaac19f809c 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -441,7 +441,7 @@ pub mod pallet { // against them as the first thing you do in your function. There are three convenience calls // in system that do the matching for you and return a convenient result: `ensure_signed`, // `ensure_root` and `ensure_none`. - #[pallet::call] + #[pallet::call(weight(::WeightInfo))] impl Pallet { /// This is your public interface. Be extremely careful. /// This is just a simple example of how to interact with the pallet from the external @@ -497,9 +497,6 @@ pub mod pallet { // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the // benchmark toolchain. #[pallet::call_index(0)] - #[pallet::weight( - ::WeightInfo::accumulate_dummy() - )] pub fn accumulate_dummy(origin: OriginFor, increase_by: T::Balance) -> DispatchResult { // This is a public call, so we ensure that the origin is some signed account. let _sender = ensure_signed(origin)?; diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 2e6a03bdd857c..1d9cf81a5074c 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -87,6 +87,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl Config for Test { @@ -188,6 +192,7 @@ fn weights_work() { // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` // TODO: account for proof size weight assert!(info1.weight.ref_time() > 0); + assert_eq!(info1.weight, ::WeightInfo::accumulate_dummy()); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. 
diff --git a/frame/examples/dev-mode/Cargo.toml b/frame/examples/dev-mode/Cargo.toml new file mode 100644 index 0000000000000..a1f8de4b09d74 --- /dev/null +++ b/frame/examples/dev-mode/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "pallet-dev-mode" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "MIT-0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } +sp-io = { version = "7.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } + +[dev-dependencies] +sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-balances/std", + "scale-info/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/examples/dev-mode/README.md b/frame/examples/dev-mode/README.md new file mode 100644 index 0000000000000..4c9ee88629c23 --- /dev/null +++ b/frame/examples/dev-mode/README.md @@ -0,0 +1,11 @@ + +# Dev Mode Example Pallet + +A simple example of a FRAME pallet demonstrating +the ease of requirements for a pallet in dev mode. + +Run `cargo doc --package pallet-dev-mode --open` to view this pallet's documentation. + +**Dev mode is not meant to be used in production.** + +License: MIT-0 diff --git a/frame/examples/dev-mode/src/lib.rs b/frame/examples/dev-mode/src/lib.rs new file mode 100644 index 0000000000000..dbc302f4997d4 --- /dev/null +++ b/frame/examples/dev-mode/src/lib.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! +//! # Dev Mode Example Pallet +//! +//! A simple example of a FRAME pallet demonstrating +//! the ease of requirements for a pallet in dev mode. +//! +//! Run `cargo doc --package pallet-dev-mode --open` to view this pallet's documentation. +//! +//! **Dev mode is not meant to be used in production.** + +// Ensure we're `no_std` when compiling for Wasm. 
+#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::dispatch::DispatchResult; +use frame_system::ensure_signed; + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +#[cfg(test)] +mod tests; + +/// A type alias for the balance type from this pallet's point of view. +type BalanceOf<T> = <T as pallet_balances::Config>::Balance; + +/// Enable `dev_mode` for this pallet. +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: pallet_balances::Config + frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; + } + + // Simple declaration of the `Pallet` type. It is a placeholder we use to implement traits and + // methods. + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[pallet::call] + impl<T: Config> Pallet<T> { + #[pallet::call_index(0)] + /// No need to define a `weight` attribute here because of `dev_mode`. + pub fn add_dummy(origin: OriginFor<T>, id: T::AccountId) -> DispatchResult { + ensure_root(origin)?; + + if let Some(mut dummies) = Dummy::<T>::get() { + dummies.push(id.clone()); + Dummy::<T>::set(Some(dummies)); + } else { + Dummy::<T>::set(Some(vec![id.clone()])); + } + + // Let's deposit an event to let the outside world know this happened. + Self::deposit_event(Event::AddDummy { account: id }); + + Ok(()) + } + + #[pallet::call_index(1)] + /// No need to define a `weight` attribute here because of `dev_mode`. + pub fn set_bar( + origin: OriginFor<T>, + #[pallet::compact] new_value: T::Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // Put the new value into storage. + <Bar<T>>::insert(&sender, new_value); + + Self::deposit_event(Event::SetBar { account: sender, balance: new_value }); + + Ok(()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event<T: Config> { + AddDummy { account: T::AccountId }, + SetBar { account: T::AccountId, balance: BalanceOf<T> }, + } + + /// The MEL requirement for bounded pallets is skipped by `dev_mode`. + /// This means that all storages are marked as unbounded. + /// This is equivalent to specifying `#[pallet::unbounded]` on these type definitions. + /// When the dev_mode is removed, we would need to implement `MaxEncodedLen`. + #[pallet::storage] + pub type Dummy<T: Config> = StorageValue<_, Vec<T::AccountId>>; + + /// The Hasher requirement is skipped by `dev_mode`. So, the second parameter can be `_` + /// and `Blake2_128Concat` is used as a default. + /// When the dev_mode is removed, we would need to specify the hasher like so: + /// `pub type Bar<T: Config> = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance>;`. + #[pallet::storage] + pub type Bar<T: Config> = StorageMap<_, _, T::AccountId, T::Balance>; +} diff --git a/frame/examples/dev-mode/src/tests.rs b/frame/examples/dev-mode/src/tests.rs new file mode 100644 index 0000000000000..e2f06ddda6cd7 --- /dev/null +++ b/frame/examples/dev-mode/src/tests.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-dev-mode. + +use crate::*; +use frame_support::{assert_ok, traits::ConstU64}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; +// Reexport crate as its pallet name for construct_runtime. +use crate as pallet_dev_mode; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; +type Block = frame_system::mocking::MockBlock<Test>; + +// For testing the pallet, we construct a mock runtime. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, + Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, + Example: pallet_dev_mode::{Pallet, Call, Storage, Event<T>}, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type RuntimeCall = RuntimeCall; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup<Self::AccountId>; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData<u64>; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = GenesisConfig { + // We use default for brevity, but you can configure as desired if needed. + system: Default::default(), + balances: Default::default(), + } + .build_storage() + .unwrap(); + t.into() +} + +#[test] +fn it_works_for_optional_value() { + new_test_ext().execute_with(|| { + assert_eq!(Dummy::<Test>::get(), None); + + let val1 = 42; + assert_ok!(Example::add_dummy(RuntimeOrigin::root(), val1)); + assert_eq!(Dummy::<Test>::get(), Some(vec![val1])); + + // Check that accumulate works when we have Some value in Dummy already.
+ let val2 = 27; + assert_ok!(Example::add_dummy(RuntimeOrigin::root(), val2)); + assert_eq!(Dummy::::get(), Some(vec![val1, val2])); + }); +} + +#[test] +fn set_dummy_works() { + new_test_ext().execute_with(|| { + let test_val = 133; + assert_ok!(Example::set_bar(RuntimeOrigin::signed(1), test_val.into())); + assert_eq!(Bar::::get(1), Some(test_val)); + }); +} diff --git a/frame/examples/offchain-worker/Cargo.toml b/frame/examples/offchain-worker/Cargo.toml index f94cf36b2b992..e582b0f99714f 100644 --- a/frame/examples/offchain-worker/Cargo.toml +++ b/frame/examples/offchain-worker/Cargo.toml @@ -3,7 +3,7 @@ name = "pallet-example-offchain-worker" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" -license = "Unlicense" +license = "MIT-0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet for offchain worker" @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } lite-json = { version = "0.2.0", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } diff --git a/frame/examples/offchain-worker/README.md b/frame/examples/offchain-worker/README.md index 587431c92c0ed..7b8905cda3074 100644 --- a/frame/examples/offchain-worker/README.md +++ b/frame/examples/offchain-worker/README.md @@ -26,4 +26,4 @@ Additional logic in OCW is put in place to prevent spamming the network with bot and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only one unsigned transaction floating in the network. -License: Unlicense +License: MIT-0 diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index 418ad5d139ef3..6ce8524174200 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -166,7 +166,7 @@ pub mod pallet { /// /// By implementing `fn offchain_worker` you declare a new offchain worker. /// This function will be called when the node is fully synced and a new best block is - /// succesfuly imported. + /// successfully imported. /// Note that it's not guaranteed for offchain workers to run on EVERY block, there might /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), /// so the code should be able to handle that. @@ -224,12 +224,12 @@ pub mod pallet { /// The transaction needs to be signed (see `ensure_signed`) check, so that the caller /// pays a fee to execute it. /// This makes sure that it's not easy (or rather cheap) to attack the chain by submitting - /// excesive transactions, but note that it doesn't ensure the price oracle is actually + /// excessive transactions, but note that it doesn't ensure the price oracle is actually /// working and receives (and provides) meaningful data. /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. 
#[pallet::call_index(0)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn submit_price(origin: OriginFor, price: u32) -> DispatchResultWithPostInfo { // Retrieve sender of the transaction. let who = ensure_signed(origin)?; @@ -255,7 +255,7 @@ pub mod pallet { /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. #[pallet::call_index(1)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn submit_price_unsigned( origin: OriginFor, _block_number: T::BlockNumber, @@ -272,7 +272,7 @@ pub mod pallet { } #[pallet::call_index(2)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn submit_price_unsigned_with_signed_payload( origin: OriginFor, price_payload: PricePayload, @@ -336,7 +336,7 @@ pub mod pallet { /// Defines the block when next unsigned transaction will be accepted. /// - /// To prevent spam of unsigned (and unpayed!) transactions on the network, + /// To prevent spam of unsigned (and unpaid!) transactions on the network, /// we only allow one transaction every `T::UnsignedInterval` blocks. /// This storage entry defines when new transaction is going to be accepted. #[pallet::storage] @@ -570,12 +570,12 @@ impl Pallet { fn fetch_price() -> Result { // We want to keep the offchain worker execution time reasonable, so we set a hard-coded // deadline to 2s to complete the external call. - // You can also wait idefinitely for the response, however you may still get a timeout + // You can also wait indefinitely for the response, however you may still get a timeout // coming from the host machine. let deadline = sp_io::offchain::timestamp().add(Duration::from_millis(2_000)); // Initiate an external HTTP GET request. // This is using high-level wrappers from `sp_runtime`, for the low-level calls that - // you can find in `sp_io`. The API is trying to be similar to `reqwest`, but + // you can find in `sp_io`. The API is trying to be similar to `request`, but // since we are running in a custom WASM execution environment we can't simply // import the library here. 
let request = diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index 75a42e872ae52..3df7f4a8d5439 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -27,9 +27,8 @@ use sp_core::{ sr25519::Signature, H256, }; -use std::sync::Arc; -use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStore}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use sp_runtime::{ testing::{Header, TestXt}, traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, @@ -205,18 +204,15 @@ fn should_submit_signed_transaction_on_chain() { let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); - let keystore = KeyStore::new(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)), - ) - .unwrap(); + let keystore = MemoryKeystore::new(); + keystore + .sr25519_generate_new(crate::crypto::Public::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); - t.register_extension(KeystoreExt(Arc::new(keystore))); + t.register_extension(KeystoreExt::new(keystore)); price_oracle_response(&mut offchain_state.write()); @@ -239,23 +235,18 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); - let keystore = KeyStore::new(); - - SyncCryptoStore::sr25519_generate_new( - &keystore, - crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)), - ) - .unwrap(); + let keystore = MemoryKeystore::new(); - let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) - .get(0) + keystore + .sr25519_generate_new(crate::crypto::Public::ID, Some(&format!("{}/hunter1", PHRASE))) .unwrap(); + let public_key = *keystore.sr25519_public_keys(crate::crypto::Public::ID).get(0).unwrap(); + let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); - t.register_extension(KeystoreExt(Arc::new(keystore))); + t.register_extension(KeystoreExt::new(keystore)); price_oracle_response(&mut offchain_state.write()); @@ -298,23 +289,18 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); - let keystore = KeyStore::new(); + let keystore = MemoryKeystore::new(); - SyncCryptoStore::sr25519_generate_new( - &keystore, - crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)), - ) - .unwrap(); - - let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) - .get(0) + keystore + .sr25519_generate_new(crate::crypto::Public::ID, Some(&format!("{}/hunter1", PHRASE))) .unwrap(); + let public_key = *keystore.sr25519_public_keys(crate::crypto::Public::ID).get(0).unwrap(); + let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); - t.register_extension(KeystoreExt(Arc::new(keystore))); + t.register_extension(KeystoreExt::new(keystore)); 
price_oracle_response(&mut offchain_state.write()); @@ -355,12 +341,12 @@ fn should_submit_raw_unsigned_transaction_on_chain() { let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); - let keystore = KeyStore::new(); + let keystore = MemoryKeystore::new(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); - t.register_extension(KeystoreExt(Arc::new(keystore))); + t.register_extension(KeystoreExt::new(keystore)); price_oracle_response(&mut offchain_state.write()); diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 8d809b575ac6f..f08dfa503678e 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -17,7 +17,7 @@ aquamarine = "0.1.12" codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../try-runtime", optional = true } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 6950677d71b12..f9574e6ffa457 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -142,6 +142,9 @@ use sp_runtime::{ }; use sp_std::{collections::btree_set::BTreeSet, marker::PhantomData, prelude::*}; +#[allow(dead_code)] +const LOG_TARGET: &str = "runtime::executive"; + pub type CheckedOf = >::Checked; pub type CallOf = as Applyable>::Call; pub type OriginOf = as Dispatchable>::RuntimeOrigin; @@ -307,7 +310,7 @@ where select: frame_try_runtime::TryStateSelect, ) -> Result { frame_support::log::info!( - target: "frame::executive", + target: LOG_TARGET, "try-runtime: executing block #{:?} / state root check: {:?} / signature check: {:?} / try-state-select: {:?}", block.header().number(), state_root_check, @@ -344,7 +347,7 @@ where for e in extrinsics { if let Err(err) = try_apply_extrinsic(e.clone()) { frame_support::log::error!( - target: "runtime::executive", "executing transaction {:?} failed due to {:?}. Aborting the rest of the block execution.", + target: LOG_TARGET, "executing transaction {:?} failed due to {:?}. 
Aborting the rest of the block execution.", e, err, ); @@ -363,7 +366,7 @@ where select, ) .map_err(|e| { - frame_support::log::error!(target: "runtime::executive", "failure: {:?}", e); + frame_support::log::error!(target: LOG_TARGET, "failure: {:?}", e); e })?; drop(_guard); @@ -575,17 +578,11 @@ where // any initial checks Self::initial_checks(&block); - let signature_batching = sp_runtime::SignatureBatching::start(); - // execute extrinsics let (header, extrinsics) = block.deconstruct(); Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); - if !signature_batching.verify() { - panic!("Signature verification failed."); - } - // any final checks Self::final_checks(&header); } @@ -605,8 +602,6 @@ where >::set_block_seed(&block.header().seed().seed); Self::initial_checks(&block); - let signature_batching = sp_runtime::SignatureBatching::start(); - let poped_txs_count = *block.header().count(); let popped_elems = >::pop_txs(poped_txs_count.saturated_into()); @@ -665,10 +660,6 @@ where } } - - if !signature_batching.verify() { - panic!("Signature verification failed."); - } Self::final_checks(&header); } } @@ -863,26 +854,22 @@ where mod tests { use super::*; use hex_literal::hex; - use sp_core::{sr25519, testing::SR25519, Pair, ShufflingSeed, H256}; + use sp_core::{sr25519, testing::SR25519, Pair, ShufflingSeed, H256, H512}; use sp_ver::calculate_next_seed_from_bytes; use frame_support::{ assert_err, parameter_types, - traits::{ - ConstU32, ConstU64, ConstU8, Currency, LockIdentifier, LockableCurrency, - WithdrawReasons, - }, + traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, }; - use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use frame_system::{ChainContext, LastRuntimeUpgradeInfo}; use pallet_balances::Call as BalancesCall; use pallet_transaction_payment::CurrencyAdapter; use sp_core::crypto::key_types::AURA; - use sp_keystore::{ - vrf::{VRFTranscriptData, VRFTranscriptValue}, - SyncCryptoStore, - }; + use sp_core::sr25519::vrf::{VrfOutput, VrfProof, VrfSignature, VrfTranscript}; + use sp_keystore::testing::MemoryKeystore; + use sp_core::hexdisplay::AsBytesRef; use sp_runtime::{ generic::{DigestItem, Era}, testing::{BlockVer as Block, Digest, HeaderVer as Header}, @@ -895,7 +882,7 @@ mod tests { const TEST_KEY: &[u8] = b":test:key:"; - #[frame_support::pallet] + #[frame_support::pallet(dev_mode)] mod custom { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -1097,6 +1084,10 @@ mod tests { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<1>; + type HoldIdentifier = (); + type MaxHolds = ConstU32<1>; } parameter_types! 
{ @@ -1170,7 +1161,7 @@ mod tests { } fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) } fn enqueue_txs( @@ -1229,13 +1220,13 @@ mod tests { block_import_works_inner( new_test_ext_v0(1), array_bytes::hex_n_into_unchecked( - "1e4e3699be2cec577f164e32b88f0f6f2124557be8eaab02cb751f4e561ac902", + "d1d38dfbb0537af5d25007407f38233f372280d9092c702337a333559dc43b92", ), ); block_import_works_inner( new_test_ext(1), array_bytes::hex_n_into_unchecked( - "a5991b9204bb6ebb83e0da0abeef3b3a91ea7f7d1e547a62df6c62752fe9295d", + "933ded67b9e4e60948c030e9f934f525e9a383b202190bd8377cd61c96f54188", ), ); } @@ -1326,7 +1317,7 @@ mod tests { let mut t = new_test_ext(10000); // given: TestXt uses the encoded len as fixed Len: let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let encoded = xt.encode(); @@ -1349,7 +1340,10 @@ mod tests { for nonce in 0..=num_to_exhaust_block { let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 33, + value: 0, + }), sign_extra(1, nonce.into(), 0), ); let res = Executive::apply_extrinsic(xt); @@ -1374,15 +1368,15 @@ mod tests { #[test] fn block_weight_and_size_is_stored_per_tx() { let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let x1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), sign_extra(1, 1, 0), ); let x2 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), sign_extra(1, 2, 0), ); let len = xt.clone().encode().len() as u32; @@ -1468,50 +1462,30 @@ mod tests { } #[test] - fn can_pay_for_tx_fee_on_full_lock() { - let id: LockIdentifier = *b"0 "; - let execute_with_lock = |lock: WithdrawReasons| { - let mut t = new_test_ext(1); - t.execute_with(|| { - as LockableCurrency>::set_lock( - id, &1, 110, lock, - ); - let xt = TestXt::new( - RuntimeCall::System(SystemCall::remark { remark: vec![1u8] }), - sign_extra(1, 0, 0), - ); - let weight = xt.get_dispatch_info().weight + - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - let fee: Balance = - ::WeightToFee::weight_to_fee( - &weight, - ); - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - if lock == WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT) { - assert!(Executive::apply_extrinsic(xt).unwrap().is_ok()); - // tx fee has been deducted. 
- assert_eq!(>::total_balance(&1), 111 - fee); - } else { - assert_eq!( - Executive::apply_extrinsic(xt), - Err(InvalidTransaction::Payment.into()), - ); - assert_eq!(>::total_balance(&1), 111); - } - }); - }; + fn can_not_pay_for_tx_fee_on_full_lock() { + let mut t = new_test_ext(1); + t.execute_with(|| { + as fungible::MutateFreeze>::set_freeze( + &(), + &1, + 110, + ) + .unwrap(); + let xt = TestXt::new( + RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), + sign_extra(1, 0, 0), + ); + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); - execute_with_lock(WithdrawReasons::all()); - execute_with_lock(WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT)); + assert_eq!(Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()),); + assert_eq!(>::total_balance(&1), 111); + }); } #[test] @@ -1653,7 +1627,7 @@ mod tests { #[test] fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); @@ -1779,7 +1753,7 @@ mod tests { // System::enqueue_txs needs to be executed after extrinsics fn invalid_inherent_position_fail() { let xt1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); @@ -1892,21 +1866,18 @@ mod tests { new_test_ext(1).execute_with(|| { let prev_seed = vec![0u8; 32]; let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = Arc::new(MemoryKeystore::new()); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); - let transcript = VRFTranscriptData { - label: b"shuffling_seed", - items: vec![("prev_seed", VRFTranscriptValue::Bytes(prev_seed))], - }; + let transcript = VrfTranscript::new(b"shuffling_seed", &[(b"prev_seed",&prev_seed)]); let signature = keystore - .sr25519_vrf_sign(AURA, &key_pair.public(), transcript.clone()) + .sr25519_vrf_sign(AURA, &key_pair.public(), &transcript) .unwrap() .unwrap(); @@ -1920,7 +1891,7 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root: hex!( - "7c3644ad634bf7d91f11984ebb149e389c92f99fef8ac181f7a9a43ee31d94e3" + "3f6b94972b987d9c76943ad7031349c8e62d5900fd98ba6345da8435dad66d09" ) .into(), extrinsics_root: hex!( @@ -1930,8 +1901,8 @@ mod tests { digest: Digest { logs: vec![] }, count: 0, seed: ShufflingSeed { - seed: signature.output.to_bytes().into(), - proof: signature.proof.to_bytes().into(), + seed: H256::from_slice(signature.output.encode().as_bytes_ref()), + proof: H512::from_slice(signature.proof.encode().as_bytes_ref()), }, }, extrinsics: vec![], @@ -1945,12 +1916,10 @@ mod tests { fn accept_block_that_fetches_txs_from_the_queue() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); - + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); - keystore - .insert_unknown(AURA, secret_uri, 
key_pair.public().as_ref()) + keystore.insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); @@ -1978,7 +1947,7 @@ mod tests { parent_hash: System::parent_hash(), number: 1, state_root: hex!( - "10b8fe2ef82cb245fc71dab724fde5462bacc4f0d2b3b6bf0581aa89d63ef3a1" + "102bbfb2d146b9313489419e816d176b1f7280e8b1000cd27852bed7d495abbd" ) .into(), extrinsics_root: hex!( @@ -2005,7 +1974,7 @@ mod tests { parent_hash: System::parent_hash(), number: 2, state_root: hex!( - "9bd12b1263d49dd1d6cf7fdf0d1c8330db2c927bb2d55e77b725ccdcaaefcba5" + "da228fb69aec5dd5f76ebb155e8faf848c49581528c06f776543c834369032fa" ) .into(), extrinsics_root: hex!( @@ -2033,12 +2002,12 @@ mod tests { fn rejects_block_that_enqueues_too_many_transactions_to_storage_queue() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let txs = (0..100000) @@ -2067,7 +2036,7 @@ mod tests { parent_hash: System::parent_hash(), number: 1, state_root: hex!( - "5bc40cfd524119a0f1ca2fbd9f0357806d0041f56e0de1750b1fe0011915ca4c" + "831e2467d5152af868a45ef85d03eff185fd9c2b29df12b2daec5e0ea069acb4" ) .into(), extrinsics_root: hex!( @@ -2095,12 +2064,12 @@ mod tests { fn rejects_block_that_enqueues_new_txs_but_doesnt_execute_any() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let txs = (0..10) @@ -2129,7 +2098,7 @@ mod tests { parent_hash: System::parent_hash(), number: 1, state_root: hex!( - "5bc40cfd524119a0f1ca2fbd9f0357806d0041f56e0de1750b1fe0011915ca4c" + "831e2467d5152af868a45ef85d03eff185fd9c2b29df12b2daec5e0ea069acb4" ) .into(), extrinsics_root: hex!( @@ -2184,12 +2153,12 @@ mod tests { fn do_not_allow_to_accept_binary_blobs_that_does_not_deserialize_into_valid_tx() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let pub_key_bytes = AsRef::<[u8; 32]>::as_ref(&key_pair.public()) @@ -2237,12 +2206,12 @@ mod tests { fn do_not_panic_when_tx_poped_from_storage_queue_cannot_be_deserialized() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let pub_key_bytes = AsRef::<[u8; 32]>::as_ref(&key_pair.public()) @@ -2268,7 +2237,7 @@ mod 
tests { parent_hash: System::parent_hash(), number: 1, state_root: hex!( - "10b8fe2ef82cb245fc71dab724fde5462bacc4f0d2b3b6bf0581aa89d63ef3a1" + "102bbfb2d146b9313489419e816d176b1f7280e8b1000cd27852bed7d495abbd" ) .into(), extrinsics_root: hex!( @@ -2301,7 +2270,7 @@ mod tests { parent_hash: System::parent_hash(), number: 2, state_root: hex!( - "9a3734f7495f8d2cdeaf71b8908040428848f8333274f9b871f522aa8838cc2e" + "30bb4cf688e7331e3149053da3e83aa56b4b7e2e289106fd7fd523369fb1cbe5" ) .into(), extrinsics_root: hex!( @@ -2329,12 +2298,12 @@ mod tests { // inject txs with wrong nonces new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let pub_key_bytes = AsRef::<[u8; 32]>::as_ref(&key_pair.public()) @@ -2366,7 +2335,7 @@ mod tests { parent_hash: System::parent_hash(), number: 1, state_root: hex!( - "19fd2bb5ce39066549e0f84e2fcabb715e3541e3c26ec8047554bbcd9c7885a4" + "9768ee8cbc4d885d73464409ef4adbe4b6997d9accd75eb205eaa09140aace7f" ) .into(), extrinsics_root: hex!( @@ -2394,7 +2363,7 @@ mod tests { parent_hash: System::parent_hash(), number: 2, state_root: hex!( - "15a8610abb49b6649f043cf75c2ff9ed4209fb5b657fd345d0e0fc9b8165ba72" + "f4c46903988d1f2877c965ea22f33853c94ebbd7da3c8597335e46b0392d638b" ) .into(), extrinsics_root: hex!( @@ -2422,12 +2391,12 @@ mod tests { fn reject_block_that_tries_to_enqueue_same_tx_mulitple_times() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let pub_key_bytes = AsRef::<[u8; 32]>::as_ref(&key_pair.public()) @@ -2457,7 +2426,7 @@ mod tests { parent_hash: System::parent_hash(), number: 1, state_root: hex!( - "10b8fe2ef82cb245fc71dab724fde5462bacc4f0d2b3b6bf0581aa89d63ef3a1" + "102bbfb2d146b9313489419e816d176b1f7280e8b1000cd27852bed7d495abbd" ) .into(), extrinsics_root: hex!( @@ -2485,12 +2454,12 @@ mod tests { fn reject_block_that_enqueus_same_tx_multiple_times() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let pub_key_bytes = AsRef::<[u8; 32]>::as_ref(&key_pair.public()) @@ -2516,7 +2485,7 @@ mod tests { parent_hash: System::parent_hash(), number: 1, state_root: hex!( - "10b8fe2ef82cb245fc71dab724fde5462bacc4f0d2b3b6bf0581aa89d63ef3a1" + "102bbfb2d146b9313489419e816d176b1f7280e8b1000cd27852bed7d495abbd" ) .into(), extrinsics_root: hex!( @@ -2562,7 +2531,7 @@ mod tests { // Inherents are created by the runtime and don't need to be validated. 
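For reference (not part of the patch): the keystore hunks above, and in the tests that follow, swap `sp_keystore::testing::KeyStore` for `MemoryKeystore`, `insert_unknown` for `insert`, and the old `VRFTranscriptData` for `VrfTranscript`, with `sr25519_vrf_sign` now taking the transcript by reference. A self-contained sketch of that flow, assuming `VrfTranscript` is the type from `sp_core`'s sr25519 VRF module and that `aura` is the key type these tests register under:

    use sp_core::{crypto::KeyTypeId, sr25519::{self, vrf::VrfTranscript}, Pair};
    use sp_keystore::{testing::MemoryKeystore, Keystore};

    const AURA: KeyTypeId = KeyTypeId(*b"aura");

    let keystore = MemoryKeystore::new();
    let pair = sr25519::Pair::from_string("//Alice", None).expect("valid secret URI");
    keystore
        .insert(AURA, "//Alice", pair.public().as_ref())
        .expect("key can be inserted");

    // Build the VRF transcript and let the keystore sign it with Alice's key.
    let prev_seed = vec![0u8; 32];
    let transcript = VrfTranscript::new(b"shuffling_seed", &[(b"prev_seed", &prev_seed)]);
    let signature = keystore
        .sr25519_vrf_sign(AURA, &pair.public(), &transcript)
        .expect("keystore is reachable")
        .expect("a key exists for this public key and key type");

The signature's `output` and `proof` are what the tests above pack into the header's `ShufflingSeed`.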
#[test] fn inherents_fail_validate_block() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), sign_extra(1, 0, 0)); new_test_ext(1).execute_with(|| { assert_eq!( @@ -2583,12 +2552,12 @@ mod tests { fn reject_block_that_tries_to_pop_more_txs_than_available() { new_test_ext(1).execute_with(|| { let secret_uri = "//Alice"; - let keystore = sp_keystore::testing::KeyStore::new(); + let keystore = MemoryKeystore::new(); let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); keystore - .insert_unknown(AURA, secret_uri, key_pair.public().as_ref()) + .insert(AURA, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); let pub_key_bytes = AsRef::<[u8; 32]>::as_ref(&key_pair.public()) @@ -2616,11 +2585,11 @@ mod tests { state_root: hex!( "c6bbd33a1161f1b0d719594304a81c6cc97a183a64a09e1903cb58ed6e247148" ) - .into(), + .into(), extrinsics_root: hex!( "9f907f07e03a93bbb696e4071f58237edc3 5a701d24e5a2155cf52a2b32a4ef3" ) - .into(), + .into(), digest: Digest { logs: vec![DigestItem::Other(tx_hashes_list.encode())] }, count: 1, seed: calculate_next_seed_from_bytes( @@ -2628,7 +2597,7 @@ mod tests { &key_pair.public(), System::block_seed().as_bytes().to_vec(), ) - .unwrap(), + .unwrap(), }, extrinsics: vec![enqueue_txs_inherent.clone(), enqueue_txs_inherent], }, @@ -2663,12 +2632,8 @@ mod tests { new_test_ext(1).execute_with(|| { assert_eq!( - Executive::validate_transaction( - TransactionSource::External, - xt1, - H256::random() - ) - .unwrap_err(), + Executive::validate_transaction(TransactionSource::External, xt1, H256::random()) + .unwrap_err(), InvalidTransaction::MandatoryValidation.into() ); }) diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index 36b4c3a5ec1cb..93fceefa2de51 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -3,7 +3,7 @@ name = "pallet-fast-unstake" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" -license = "Unlicense" +license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME fast unstake pallet" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index c9a1d6d517308..fbe6c4592bf67 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -89,6 +89,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } pallet_staking_reward_curve::build! { diff --git a/frame/fast-unstake/src/weights.rs b/frame/fast-unstake/src/weights.rs index ced7acb951229..27414a8a8cc0d 100644 --- a/frame/fast-unstake/src/weights.rs +++ b/frame/fast-unstake/src/weights.rs @@ -18,25 +18,25 @@ //! 
Autogenerated weights for pallet_fast_unstake //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `runner-b3zmxxc-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_fast_unstake // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_fast_unstake -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/fast-unstake/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -58,172 +58,298 @@ pub trait WeightInfo { /// Weights for pallet_fast_unstake using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Bonded (r:1 w:1) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) - // Storage: System Account (r:1 w:1) - // Storage: Balances Locks (r:1 w:1) - // Storage: Staking Ledger (r:0 w:1) - // Storage: Staking Payee (r:0 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ValidatorCount (r:1 w:0) + /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:1) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:0) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking SlashingSpans (r:64 w:0) + /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) + /// Storage: Staking Bonded (r:64 w:64) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Validators (r:64 w:0) + /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: Staking Nominators (r:64 w:0) + /// Proof: Staking Nominators (max_values: None, max_size: 
Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: System Account (r:64 w:64) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:64 w:64) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:64 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:0 w:64) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: Staking Payee (r:0 w:64) + /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { - // Minimum execution time: 92_833 nanoseconds. - Weight::from_parts(62_136_346, 0) - // Standard Error: 25_541 - .saturating_add(Weight::from_parts(42_904_859, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `1344 + b * (343 ±0)` + // Estimated: `7253 + b * (3774 ±0)` + // Minimum execution time: 92_282_000 picoseconds. + Weight::from_parts(31_665_344, 7253) + // Standard Error: 35_348 + .saturating_add(Weight::from_parts(57_005_152, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking ErasStakers (r:2 w:0) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ValidatorCount (r:1 w:0) + /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:1) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:0) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ErasStakers (r:257 w:0) + /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) /// The range of component `v` is `[1, 256]`. /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { - // Minimum execution time: 1_775_293 nanoseconds. 
- Weight::from_parts(1_787_133_000, 0) - // Standard Error: 17_109_142 - .saturating_add(Weight::from_parts(546_766_552, 0).saturating_mul(v.into())) - // Standard Error: 68_455_625 - .saturating_add(Weight::from_parts(2_135_980_830, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(7)) + // Proof Size summary in bytes: + // Measured: `1512 + b * (48 ±0) + v * (10037 ±0)` + // Estimated: `7253 + b * (49 ±0) + v * (12513 ±0)` + // Minimum execution time: 1_547_716_000 picoseconds. + Weight::from_parts(1_552_476_000, 7253) + // Standard Error: 13_914_457 + .saturating_add(Weight::from_parts(445_314_876, 0).saturating_mul(v.into())) + // Standard Error: 55_673_329 + .saturating_add(Weight::from_parts(1_749_024_692, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 12513).saturating_mul(v.into())) } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:1) - // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Balances Locks (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:1) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: FastUnstake Queue (r:1 w:1) + /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:0) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: Staking Bonded (r:1 w:0) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Validators (r:1 w:0) + /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: Staking Nominators (r:1 w:1) + /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: Staking CounterForNominators (r:1 w:1) + /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: VoterList ListNodes (r:1 w:1) + /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) + /// Storage: VoterList ListBags (r:1 w:1) + /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: VoterList CounterForListNodes (r:1 w:1) + /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra 
(max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn register_fast_unstake() -> Weight { - // Minimum execution time: 124_849 nanoseconds. - Weight::from_parts(128_176_000, 0) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(9)) + // Proof Size summary in bytes: + // Measured: `1964` + // Estimated: `7253` + // Minimum execution time: 124_644_000 picoseconds. + Weight::from_parts(125_793_000, 7253) + .saturating_add(T::DbWeight::get().reads(15_u64)) + .saturating_add(T::DbWeight::get().writes(9_u64)) } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:0) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:0) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: FastUnstake Queue (r:1 w:1) + /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:0) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn deregister() -> Weight { - // Minimum execution time: 48_246 nanoseconds. - Weight::from_parts(49_720_000, 0) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) + // Proof Size summary in bytes: + // Measured: `1223` + // Estimated: `7253` + // Minimum execution time: 45_037_000 picoseconds. + Weight::from_parts(45_545_000, 7253) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn control() -> Weight { - // Minimum execution time: 4_611 nanoseconds. - Weight::from_parts(4_844_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_228_000 picoseconds. 
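A note on the regenerated weight output used throughout these files (illustrative, not part of the patch): timings are now reported in picoseconds instead of nanoseconds, each storage access gains a `Proof:` or `Proof Skipped:` doc comment with its `MaxEncodedLen`/`Measured` mode, and the second argument to `Weight::from_parts` carries the benchmarked proof size in bytes rather than `0`. A hand-written function in the same shape, with placeholder numbers rather than benchmark results, might look like:

    use frame_support::{traits::Get, weights::Weight};

    fn example_weight<T: frame_system::Config>(n: u32) -> Weight {
        // Base cost: ref_time in picoseconds, proof size in bytes.
        Weight::from_parts(10_000_000, 1489)
            // Linear component: a per-item slope added to ref_time only.
            .saturating_add(Weight::from_parts(1_500_000, 0).saturating_mul(n.into()))
            // One read and one write per call, plus one write per item.
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
            .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into())))
    }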
+ Weight::from_parts(3_428_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Bonded (r:1 w:1) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) - // Storage: System Account (r:1 w:1) - // Storage: Balances Locks (r:1 w:1) - // Storage: Staking Ledger (r:0 w:1) - // Storage: Staking Payee (r:0 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ValidatorCount (r:1 w:0) + /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:1) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:0) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking SlashingSpans (r:64 w:0) + /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) + /// Storage: Staking Bonded (r:64 w:64) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Validators (r:64 w:0) + /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: Staking Nominators (r:64 w:0) + /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: System Account (r:64 w:64) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:64 w:64) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:64 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:0 w:64) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: Staking Payee (r:0 w:64) + /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { - // Minimum execution time: 92_833 nanoseconds. 
- Weight::from_parts(62_136_346, 0) - // Standard Error: 25_541 - .saturating_add(Weight::from_parts(42_904_859, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(6)) - .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(b.into()))) - .saturating_add(RocksDbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `1344 + b * (343 ±0)` + // Estimated: `7253 + b * (3774 ±0)` + // Minimum execution time: 92_282_000 picoseconds. + Weight::from_parts(31_665_344, 7253) + // Standard Error: 35_348 + .saturating_add(Weight::from_parts(57_005_152, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking ErasStakers (r:2 w:0) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ValidatorCount (r:1 w:0) + /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:1) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:0) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ErasStakers (r:257 w:0) + /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) /// The range of component `v` is `[1, 256]`. /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { - // Minimum execution time: 1_775_293 nanoseconds. - Weight::from_parts(1_787_133_000, 0) - // Standard Error: 17_109_142 - .saturating_add(Weight::from_parts(546_766_552, 0).saturating_mul(v.into())) - // Standard Error: 68_455_625 - .saturating_add(Weight::from_parts(2_135_980_830, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(7)) + // Proof Size summary in bytes: + // Measured: `1512 + b * (48 ±0) + v * (10037 ±0)` + // Estimated: `7253 + b * (49 ±0) + v * (12513 ±0)` + // Minimum execution time: 1_547_716_000 picoseconds. 
+ Weight::from_parts(1_552_476_000, 7253) + // Standard Error: 13_914_457 + .saturating_add(Weight::from_parts(445_314_876, 0).saturating_mul(v.into())) + // Standard Error: 55_673_329 + .saturating_add(Weight::from_parts(1_749_024_692, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) - .saturating_add(RocksDbWeight::get().writes(1)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 12513).saturating_mul(v.into())) } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:1) - // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Balances Locks (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:1) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: FastUnstake Queue (r:1 w:1) + /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:0) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: Staking Bonded (r:1 w:0) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Validators (r:1 w:0) + /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: Staking Nominators (r:1 w:1) + /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: Staking CounterForNominators (r:1 w:1) + /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: VoterList ListNodes (r:1 w:1) + /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) + /// Storage: VoterList ListBags (r:1 w:1) + /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: VoterList CounterForListNodes (r:1 w:1) + /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), 
added: 499, mode: MaxEncodedLen) fn register_fast_unstake() -> Weight { - // Minimum execution time: 124_849 nanoseconds. - Weight::from_parts(128_176_000, 0) - .saturating_add(RocksDbWeight::get().reads(14)) - .saturating_add(RocksDbWeight::get().writes(9)) + // Proof Size summary in bytes: + // Measured: `1964` + // Estimated: `7253` + // Minimum execution time: 124_644_000 picoseconds. + Weight::from_parts(125_793_000, 7253) + .saturating_add(RocksDbWeight::get().reads(15_u64)) + .saturating_add(RocksDbWeight::get().writes(9_u64)) } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:0) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:0) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: FastUnstake Queue (r:1 w:1) + /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:0) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn deregister() -> Weight { - // Minimum execution time: 48_246 nanoseconds. - Weight::from_parts(49_720_000, 0) - .saturating_add(RocksDbWeight::get().reads(5)) - .saturating_add(RocksDbWeight::get().writes(2)) + // Proof Size summary in bytes: + // Measured: `1223` + // Estimated: `7253` + // Minimum execution time: 45_037_000 picoseconds. + Weight::from_parts(45_545_000, 7253) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) + /// Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn control() -> Weight { - // Minimum execution time: 4_611 nanoseconds. - Weight::from_parts(4_844_000, 0) - .saturating_add(RocksDbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_228_000 picoseconds. 
+ Weight::from_parts(3_428_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/glutton/Cargo.toml b/frame/glutton/Cargo.toml index c0f0312464ee6..bee0d3db625fb 100644 --- a/frame/glutton/Cargo.toml +++ b/frame/glutton/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] blake2 = { version = "0.10.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.14", default-features = false } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/glutton/src/weights.rs b/frame/glutton/src/weights.rs index 1a7020c17a9ab..82bac91c6d785 100644 --- a/frame/glutton/src/weights.rs +++ b/frame/glutton/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_glutton //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-02-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_glutton // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_glutton -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/glutton/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -72,11 +71,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1489` - // Minimum execution time: 10_218 nanoseconds. - Weight::from_parts(10_510_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 1_582 - .saturating_add(Weight::from_parts(1_577_660, 0).saturating_mul(n.into())) + // Minimum execution time: 10_410_000 picoseconds. + Weight::from_parts(10_515_000, 1489) + // Standard Error: 1_069 + .saturating_add(Weight::from_parts(1_513_013, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -90,11 +88,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65` // Estimated: `1489` - // Minimum execution time: 10_993 nanoseconds. - Weight::from_parts(11_208_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 1_386 - .saturating_add(Weight::from_parts(1_072_330, 0).saturating_mul(n.into())) + // Minimum execution time: 11_105_000 picoseconds. 
+ Weight::from_parts(584_850, 1489) + // Standard Error: 1_417 + .saturating_add(Weight::from_parts(1_054_988, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -104,24 +101,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 740 nanoseconds. - Weight::from_parts(770_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 24 - .saturating_add(Weight::from_parts(96_434, 0).saturating_mul(i.into())) + // Minimum execution time: 709_000 picoseconds. + Weight::from_parts(7_409_096, 0) + // Standard Error: 23 + .saturating_add(Weight::from_parts(95_342, 0).saturating_mul(i.into())) } /// Storage: Glutton TrashData (r:5000 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119036 + i * (1053 ±0)` + // Measured: `119036 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 630 nanoseconds. - Weight::from_parts(712_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 4_326 - .saturating_add(Weight::from_parts(5_500_880, 0).saturating_mul(i.into())) + // Minimum execution time: 584_000 picoseconds. + Weight::from_parts(674_000, 990) + // Standard Error: 1_802 + .saturating_add(Weight::from_parts(5_360_522, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } @@ -133,11 +128,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1954313` - // Estimated: `5242760` - // Minimum execution time: 56_743_236 nanoseconds. - Weight::from_parts(57_088_040_000, 0) - .saturating_add(Weight::from_parts(0, 5242760)) + // Measured: `1900466` + // Estimated: `5239782` + // Minimum execution time: 57_124_610_000 picoseconds. + Weight::from_parts(57_256_059_000, 5239782) .saturating_add(T::DbWeight::get().reads(1739_u64)) } /// Storage: Glutton Storage (r:1 w:0) @@ -148,11 +142,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9671` - // Estimated: `19048` - // Minimum execution time: 100_387_042 nanoseconds. - Weight::from_parts(100_987_577_000, 0) - .saturating_add(Weight::from_parts(0, 19048)) + // Measured: `9516` + // Estimated: `16070` + // Minimum execution time: 101_500_066_000 picoseconds. + Weight::from_parts(101_621_640_000, 16070) .saturating_add(T::DbWeight::get().reads(7_u64)) } /// Storage: Glutton Storage (r:1 w:0) @@ -162,10 +155,9 @@ impl WeightInfo for SubstrateWeight { fn empty_on_idle() -> Weight { // Proof Size summary in bytes: // Measured: `4` - // Estimated: `2978` - // Minimum execution time: 4_256 nanoseconds. - Weight::from_parts(4_447_000, 0) - .saturating_add(Weight::from_parts(0, 2978)) + // Estimated: `1489` + // Minimum execution time: 4_164_000 picoseconds. 
+ Weight::from_parts(4_378_000, 1489) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Glutton Compute (r:0 w:1) @@ -174,9 +166,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_663 nanoseconds. - Weight::from_parts(8_864_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 8_795_000 picoseconds. + Weight::from_parts(9_076_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Glutton Storage (r:0 w:1) @@ -185,9 +176,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_653 nanoseconds. - Weight::from_parts(8_998_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 8_979_000 picoseconds. + Weight::from_parts(9_195_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -203,11 +193,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1489` - // Minimum execution time: 10_218 nanoseconds. - Weight::from_parts(10_510_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 1_582 - .saturating_add(Weight::from_parts(1_577_660, 0).saturating_mul(n.into())) + // Minimum execution time: 10_410_000 picoseconds. + Weight::from_parts(10_515_000, 1489) + // Standard Error: 1_069 + .saturating_add(Weight::from_parts(1_513_013, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -221,11 +210,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65` // Estimated: `1489` - // Minimum execution time: 10_993 nanoseconds. - Weight::from_parts(11_208_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 1_386 - .saturating_add(Weight::from_parts(1_072_330, 0).saturating_mul(n.into())) + // Minimum execution time: 11_105_000 picoseconds. + Weight::from_parts(584_850, 1489) + // Standard Error: 1_417 + .saturating_add(Weight::from_parts(1_054_988, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -235,24 +223,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 740 nanoseconds. - Weight::from_parts(770_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 24 - .saturating_add(Weight::from_parts(96_434, 0).saturating_mul(i.into())) + // Minimum execution time: 709_000 picoseconds. + Weight::from_parts(7_409_096, 0) + // Standard Error: 23 + .saturating_add(Weight::from_parts(95_342, 0).saturating_mul(i.into())) } /// Storage: Glutton TrashData (r:5000 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119036 + i * (1053 ±0)` + // Measured: `119036 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 630 nanoseconds. 
- Weight::from_parts(712_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 4_326 - .saturating_add(Weight::from_parts(5_500_880, 0).saturating_mul(i.into())) + // Minimum execution time: 584_000 picoseconds. + Weight::from_parts(674_000, 990) + // Standard Error: 1_802 + .saturating_add(Weight::from_parts(5_360_522, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } @@ -264,11 +250,10 @@ impl WeightInfo for () { /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1954313` - // Estimated: `5242760` - // Minimum execution time: 56_743_236 nanoseconds. - Weight::from_parts(57_088_040_000, 0) - .saturating_add(Weight::from_parts(0, 5242760)) + // Measured: `1900466` + // Estimated: `5239782` + // Minimum execution time: 57_124_610_000 picoseconds. + Weight::from_parts(57_256_059_000, 5239782) .saturating_add(RocksDbWeight::get().reads(1739_u64)) } /// Storage: Glutton Storage (r:1 w:0) @@ -279,11 +264,10 @@ impl WeightInfo for () { /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9671` - // Estimated: `19048` - // Minimum execution time: 100_387_042 nanoseconds. - Weight::from_parts(100_987_577_000, 0) - .saturating_add(Weight::from_parts(0, 19048)) + // Measured: `9516` + // Estimated: `16070` + // Minimum execution time: 101_500_066_000 picoseconds. + Weight::from_parts(101_621_640_000, 16070) .saturating_add(RocksDbWeight::get().reads(7_u64)) } /// Storage: Glutton Storage (r:1 w:0) @@ -293,10 +277,9 @@ impl WeightInfo for () { fn empty_on_idle() -> Weight { // Proof Size summary in bytes: // Measured: `4` - // Estimated: `2978` - // Minimum execution time: 4_256 nanoseconds. - Weight::from_parts(4_447_000, 0) - .saturating_add(Weight::from_parts(0, 2978)) + // Estimated: `1489` + // Minimum execution time: 4_164_000 picoseconds. + Weight::from_parts(4_378_000, 1489) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Glutton Compute (r:0 w:1) @@ -305,9 +288,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_663 nanoseconds. - Weight::from_parts(8_864_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 8_795_000 picoseconds. + Weight::from_parts(9_076_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Glutton Storage (r:0 w:1) @@ -316,9 +298,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_653 nanoseconds. - Weight::from_parts(8_998_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Minimum execution time: 8_979_000 picoseconds. 
+ Weight::from_parts(9_195_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 3ffb516423906..288d63512588b 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } @@ -31,7 +31,7 @@ sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -grandpa = { package = "finality-grandpa", version = "0.16.1", features = ["derive-codec"] } +grandpa = { package = "finality-grandpa", version = "0.16.2", features = ["derive-codec"] } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 4e4f2f79d040e..a7359f6896db7 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -142,6 +142,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_timestamp::Config for Test { diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index c1a250368fade..2479b0609a0ab 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } -enumflags2 = { version = "0.7.4" } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +enumflags2 = { version = "0.7.7" } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 0e15f08ddd7e6..ba9749172e5f6 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -85,6 +85,10 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! 
{ diff --git a/frame/identity/src/types.rs b/frame/identity/src/types.rs index 4d59a528021a0..1837b30b027f9 100644 --- a/frame/identity/src/types.rs +++ b/frame/identity/src/types.rs @@ -442,7 +442,7 @@ mod tests { let mut registry = scale_info::Registry::new(); let type_id = registry.register_type(&scale_info::meta_type::()); let registry: scale_info::PortableRegistry = registry.into(); - let type_info = registry.resolve(type_id.id()).unwrap(); + let type_info = registry.resolve(type_id.id).unwrap(); let check_type_info = |data: &Data| { let variant_name = match data { @@ -453,20 +453,20 @@ mod tests { Data::ShaThree256(_) => "ShaThree256".to_string(), Data::Raw(bytes) => format!("Raw{}", bytes.len()), }; - if let scale_info::TypeDef::Variant(variant) = type_info.type_def() { + if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { let variant = variant - .variants() + .variants .iter() - .find(|v| v.name() == &variant_name) + .find(|v| v.name == variant_name) .expect(&format!("Expected to find variant {}", variant_name)); let field_arr_len = variant - .fields() + .fields .first() - .and_then(|f| registry.resolve(f.ty().id())) + .and_then(|f| registry.resolve(f.ty.id)) .map(|ty| { - if let scale_info::TypeDef::Array(arr) = ty.type_def() { - arr.len() + if let scale_info::TypeDef::Array(arr) = &ty.type_def { + arr.len } else { panic!("Should be an array type") } @@ -474,7 +474,7 @@ mod tests { .unwrap_or(0); let encoded = data.encode(); - assert_eq!(encoded[0], variant.index()); + assert_eq!(encoded[0], variant.index); assert_eq!(encoded.len() as u32 - 1, field_arr_len); } else { panic!("Should be a variant type") diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 65fe2b17ba527..faefd00fb961f 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_identity //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -74,12 +74,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `64 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 10_964 nanoseconds. - Weight::from_parts(11_800_935, 1636) - // Standard Error: 1_334 - .saturating_add(Weight::from_parts(96_038, 0).saturating_mul(r.into())) + // Measured: `32 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 12_851_000 picoseconds. + Weight::from_parts(13_448_645, 2626) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(113_654, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -89,14 +89,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[0, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `474 + r * (5 ±0)` - // Estimated: `10013` - // Minimum execution time: 26_400 nanoseconds. 
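The `frame/identity/src/types.rs` hunk above follows the scale-info 2.5 API, where registry metadata is read through public fields (`.id`, `.type_def`, `.variants`, `.name`, `.index`) instead of the old accessor methods. A small self-contained sketch of the same pattern (not from the pallet), using `Option<u32>` as a stand-in for the pallet's `Data` enum:

    use scale_info::{meta_type, PortableRegistry, Registry, TypeDef};

    let mut registry = Registry::new();
    let symbol = registry.register_type(&meta_type::<Option<u32>>());
    let registry: PortableRegistry = registry.into();

    // `.id` and `.type_def` are now plain public fields.
    let ty = registry.resolve(symbol.id).expect("type was just registered");
    if let TypeDef::Variant(def) = &ty.type_def {
        for variant in &def.variants {
            println!("variant {} has index {}", variant.name, variant.index);
        }
    }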
- Weight::from_parts(26_060_549, 10013) - // Standard Error: 1_561 - .saturating_add(Weight::from_parts(72_083, 0).saturating_mul(r.into())) - // Standard Error: 304 - .saturating_add(Weight::from_parts(306_994, 0).saturating_mul(x.into())) + // Measured: `442 + r * (5 ±0)` + // Estimated: `11003` + // Minimum execution time: 33_342_000 picoseconds. + Weight::from_parts(33_155_116, 11003) + // Standard Error: 2_307 + .saturating_add(Weight::from_parts(56_409, 0).saturating_mul(r.into())) + // Standard Error: 450 + .saturating_add(Weight::from_parts(437_684, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -110,11 +110,11 @@ impl WeightInfo for SubstrateWeight { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `15746 + s * (2589 ±0)` - // Minimum execution time: 8_492 nanoseconds. - Weight::from_parts(21_645_924, 15746) - // Standard Error: 3_452 - .saturating_add(Weight::from_parts(2_442_604, 0).saturating_mul(s.into())) + // Estimated: `11003 + s * (2589 ±0)` + // Minimum execution time: 10_315_000 picoseconds. + Weight::from_parts(26_535_526, 11003) + // Standard Error: 4_344 + .saturating_add(Weight::from_parts(3_016_873, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -130,12 +130,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `226 + p * (32 ±0)` - // Estimated: `15746` - // Minimum execution time: 8_488 nanoseconds. - Weight::from_parts(20_202_601, 15746) - // Standard Error: 2_834 - .saturating_add(Weight::from_parts(1_082_941, 0).saturating_mul(p.into())) + // Measured: `194 + p * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 10_220_000 picoseconds. + Weight::from_parts(25_050_056, 11003) + // Standard Error: 3_621 + .saturating_add(Weight::from_parts(1_291_143, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -149,18 +149,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `533 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `15746` - // Minimum execution time: 41_319 nanoseconds. - Weight::from_parts(25_850_055, 15746) - // Standard Error: 4_144 - .saturating_add(Weight::from_parts(59_619, 0).saturating_mul(r.into())) - // Standard Error: 809 - .saturating_add(Weight::from_parts(1_076_550, 0).saturating_mul(s.into())) - // Standard Error: 809 - .saturating_add(Weight::from_parts(163_191, 0).saturating_mul(x.into())) + fn clear_identity(_r: u32, s: u32, x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 56_018_000 picoseconds. 
+ Weight::from_parts(37_757_186, 11003) + // Standard Error: 1_852 + .saturating_add(Weight::from_parts(1_257_980, 0).saturating_mul(s.into())) + // Standard Error: 1_852 + .saturating_add(Weight::from_parts(215_426, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -173,14 +171,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[0, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `431 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11649` - // Minimum execution time: 28_118 nanoseconds. - Weight::from_parts(27_359_471, 11649) - // Standard Error: 2_707 - .saturating_add(Weight::from_parts(107_279, 0).saturating_mul(r.into())) - // Standard Error: 528 - .saturating_add(Weight::from_parts(325_165, 0).saturating_mul(x.into())) + // Measured: `367 + r * (57 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 34_792_000 picoseconds. + Weight::from_parts(35_368_327, 11003) + // Standard Error: 3_476 + .saturating_add(Weight::from_parts(78_981, 0).saturating_mul(r.into())) + // Standard Error: 678 + .saturating_add(Weight::from_parts(459_077, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -188,16 +186,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. - fn cancel_request(r: u32, x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `430 + x * (66 ±0)` - // Estimated: `10013` - // Minimum execution time: 24_817 nanoseconds. - Weight::from_parts(24_749_808, 10013) - // Standard Error: 1_938 - .saturating_add(Weight::from_parts(63_396, 0).saturating_mul(r.into())) - // Standard Error: 378 - .saturating_add(Weight::from_parts(327_083, 0).saturating_mul(x.into())) + fn cancel_request(_r: u32, x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `398 + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 31_306_000 picoseconds. + Weight::from_parts(33_304_799, 11003) + // Standard Error: 892 + .saturating_add(Weight::from_parts(451_655, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,12 +202,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `121 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 6_664 nanoseconds. - Weight::from_parts(7_286_307, 1636) - // Standard Error: 1_560 - .saturating_add(Weight::from_parts(96_416, 0).saturating_mul(r.into())) + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 8_215_000 picoseconds. + Weight::from_parts(8_692_102, 2626) + // Standard Error: 1_455 + .saturating_add(Weight::from_parts(110_912, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -220,12 +216,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 19]`. 
fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `121 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 7_054 nanoseconds. - Weight::from_parts(7_382_954, 1636) - // Standard Error: 1_621 - .saturating_add(Weight::from_parts(101_595, 0).saturating_mul(r.into())) + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 8_397_000 picoseconds. + Weight::from_parts(8_787_656, 2626) + // Standard Error: 1_440 + .saturating_add(Weight::from_parts(111_212, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -234,12 +230,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `121 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 6_659 nanoseconds. - Weight::from_parts(7_188_883, 1636) - // Standard Error: 1_377 - .saturating_add(Weight::from_parts(98_965, 0).saturating_mul(r.into())) + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 8_466_000 picoseconds. + Weight::from_parts(8_689_763, 2626) + // Standard Error: 1_536 + .saturating_add(Weight::from_parts(106_371, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -251,14 +247,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[0, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `509 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11649` - // Minimum execution time: 21_567 nanoseconds. - Weight::from_parts(21_015_310, 11649) - // Standard Error: 2_516 - .saturating_add(Weight::from_parts(123_992, 0).saturating_mul(r.into())) - // Standard Error: 465 - .saturating_add(Weight::from_parts(552_116, 0).saturating_mul(x.into())) + // Measured: `445 + r * (57 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 25_132_000 picoseconds. + Weight::from_parts(20_582_313, 11003) + // Standard Error: 10_427 + .saturating_add(Weight::from_parts(277_545, 0).saturating_mul(r.into())) + // Standard Error: 1_929 + .saturating_add(Weight::from_parts(747_966, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -275,16 +271,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[0, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `772 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `18349` - // Minimum execution time: 52_881 nanoseconds. - Weight::from_parts(38_504_388, 18349) - // Standard Error: 3_909 - .saturating_add(Weight::from_parts(51_452, 0).saturating_mul(r.into())) - // Standard Error: 763 - .saturating_add(Weight::from_parts(1_069_924, 0).saturating_mul(s.into())) - // Standard Error: 763 - .saturating_add(Weight::from_parts(164_906, 0).saturating_mul(x.into())) + // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 72_043_000 picoseconds. 
+ Weight::from_parts(50_994_735, 11003) + // Standard Error: 9_304 + .saturating_add(Weight::from_parts(123_052, 0).saturating_mul(r.into())) + // Standard Error: 1_817 + .saturating_add(Weight::from_parts(1_256_713, 0).saturating_mul(s.into())) + // Standard Error: 1_817 + .saturating_add(Weight::from_parts(219_242, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -298,12 +294,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `507 + s * (36 ±0)` - // Estimated: `18335` - // Minimum execution time: 24_556 nanoseconds. - Weight::from_parts(28_641_160, 18335) - // Standard Error: 1_327 - .saturating_add(Weight::from_parts(66_150, 0).saturating_mul(s.into())) + // Measured: `475 + s * (36 ±0)` + // Estimated: `11003` + // Minimum execution time: 30_747_000 picoseconds. + Weight::from_parts(35_975_985, 11003) + // Standard Error: 1_625 + .saturating_add(Weight::from_parts(73_263, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -314,12 +310,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `623 + s * (3 ±0)` - // Estimated: `12602` - // Minimum execution time: 11_347 nanoseconds. - Weight::from_parts(13_299_367, 12602) - // Standard Error: 525 - .saturating_add(Weight::from_parts(16_472, 0).saturating_mul(s.into())) + // Measured: `591 + s * (3 ±0)` + // Estimated: `11003` + // Minimum execution time: 13_586_000 picoseconds. + Weight::from_parts(15_909_245, 11003) + // Standard Error: 611 + .saturating_add(Weight::from_parts(16_949, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -332,12 +328,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `702 + s * (35 ±0)` - // Estimated: `18335` - // Minimum execution time: 27_810 nanoseconds. - Weight::from_parts(30_347_763, 18335) - // Standard Error: 928 - .saturating_add(Weight::from_parts(55_342, 0).saturating_mul(s.into())) + // Measured: `638 + s * (35 ±0)` + // Estimated: `11003` + // Minimum execution time: 34_286_000 picoseconds. + Weight::from_parts(37_391_401, 11003) + // Standard Error: 1_099 + .saturating_add(Weight::from_parts(61_165, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -348,12 +344,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `628 + s * (37 ±0)` - // Estimated: `8322` - // Minimum execution time: 17_601 nanoseconds. - Weight::from_parts(19_794_971, 8322) - // Standard Error: 934 - .saturating_add(Weight::from_parts(59_289, 0).saturating_mul(s.into())) + // Measured: `564 + s * (37 ±0)` + // Estimated: `6723` + // Minimum execution time: 22_197_000 picoseconds. 
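For reference, each regenerated weight function above follows the same shape: a base `Weight` built from a ref_time (now reported in picoseconds) and a proof size in bytes, plus a per-component slope and static DB read/write costs. A rough sketch using the new `add_registrar` numbers, with the DB weights passed in rather than read from a runtime configuration:

use frame_support::weights::Weight;

fn add_registrar_shape(r: u64, db_read: Weight, db_write: Weight) -> Weight {
    // Base: 13_448_645 ps of ref_time and 2626 bytes of proof size.
    Weight::from_parts(13_448_645, 2626)
        // ref_time grows linearly with the number of registrars `r`.
        .saturating_add(Weight::from_parts(113_654, 0).saturating_mul(r))
        .saturating_add(db_read)
        .saturating_add(db_write)
}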
+ Weight::from_parts(24_630_311, 6723) + // Standard Error: 1_092 + .saturating_add(Weight::from_parts(63_415, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -366,12 +362,12 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `64 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 10_964 nanoseconds. - Weight::from_parts(11_800_935, 1636) - // Standard Error: 1_334 - .saturating_add(Weight::from_parts(96_038, 0).saturating_mul(r.into())) + // Measured: `32 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 12_851_000 picoseconds. + Weight::from_parts(13_448_645, 2626) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(113_654, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -381,14 +377,14 @@ impl WeightInfo for () { /// The range of component `x` is `[0, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `474 + r * (5 ±0)` - // Estimated: `10013` - // Minimum execution time: 26_400 nanoseconds. - Weight::from_parts(26_060_549, 10013) - // Standard Error: 1_561 - .saturating_add(Weight::from_parts(72_083, 0).saturating_mul(r.into())) - // Standard Error: 304 - .saturating_add(Weight::from_parts(306_994, 0).saturating_mul(x.into())) + // Measured: `442 + r * (5 ±0)` + // Estimated: `11003` + // Minimum execution time: 33_342_000 picoseconds. + Weight::from_parts(33_155_116, 11003) + // Standard Error: 2_307 + .saturating_add(Weight::from_parts(56_409, 0).saturating_mul(r.into())) + // Standard Error: 450 + .saturating_add(Weight::from_parts(437_684, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -402,11 +398,11 @@ impl WeightInfo for () { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `15746 + s * (2589 ±0)` - // Minimum execution time: 8_492 nanoseconds. - Weight::from_parts(21_645_924, 15746) - // Standard Error: 3_452 - .saturating_add(Weight::from_parts(2_442_604, 0).saturating_mul(s.into())) + // Estimated: `11003 + s * (2589 ±0)` + // Minimum execution time: 10_315_000 picoseconds. + Weight::from_parts(26_535_526, 11003) + // Standard Error: 4_344 + .saturating_add(Weight::from_parts(3_016_873, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -422,12 +418,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `226 + p * (32 ±0)` - // Estimated: `15746` - // Minimum execution time: 8_488 nanoseconds. - Weight::from_parts(20_202_601, 15746) - // Standard Error: 2_834 - .saturating_add(Weight::from_parts(1_082_941, 0).saturating_mul(p.into())) + // Measured: `194 + p * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 10_220_000 picoseconds. 
+ Weight::from_parts(25_050_056, 11003) + // Standard Error: 3_621 + .saturating_add(Weight::from_parts(1_291_143, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -441,18 +437,16 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `533 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `15746` - // Minimum execution time: 41_319 nanoseconds. - Weight::from_parts(25_850_055, 15746) - // Standard Error: 4_144 - .saturating_add(Weight::from_parts(59_619, 0).saturating_mul(r.into())) - // Standard Error: 809 - .saturating_add(Weight::from_parts(1_076_550, 0).saturating_mul(s.into())) - // Standard Error: 809 - .saturating_add(Weight::from_parts(163_191, 0).saturating_mul(x.into())) + fn clear_identity(_r: u32, s: u32, x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 56_018_000 picoseconds. + Weight::from_parts(37_757_186, 11003) + // Standard Error: 1_852 + .saturating_add(Weight::from_parts(1_257_980, 0).saturating_mul(s.into())) + // Standard Error: 1_852 + .saturating_add(Weight::from_parts(215_426, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -465,14 +459,14 @@ impl WeightInfo for () { /// The range of component `x` is `[0, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `431 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11649` - // Minimum execution time: 28_118 nanoseconds. - Weight::from_parts(27_359_471, 11649) - // Standard Error: 2_707 - .saturating_add(Weight::from_parts(107_279, 0).saturating_mul(r.into())) - // Standard Error: 528 - .saturating_add(Weight::from_parts(325_165, 0).saturating_mul(x.into())) + // Measured: `367 + r * (57 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 34_792_000 picoseconds. + Weight::from_parts(35_368_327, 11003) + // Standard Error: 3_476 + .saturating_add(Weight::from_parts(78_981, 0).saturating_mul(r.into())) + // Standard Error: 678 + .saturating_add(Weight::from_parts(459_077, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -480,16 +474,14 @@ impl WeightInfo for () { /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. - fn cancel_request(r: u32, x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `430 + x * (66 ±0)` - // Estimated: `10013` - // Minimum execution time: 24_817 nanoseconds. 
- Weight::from_parts(24_749_808, 10013) - // Standard Error: 1_938 - .saturating_add(Weight::from_parts(63_396, 0).saturating_mul(r.into())) - // Standard Error: 378 - .saturating_add(Weight::from_parts(327_083, 0).saturating_mul(x.into())) + fn cancel_request(_r: u32, x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `398 + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 31_306_000 picoseconds. + Weight::from_parts(33_304_799, 11003) + // Standard Error: 892 + .saturating_add(Weight::from_parts(451_655, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -498,12 +490,12 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `121 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 6_664 nanoseconds. - Weight::from_parts(7_286_307, 1636) - // Standard Error: 1_560 - .saturating_add(Weight::from_parts(96_416, 0).saturating_mul(r.into())) + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 8_215_000 picoseconds. + Weight::from_parts(8_692_102, 2626) + // Standard Error: 1_455 + .saturating_add(Weight::from_parts(110_912, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -512,12 +504,12 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `121 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 7_054 nanoseconds. - Weight::from_parts(7_382_954, 1636) - // Standard Error: 1_621 - .saturating_add(Weight::from_parts(101_595, 0).saturating_mul(r.into())) + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 8_397_000 picoseconds. + Weight::from_parts(8_787_656, 2626) + // Standard Error: 1_440 + .saturating_add(Weight::from_parts(111_212, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -526,12 +518,12 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `121 + r * (57 ±0)` - // Estimated: `1636` - // Minimum execution time: 6_659 nanoseconds. - Weight::from_parts(7_188_883, 1636) - // Standard Error: 1_377 - .saturating_add(Weight::from_parts(98_965, 0).saturating_mul(r.into())) + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 8_466_000 picoseconds. + Weight::from_parts(8_689_763, 2626) + // Standard Error: 1_536 + .saturating_add(Weight::from_parts(106_371, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -543,14 +535,14 @@ impl WeightInfo for () { /// The range of component `x` is `[0, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `509 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11649` - // Minimum execution time: 21_567 nanoseconds. 
- Weight::from_parts(21_015_310, 11649) - // Standard Error: 2_516 - .saturating_add(Weight::from_parts(123_992, 0).saturating_mul(r.into())) - // Standard Error: 465 - .saturating_add(Weight::from_parts(552_116, 0).saturating_mul(x.into())) + // Measured: `445 + r * (57 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 25_132_000 picoseconds. + Weight::from_parts(20_582_313, 11003) + // Standard Error: 10_427 + .saturating_add(Weight::from_parts(277_545, 0).saturating_mul(r.into())) + // Standard Error: 1_929 + .saturating_add(Weight::from_parts(747_966, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -567,16 +559,16 @@ impl WeightInfo for () { /// The range of component `x` is `[0, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `772 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `18349` - // Minimum execution time: 52_881 nanoseconds. - Weight::from_parts(38_504_388, 18349) - // Standard Error: 3_909 - .saturating_add(Weight::from_parts(51_452, 0).saturating_mul(r.into())) - // Standard Error: 763 - .saturating_add(Weight::from_parts(1_069_924, 0).saturating_mul(s.into())) - // Standard Error: 763 - .saturating_add(Weight::from_parts(164_906, 0).saturating_mul(x.into())) + // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` + // Estimated: `11003` + // Minimum execution time: 72_043_000 picoseconds. + Weight::from_parts(50_994_735, 11003) + // Standard Error: 9_304 + .saturating_add(Weight::from_parts(123_052, 0).saturating_mul(r.into())) + // Standard Error: 1_817 + .saturating_add(Weight::from_parts(1_256_713, 0).saturating_mul(s.into())) + // Standard Error: 1_817 + .saturating_add(Weight::from_parts(219_242, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -590,12 +582,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `507 + s * (36 ±0)` - // Estimated: `18335` - // Minimum execution time: 24_556 nanoseconds. - Weight::from_parts(28_641_160, 18335) - // Standard Error: 1_327 - .saturating_add(Weight::from_parts(66_150, 0).saturating_mul(s.into())) + // Measured: `475 + s * (36 ±0)` + // Estimated: `11003` + // Minimum execution time: 30_747_000 picoseconds. + Weight::from_parts(35_975_985, 11003) + // Standard Error: 1_625 + .saturating_add(Weight::from_parts(73_263, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -606,12 +598,12 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `623 + s * (3 ±0)` - // Estimated: `12602` - // Minimum execution time: 11_347 nanoseconds. - Weight::from_parts(13_299_367, 12602) - // Standard Error: 525 - .saturating_add(Weight::from_parts(16_472, 0).saturating_mul(s.into())) + // Measured: `591 + s * (3 ±0)` + // Estimated: `11003` + // Minimum execution time: 13_586_000 picoseconds. 
+ Weight::from_parts(15_909_245, 11003) + // Standard Error: 611 + .saturating_add(Weight::from_parts(16_949, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -624,12 +616,12 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `702 + s * (35 ±0)` - // Estimated: `18335` - // Minimum execution time: 27_810 nanoseconds. - Weight::from_parts(30_347_763, 18335) - // Standard Error: 928 - .saturating_add(Weight::from_parts(55_342, 0).saturating_mul(s.into())) + // Measured: `638 + s * (35 ±0)` + // Estimated: `11003` + // Minimum execution time: 34_286_000 picoseconds. + Weight::from_parts(37_391_401, 11003) + // Standard Error: 1_099 + .saturating_add(Weight::from_parts(61_165, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -640,12 +632,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `628 + s * (37 ±0)` - // Estimated: `8322` - // Minimum execution time: 17_601 nanoseconds. - Weight::from_parts(19_794_971, 8322) - // Standard Error: 934 - .saturating_add(Weight::from_parts(59_289, 0).saturating_mul(s.into())) + // Measured: `564 + s * (37 ±0)` + // Estimated: `6723` + // Minimum execution time: 22_197_000 picoseconds. + Weight::from_parts(24_630_311, 6723) + // Standard Error: 1_092 + .saturating_add(Weight::from_parts(63_415, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index d76fba4fb6495..881139ad5aa84 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index 675feb075473f..64c1eb5f3a9b0 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_im_online //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -68,18 +68,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `e` is `[1, 100]`. 
fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + k * (32 ±0)` - // Estimated: `10345712 + e * (25 ±0) + k * (64 ±0)` - // Minimum execution time: 91_116 nanoseconds. - Weight::from_parts(72_526_877, 10345712) - // Standard Error: 95 - .saturating_add(Weight::from_parts(20_461, 0).saturating_mul(k.into())) - // Standard Error: 967 - .saturating_add(Weight::from_parts(307_869, 0).saturating_mul(e.into())) + // Measured: `295 + k * (32 ±0)` + // Estimated: `10024497 + e * (35 ±0) + k * (32 ±0)` + // Minimum execution time: 95_573_000 picoseconds. + Weight::from_parts(78_856_572, 10024497) + // Standard Error: 315 + .saturating_add(Weight::from_parts(22_926, 0).saturating_mul(k.into())) + // Standard Error: 3_181 + .saturating_add(Weight::from_parts(362_093, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 25).saturating_mul(e.into())) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(k.into())) + .saturating_add(Weight::from_parts(0, 35).saturating_mul(e.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(k.into())) } } @@ -99,17 +99,17 @@ impl WeightInfo for () { /// The range of component `e` is `[1, 100]`. fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + k * (32 ±0)` - // Estimated: `10345712 + e * (25 ±0) + k * (64 ±0)` - // Minimum execution time: 91_116 nanoseconds. - Weight::from_parts(72_526_877, 10345712) - // Standard Error: 95 - .saturating_add(Weight::from_parts(20_461, 0).saturating_mul(k.into())) - // Standard Error: 967 - .saturating_add(Weight::from_parts(307_869, 0).saturating_mul(e.into())) + // Measured: `295 + k * (32 ±0)` + // Estimated: `10024497 + e * (35 ±0) + k * (32 ±0)` + // Minimum execution time: 95_573_000 picoseconds. 
+ Weight::from_parts(78_856_572, 10024497) + // Standard Error: 315 + .saturating_add(Weight::from_parts(22_926, 0).saturating_mul(k.into())) + // Standard Error: 3_181 + .saturating_add(Weight::from_parts(362_093, 0).saturating_mul(e.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 25).saturating_mul(e.into())) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(k.into())) + .saturating_add(Weight::from_parts(0, 35).saturating_mul(e.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(k.into())) } } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index b667916ebcb49..6895f076da22d 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 6cf1f9d9740c9..8bd05d04ab4e1 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -76,6 +76,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl Config for Test { diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index fcb71177a5951..21d01c14ef9a2 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_indices //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -63,9 +63,9 @@ impl WeightInfo for SubstrateWeight { fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `2544` - // Minimum execution time: 19_738 nanoseconds. - Weight::from_parts(20_029_000, 2544) + // Estimated: `3534` + // Minimum execution time: 27_250_000 picoseconds. + Weight::from_parts(27_829_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,10 +75,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `5147` - // Minimum execution time: 24_494 nanoseconds. - Weight::from_parts(24_794_000, 5147) + // Measured: `275` + // Estimated: `3593` + // Minimum execution time: 37_880_000 picoseconds. 
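The frame/indices/src/mock.rs hunk above (and the matching lottery mock.rs hunk later in this diff) adds the new holds/freezes associated types to `pallet_balances::Config`. A sketch of the resulting shape for a test runtime, with the pre-existing items elided and the usual mock boilerplate (`Test`, `System`, `ConstU64`, `RuntimeEvent`) assumed to be in scope:

impl pallet_balances::Config for Test {
    // ...pre-existing associated types (Balance, MaxLocks, ReserveIdentifier, ...) unchanged...
    type ExistentialDeposit = ConstU64<1>;
    type AccountStore = System;
    type WeightInfo = ();
    // New in this release; `()` leaves freezes and holds effectively unused in tests.
    type FreezeIdentifier = ();
    type MaxFreezes = ();
    type HoldIdentifier = ();
    type MaxHolds = ();
}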
+ Weight::from_parts(38_329_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -86,10 +86,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn free() -> Weight { // Proof Size summary in bytes: - // Measured: `204` - // Estimated: `2544` - // Minimum execution time: 20_041 nanoseconds. - Weight::from_parts(20_473_000, 2544) + // Measured: `172` + // Estimated: `3534` + // Minimum execution time: 27_455_000 picoseconds. + Weight::from_parts(27_788_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -99,10 +99,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `5147` - // Minimum execution time: 21_874 nanoseconds. - Weight::from_parts(22_438_000, 5147) + // Measured: `275` + // Estimated: `3593` + // Minimum execution time: 29_865_000 picoseconds. + Weight::from_parts(30_420_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -110,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `204` - // Estimated: `2544` - // Minimum execution time: 22_407 nanoseconds. - Weight::from_parts(22_768_000, 2544) + // Measured: `172` + // Estimated: `3534` + // Minimum execution time: 29_689_000 picoseconds. + Weight::from_parts(30_443_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -126,9 +126,9 @@ impl WeightInfo for () { fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `2544` - // Minimum execution time: 19_738 nanoseconds. - Weight::from_parts(20_029_000, 2544) + // Estimated: `3534` + // Minimum execution time: 27_250_000 picoseconds. + Weight::from_parts(27_829_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -138,10 +138,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `5147` - // Minimum execution time: 24_494 nanoseconds. - Weight::from_parts(24_794_000, 5147) + // Measured: `275` + // Estimated: `3593` + // Minimum execution time: 37_880_000 picoseconds. + Weight::from_parts(38_329_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for () { /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn free() -> Weight { // Proof Size summary in bytes: - // Measured: `204` - // Estimated: `2544` - // Minimum execution time: 20_041 nanoseconds. - Weight::from_parts(20_473_000, 2544) + // Measured: `172` + // Estimated: `3534` + // Minimum execution time: 27_455_000 picoseconds. 
+ Weight::from_parts(27_788_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -162,10 +162,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `5147` - // Minimum execution time: 21_874 nanoseconds. - Weight::from_parts(22_438_000, 5147) + // Measured: `275` + // Estimated: `3593` + // Minimum execution time: 29_865_000 picoseconds. + Weight::from_parts(30_420_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -173,10 +173,10 @@ impl WeightInfo for () { /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `204` - // Estimated: `2544` - // Minimum execution time: 22_407 nanoseconds. - Weight::from_parts(22_768_000, 2544) + // Measured: `172` + // Estimated: `3534` + // Minimum execution time: 29_689_000 picoseconds. + Weight::from_parts(30_443_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/insecure-randomness-collective-flip/Cargo.toml b/frame/insecure-randomness-collective-flip/Cargo.toml index 68ccfadfc8edf..bcebff6a9663f 100644 --- a/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/frame/insecure-randomness-collective-flip/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml index 8c1bf30106852..4d8839612b2ef 100644 --- a/frame/lottery/Cargo.toml +++ b/frame/lottery/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs index 9216464b07d35..1510d250dbeaf 100644 --- a/frame/lottery/src/benchmarking.rs +++ b/frame/lottery/src/benchmarking.rs @@ -21,7 +21,12 @@ use super::*; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use crate::Pallet as Lottery; +use frame_benchmarking::{ + impl_benchmark_test_suite, + v1::{account, whitelisted_caller, BenchmarkError}, + v2::*, +}; use 
frame_support::{ storage::bounded_vec::BoundedVec, traits::{EnsureOrigin, OnInitialize}, @@ -29,8 +34,6 @@ use frame_support::{ use frame_system::RawOrigin; use sp_runtime::traits::{Bounded, Zero}; -use crate::Pallet as Lottery; - // Set up and start a lottery fn setup_lottery(repeat: bool) -> Result<(), &'static str> { let price = T::Currency::minimum_balance(); @@ -50,72 +53,100 @@ fn setup_lottery(repeat: bool) -> Result<(), &'static str> { Ok(()) } -benchmarks! { - buy_ticket { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn buy_ticket() -> Result<(), BenchmarkError> { let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); setup_lottery::(false)?; // force user to have a long vec of calls participating let set_code_index: CallIndex = Lottery::::call_to_index( - &frame_system::Call::::set_code{ code: vec![] }.into() + &frame_system::Call::::set_code { code: vec![] }.into(), )?; let already_called: (u32, BoundedVec) = ( LotteryIndex::::get(), BoundedVec::::try_from(vec![ set_code_index; - T::MaxCalls::get().saturating_sub(1) as usize - ]).unwrap(), + T::MaxCalls::get().saturating_sub(1) + as usize + ]) + .unwrap(), ); Participants::::insert(&caller, already_called); let call = frame_system::Call::::remark { remark: vec![] }; - }: _(RawOrigin::Signed(caller), Box::new(call.into())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), Box::new(call.into())); + assert_eq!(TicketsCount::::get(), 1); + + Ok(()) } - set_calls { - let n in 0 .. T::MaxCalls::get() as u32; + #[benchmark] + fn set_calls(n: Linear<0, { T::MaxCalls::get() }>) -> Result<(), BenchmarkError> { let calls = vec![frame_system::Call::::remark { remark: vec![] }.into(); n as usize]; let origin = T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; assert!(CallIndices::::get().is_empty()); - }: _(origin, calls) - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, calls); + if !n.is_zero() { assert!(!CallIndices::::get().is_empty()); } + + Ok(()) } - start_lottery { + #[benchmark] + fn start_lottery() -> Result<(), BenchmarkError> { let price = BalanceOf::::max_value(); let end = 10u32.into(); let payout = 5u32.into(); let origin = T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _(origin, price, end, payout, true) - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, price, end, payout, true); + assert!(crate::Lottery::::get().is_some()); + + Ok(()) } - stop_repeat { + #[benchmark] + fn stop_repeat() -> Result<(), BenchmarkError> { setup_lottery::(true)?; assert_eq!(crate::Lottery::::get().unwrap().repeat, true); let origin = T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _(origin) - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin); + assert_eq!(crate::Lottery::::get().unwrap().repeat, false); + + Ok(()) } - on_initialize_end { + #[benchmark] + fn on_initialize_end() -> Result<(), BenchmarkError> { setup_lottery::(false)?; let winner = account("winner", 0, 0); // User needs more than min balance to get ticket T::Currency::make_free_balance_be(&winner, T::Currency::minimum_balance() * 10u32.into()); // Make sure lottery account has at least min balance too let lottery_account = Lottery::::account_id(); - T::Currency::make_free_balance_be(&lottery_account, T::Currency::minimum_balance() * 10u32.into()); + T::Currency::make_free_balance_be( + &lottery_account, + T::Currency::minimum_balance() * 
10u32.into(), + ); // Buy a ticket let call = frame_system::Call::::remark { remark: vec![] }; Lottery::::buy_ticket(RawOrigin::Signed(winner.clone()).into(), Box::new(call.into()))?; @@ -124,29 +155,37 @@ benchmarks! { // Assert that lotto is set up for winner assert_eq!(TicketsCount::::get(), 1); assert!(!Lottery::::pot().1.is_zero()); - }: { - // Generate `MaxGenerateRandom` numbers for worst case scenario - for i in 0 .. T::MaxGenerateRandom::get() { - Lottery::::generate_random_number(i); + + #[block] + { + // Generate `MaxGenerateRandom` numbers for worst case scenario + for i in 0..T::MaxGenerateRandom::get() { + Lottery::::generate_random_number(i); + } + // Start lottery has block 15 configured for payout + Lottery::::on_initialize(15u32.into()); } - // Start lottery has block 15 configured for payout - Lottery::::on_initialize(15u32.into()); - } - verify { + assert!(crate::Lottery::::get().is_none()); assert_eq!(TicketsCount::::get(), 0); assert_eq!(Lottery::::pot().1, 0u32.into()); - assert!(!T::Currency::free_balance(&winner).is_zero()) + assert!(!T::Currency::free_balance(&winner).is_zero()); + + Ok(()) } - on_initialize_repeat { + #[benchmark] + fn on_initialize_repeat() -> Result<(), BenchmarkError> { setup_lottery::(true)?; let winner = account("winner", 0, 0); // User needs more than min balance to get ticket T::Currency::make_free_balance_be(&winner, T::Currency::minimum_balance() * 10u32.into()); // Make sure lottery account has at least min balance too let lottery_account = Lottery::::account_id(); - T::Currency::make_free_balance_be(&lottery_account, T::Currency::minimum_balance() * 10u32.into()); + T::Currency::make_free_balance_be( + &lottery_account, + T::Currency::minimum_balance() * 10u32.into(), + ); // Buy a ticket let call = frame_system::Call::::remark { remark: vec![] }; Lottery::::buy_ticket(RawOrigin::Signed(winner.clone()).into(), Box::new(call.into()))?; @@ -155,20 +194,24 @@ benchmarks! { // Assert that lotto is set up for winner assert_eq!(TicketsCount::::get(), 1); assert!(!Lottery::::pot().1.is_zero()); - }: { - // Generate `MaxGenerateRandom` numbers for worst case scenario - for i in 0 .. T::MaxGenerateRandom::get() { - Lottery::::generate_random_number(i); + + #[block] + { + // Generate `MaxGenerateRandom` numbers for worst case scenario + for i in 0..T::MaxGenerateRandom::get() { + Lottery::::generate_random_number(i); + } + // Start lottery has block 15 configured for payout + Lottery::::on_initialize(15u32.into()); } - // Start lottery has block 15 configured for payout - Lottery::::on_initialize(15u32.into()); - } - verify { + assert!(crate::Lottery::::get().is_some()); assert_eq!(LotteryIndex::::get(), 2); assert_eq!(TicketsCount::::get(), 0); assert_eq!(Lottery::::pot().1, 0u32.into()); - assert!(!T::Currency::free_balance(&winner).is_zero()) + assert!(!T::Currency::free_balance(&winner).is_zero()); + + Ok(()) } impl_benchmark_test_suite!(Lottery, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index 9b1a933d0a4ac..7afd0e319db34 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -89,6 +89,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! 
{ diff --git a/frame/lottery/src/tests.rs b/frame/lottery/src/tests.rs index 09386c7e348d8..ae3a6c858f242 100644 --- a/frame/lottery/src/tests.rs +++ b/frame/lottery/src/tests.rs @@ -23,8 +23,7 @@ use mock::{ new_test_ext, run_to_block, Balances, BalancesCall, Lottery, RuntimeCall, RuntimeOrigin, SystemCall, Test, }; -use pallet_balances::Error as BalancesError; -use sp_runtime::traits::BadOrigin; +use sp_runtime::{traits::BadOrigin, TokenError}; #[test] fn initial_state() { @@ -45,7 +44,7 @@ fn basic_end_to_end_works() { let delay = 5; let calls = vec![ RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 }), ]; // Set calls for the lottery @@ -56,7 +55,10 @@ fn basic_end_to_end_works() { assert!(crate::Lottery::::get().is_some()); assert_eq!(Balances::free_balance(&1), 100); - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 2, + value: 20, + })); assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); // 20 from the transfer, 10 from buying a ticket assert_eq!(Balances::free_balance(&1), 100 - 20 - 10); @@ -129,7 +131,7 @@ fn set_calls_works() { let calls = vec![ RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 }), ]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); @@ -137,7 +139,7 @@ fn set_calls_works() { let too_many_calls = vec![ RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 }), RuntimeCall::System(SystemCall::remark { remark: vec![] }), ]; @@ -157,7 +159,7 @@ fn call_to_indices_works() { new_test_ext().execute_with(|| { let calls = vec![ RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 }), ]; let indices = Lottery::calls_to_indices(&calls).unwrap().into_inner(); // Only comparing the length since it is otherwise dependant on the API @@ -166,7 +168,7 @@ fn call_to_indices_works() { let too_many_calls = vec![ RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 }), RuntimeCall::System(SystemCall::remark { remark: vec![] }), ]; assert_noop!(Lottery::calls_to_indices(&too_many_calls), Error::::TooManyCalls); @@ -203,7 +205,10 @@ fn buy_ticket_works_as_simple_passthrough() { // as a simple passthrough to the real call. 
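The frame/lottery/src/benchmarking.rs rewrite shown above moves from the `benchmarks!` macro to the attribute-style v2 syntax. A minimal sketch of the v2 shape for a hypothetical pallet with a `do_something(n)` extrinsic and a `Something` storage value (not the lottery code itself), mirroring the imports used in the diff:

use frame_benchmarking::{
    impl_benchmark_test_suite,
    v1::{whitelisted_caller, BenchmarkError},
    v2::*,
};
use frame_system::RawOrigin;

#[benchmarks]
mod benches {
    use super::*;

    #[benchmark]
    fn do_something(n: Linear<0, 100>) -> Result<(), BenchmarkError> {
        let caller: T::AccountId = whitelisted_caller();

        // The call under measurement; `_` stands for an extrinsic with the same name
        // as the benchmark function.
        #[extrinsic_call]
        _(RawOrigin::Signed(caller), n);

        // Post-conditions replace the old `verify` block.
        assert!(Something::<T>::get().is_some());
        Ok(())
    }

    impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}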
new_test_ext().execute_with(|| { // No lottery set up - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 2, + value: 20, + })); // This is just a basic transfer then assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); assert_eq!(Balances::free_balance(&1), 100 - 20); @@ -212,7 +217,7 @@ fn buy_ticket_works_as_simple_passthrough() { // Lottery is set up, but too expensive to enter, so `do_buy_ticket` fails. let calls = vec![ RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 }), ]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); @@ -223,11 +228,13 @@ fn buy_ticket_works_as_simple_passthrough() { assert_eq!(TicketsCount::::get(), 0); // If call would fail, the whole thing still fails the same - let fail_call = - Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1000 })); + let fail_call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 2, + value: 1000, + })); assert_noop!( Lottery::buy_ticket(RuntimeOrigin::signed(1), fail_call), - BalancesError::::InsufficientBalance, + ArithmeticError::Underflow, ); let bad_origin_call = Box::new(RuntimeCall::Balances(BalancesCall::force_transfer { @@ -243,8 +250,10 @@ fn buy_ticket_works_as_simple_passthrough() { assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), remark_call)); assert_eq!(TicketsCount::::get(), 0); - let successful_call = - Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + let successful_call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 2, + value: 1, + })); assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), successful_call)); assert_eq!(TicketsCount::::get(), 1); }); @@ -256,12 +265,15 @@ fn buy_ticket_works() { // Set calls for the lottery. 
let calls = vec![ RuntimeCall::System(SystemCall::remark { remark: vec![] }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 }), ]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); // Can't buy ticket before start - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 2, + value: 1, + })); assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); assert_eq!(TicketsCount::::get(), 0); @@ -274,7 +286,10 @@ fn buy_ticket_works() { assert_eq!(TicketsCount::::get(), 1); // Can't buy another of the same ticket (even if call is slightly changed) - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 3, value: 30 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 3, + value: 30, + })); assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call)); assert_eq!(TicketsCount::::get(), 1); @@ -302,7 +317,8 @@ fn buy_ticket_works() { #[test] fn do_buy_ticket_already_participating() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = + vec![RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 10, 10, false)); @@ -317,7 +333,8 @@ fn do_buy_ticket_already_participating() { #[test] fn buy_ticket_already_participating() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = + vec![RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 10, 10, false)); @@ -337,7 +354,8 @@ fn buy_ticket_already_participating() { #[test] fn buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = + vec![RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); // Price set to 100. assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 100, 10, 10, false)); @@ -352,16 +370,14 @@ fn buy_ticket_insufficient_balance() { #[test] fn do_buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = + vec![RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); // Price set to 101. assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 101, 10, 10, false)); // Buying fails with InsufficientBalance. 
- assert_noop!( - Lottery::do_buy_ticket(&1, &calls[0]), - BalancesError::::InsufficientBalance - ); + assert_noop!(Lottery::do_buy_ticket(&1, &calls[0]), TokenError::FundsUnavailable,); assert!(TicketsCount::::get().is_zero()); }); } @@ -369,13 +385,13 @@ fn do_buy_ticket_insufficient_balance() { #[test] fn do_buy_ticket_keep_alive() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = + vec![RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); // Price set to 100. assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 100, 10, 10, false)); - // Buying fails with KeepAlive. - assert_noop!(Lottery::do_buy_ticket(&1, &calls[0]), BalancesError::::KeepAlive); + assert_noop!(Lottery::do_buy_ticket(&1, &calls[0]), TokenError::NotExpendable); assert!(TicketsCount::::get().is_zero()); }); } @@ -421,7 +437,8 @@ fn choose_ticket_trivial_cases() { #[test] fn choose_account_one_participant() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = + vec![RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 10, 10, 10, false)); let call = Box::new(calls[0].clone()); diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index 0038db6210d2b..c21b09e7d5b92 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_lottery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -75,10 +75,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `7181` - // Minimum execution time: 62_125 nanoseconds. - Weight::from_parts(63_145_000, 7181) + // Measured: `452` + // Estimated: `3593` + // Minimum execution time: 61_502_000 picoseconds. + Weight::from_parts(62_578_000, 3593) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -89,10 +89,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_650 nanoseconds. - Weight::from_parts(8_344_960, 0) - // Standard Error: 2_629 - .saturating_add(Weight::from_parts(268_557, 0).saturating_mul(n.into())) + // Minimum execution time: 8_282_000 picoseconds. 
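The frame/lottery/src/tests.rs hunks above track two balances API changes: `BalancesCall::transfer` is now `transfer_allow_death`, and failures surface as the generic `sp_runtime` token/arithmetic errors used by the fungible traits rather than pallet-specific `BalancesError` variants. Roughly, in the test context (assuming the `Test` mock runtime):

// Before:
let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 });
assert_noop!(Lottery::do_buy_ticket(&1, &calls[0]), BalancesError::<Test>::KeepAlive);

// After:
let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 20 });
assert_noop!(Lottery::do_buy_ticket(&1, &calls[0]), TokenError::NotExpendable);
// Some insufficient-balance paths now report ArithmeticError::Underflow instead.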
+ Weight::from_parts(9_271_031, 0) + // Standard Error: 3_756 + .saturating_add(Weight::from_parts(349_990, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Lottery Lottery (r:1 w:1) @@ -104,9 +104,9 @@ impl WeightInfo for SubstrateWeight { fn start_lottery() -> Weight { // Proof Size summary in bytes: // Measured: `161` - // Estimated: `3626` - // Minimum execution time: 31_324 nanoseconds. - Weight::from_parts(31_985_000, 3626) + // Estimated: `3593` + // Minimum execution time: 38_975_000 picoseconds. + Weight::from_parts(39_552_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -115,9 +115,9 @@ impl WeightInfo for SubstrateWeight { fn stop_repeat() -> Weight { // Proof Size summary in bytes: // Measured: `219` - // Estimated: `524` - // Minimum execution time: 7_124 nanoseconds. - Weight::from_parts(7_285_000, 524) + // Estimated: `1514` + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_359_000, 1514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -133,10 +133,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `556` - // Estimated: `11837` - // Minimum execution time: 50_745 nanoseconds. - Weight::from_parts(52_232_000, 11837) + // Measured: `524` + // Estimated: `6196` + // Minimum execution time: 76_062_000 picoseconds. + Weight::from_parts(76_547_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -154,10 +154,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `556` - // Estimated: `12336` - // Minimum execution time: 52_437 nanoseconds. - Weight::from_parts(53_063_000, 12336) + // Measured: `524` + // Estimated: `6196` + // Minimum execution time: 78_089_000 picoseconds. + Weight::from_parts(78_632_000, 6196) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -181,10 +181,10 @@ impl WeightInfo for () { /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `7181` - // Minimum execution time: 62_125 nanoseconds. - Weight::from_parts(63_145_000, 7181) + // Measured: `452` + // Estimated: `3593` + // Minimum execution time: 61_502_000 picoseconds. + Weight::from_parts(62_578_000, 3593) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -195,10 +195,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_650 nanoseconds. - Weight::from_parts(8_344_960, 0) - // Standard Error: 2_629 - .saturating_add(Weight::from_parts(268_557, 0).saturating_mul(n.into())) + // Minimum execution time: 8_282_000 picoseconds. 
+ Weight::from_parts(9_271_031, 0) + // Standard Error: 3_756 + .saturating_add(Weight::from_parts(349_990, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Lottery Lottery (r:1 w:1) @@ -210,9 +210,9 @@ impl WeightInfo for () { fn start_lottery() -> Weight { // Proof Size summary in bytes: // Measured: `161` - // Estimated: `3626` - // Minimum execution time: 31_324 nanoseconds. - Weight::from_parts(31_985_000, 3626) + // Estimated: `3593` + // Minimum execution time: 38_975_000 picoseconds. + Weight::from_parts(39_552_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -221,9 +221,9 @@ impl WeightInfo for () { fn stop_repeat() -> Weight { // Proof Size summary in bytes: // Measured: `219` - // Estimated: `524` - // Minimum execution time: 7_124 nanoseconds. - Weight::from_parts(7_285_000, 524) + // Estimated: `1514` + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_359_000, 1514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -239,10 +239,10 @@ impl WeightInfo for () { /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `556` - // Estimated: `11837` - // Minimum execution time: 50_745 nanoseconds. - Weight::from_parts(52_232_000, 11837) + // Measured: `524` + // Estimated: `6196` + // Minimum execution time: 76_062_000 picoseconds. + Weight::from_parts(76_547_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -260,10 +260,10 @@ impl WeightInfo for () { /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `556` - // Estimated: `12336` - // Minimum execution time: 52_437 nanoseconds. - Weight::from_parts(53_063_000, 12336) + // Measured: `524` + // Estimated: `6196` + // Minimum execution time: 78_089_000 picoseconds. 
+ Weight::from_parts(78_632_000, 6196) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/frame/mangata-support/src/traits.rs b/frame/mangata-support/src/traits.rs index 8dfc6b972f506..2907b6881a728 100644 --- a/frame/mangata-support/src/traits.rs +++ b/frame/mangata-support/src/traits.rs @@ -5,7 +5,6 @@ use mangata_types::{ multipurpose_liquidity::{ActivateKind, BondKind}, Balance, TokenId, }; -use sp_core::U256; use sp_runtime::{ traits::{AtLeast32BitUnsigned, MaybeDisplay}, Permill, diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index bba5700cdd848..330d9401df2c7 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 0be33a95613fe..e703b88f3657e 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -168,7 +168,7 @@ pub mod pallet { /// /// May only be called from `T::AddOrigin`. #[pallet::call_index(0)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; @@ -191,7 +191,7 @@ pub mod pallet { /// /// May only be called from `T::RemoveOrigin`. #[pallet::call_index(1)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn remove_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; @@ -215,7 +215,7 @@ pub mod pallet { /// /// Prime membership is *not* passed from `remove` to `add`, if extant. #[pallet::call_index(2)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn swap_member( origin: OriginFor, remove: AccountIdLookupOf, @@ -249,7 +249,7 @@ pub mod pallet { /// /// May only be called from `T::ResetOrigin`. #[pallet::call_index(3)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn reset_members(origin: OriginFor, members: Vec) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; @@ -272,7 +272,7 @@ pub mod pallet { /// /// Prime membership is passed from the origin account to `new`, if extant. #[pallet::call_index(4)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn change_key(origin: OriginFor, new: AccountIdLookupOf) -> DispatchResult { let remove = ensure_signed(origin)?; let new = T::Lookup::lookup(new)?; @@ -307,7 +307,7 @@ pub mod pallet { /// /// May only be called from `T::PrimeOrigin`. 
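// The constant weights in this pallet are now wrapped in braces, `{50_000_000}`, so the
// attribute argument is an explicit expression block rather than a bare integer literal,
// presumably to keep these inline constants valid as the pallet macro tightens how weight
// expressions are parsed. A sketch of the pattern on a hypothetical call (name and call
// index are illustrative only, not part of this pallet):
#[pallet::call_index(7)]
#[pallet::weight({50_000_000})] // fixed weight, written as a block expression
pub fn touch(origin: OriginFor<T>) -> DispatchResult {
	let _ = ensure_signed(origin)?;
	Ok(())
}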
#[pallet::call_index(5)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn set_prime(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; @@ -321,7 +321,7 @@ pub mod pallet { /// /// May only be called from `T::PrimeOrigin`. #[pallet::call_index(6)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn clear_prime(origin: OriginFor) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; Prime::::kill(); diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs index f080f842c54b4..70c50d8695dfc 100644 --- a/frame/membership/src/weights.rs +++ b/frame/membership/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_membership //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -71,15 +71,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `238 + m * (64 ±0)` - // Estimated: `4903 + m * (192 ±0)` - // Minimum execution time: 15_673 nanoseconds. - Weight::from_parts(16_830_288, 4903) - // Standard Error: 570 - .saturating_add(Weight::from_parts(41_959, 0).saturating_mul(m.into())) + // Measured: `174 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 18_264_000 picoseconds. + Weight::from_parts(19_343_697, 4687) + // Standard Error: 699 + .saturating_add(Weight::from_parts(44_401, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -94,15 +94,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 18_231 nanoseconds. - Weight::from_parts(19_081_297, 5742) - // Standard Error: 571 - .saturating_add(Weight::from_parts(41_331, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 21_092_000 picoseconds. 
+ Weight::from_parts(22_140_391, 4687) + // Standard Error: 545 + .saturating_add(Weight::from_parts(40_638, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -117,15 +117,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 18_517 nanoseconds. - Weight::from_parts(19_388_310, 5742) - // Standard Error: 625 - .saturating_add(Weight::from_parts(51_422, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 21_002_000 picoseconds. + Weight::from_parts(22_364_944, 4687) + // Standard Error: 752 + .saturating_add(Weight::from_parts(54_363, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -140,15 +140,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 17_628 nanoseconds. - Weight::from_parts(19_258_882, 5742) - // Standard Error: 820 - .saturating_add(Weight::from_parts(153_956, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 20_443_000 picoseconds. + Weight::from_parts(22_188_301, 4687) + // Standard Error: 945 + .saturating_add(Weight::from_parts(162_799, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -163,15 +163,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 19_031 nanoseconds. - Weight::from_parts(20_264_948, 5742) - // Standard Error: 707 - .saturating_add(Weight::from_parts(51_060, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 21_527_000 picoseconds. 
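// Weights with a component follow the shape `base + slope * m` (plus the DB read/write
// charges added via `T::DbWeight` below), with `m` the member count from the range comment.
// A worked instance for the figures in this function, assuming `m = 10`:
//
//     ref_time(10)   = 23_146_706 + 55_027 * 10 = 23_696_976 ps  (~23.7 µs)
//     proof_size(10) = 4_687 + 64 * 10 = 5_327 bytes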
+ Weight::from_parts(23_146_706, 4687) + // Standard Error: 724 + .saturating_add(Weight::from_parts(55_027, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:0) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -182,12 +182,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `64 + m * (32 ±0)` - // Estimated: `3761 + m * (32 ±0)` - // Minimum execution time: 6_897 nanoseconds. - Weight::from_parts(7_455_387, 3761) - // Standard Error: 326 - .saturating_add(Weight::from_parts(16_653, 0).saturating_mul(m.into())) + // Measured: `32 + m * (32 ±0)` + // Estimated: `4687 + m * (32 ±0)` + // Minimum execution time: 8_054_000 picoseconds. + Weight::from_parts(8_558_341, 4687) + // Standard Error: 360 + .saturating_add(Weight::from_parts(16_362, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -197,14 +197,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. - fn clear_prime(m: u32, ) -> Weight { + fn clear_prime(_m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_400 nanoseconds. - Weight::from_parts(3_703_421, 0) - // Standard Error: 119 - .saturating_add(Weight::from_parts(915, 0).saturating_mul(m.into())) + // Minimum execution time: 3_784_000 picoseconds. + Weight::from_parts(4_100_031, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -222,15 +220,15 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `238 + m * (64 ±0)` - // Estimated: `4903 + m * (192 ±0)` - // Minimum execution time: 15_673 nanoseconds. - Weight::from_parts(16_830_288, 4903) - // Standard Error: 570 - .saturating_add(Weight::from_parts(41_959, 0).saturating_mul(m.into())) + // Measured: `174 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 18_264_000 picoseconds. + Weight::from_parts(19_343_697, 4687) + // Standard Error: 699 + .saturating_add(Weight::from_parts(44_401, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -245,15 +243,15 @@ impl WeightInfo for () { /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 18_231 nanoseconds. 
- Weight::from_parts(19_081_297, 5742) - // Standard Error: 571 - .saturating_add(Weight::from_parts(41_331, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 21_092_000 picoseconds. + Weight::from_parts(22_140_391, 4687) + // Standard Error: 545 + .saturating_add(Weight::from_parts(40_638, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -268,15 +266,15 @@ impl WeightInfo for () { /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 18_517 nanoseconds. - Weight::from_parts(19_388_310, 5742) - // Standard Error: 625 - .saturating_add(Weight::from_parts(51_422, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 21_002_000 picoseconds. + Weight::from_parts(22_364_944, 4687) + // Standard Error: 752 + .saturating_add(Weight::from_parts(54_363, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -291,15 +289,15 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 17_628 nanoseconds. - Weight::from_parts(19_258_882, 5742) - // Standard Error: 820 - .saturating_add(Weight::from_parts(153_956, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 20_443_000 picoseconds. + Weight::from_parts(22_188_301, 4687) + // Standard Error: 945 + .saturating_add(Weight::from_parts(162_799, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:1) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -314,15 +312,15 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342 + m * (64 ±0)` - // Estimated: `5742 + m * (192 ±0)` - // Minimum execution time: 19_031 nanoseconds. 
- Weight::from_parts(20_264_948, 5742) - // Standard Error: 707 - .saturating_add(Weight::from_parts(51_060, 0).saturating_mul(m.into())) + // Measured: `278 + m * (64 ±0)` + // Estimated: `4687 + m * (64 ±0)` + // Minimum execution time: 21_527_000 picoseconds. + Weight::from_parts(23_146_706, 4687) + // Standard Error: 724 + .saturating_add(Weight::from_parts(55_027, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } /// Storage: TechnicalMembership Members (r:1 w:0) /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) @@ -333,12 +331,12 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `64 + m * (32 ±0)` - // Estimated: `3761 + m * (32 ±0)` - // Minimum execution time: 6_897 nanoseconds. - Weight::from_parts(7_455_387, 3761) - // Standard Error: 326 - .saturating_add(Weight::from_parts(16_653, 0).saturating_mul(m.into())) + // Measured: `32 + m * (32 ±0)` + // Estimated: `4687 + m * (32 ±0)` + // Minimum execution time: 8_054_000 picoseconds. + Weight::from_parts(8_558_341, 4687) + // Standard Error: 360 + .saturating_add(Weight::from_parts(16_362, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -348,14 +346,12 @@ impl WeightInfo for () { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. - fn clear_prime(m: u32, ) -> Weight { + fn clear_prime(_m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_400 nanoseconds. - Weight::from_parts(3_703_421, 0) - // Standard Error: 119 - .saturating_add(Weight::from_parts(915, 0).saturating_mul(m.into())) + // Minimum execution time: 3_784_000 picoseconds. 
+ Weight::from_parts(4_100_031, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 3f7180f79adbe..5f6094af1648e 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/message-queue/Cargo.toml b/frame/message-queue/Cargo.toml index 115200b826763..404e678137211 100644 --- a/frame/message-queue/Cargo.toml +++ b/frame/message-queue/Cargo.toml @@ -10,7 +10,7 @@ description = "FRAME pallet to queue and process messages" [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.2", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.137", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } diff --git a/frame/message-queue/src/weights.rs b/frame/message-queue/src/weights.rs index fd788f2ba4052..9dae12f518e44 100644 --- a/frame/message-queue/src/weights.rs +++ b/frame/message-queue/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_message_queue //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_message_queue // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet-message-queue -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/message-queue/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,10 +69,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `295` - // Estimated: `7527` - // Minimum execution time: 12_283_000 picoseconds. - Weight::from_parts(12_554_000, 7527) + // Measured: `233` + // Estimated: `6038` + // Minimum execution time: 12_076_000 picoseconds. 
+ Weight::from_parts(12_350_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -83,10 +82,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `295` - // Estimated: `7527` - // Minimum execution time: 11_484_000 picoseconds. - Weight::from_parts(11_900_000, 7527) + // Measured: `233` + // Estimated: `6038` + // Minimum execution time: 11_586_000 picoseconds. + Weight::from_parts(11_912_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -96,8 +95,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3514` - // Minimum execution time: 4_793_000 picoseconds. - Weight::from_parts(4_990_000, 3514) + // Minimum execution time: 4_581_000 picoseconds. + Weight::from_parts(4_715_000, 3514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,8 +106,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `113` // Estimated: `69049` - // Minimum execution time: 6_231_000 picoseconds. - Weight::from_parts(6_442_000, 69049) + // Minimum execution time: 5_826_000 picoseconds. + Weight::from_parts(5_932_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -118,8 +117,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `113` // Estimated: `69049` - // Minimum execution time: 6_660_000 picoseconds. - Weight::from_parts(6_825_000, 69049) + // Minimum execution time: 6_235_000 picoseconds. + Weight::from_parts(6_430_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -127,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 72_805_000 picoseconds. - Weight::from_parts(74_650_000, 0) + // Minimum execution time: 53_860_000 picoseconds. + Weight::from_parts(53_984_000, 0) } /// Storage: MessageQueue ServiceHead (r:1 w:1) /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -136,10 +135,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `5003` - // Minimum execution time: 7_078_000 picoseconds. - Weight::from_parts(7_230_000, 5003) + // Measured: `140` + // Estimated: `3514` + // Minimum execution time: 7_018_000 picoseconds. + Weight::from_parts(7_205_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +148,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65742` - // Estimated: `72563` - // Minimum execution time: 56_799_000 picoseconds. - Weight::from_parts(57_634_000, 72563) + // Measured: `65710` + // Estimated: `69049` + // Minimum execution time: 53_485_000 picoseconds. 
+ Weight::from_parts(54_154_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -162,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65742` - // Estimated: `72563` - // Minimum execution time: 72_290_000 picoseconds. - Weight::from_parts(72_754_000, 72563) + // Measured: `65710` + // Estimated: `69049` + // Minimum execution time: 68_830_000 picoseconds. + Weight::from_parts(69_487_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -175,10 +174,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65742` - // Estimated: `72563` - // Minimum execution time: 84_987_000 picoseconds. - Weight::from_parts(85_562_000, 72563) + // Measured: `65710` + // Estimated: `69049` + // Minimum execution time: 81_643_000 picoseconds. + Weight::from_parts(82_399_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -192,10 +191,10 @@ impl WeightInfo for () { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `295` - // Estimated: `7527` - // Minimum execution time: 12_283_000 picoseconds. - Weight::from_parts(12_554_000, 7527) + // Measured: `233` + // Estimated: `6038` + // Minimum execution time: 12_076_000 picoseconds. + Weight::from_parts(12_350_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -205,10 +204,10 @@ impl WeightInfo for () { /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `295` - // Estimated: `7527` - // Minimum execution time: 11_484_000 picoseconds. - Weight::from_parts(11_900_000, 7527) + // Measured: `233` + // Estimated: `6038` + // Minimum execution time: 11_586_000 picoseconds. + Weight::from_parts(11_912_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -218,8 +217,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3514` - // Minimum execution time: 4_793_000 picoseconds. - Weight::from_parts(4_990_000, 3514) + // Minimum execution time: 4_581_000 picoseconds. + Weight::from_parts(4_715_000, 3514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -229,8 +228,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `113` // Estimated: `69049` - // Minimum execution time: 6_231_000 picoseconds. - Weight::from_parts(6_442_000, 69049) + // Minimum execution time: 5_826_000 picoseconds. 
+ Weight::from_parts(5_932_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -240,8 +239,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `113` // Estimated: `69049` - // Minimum execution time: 6_660_000 picoseconds. - Weight::from_parts(6_825_000, 69049) + // Minimum execution time: 6_235_000 picoseconds. + Weight::from_parts(6_430_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -249,8 +248,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 72_805_000 picoseconds. - Weight::from_parts(74_650_000, 0) + // Minimum execution time: 53_860_000 picoseconds. + Weight::from_parts(53_984_000, 0) } /// Storage: MessageQueue ServiceHead (r:1 w:1) /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -258,10 +257,10 @@ impl WeightInfo for () { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `5003` - // Minimum execution time: 7_078_000 picoseconds. - Weight::from_parts(7_230_000, 5003) + // Measured: `140` + // Estimated: `3514` + // Minimum execution time: 7_018_000 picoseconds. + Weight::from_parts(7_205_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -271,10 +270,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65742` - // Estimated: `72563` - // Minimum execution time: 56_799_000 picoseconds. - Weight::from_parts(57_634_000, 72563) + // Measured: `65710` + // Estimated: `69049` + // Minimum execution time: 53_485_000 picoseconds. + Weight::from_parts(54_154_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -284,10 +283,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65742` - // Estimated: `72563` - // Minimum execution time: 72_290_000 picoseconds. - Weight::from_parts(72_754_000, 72563) + // Measured: `65710` + // Estimated: `69049` + // Minimum execution time: 68_830_000 picoseconds. + Weight::from_parts(69_487_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -297,10 +296,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65742` - // Estimated: `72563` - // Minimum execution time: 84_987_000 picoseconds. - Weight::from_parts(85_562_000, 72563) + // Measured: `65710` + // Estimated: `69049` + // Minimum execution time: 81_643_000 picoseconds. 
+ Weight::from_parts(82_399_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 5cd744124ef24..98a15c5a5aada 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index c5517195d149f..7e7f1668026a2 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -30,6 +30,7 @@ use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, + TokenError, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -84,6 +85,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } pub struct TestBaseCallFilter; @@ -107,7 +112,7 @@ impl Config for Test { type WeightInfo = (); } -use pallet_balances::{Call as BalancesCall, Error as BalancesError}; +use pallet_balances::Call as BalancesCall; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -126,16 +131,16 @@ fn now() -> Timepoint { } fn call_transfer(dest: u64, value: u64) -> Box { - Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest, value })) + Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value })) } #[test] fn multisig_deposit_is_taken_and_returned() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; @@ -196,9 +201,9 @@ fn cancel_multisig_returns_deposit() { fn timepoint_checking_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let hash = 
blake2_256(&call.encode()); @@ -254,9 +259,9 @@ fn timepoint_checking_works() { fn multisig_2_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; @@ -287,9 +292,9 @@ fn multisig_2_of_3_works() { fn multisig_3_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; @@ -357,9 +362,9 @@ fn cancel_multisig_works() { fn multisig_2_of_3_as_multi_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; @@ -389,9 +394,9 @@ fn multisig_2_of_3_as_multi_works() { fn multisig_2_of_3_as_multi_with_many_calls_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call1 = call_transfer(6, 10); let call1_weight = call1.get_dispatch_info().weight; @@ -440,9 +445,9 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { fn multisig_2_of_3_cannot_reissue_same_call() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + 
assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 10); let call_weight = call.get_dispatch_info().weight; @@ -482,14 +487,13 @@ fn multisig_2_of_3_cannot_reissue_same_call() { call_weight )); - let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); System::assert_last_event( pallet_multisig::Event::MultisigExecuted { approving: 3, timepoint: now(), multisig: multi, call_hash: hash, - result: Err(err), + result: Err(TokenError::FundsUnavailable.into()), } .into(), ); @@ -593,9 +597,9 @@ fn duplicate_approvals_are_ignored() { fn multisig_1_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 1); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let hash = blake2_256(&call.encode()); @@ -646,9 +650,9 @@ fn multisig_filters() { fn weight_check_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); assert_ok!(Multisig::as_multi( @@ -682,9 +686,9 @@ fn multisig_handles_no_preimage_after_all_approve() { // the call will go through. new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index fb155c97f2def..7fda4bec8352d 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_multisig //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -65,10 +65,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_086 nanoseconds. 
- Weight::from_parts(12_464_828, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(494, 0).saturating_mul(z.into())) + // Minimum execution time: 12_199_000 picoseconds. + Weight::from_parts(12_595_771, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(490, 0).saturating_mul(z.into())) } /// Storage: Multisig Multisigs (r:1 w:1) /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) @@ -76,14 +76,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `352 + s * (2 ±0)` - // Estimated: `5821` - // Minimum execution time: 35_377 nanoseconds. - Weight::from_parts(29_088_956, 5821) - // Standard Error: 335 - .saturating_add(Weight::from_parts(67_846, 0).saturating_mul(s.into())) + // Measured: `301 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 42_810_000 picoseconds. + Weight::from_parts(37_500_997, 6811) + // Standard Error: 308 + .saturating_add(Weight::from_parts(59_961, 0).saturating_mul(s.into())) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(1_198, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -93,14 +93,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `5821` - // Minimum execution time: 26_138 nanoseconds. - Weight::from_parts(20_479_380, 5821) - // Standard Error: 259 - .saturating_add(Weight::from_parts(64_116, 0).saturating_mul(s.into())) + // Measured: `320` + // Estimated: `6811` + // Minimum execution time: 27_775_000 picoseconds. + Weight::from_parts(22_868_524, 6811) + // Standard Error: 273 + .saturating_add(Weight::from_parts(55_219, 0).saturating_mul(s.into())) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_520, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -112,14 +112,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `489 + s * (33 ±0)` - // Estimated: `8424` - // Minimum execution time: 40_323 nanoseconds. - Weight::from_parts(32_311_615, 8424) - // Standard Error: 401 - .saturating_add(Weight::from_parts(85_999, 0).saturating_mul(s.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_534, 0).saturating_mul(z.into())) + // Measured: `426 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 48_223_000 picoseconds. + Weight::from_parts(39_193_453, 6811) + // Standard Error: 2_162 + .saturating_add(Weight::from_parts(93_763, 0).saturating_mul(s.into())) + // Standard Error: 21 + .saturating_add(Weight::from_parts(1_372, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -128,12 +128,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 100]`. 
fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + s * (2 ±0)` - // Estimated: `5821` - // Minimum execution time: 26_938 nanoseconds. - Weight::from_parts(27_802_216, 5821) - // Standard Error: 342 - .saturating_add(Weight::from_parts(69_282, 0).saturating_mul(s.into())) + // Measured: `301 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 34_775_000 picoseconds. + Weight::from_parts(35_966_626, 6811) + // Standard Error: 464 + .saturating_add(Weight::from_parts(61_492, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -142,12 +142,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `5821` - // Minimum execution time: 18_050 nanoseconds. - Weight::from_parts(19_095_404, 5821) - // Standard Error: 419 - .saturating_add(Weight::from_parts(66_914, 0).saturating_mul(s.into())) + // Measured: `320` + // Estimated: `6811` + // Minimum execution time: 19_947_000 picoseconds. + Weight::from_parts(21_253_025, 6811) + // Standard Error: 402 + .saturating_add(Weight::from_parts(58_491, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -156,12 +156,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `555 + s * (1 ±0)` - // Estimated: `5821` - // Minimum execution time: 27_508 nanoseconds. - Weight::from_parts(28_702_686, 5821) - // Standard Error: 466 - .saturating_add(Weight::from_parts(69_419, 0).saturating_mul(s.into())) + // Measured: `492 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 35_023_000 picoseconds. + Weight::from_parts(36_756_977, 6811) + // Standard Error: 547 + .saturating_add(Weight::from_parts(62_235, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -174,10 +174,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_086 nanoseconds. - Weight::from_parts(12_464_828, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(494, 0).saturating_mul(z.into())) + // Minimum execution time: 12_199_000 picoseconds. + Weight::from_parts(12_595_771, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(490, 0).saturating_mul(z.into())) } /// Storage: Multisig Multisigs (r:1 w:1) /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) @@ -185,14 +185,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `352 + s * (2 ±0)` - // Estimated: `5821` - // Minimum execution time: 35_377 nanoseconds. - Weight::from_parts(29_088_956, 5821) - // Standard Error: 335 - .saturating_add(Weight::from_parts(67_846, 0).saturating_mul(s.into())) + // Measured: `301 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 42_810_000 picoseconds. 
+ Weight::from_parts(37_500_997, 6811) + // Standard Error: 308 + .saturating_add(Weight::from_parts(59_961, 0).saturating_mul(s.into())) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(1_198, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -202,14 +202,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `5821` - // Minimum execution time: 26_138 nanoseconds. - Weight::from_parts(20_479_380, 5821) - // Standard Error: 259 - .saturating_add(Weight::from_parts(64_116, 0).saturating_mul(s.into())) + // Measured: `320` + // Estimated: `6811` + // Minimum execution time: 27_775_000 picoseconds. + Weight::from_parts(22_868_524, 6811) + // Standard Error: 273 + .saturating_add(Weight::from_parts(55_219, 0).saturating_mul(s.into())) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_520, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -221,14 +221,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `489 + s * (33 ±0)` - // Estimated: `8424` - // Minimum execution time: 40_323 nanoseconds. - Weight::from_parts(32_311_615, 8424) - // Standard Error: 401 - .saturating_add(Weight::from_parts(85_999, 0).saturating_mul(s.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_534, 0).saturating_mul(z.into())) + // Measured: `426 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 48_223_000 picoseconds. + Weight::from_parts(39_193_453, 6811) + // Standard Error: 2_162 + .saturating_add(Weight::from_parts(93_763, 0).saturating_mul(s.into())) + // Standard Error: 21 + .saturating_add(Weight::from_parts(1_372, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -237,12 +237,12 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + s * (2 ±0)` - // Estimated: `5821` - // Minimum execution time: 26_938 nanoseconds. - Weight::from_parts(27_802_216, 5821) - // Standard Error: 342 - .saturating_add(Weight::from_parts(69_282, 0).saturating_mul(s.into())) + // Measured: `301 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 34_775_000 picoseconds. + Weight::from_parts(35_966_626, 6811) + // Standard Error: 464 + .saturating_add(Weight::from_parts(61_492, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -251,12 +251,12 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `5821` - // Minimum execution time: 18_050 nanoseconds. 
- Weight::from_parts(19_095_404, 5821) - // Standard Error: 419 - .saturating_add(Weight::from_parts(66_914, 0).saturating_mul(s.into())) + // Measured: `320` + // Estimated: `6811` + // Minimum execution time: 19_947_000 picoseconds. + Weight::from_parts(21_253_025, 6811) + // Standard Error: 402 + .saturating_add(Weight::from_parts(58_491, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -265,12 +265,12 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `555 + s * (1 ±0)` - // Estimated: `5821` - // Minimum execution time: 27_508 nanoseconds. - Weight::from_parts(28_702_686, 5821) - // Standard Error: 466 - .saturating_add(Weight::from_parts(69_419, 0).saturating_mul(s.into())) + // Measured: `492 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 35_023_000 picoseconds. + Weight::from_parts(36_756_977, 6811) + // Standard Error: 547 + .saturating_add(Weight::from_parts(62_235, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml index 59aa4e091fe68..73df7518e96c0 100644 --- a/frame/nfts/Cargo.toml +++ b/frame/nfts/Cargo.toml @@ -14,9 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -enumflags2 = { version = "0.7.5" } +enumflags2 = { version = "0.7.7" } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/nfts/src/common_functions.rs b/frame/nfts/src/common_functions.rs index 1ef6591db02b4..a3486edec23cb 100644 --- a/frame/nfts/src/common_functions.rs +++ b/frame/nfts/src/common_functions.rs @@ -18,6 +18,7 @@ //! Various pieces of common functionality. use crate::*; +use frame_support::pallet_prelude::*; impl<T: Config<I>, I: 'static> Pallet<T, I> { /// Get the owner of the item, if the item exists. @@ -30,6 +31,30 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { Collection::<T, I>::get(collection).map(|i| i.owner) } + /// Validate the `data` was signed by `signer` and the `signature` is correct. + pub fn validate_signature( + data: &Vec<u8>, + signature: &T::OffchainSignature, + signer: &T::AccountId, + ) -> DispatchResult { + if signature.verify(&**data, &signer) { + return Ok(()) + } + + // NOTE: for security reasons modern UIs implicitly wrap the data requested to sign into + // <Bytes></Bytes>, that's why we support both wrapped and raw versions.
+ let prefix = b"<Bytes>"; + let suffix = b"</Bytes>"; + let mut wrapped: Vec<u8> = Vec::with_capacity(data.len() + prefix.len() + suffix.len()); + wrapped.extend(prefix); + wrapped.extend(data); + wrapped.extend(suffix); + + ensure!(signature.verify(&*wrapped, &signer), Error::<T, I>::WrongSignature); + + Ok(()) + } + #[cfg(any(test, feature = "runtime-benchmarks"))] pub fn set_next_id(id: T::CollectionId) { NextCollectionId::<T, I>::set(Some(id)); diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index f4d0e41593476..4796819df6d2c 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -50,7 +50,7 @@ use frame_support::traits::{ }; use frame_system::Config as SystemConfig; use sp_runtime::{ - traits::{Saturating, StaticLookup, Zero}, + traits::{IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, RuntimeDebug, }; use sp_std::prelude::*; @@ -69,7 +69,6 @@ pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; use frame_system::pallet_prelude::*; - use sp_runtime::traits::{IdentifyAccount, Verify}; /// The current storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -1841,8 +1840,7 @@ pub mod pallet { signer: T::AccountId, ) -> DispatchResult { let origin = ensure_signed(origin)?; - let msg = Encode::encode(&mint_data); - ensure!(signature.verify(&*msg, &signer), Error::<T, I>::WrongSignature); + Self::validate_signature(&Encode::encode(&mint_data), &signature, &signer)?; Self::do_mint_pre_signed(origin, mint_data, signer) } @@ -1868,8 +1866,7 @@ pub mod pallet { signer: T::AccountId, ) -> DispatchResult { let origin = ensure_signed(origin)?; - let msg = Encode::encode(&data); - ensure!(signature.verify(&*msg, &signer), Error::<T, I>::WrongSignature); + Self::validate_signature(&Encode::encode(&data), &signature, &signer)?; Self::do_set_attributes_pre_signed(origin, data, signer) } } diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index a9db1a62c9d67..e2856a07b994c 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -25,13 +25,12 @@ use frame_support::{ traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; -use sp_keystore::{testing::KeyStore, KeystoreExt}; +use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, MultiSignature, }; -use std::sync::Arc; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; type Block = frame_system::mocking::MockBlock<Test>; @@ -89,6 +88,10 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types!
{ @@ -130,9 +133,8 @@ impl Config for Test { pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap(); - let keystore = KeyStore::new(); let mut ext = sp_io::TestExternalities::new(t); - ext.register_extension(KeystoreExt(Arc::new(keystore))); + ext.register_extension(KeystoreExt::new(MemoryKeystore::new())); ext.execute_with(|| System::set_block_number(1)); ext } diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 0fc6fcd15c45f..4ab12f0506056 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -2700,7 +2700,11 @@ fn claim_swap_should_work() { Balances::make_free_balance_be(&user_1, initial_balance); Balances::make_free_balance_be(&user_2, initial_balance); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1.clone(), default_collection_config())); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_1.clone(), + default_collection_config() + )); assert_ok!(Nfts::mint( RuntimeOrigin::signed(user_1.clone()), @@ -3136,6 +3140,34 @@ fn add_remove_item_attributes_approval_should_work() { }) } +#[test] +fn validate_signature() { + new_test_ext().execute_with(|| { + let user_1_pair = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); + let user_1_signer = MultiSigner::Sr25519(user_1_pair.public()); + let user_1 = user_1_signer.clone().into_account(); + let mint_data: PreSignedMint<u32, u32, AccountId, u64> = PreSignedMint { + collection: 0, + item: 0, + attributes: vec![], + metadata: vec![], + only_account: None, + deadline: 100000, + }; + let encoded_data = Encode::encode(&mint_data); + let signature = MultiSignature::Sr25519(user_1_pair.sign(&encoded_data)); + assert_ok!(Nfts::validate_signature(&encoded_data, &signature, &user_1)); + + let mut wrapped_data: Vec<u8> = Vec::new(); + wrapped_data.extend(b"<Bytes>"); + wrapped_data.extend(&encoded_data); + wrapped_data.extend(b"</Bytes>"); + + let signature = MultiSignature::Sr25519(user_1_pair.sign(&wrapped_data)); + assert_ok!(Nfts::validate_signature(&encoded_data, &signature, &user_1)); + }) +} + #[test] fn pre_signed_mints_should_work() { new_test_ext().execute_with(|| { diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 3d447c8d264d4..19a61974a61a7 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //!
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_nfts // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_nfts -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/nfts/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -105,10 +104,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `214` - // Estimated: `5038` - // Minimum execution time: 37_598_000 picoseconds. - Weight::from_parts(38_703_000, 5038) + // Measured: `182` + // Estimated: `3549` + // Minimum execution time: 40_664_000 picoseconds. + Weight::from_parts(41_224_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -125,9 +124,9 @@ impl WeightInfo for SubstrateWeight { fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `5038` - // Minimum execution time: 25_899_000 picoseconds. - Weight::from_parts(26_385_000, 5038) + // Estimated: `3549` + // Minimum execution time: 24_725_000 picoseconds. + Weight::from_parts(25_147_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -150,16 +149,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. - fn destroy(m: u32, _c: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32218 + a * (364 ±0)` - // Estimated: `2538589 + a * (2921 ±0)` - // Minimum execution time: 1_129_980_000 picoseconds. - Weight::from_parts(1_096_213_543, 2538589) - // Standard Error: 5_210 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(m.into())) - // Standard Error: 5_210 - .saturating_add(Weight::from_parts(5_480_550, 0).saturating_mul(a.into())) + fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32186 + a * (332 ±0)` + // Estimated: `2523990 + a * (2921 ±0)` + // Minimum execution time: 1_100_509_000 picoseconds. + Weight::from_parts(1_081_634_178, 2523990) + // Standard Error: 3_025 + .saturating_add(Weight::from_parts(5_339_415, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(1004_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1005_u64)) @@ -180,10 +177,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `453` - // Estimated: `18460` - // Minimum execution time: 49_434_000 picoseconds. - Weight::from_parts(50_248_000, 18460) + // Measured: `421` + // Estimated: `4326` + // Minimum execution time: 52_464_000 picoseconds. 
+ Weight::from_parts(52_847_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -201,10 +198,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn force_mint() -> Weight { // Proof Size summary in bytes: - // Measured: `453` - // Estimated: `18460` - // Minimum execution time: 47_393_000 picoseconds. - Weight::from_parts(47_906_000, 18460) + // Measured: `421` + // Estimated: `4326` + // Minimum execution time: 50_327_000 picoseconds. + Weight::from_parts(51_093_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -226,10 +223,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `594` - // Estimated: `14993` - // Minimum execution time: 47_188_000 picoseconds. - Weight::from_parts(47_746_000, 14993) + // Measured: `530` + // Estimated: `4326` + // Minimum execution time: 51_342_000 picoseconds. + Weight::from_parts(51_846_000, 4326) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -249,10 +246,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `623` - // Estimated: `14926` - // Minimum execution time: 37_984_000 picoseconds. - Weight::from_parts(38_446_000, 14926) + // Measured: `559` + // Estimated: `4326` + // Minimum execution time: 38_309_000 picoseconds. + Weight::from_parts(38_672_000, 4326) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -265,12 +262,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `761 + i * (140 ±0)` - // Estimated: `8077 + i * (3336 ±0)` - // Minimum execution time: 17_297_000 picoseconds. - Weight::from_parts(17_634_000, 8077) - // Standard Error: 17_773 - .saturating_add(Weight::from_parts(14_395_819, 0).saturating_mul(i.into())) + // Measured: `729 + i * (108 ±0)` + // Estimated: `3549 + i * (3336 ±0)` + // Minimum execution time: 17_525_000 picoseconds. + Weight::from_parts(17_657_000, 3549) + // Standard Error: 15_884 + .saturating_add(Weight::from_parts(16_026_633, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -283,9 +280,9 @@ impl WeightInfo for SubstrateWeight { fn lock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `401` - // Estimated: `7047` - // Minimum execution time: 21_827_000 picoseconds. - Weight::from_parts(22_134_000, 7047) + // Estimated: `3534` + // Minimum execution time: 21_814_000 picoseconds. + Weight::from_parts(22_171_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -296,9 +293,9 @@ impl WeightInfo for SubstrateWeight { fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `401` - // Estimated: `7047` - // Minimum execution time: 21_549_000 picoseconds. 
- Weight::from_parts(21_987_000, 7047) + // Estimated: `3534` + // Minimum execution time: 21_728_000 picoseconds. + Weight::from_parts(21_893_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -308,10 +305,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn lock_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `338` - // Estimated: `7087` - // Minimum execution time: 18_930_000 picoseconds. - Weight::from_parts(19_200_000, 7087) + // Measured: `306` + // Estimated: `3549` + // Minimum execution time: 18_359_000 picoseconds. + Weight::from_parts(19_101_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -323,24 +320,24 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `386` - // Estimated: `7066` - // Minimum execution time: 24_929_000 picoseconds. - Weight::from_parts(25_786_000, 7066) + // Measured: `354` + // Estimated: `3549` + // Minimum execution time: 24_713_000 picoseconds. + Weight::from_parts(25_032_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: Nfts Collection (r:1 w:1) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:4) + /// Storage: Nfts CollectionRoleOf (r:2 w:4) /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `7083` - // Minimum execution time: 28_245_000 picoseconds. - Weight::from_parts(28_490_000, 7083) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `335` + // Estimated: `6078` + // Minimum execution time: 42_372_000 picoseconds. + Weight::from_parts(42_971_000, 6078) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: Nfts Collection (r:1 w:1) @@ -349,10 +346,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `309` + // Measured: `277` // Estimated: `3549` - // Minimum execution time: 19_971_000 picoseconds. - Weight::from_parts(20_276_000, 3549) + // Minimum execution time: 19_703_000 picoseconds. + Weight::from_parts(19_993_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -364,8 +361,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `242` // Estimated: `3549` - // Minimum execution time: 15_924_000 picoseconds. - Weight::from_parts(16_258_000, 3549) + // Minimum execution time: 15_500_000 picoseconds. 
+ Weight::from_parts(15_929_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -376,9 +373,9 @@ impl WeightInfo for SubstrateWeight { fn lock_item_properties() -> Weight { // Proof Size summary in bytes: // Measured: `401` - // Estimated: `7047` - // Minimum execution time: 20_780_000 picoseconds. - Weight::from_parts(21_109_000, 7047) + // Estimated: `3534` + // Minimum execution time: 20_778_000 picoseconds. + Weight::from_parts(21_187_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -394,10 +391,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `18045` - // Minimum execution time: 50_817_000 picoseconds. - Weight::from_parts(51_585_000, 18045) + // Measured: `505` + // Estimated: `3911` + // Minimum execution time: 53_016_000 picoseconds. + Weight::from_parts(53_579_000, 3911) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -407,10 +404,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `342` - // Estimated: `7460` - // Minimum execution time: 28_821_000 picoseconds. - Weight::from_parts(29_417_000, 7460) + // Measured: `310` + // Estimated: `3911` + // Minimum execution time: 28_790_000 picoseconds. + Weight::from_parts(29_157_000, 3911) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -424,10 +421,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `979` - // Estimated: `14507` - // Minimum execution time: 47_021_000 picoseconds. - Weight::from_parts(47_509_000, 14507) + // Measured: `916` + // Estimated: `3911` + // Minimum execution time: 48_584_000 picoseconds. + Weight::from_parts(49_202_000, 3911) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -437,10 +434,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: - // Measured: `379` - // Estimated: `8472` - // Minimum execution time: 19_457_000 picoseconds. - Weight::from_parts(19_738_000, 8472) + // Measured: `347` + // Estimated: `4326` + // Minimum execution time: 19_616_000 picoseconds. + Weight::from_parts(19_972_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -455,12 +452,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `899 + n * (396 ±0)` - // Estimated: `15976 + n * (2921 ±0)` - // Minimum execution time: 29_027_000 picoseconds. 
- Weight::from_parts(29_376_000, 15976) - // Standard Error: 3_500 - .saturating_add(Weight::from_parts(5_568_116, 0).saturating_mul(n.into())) + // Measured: `803 + n * (364 ±0)` + // Estimated: `4326 + n * (2921 ±0)` + // Minimum execution time: 28_897_000 picoseconds. + Weight::from_parts(29_061_000, 4326) + // Standard Error: 3_139 + .saturating_add(Weight::from_parts(5_396_415, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -479,10 +476,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `17739` - // Minimum execution time: 41_658_000 picoseconds. - Weight::from_parts(42_241_000, 17739) + // Measured: `505` + // Estimated: `3605` + // Minimum execution time: 43_748_000 picoseconds. + Weight::from_parts(44_178_000, 3605) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -496,10 +493,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `672` - // Estimated: `14201` - // Minimum execution time: 39_838_000 picoseconds. - Weight::from_parts(40_757_000, 14201) + // Measured: `608` + // Estimated: `3605` + // Minimum execution time: 42_116_000 picoseconds. + Weight::from_parts(42_455_000, 3605) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -513,10 +510,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `396` - // Estimated: `14173` - // Minimum execution time: 37_327_000 picoseconds. - Weight::from_parts(37_874_000, 14173) + // Measured: `364` + // Estimated: `3552` + // Minimum execution time: 40_926_000 picoseconds. + Weight::from_parts(41_512_000, 3552) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -530,10 +527,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `539` - // Estimated: `14173` - // Minimum execution time: 35_305_000 picoseconds. - Weight::from_parts(36_213_000, 14173) + // Measured: `475` + // Estimated: `3552` + // Minimum execution time: 39_792_000 picoseconds. + Weight::from_parts(40_443_000, 3552) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -543,10 +540,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `408` - // Estimated: `7864` - // Minimum execution time: 22_680_000 picoseconds. - Weight::from_parts(23_126_000, 7864) + // Measured: `376` + // Estimated: `4326` + // Minimum execution time: 22_648_000 picoseconds. 
+ Weight::from_parts(23_139_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -554,10 +551,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `416` + // Measured: `384` // Estimated: `4326` - // Minimum execution time: 19_837_000 picoseconds. - Weight::from_parts(20_352_000, 4326) + // Minimum execution time: 20_552_000 picoseconds. + Weight::from_parts(20_920_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -565,10 +562,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: - // Measured: `416` + // Measured: `384` // Estimated: `4326` - // Minimum execution time: 18_911_000 picoseconds. - Weight::from_parts(19_233_000, 4326) + // Minimum execution time: 19_114_000 picoseconds. + Weight::from_parts(19_876_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -578,8 +575,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3517` - // Minimum execution time: 17_292_000 picoseconds. - Weight::from_parts(17_568_000, 3517) + // Minimum execution time: 17_089_000 picoseconds. + Weight::from_parts(17_363_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -589,10 +586,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `338` - // Estimated: `7087` - // Minimum execution time: 20_323_000 picoseconds. - Weight::from_parts(20_692_000, 7087) + // Measured: `306` + // Estimated: `3549` + // Minimum execution time: 20_667_000 picoseconds. + Weight::from_parts(20_898_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -603,9 +600,9 @@ impl WeightInfo for SubstrateWeight { fn update_mint_settings() -> Weight { // Proof Size summary in bytes: // Measured: `289` - // Estimated: `7072` - // Minimum execution time: 19_971_000 picoseconds. - Weight::from_parts(20_159_000, 7072) + // Estimated: `3538` + // Minimum execution time: 19_666_000 picoseconds. + Weight::from_parts(20_136_000, 3538) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -619,10 +616,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `516` - // Estimated: `11377` - // Minimum execution time: 26_202_000 picoseconds. - Weight::from_parts(26_499_000, 11377) + // Measured: `484` + // Estimated: `4326` + // Minimum execution time: 25_778_000 picoseconds. 
+ Weight::from_parts(26_447_000, 4326) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -642,10 +639,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `767` - // Estimated: `18480` - // Minimum execution time: 50_601_000 picoseconds. - Weight::from_parts(51_283_000, 18480) + // Measured: `671` + // Estimated: `4326` + // Minimum execution time: 50_809_000 picoseconds. + Weight::from_parts(51_503_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -654,10 +651,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_733_000 picoseconds. - Weight::from_parts(5_568_590, 0) - // Standard Error: 14_136 - .saturating_add(Weight::from_parts(3_763_928, 0).saturating_mul(n.into())) + // Minimum execution time: 2_789_000 picoseconds. + Weight::from_parts(5_528_034, 0) + // Standard Error: 14_405 + .saturating_add(Weight::from_parts(3_788_038, 0).saturating_mul(n.into())) } /// Storage: Nfts Item (r:2 w:0) /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) @@ -665,10 +662,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn create_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `524` + // Measured: `460` // Estimated: `7662` - // Minimum execution time: 22_901_000 picoseconds. - Weight::from_parts(23_673_000, 7662) + // Minimum execution time: 22_884_000 picoseconds. + Weight::from_parts(23_732_000, 7662) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -678,10 +675,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `511` - // Estimated: `7862` - // Minimum execution time: 22_532_000 picoseconds. - Weight::from_parts(22_831_000, 7862) + // Measured: `479` + // Estimated: `4326` + // Minimum execution time: 22_686_000 picoseconds. + Weight::from_parts(23_088_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -701,21 +698,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn claim_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `896` - // Estimated: `24321` - // Minimum execution time: 77_668_000 picoseconds. - Weight::from_parts(78_407_000, 24321) + // Measured: `800` + // Estimated: `7662` + // Minimum execution time: 77_494_000 picoseconds. 
+ Weight::from_parts(78_650_000, 7662) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) /// Storage: Nfts CollectionRoleOf (r:2 w:0) /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts Item (r:1 w:1) /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:1 w:1) /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) @@ -729,12 +726,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `659` - // Estimated: `29192 + n * (2921 ±0)` - // Minimum execution time: 133_147_000 picoseconds. - Weight::from_parts(138_031_439, 29192) - // Standard Error: 28_665 - .saturating_add(Weight::from_parts(28_206_062, 0).saturating_mul(n.into())) + // Measured: `595` + // Estimated: `6078 + n * (2921 ±0)` + // Minimum execution time: 139_109_000 picoseconds. + Weight::from_parts(144_449_034, 6078) + // Standard Error: 26_869 + .saturating_add(Weight::from_parts(29_961_772, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -756,12 +753,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `721` - // Estimated: `20142 + n * (2921 ±0)` - // Minimum execution time: 78_243_000 picoseconds. - Weight::from_parts(90_947_408, 20142) - // Standard Error: 75_516 - .saturating_add(Weight::from_parts(28_059_623, 0).saturating_mul(n.into())) + // Measured: `625` + // Estimated: `4326 + n * (2921 ±0)` + // Minimum execution time: 78_280_000 picoseconds. + Weight::from_parts(92_826_883, 4326) + // Standard Error: 81_125 + .saturating_add(Weight::from_parts(29_898_245, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -784,10 +781,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `214` - // Estimated: `5038` - // Minimum execution time: 37_598_000 picoseconds. - Weight::from_parts(38_703_000, 5038) + // Measured: `182` + // Estimated: `3549` + // Minimum execution time: 40_664_000 picoseconds. 
+ Weight::from_parts(41_224_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -804,9 +801,9 @@ impl WeightInfo for () { fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `5038` - // Minimum execution time: 25_899_000 picoseconds. - Weight::from_parts(26_385_000, 5038) + // Estimated: `3549` + // Minimum execution time: 24_725_000 picoseconds. + Weight::from_parts(25_147_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -829,16 +826,14 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. - fn destroy(m: u32, _c: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32218 + a * (364 ±0)` - // Estimated: `2538589 + a * (2921 ±0)` - // Minimum execution time: 1_129_980_000 picoseconds. - Weight::from_parts(1_096_213_543, 2538589) - // Standard Error: 5_210 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(m.into())) - // Standard Error: 5_210 - .saturating_add(Weight::from_parts(5_480_550, 0).saturating_mul(a.into())) + fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32186 + a * (332 ±0)` + // Estimated: `2523990 + a * (2921 ±0)` + // Minimum execution time: 1_100_509_000 picoseconds. + Weight::from_parts(1_081_634_178, 2523990) + // Standard Error: 3_025 + .saturating_add(Weight::from_parts(5_339_415, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(1004_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1005_u64)) @@ -859,10 +854,10 @@ impl WeightInfo for () { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `453` - // Estimated: `18460` - // Minimum execution time: 49_434_000 picoseconds. - Weight::from_parts(50_248_000, 18460) + // Measured: `421` + // Estimated: `4326` + // Minimum execution time: 52_464_000 picoseconds. + Weight::from_parts(52_847_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -880,10 +875,10 @@ impl WeightInfo for () { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn force_mint() -> Weight { // Proof Size summary in bytes: - // Measured: `453` - // Estimated: `18460` - // Minimum execution time: 47_393_000 picoseconds. - Weight::from_parts(47_906_000, 18460) + // Measured: `421` + // Estimated: `4326` + // Minimum execution time: 50_327_000 picoseconds. + Weight::from_parts(51_093_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -905,10 +900,10 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `594` - // Estimated: `14993` - // Minimum execution time: 47_188_000 picoseconds. - Weight::from_parts(47_746_000, 14993) + // Measured: `530` + // Estimated: `4326` + // Minimum execution time: 51_342_000 picoseconds. 
+ Weight::from_parts(51_846_000, 4326) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -928,10 +923,10 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `623` - // Estimated: `14926` - // Minimum execution time: 37_984_000 picoseconds. - Weight::from_parts(38_446_000, 14926) + // Measured: `559` + // Estimated: `4326` + // Minimum execution time: 38_309_000 picoseconds. + Weight::from_parts(38_672_000, 4326) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -944,12 +939,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `761 + i * (140 ±0)` - // Estimated: `8077 + i * (3336 ±0)` - // Minimum execution time: 17_297_000 picoseconds. - Weight::from_parts(17_634_000, 8077) - // Standard Error: 17_773 - .saturating_add(Weight::from_parts(14_395_819, 0).saturating_mul(i.into())) + // Measured: `729 + i * (108 ±0)` + // Estimated: `3549 + i * (3336 ±0)` + // Minimum execution time: 17_525_000 picoseconds. + Weight::from_parts(17_657_000, 3549) + // Standard Error: 15_884 + .saturating_add(Weight::from_parts(16_026_633, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -962,9 +957,9 @@ impl WeightInfo for () { fn lock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `401` - // Estimated: `7047` - // Minimum execution time: 21_827_000 picoseconds. - Weight::from_parts(22_134_000, 7047) + // Estimated: `3534` + // Minimum execution time: 21_814_000 picoseconds. + Weight::from_parts(22_171_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -975,9 +970,9 @@ impl WeightInfo for () { fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `401` - // Estimated: `7047` - // Minimum execution time: 21_549_000 picoseconds. - Weight::from_parts(21_987_000, 7047) + // Estimated: `3534` + // Minimum execution time: 21_728_000 picoseconds. + Weight::from_parts(21_893_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -987,10 +982,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn lock_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `338` - // Estimated: `7087` - // Minimum execution time: 18_930_000 picoseconds. - Weight::from_parts(19_200_000, 7087) + // Measured: `306` + // Estimated: `3549` + // Minimum execution time: 18_359_000 picoseconds. + Weight::from_parts(19_101_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1002,24 +997,24 @@ impl WeightInfo for () { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `386` - // Estimated: `7066` - // Minimum execution time: 24_929_000 picoseconds. 
- Weight::from_parts(25_786_000, 7066) + // Measured: `354` + // Estimated: `3549` + // Minimum execution time: 24_713_000 picoseconds. + Weight::from_parts(25_032_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: Nfts Collection (r:1 w:1) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:4) + /// Storage: Nfts CollectionRoleOf (r:2 w:4) /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `7083` - // Minimum execution time: 28_245_000 picoseconds. - Weight::from_parts(28_490_000, 7083) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `335` + // Estimated: `6078` + // Minimum execution time: 42_372_000 picoseconds. + Weight::from_parts(42_971_000, 6078) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: Nfts Collection (r:1 w:1) @@ -1028,10 +1023,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `309` + // Measured: `277` // Estimated: `3549` - // Minimum execution time: 19_971_000 picoseconds. - Weight::from_parts(20_276_000, 3549) + // Minimum execution time: 19_703_000 picoseconds. + Weight::from_parts(19_993_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1043,8 +1038,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `242` // Estimated: `3549` - // Minimum execution time: 15_924_000 picoseconds. - Weight::from_parts(16_258_000, 3549) + // Minimum execution time: 15_500_000 picoseconds. + Weight::from_parts(15_929_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1055,9 +1050,9 @@ impl WeightInfo for () { fn lock_item_properties() -> Weight { // Proof Size summary in bytes: // Measured: `401` - // Estimated: `7047` - // Minimum execution time: 20_780_000 picoseconds. - Weight::from_parts(21_109_000, 7047) + // Estimated: `3534` + // Minimum execution time: 20_778_000 picoseconds. + Weight::from_parts(21_187_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1073,10 +1068,10 @@ impl WeightInfo for () { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `18045` - // Minimum execution time: 50_817_000 picoseconds. - Weight::from_parts(51_585_000, 18045) + // Measured: `505` + // Estimated: `3911` + // Minimum execution time: 53_016_000 picoseconds. + Weight::from_parts(53_579_000, 3911) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1086,10 +1081,10 @@ impl WeightInfo for () { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `342` - // Estimated: `7460` - // Minimum execution time: 28_821_000 picoseconds. 
- Weight::from_parts(29_417_000, 7460) + // Measured: `310` + // Estimated: `3911` + // Minimum execution time: 28_790_000 picoseconds. + Weight::from_parts(29_157_000, 3911) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1103,10 +1098,10 @@ impl WeightInfo for () { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `979` - // Estimated: `14507` - // Minimum execution time: 47_021_000 picoseconds. - Weight::from_parts(47_509_000, 14507) + // Measured: `916` + // Estimated: `3911` + // Minimum execution time: 48_584_000 picoseconds. + Weight::from_parts(49_202_000, 3911) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1116,10 +1111,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: - // Measured: `379` - // Estimated: `8472` - // Minimum execution time: 19_457_000 picoseconds. - Weight::from_parts(19_738_000, 8472) + // Measured: `347` + // Estimated: `4326` + // Minimum execution time: 19_616_000 picoseconds. + Weight::from_parts(19_972_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1134,12 +1129,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `899 + n * (396 ±0)` - // Estimated: `15976 + n * (2921 ±0)` - // Minimum execution time: 29_027_000 picoseconds. - Weight::from_parts(29_376_000, 15976) - // Standard Error: 3_500 - .saturating_add(Weight::from_parts(5_568_116, 0).saturating_mul(n.into())) + // Measured: `803 + n * (364 ±0)` + // Estimated: `4326 + n * (2921 ±0)` + // Minimum execution time: 28_897_000 picoseconds. + Weight::from_parts(29_061_000, 4326) + // Standard Error: 3_139 + .saturating_add(Weight::from_parts(5_396_415, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1158,10 +1153,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `17739` - // Minimum execution time: 41_658_000 picoseconds. - Weight::from_parts(42_241_000, 17739) + // Measured: `505` + // Estimated: `3605` + // Minimum execution time: 43_748_000 picoseconds. + Weight::from_parts(44_178_000, 3605) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1175,10 +1170,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `672` - // Estimated: `14201` - // Minimum execution time: 39_838_000 picoseconds. - Weight::from_parts(40_757_000, 14201) + // Measured: `608` + // Estimated: `3605` + // Minimum execution time: 42_116_000 picoseconds. 
+ Weight::from_parts(42_455_000, 3605) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1192,10 +1187,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `396` - // Estimated: `14173` - // Minimum execution time: 37_327_000 picoseconds. - Weight::from_parts(37_874_000, 14173) + // Measured: `364` + // Estimated: `3552` + // Minimum execution time: 40_926_000 picoseconds. + Weight::from_parts(41_512_000, 3552) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1209,10 +1204,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `539` - // Estimated: `14173` - // Minimum execution time: 35_305_000 picoseconds. - Weight::from_parts(36_213_000, 14173) + // Measured: `475` + // Estimated: `3552` + // Minimum execution time: 39_792_000 picoseconds. + Weight::from_parts(40_443_000, 3552) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1222,10 +1217,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `408` - // Estimated: `7864` - // Minimum execution time: 22_680_000 picoseconds. - Weight::from_parts(23_126_000, 7864) + // Measured: `376` + // Estimated: `4326` + // Minimum execution time: 22_648_000 picoseconds. + Weight::from_parts(23_139_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1233,10 +1228,10 @@ impl WeightInfo for () { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `416` + // Measured: `384` // Estimated: `4326` - // Minimum execution time: 19_837_000 picoseconds. - Weight::from_parts(20_352_000, 4326) + // Minimum execution time: 20_552_000 picoseconds. + Weight::from_parts(20_920_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1244,10 +1239,10 @@ impl WeightInfo for () { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: - // Measured: `416` + // Measured: `384` // Estimated: `4326` - // Minimum execution time: 18_911_000 picoseconds. - Weight::from_parts(19_233_000, 4326) + // Minimum execution time: 19_114_000 picoseconds. + Weight::from_parts(19_876_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1257,8 +1252,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3517` - // Minimum execution time: 17_292_000 picoseconds. - Weight::from_parts(17_568_000, 3517) + // Minimum execution time: 17_089_000 picoseconds. 
+ Weight::from_parts(17_363_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1268,10 +1263,10 @@ impl WeightInfo for () { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `338` - // Estimated: `7087` - // Minimum execution time: 20_323_000 picoseconds. - Weight::from_parts(20_692_000, 7087) + // Measured: `306` + // Estimated: `3549` + // Minimum execution time: 20_667_000 picoseconds. + Weight::from_parts(20_898_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1282,9 +1277,9 @@ impl WeightInfo for () { fn update_mint_settings() -> Weight { // Proof Size summary in bytes: // Measured: `289` - // Estimated: `7072` - // Minimum execution time: 19_971_000 picoseconds. - Weight::from_parts(20_159_000, 7072) + // Estimated: `3538` + // Minimum execution time: 19_666_000 picoseconds. + Weight::from_parts(20_136_000, 3538) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1298,10 +1293,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `516` - // Estimated: `11377` - // Minimum execution time: 26_202_000 picoseconds. - Weight::from_parts(26_499_000, 11377) + // Measured: `484` + // Estimated: `4326` + // Minimum execution time: 25_778_000 picoseconds. + Weight::from_parts(26_447_000, 4326) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1321,10 +1316,10 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `767` - // Estimated: `18480` - // Minimum execution time: 50_601_000 picoseconds. - Weight::from_parts(51_283_000, 18480) + // Measured: `671` + // Estimated: `4326` + // Minimum execution time: 50_809_000 picoseconds. + Weight::from_parts(51_503_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1333,10 +1328,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_733_000 picoseconds. - Weight::from_parts(5_568_590, 0) - // Standard Error: 14_136 - .saturating_add(Weight::from_parts(3_763_928, 0).saturating_mul(n.into())) + // Minimum execution time: 2_789_000 picoseconds. + Weight::from_parts(5_528_034, 0) + // Standard Error: 14_405 + .saturating_add(Weight::from_parts(3_788_038, 0).saturating_mul(n.into())) } /// Storage: Nfts Item (r:2 w:0) /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) @@ -1344,10 +1339,10 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn create_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `524` + // Measured: `460` // Estimated: `7662` - // Minimum execution time: 22_901_000 picoseconds. - Weight::from_parts(23_673_000, 7662) + // Minimum execution time: 22_884_000 picoseconds. 
+ Weight::from_parts(23_732_000, 7662) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1357,10 +1352,10 @@ impl WeightInfo for () { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `511` - // Estimated: `7862` - // Minimum execution time: 22_532_000 picoseconds. - Weight::from_parts(22_831_000, 7862) + // Measured: `479` + // Estimated: `4326` + // Minimum execution time: 22_686_000 picoseconds. + Weight::from_parts(23_088_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1380,21 +1375,21 @@ impl WeightInfo for () { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn claim_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `896` - // Estimated: `24321` - // Minimum execution time: 77_668_000 picoseconds. - Weight::from_parts(78_407_000, 24321) + // Measured: `800` + // Estimated: `7662` + // Minimum execution time: 77_494_000 picoseconds. + Weight::from_parts(78_650_000, 7662) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) /// Storage: Nfts CollectionRoleOf (r:2 w:0) /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts Item (r:1 w:1) /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:1 w:1) /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) @@ -1408,12 +1403,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `659` - // Estimated: `29192 + n * (2921 ±0)` - // Minimum execution time: 133_147_000 picoseconds. - Weight::from_parts(138_031_439, 29192) - // Standard Error: 28_665 - .saturating_add(Weight::from_parts(28_206_062, 0).saturating_mul(n.into())) + // Measured: `595` + // Estimated: `6078 + n * (2921 ±0)` + // Minimum execution time: 139_109_000 picoseconds. + Weight::from_parts(144_449_034, 6078) + // Standard Error: 26_869 + .saturating_add(Weight::from_parts(29_961_772, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1435,12 +1430,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `721` - // Estimated: `20142 + n * (2921 ±0)` - // Minimum execution time: 78_243_000 picoseconds. 
- Weight::from_parts(90_947_408, 20142) - // Standard Error: 75_516 - .saturating_add(Weight::from_parts(28_059_623, 0).saturating_mul(n.into())) + // Measured: `625` + // Estimated: `4326 + n * (2921 ±0)` + // Minimum execution time: 78_280_000 picoseconds. + Weight::from_parts(92_826_883, 4326) + // Standard Error: 81_125 + .saturating_add(Weight::from_parts(29_898_245, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 1c829efc2d601..50fc4ef68788f 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index b74d2e6ee40b2..92865c773d886 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -131,7 +131,7 @@ pub mod pallet { /// ## Complexity /// - O(1). #[pallet::call_index(0)] - #[pallet::weight(50_000_000)] + #[pallet::weight({50_000_000})] pub fn set_name(origin: OriginFor, name: Vec) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -160,7 +160,7 @@ pub mod pallet { /// ## Complexity /// - O(1). #[pallet::call_index(1)] - #[pallet::weight(70_000_000)] + #[pallet::weight({70_000_000})] pub fn clear_name(origin: OriginFor) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -183,7 +183,7 @@ pub mod pallet { /// ## Complexity /// - O(1). #[pallet::call_index(2)] - #[pallet::weight(70_000_000)] + #[pallet::weight({70_000_000})] pub fn kill_name(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; @@ -207,7 +207,7 @@ pub mod pallet { /// ## Complexity /// - O(1). #[pallet::call_index(3)] - #[pallet::weight(70_000_000)] + #[pallet::weight({70_000_000})] pub fn force_name( origin: OriginFor, target: AccountIdLookupOf, @@ -295,6 +295,10 @@ mod tests { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } ord_parameter_types! 
{ diff --git a/frame/nis/Cargo.toml b/frame/nis/Cargo.toml index a5fc29cd79150..c12d4f2c0a1eb 100644 --- a/frame/nis/Cargo.toml +++ b/frame/nis/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/nis/README.md b/frame/nis/README.md index 4eaddae1786e7..032df7d01868f 100644 --- a/frame/nis/README.md +++ b/frame/nis/README.md @@ -1,2 +1,5 @@ +# NIS Module + +Provides a non-interactive variant of staking. License: Apache-2.0 diff --git a/frame/nis/src/benchmarking.rs b/frame/nis/src/benchmarking.rs index 6fc9fbc2bffa2..0cc9e7421d0e6 100644 --- a/frame/nis/src/benchmarking.rs +++ b/frame/nis/src/benchmarking.rs @@ -21,7 +21,9 @@ use super::*; use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; -use frame_support::traits::{nonfungible::Inspect, Currency, EnsureOrigin, Get}; +use frame_support::traits::{ + fungible::Inspect as FunInspect, nonfungible::Inspect, EnsureOrigin, Get, +}; use frame_system::RawOrigin; use sp_arithmetic::Perquintill; use sp_runtime::{ @@ -35,7 +37,7 @@ use crate::Pallet as Nis; const SEED: u32 = 0; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as FunInspect<::AccountId>>::Balance; fn fill_queues() -> Result<(), DispatchError> { // filling queues involves filling the first queue entirely and placing a single item in all @@ -45,10 +47,7 @@ fn fill_queues() -> Result<(), DispatchError> { let bids = T::MaxQueueLen::get(); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be( - &caller, - T::MinBid::get() * BalanceOf::::from(queues + bids), - ); + T::Currency::set_balance(&caller, T::MinBid::get() * BalanceOf::::from(queues + bids)); for _ in 0..bids { Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; } @@ -63,7 +62,9 @@ benchmarks! { place_bid { let l in 0..(T::MaxQueueLen::get() - 1); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let ed = T::Currency::minimum_balance(); + let bid = T::MinBid::get(); + T::Currency::set_balance(&caller, (ed + bid) * BalanceOf::::from(l + 1) + bid); for i in 0..l { Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; } @@ -75,7 +76,10 @@ place_bid_max { let caller: T::AccountId = whitelisted_caller(); let origin = RawOrigin::Signed(caller.clone()); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let ed = T::Currency::minimum_balance(); + let bid = T::MinBid::get(); + let ql = T::MaxQueueLen::get(); + T::Currency::set_balance(&caller, (ed + bid) * BalanceOf::::from(ql + 1) + bid); for i in 0..T::MaxQueueLen::get() { Nis::::place_bid(origin.clone().into(), T::MinBid::get(), 1)?; } @@ -90,7 +94,9 @@ benchmarks!
{ retract_bid { let l in 1..T::MaxQueueLen::get(); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let ed = T::Currency::minimum_balance(); + let bid = T::MinBid::get(); + T::Currency::set_balance(&caller, (ed + bid) * BalanceOf::::from(l + 1) + bid); for i in 0..l { Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; } @@ -104,66 +110,81 @@ benchmarks! { T::FundOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let caller: T::AccountId = whitelisted_caller(); let bid = T::MinBid::get().max(One::one()); - T::Currency::make_free_balance_be(&caller, bid); + let ed = T::Currency::minimum_balance(); + T::Currency::set_balance(&caller, ed + bid); Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; Nis::::process_queues(Perquintill::one(), 1, 1, &mut WeightCounter::unlimited()); Nis::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; - let original = T::Currency::free_balance(&Nis::::account_id()); - T::Currency::make_free_balance_be(&Nis::::account_id(), BalanceOf::::min_value()); + let original = T::Currency::balance(&Nis::::account_id()); + T::Currency::set_balance(&Nis::::account_id(), BalanceOf::::min_value()); }: _(origin) verify { // Must fund at least 99.999% of the required amount. let missing = Perquintill::from_rational( - T::Currency::free_balance(&Nis::::account_id()), original).left_from_one(); + T::Currency::balance(&Nis::::account_id()), original).left_from_one(); assert!(missing <= Perquintill::one() / 100_000); } - thaw_private { + communify { let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, T::MinBid::get() * BalanceOf::::from(3u32)); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; + let bid = T::MinBid::get().max(One::one()) * 100u32.into(); + let ed = T::Currency::minimum_balance(); + T::Currency::set_balance(&caller, ed + bid + bid); + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); - Receipts::::mutate(0, |m_g| if let Some(ref mut g) = m_g { g.expiry = Zero::zero() }); - }: _(RawOrigin::Signed(caller.clone()), 0, None) + }: _(RawOrigin::Signed(caller.clone()), 0) verify { - assert!(Receipts::::get(0).is_none()); + assert_eq!(Nis::::owner(&0), None); } - thaw_communal { + privatize { let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, T::MinBid::get() * BalanceOf::::from(3u32)); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; + let bid = T::MinBid::get().max(One::one()); + let ed = T::Currency::minimum_balance(); + T::Currency::set_balance(&caller, ed + bid + bid); + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); - Receipts::::mutate(0, |m_g| if let Some(ref mut g) = m_g { g.expiry = Zero::zero() }); Nis::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; }: _(RawOrigin::Signed(caller.clone()), 0) verify { - assert!(Receipts::::get(0).is_none()); + 
assert_eq!(Nis::::owner(&0), Some(caller)); } - privatize { + thaw_private { + let whale: T::AccountId = account("whale", 0, SEED); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, T::MinBid::get() * BalanceOf::::from(3u32)); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; + let bid = T::MinBid::get().max(One::one()); + let ed = T::Currency::minimum_balance(); + T::Currency::set_balance(&caller, ed + bid + bid); + // Ensure we don't get throttled. + T::Currency::set_balance(&whale, T::ThawThrottle::get().0.saturating_reciprocal_mul_ceil(T::Currency::balance(&caller))); + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); - Nis::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; - }: _(RawOrigin::Signed(caller.clone()), 0) + frame_system::Pallet::::set_block_number(Receipts::::get(0).unwrap().expiry); + }: _(RawOrigin::Signed(caller.clone()), 0, None) verify { - assert_eq!(Nis::::owner(&0), Some(caller)); + assert!(Receipts::::get(0).is_none()); } - communify { + thaw_communal { + let whale: T::AccountId = account("whale", 0, SEED); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, T::MinBid::get() * BalanceOf::::from(3u32)); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; + let bid = T::MinBid::get().max(One::one()); + let ed = T::Currency::minimum_balance(); + T::Currency::set_balance(&caller, ed + bid + bid); + // Ensure we don't get throttled. + T::Currency::set_balance(&whale, T::ThawThrottle::get().0.saturating_reciprocal_mul_ceil(T::Currency::balance(&caller))); + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); + frame_system::Pallet::::set_block_number(Receipts::::get(0).unwrap().expiry); + Nis::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; }: _(RawOrigin::Signed(caller.clone()), 0) verify { - assert_eq!(Nis::::owner(&0), None); + assert!(Receipts::::get(0).is_none()); } process_queues { @@ -197,6 +218,9 @@ benchmarks! { process_bid { let who = account::("bidder", 0, SEED); + let min_bid = T::MinBid::get().max(One::one()); + let ed = T::Currency::minimum_balance(); + T::Currency::set_balance(&who, ed + min_bid); let bid = Bid { amount: T::MinBid::get(), who, @@ -216,5 +240,5 @@ benchmarks! { ) } - impl_benchmark_test_suite!(Nis, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Nis, crate::mock::new_test_ext_empty(), crate::mock::Test); } diff --git a/frame/nis/src/lib.rs b/frame/nis/src/lib.rs index 6fd9c60a89836..c4d0d0d420290 100644 --- a/frame/nis/src/lib.rs +++ b/frame/nis/src/lib.rs @@ -71,21 +71,21 @@ //! //! ## Terms //! -//! - *Effective total issuance*: The total issuance of balances in the system, including all claims -//! of all outstanding receipts but excluding `IgnoredIssuance`. +//! - *Effective total issuance*: The total issuance of balances in the system, equal to the active +//! issuance plus the value of all outstanding receipts, less `IgnoredIssuance`. 
#![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{ - dispatch::{DispatchError, DispatchResult}, - traits::fungible::{Inspect as FungibleInspect, Mutate as FungibleMutate}, +use frame_support::traits::{ + fungible::{self, Inspect as FunInspect, Mutate as FunMutate}, + tokens::{DepositConsequence, Fortitude, Preservation, Provenance, WithdrawConsequence}, }; pub use pallet::*; use sp_arithmetic::{traits::Unsigned, RationalArg}; use sp_core::TypedGet; use sp_runtime::{ traits::{Convert, ConvertBack}, - Perquintill, + DispatchError, Perquintill, }; mod benchmarking; @@ -117,7 +117,7 @@ where } pub struct NoCounterpart(sp_std::marker::PhantomData); -impl FungibleInspect for NoCounterpart { +impl FunInspect for NoCounterpart { type Balance = u32; fn total_issuance() -> u32 { 0 @@ -125,34 +125,30 @@ impl FungibleInspect for NoCounterpart { fn minimum_balance() -> u32 { 0 } - fn balance(_who: &T) -> u32 { + fn balance(_: &T) -> u32 { 0 } - fn reducible_balance(_who: &T, _keep_alive: bool) -> u32 { + fn total_balance(_: &T) -> u32 { 0 } - fn can_deposit( - _who: &T, - _amount: u32, - _mint: bool, - ) -> frame_support::traits::tokens::DepositConsequence { - frame_support::traits::tokens::DepositConsequence::Success + fn reducible_balance(_: &T, _: Preservation, _: Fortitude) -> u32 { + 0 } - fn can_withdraw( - _who: &T, - _amount: u32, - ) -> frame_support::traits::tokens::WithdrawConsequence { - frame_support::traits::tokens::WithdrawConsequence::Success + fn can_deposit(_: &T, _: u32, _: Provenance) -> DepositConsequence { + DepositConsequence::Success } -} -impl FungibleMutate for NoCounterpart { - fn mint_into(_who: &T, _amount: u32) -> DispatchResult { - Ok(()) + fn can_withdraw(_: &T, _: u32) -> WithdrawConsequence { + WithdrawConsequence::Success } - fn burn_from(_who: &T, _amount: u32) -> Result { - Ok(0) +} +impl fungible::Unbalanced for NoCounterpart { + fn handle_dust(_: fungible::Dust) {} + fn write_balance(_: &T, _: Self::Balance) -> Result, DispatchError> { + Ok(None) } + fn set_total_issuance(_: Self::Balance) {} } +impl FunMutate for NoCounterpart {} impl Convert for NoCounterpart { fn convert(_: Perquintill) -> u32 { 0 @@ -161,15 +157,24 @@ impl Convert for NoCounterpart { #[frame_support::pallet] pub mod pallet { - use super::{FungibleInspect, FungibleMutate}; + use super::{FunInspect, FunMutate}; pub use crate::weights::WeightInfo; use frame_support::{ pallet_prelude::*, traits::{ - nonfungible::{Inspect as NonfungibleInspect, Transfer as NonfungibleTransfer}, - Currency, Defensive, DefensiveSaturating, - ExistenceRequirement::AllowDeath, - NamedReservableCurrency, OnUnbalanced, + fungible::{ + self, + hold::{Inspect as FunHoldInspect, Mutate as FunHoldMutate}, + Balanced as FunBalanced, + }, + nonfungible::{Inspect as NftInspect, Transfer as NftTransfer}, + tokens::{ + Fortitude::Polite, + Precision::{BestEffort, Exact}, + Preservation::Expendable, + Restriction::{Free, OnHold}, + }, + Defensive, DefensiveSaturating, OnUnbalanced, }, PalletId, }; @@ -177,15 +182,14 @@ pub mod pallet { use sp_arithmetic::{PerThing, Perquintill}; use sp_runtime::{ traits::{AccountIdConversion, Bounded, Convert, ConvertBack, Saturating, Zero}, - TokenError, + Rounding, TokenError, }; use sp_std::prelude::*; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - type PositiveImbalanceOf = <::Currency as Currency< - ::AccountId, - >>::PositiveImbalance; + <::Currency as FunInspect<::AccountId>>::Balance; + type DebtOf = + fungible::Debt<::AccountId, ::Currency>; type 
ReceiptRecordOf = ReceiptRecord< ::AccountId, ::BlockNumber, @@ -209,7 +213,16 @@ pub mod pallet { type PalletId: Get; /// Currency type that this works on. - type Currency: NamedReservableCurrency; + type Currency: FunInspect + + FunMutate + + FunBalanced + + FunHoldInspect + + FunHoldMutate; + + /// The identifier of the hold reason. + + #[pallet::constant] + type HoldReason: Get<>::Reason>; /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to /// `From`. @@ -231,7 +244,7 @@ pub mod pallet { type IgnoredIssuance: Get>; /// The accounting system for the fungible counterpart tokens. - type Counterpart: FungibleMutate; + type Counterpart: FunMutate; /// The system to convert an overall proportion of issuance into a number of fungible /// counterpart tokens. @@ -239,12 +252,12 @@ pub mod pallet { /// In general it's best to use `WithMaximumOf`. type CounterpartAmount: ConvertBack< Perquintill, - >::Balance, + >::Balance, >; /// Unbalanced handler to account for funds created (in case of a higher total issuance over /// freezing period). - type Deficit: OnUnbalanced>; + type Deficit: OnUnbalanced>; /// The target sum of all receipts' proportions. type Target: Get; @@ -301,12 +314,6 @@ pub mod pallet { /// The maximum proportion which may be thawed and the period over which it is reset. #[pallet::constant] type ThawThrottle: Get<(Perquintill, Self::BlockNumber)>; - - /// The name for the reserve ID. - #[pallet::constant] - type ReserveId: Get< - >::ReserveIdentifier, - >; } #[pallet::pallet] @@ -348,7 +355,7 @@ pub mod pallet { /// /// `issuance - frozen + proportion * issuance` /// - /// where `issuance = total_issuance - IgnoredIssuance` + /// where `issuance = active_issuance - IgnoredIssuance` #[derive( Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen, )] @@ -475,6 +482,14 @@ pub mod pallet { AlreadyPrivate, } + /// A reason for the NIS pallet placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// The NIS Pallet has reserved it for a non-fungible receipt. 
+ #[codec(index = 0)] + NftReceipt, + } + pub(crate) struct WeightCounter { pub(crate) used: Weight, pub(crate) limit: Weight, @@ -554,16 +569,17 @@ pub mod pallet { |q| -> Result<(u32, BalanceOf), DispatchError> { let queue_full = q.len() == T::MaxQueueLen::get() as usize; ensure!(!queue_full || q[0].amount < amount, Error::::BidTooLow); - T::Currency::reserve_named(&T::ReserveId::get(), &who, amount)?; + T::Currency::hold(&T::HoldReason::get(), &who, amount)?; // queue is let mut bid = Bid { amount, who: who.clone() }; let net = if queue_full { sp_std::mem::swap(&mut q[0], &mut bid); - let _ = T::Currency::unreserve_named( - &T::ReserveId::get(), + let _ = T::Currency::release( + &T::HoldReason::get(), &bid.who, bid.amount, + BestEffort, ); Self::deposit_event(Event::::BidDropped { who: bid.who, @@ -615,19 +631,21 @@ pub mod pallet { ensure!(queue_index < queue_count, Error::::DurationTooBig); let bid = Bid { amount, who }; - let new_len = Queues::::try_mutate(duration, |q| -> Result { - let pos = q.iter().position(|i| i == &bid).ok_or(Error::::UnknownBid)?; - q.remove(pos); - Ok(q.len() as u32) - })?; + let mut queue = Queues::::get(duration); + let pos = queue.iter().position(|i| i == &bid).ok_or(Error::::UnknownBid)?; + queue.remove(pos); + let new_len = queue.len() as u32; + + T::Currency::release(&T::HoldReason::get(), &bid.who, bid.amount, BestEffort)?; + + Queues::::insert(duration, queue); QueueTotals::::mutate(|qs| { qs.bounded_resize(queue_count, (0, Zero::zero())); qs[queue_index].0 = new_len; qs[queue_index].1.saturating_reduce(bid.amount); }); - T::Currency::unreserve_named(&T::ReserveId::get(), &bid.who, bid.amount); Self::deposit_event(Event::BidRetracted { who: bid.who, amount: bid.amount, duration }); Ok(()) @@ -645,7 +663,7 @@ pub mod pallet { let issuance = Self::issuance_with(&our_account, &summary); let deficit = issuance.required.saturating_sub(issuance.holdings); ensure!(!deficit.is_zero(), Error::::AlreadyFunded); - T::Deficit::on_unbalanced(T::Currency::deposit_creating(&our_account, deficit)); + T::Deficit::on_unbalanced(T::Currency::deposit(&our_account, deficit, Exact)?); Self::deposit_event(Event::::Funded { deficit }); Ok(()) } @@ -702,6 +720,7 @@ pub mod pallet { // Multiply the proportion it is by the total issued. let our_account = Self::account_id(); let effective_issuance = Self::issuance_with(&our_account, &summary).effective; + // let amount = proportion.mul_ceil(effective_issuance); let amount = proportion * effective_issuance; receipt.proportion.saturating_reduce(proportion); @@ -710,46 +729,38 @@ pub mod pallet { let dropped = receipt.proportion.is_zero(); if amount > on_hold { - T::Currency::unreserve_named(&T::ReserveId::get(), &who, on_hold); + T::Currency::release(&T::HoldReason::get(), &who, on_hold, Exact)?; let deficit = amount - on_hold; // Try to transfer deficit from pot to receipt owner. summary.receipts_on_hold.saturating_reduce(on_hold); on_hold = Zero::zero(); - T::Currency::transfer(&our_account, &who, deficit, AllowDeath) + T::Currency::transfer(&our_account, &who, deficit, Expendable) .map_err(|_| Error::::Unfunded)?; } else { - T::Currency::unreserve_named(&T::ReserveId::get(), &who, amount); on_hold.saturating_reduce(amount); summary.receipts_on_hold.saturating_reduce(amount); if dropped && !on_hold.is_zero() { // Reclaim any remainder: - // Transfer `excess` to the pot if we have now fully compensated for the - // receipt. - // - // This will legitimately fail if there is no pot account in existance. 
- // There's nothing we can do about this so we just swallow the error. - // This code is not ideal and could fail in the second phase leaving - // the system in an invalid state. It can be fixed properly with the - // new API in https://github.com/paritytech/substrate/pull/12951 - // - // Below is what it should look like then: - // let _ = T::Currency::repatriate_reserved_named( - // &T::ReserveId::get(), - // &who, - // &our_account, - // excess, - // BalanceStatus::Free, - // ).defensive(); - T::Currency::unreserve_named(&T::ReserveId::get(), &who, on_hold); - // It could theoretically be locked, so really we should be using a more - // forceful variant. But the alternative `repatriate_reserved_named` will - // fail if the destination account doesn't exist. This should be fixed when - // we move to the `fungible::*` traits, which should include a force - // transfer function to transfer the reserved balance into free balance in - // the destination regardless of locks and create it if it doesn't exist. - let _ = T::Currency::transfer(&who, &Self::account_id(), on_hold, AllowDeath); + // Transfer excess of `on_hold` to the pot if we have now fully compensated for + // the receipt. + T::Currency::transfer_on_hold( + &T::HoldReason::get(), + &who, + &our_account, + on_hold, + Exact, + Free, + Polite, + ) + .map(|_| ()) + // We ignore this error as it just means the amount we're trying to deposit is + // dust and the beneficiary account doesn't exist. + .or_else( + |e| if e == TokenError::CannotCreate.into() { Ok(()) } else { Err(e) }, + )?; summary.receipts_on_hold.saturating_reduce(on_hold); } + T::Currency::release(&T::HoldReason::get(), &who, amount, Exact)?; } if dropped { @@ -797,7 +808,8 @@ pub mod pallet { summary.thawed.saturating_accrue(receipt.proportion); ensure!(summary.thawed <= throttle, Error::::Throttled); - T::Counterpart::burn_from(&who, T::CounterpartAmount::convert(receipt.proportion))?; + let cp_amount = T::CounterpartAmount::convert(receipt.proportion); + T::Counterpart::burn_from(&who, cp_amount, Exact, Polite)?; // Multiply the proportion it is by the total issued. let our_account = Self::account_id(); @@ -807,7 +819,7 @@ pub mod pallet { summary.proportion_owed.saturating_reduce(receipt.proportion); // Try to transfer amount owed from pot to receipt owner. - T::Currency::transfer(&our_account, &who, amount, AllowDeath) + T::Currency::transfer(&our_account, &who, amount, Expendable) .map_err(|_| Error::::Unfunded)?; Receipts::::remove(index); @@ -840,11 +852,10 @@ pub mod pallet { ensure!(owner == who, Error::::NotOwner); // Unreserve and transfer the funds to the pot. - T::Currency::unreserve_named(&T::ReserveId::get(), &who, on_hold); - // Transfer `excess` to the pot if we have now fully compensated for the receipt. - T::Currency::transfer(&who, &Self::account_id(), on_hold, AllowDeath) + let reason = T::HoldReason::get(); + let us = Self::account_id(); + T::Currency::transfer_on_hold(&reason, &who, &us, on_hold, Exact, Free, Polite) .map_err(|_| Error::::Unfunded)?; - // TODO #12951: ^^^ The above should be done in a single operation `transfer_on_hold`. // Record that we've moved the amount reserved. 
let mut summary: SummaryRecordOf = Summary::::get(); @@ -881,16 +892,20 @@ pub mod pallet { let effective_issuance = Self::issuance_with(&our_account, &summary).effective; let max_amount = receipt.proportion * effective_issuance; // Avoid trying to place more in the account's reserve than we have available in the pot - let amount = max_amount.min(T::Currency::free_balance(&our_account)); + let amount = max_amount.min(T::Currency::balance(&our_account)); // Burn fungible counterparts. - T::Counterpart::burn_from(&who, T::CounterpartAmount::convert(receipt.proportion))?; + T::Counterpart::burn_from( + &who, + T::CounterpartAmount::convert(receipt.proportion), + Exact, + Polite, + )?; // Transfer the funds from the pot to the owner and reserve - T::Currency::transfer(&Self::account_id(), &who, amount, AllowDeath) - .map_err(|_| Error::::Unfunded)?; - T::Currency::reserve_named(&T::ReserveId::get(), &who, amount)?; - // TODO: ^^^ The above should be done in a single operation `transfer_and_hold`. + let reason = T::HoldReason::get(); + let us = Self::account_id(); + T::Currency::transfer_and_hold(&reason, &us, &who, amount, Exact, Expendable, Polite)?; // Record that we've moved the amount reserved. summary.receipts_on_hold.saturating_accrue(amount); @@ -920,7 +935,7 @@ pub mod pallet { pub required: Balance, } - impl NonfungibleInspect for Pallet { + impl NftInspect for Pallet { type ItemId = ReceiptIndex; fn owner(item: &ReceiptIndex) -> Option { @@ -939,31 +954,19 @@ pub mod pallet { } } - impl NonfungibleTransfer for Pallet { - fn transfer(index: &ReceiptIndex, destination: &T::AccountId) -> DispatchResult { + impl NftTransfer for Pallet { + fn transfer(index: &ReceiptIndex, dest: &T::AccountId) -> DispatchResult { let mut item = Receipts::::get(index).ok_or(TokenError::UnknownAsset)?; let (owner, on_hold) = item.owner.take().ok_or(Error::::AlreadyCommunal)?; - // TODO: This should all be replaced by a single call `transfer_held`. - let shortfall = T::Currency::unreserve_named(&T::ReserveId::get(), &owner, on_hold); - if !shortfall.is_zero() { - let _ = - T::Currency::reserve_named(&T::ReserveId::get(), &owner, on_hold - shortfall); - return Err(TokenError::NoFunds.into()) - } - if let Err(e) = T::Currency::transfer(&owner, destination, on_hold, AllowDeath) { - let _ = T::Currency::reserve_named(&T::ReserveId::get(), &owner, on_hold); - return Err(e) - } - // This can never fail, and if it somehow does, then we can't handle this gracefully. 
- let _ = - T::Currency::reserve_named(&T::ReserveId::get(), destination, on_hold).defensive(); + let reason = T::HoldReason::get(); + T::Currency::transfer_on_hold(&reason, &owner, dest, on_hold, Exact, OnHold, Polite)?; - item.owner = Some((destination.clone(), on_hold)); + item.owner = Some((dest.clone(), on_hold)); Receipts::::insert(&index, &item); Pallet::::deposit_event(Event::::Transferred { from: owner, - to: destination.clone(), + to: dest.clone(), index: *index, }); Ok(()) @@ -997,9 +1000,9 @@ pub mod pallet { summary: &SummaryRecordOf, ) -> IssuanceInfo> { let total_issuance = - T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get()); + T::Currency::active_issuance().saturating_sub(T::IgnoredIssuance::get()); let holdings = - T::Currency::free_balance(our_account).saturating_add(summary.receipts_on_hold); + T::Currency::balance(our_account).saturating_add(summary.receipts_on_hold); let other = total_issuance.saturating_sub(holdings); let effective = summary.proportion_owed.left_from_one().saturating_reciprocal_mul(other); @@ -1136,7 +1139,8 @@ pub mod pallet { // Now to activate the bid... let n = amount; let d = issuance.effective; - let proportion = Perquintill::from_rational(n, d); + let proportion = Perquintill::from_rational_with_rounding(n, d, Rounding::Down) + .defensive_unwrap_or_default(); let who = bid.who; let index = summary.index; summary.proportion_owed.defensive_saturating_accrue(proportion); diff --git a/frame/nis/src/mock.rs b/frame/nis/src/mock.rs index 8594c88a5927d..0ca6690936818 100644 --- a/frame/nis/src/mock.rs +++ b/frame/nis/src/mock.rs @@ -19,13 +19,18 @@ use crate::{self as pallet_nis, Perquintill, WithMaximumOf}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ord_parameter_types, parameter_types, - traits::{ConstU16, ConstU32, ConstU64, Currency, OnFinalize, OnInitialize, StorageMapShim}, + traits::{ + fungible::Inspect, ConstU16, ConstU32, ConstU64, Everything, OnFinalize, OnInitialize, + StorageMapShim, + }, weights::Weight, PalletId, }; use pallet_balances::{Instance1, Instance2}; +use scale_info::TypeInfo; use sp_core::{ConstU128, H256}; use sp_runtime::{ testing::Header, @@ -35,6 +40,8 @@ use sp_runtime::{ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; +pub type Balance = u64; + // Configure a mock runtime to test the pallet. 
frame_support::construct_runtime!( pub enum Test where @@ -50,7 +57,7 @@ frame_support::construct_runtime!( ); impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; + type BaseCallFilter = Everything; type BlockWeights = (); type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; @@ -67,35 +74,45 @@ impl frame_system::Config for Test { type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = ConstU16<42>; type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; } impl pallet_balances::Config for Test { - type Balance = u64; + type Balance = Balance; type DustRemoval = (); type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = frame_support::traits::ConstU64<1>; + type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); type MaxLocks = (); type MaxReserves = ConstU32<1>; type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = HoldIdentifier; + type MaxHolds = ConstU32<1>; +} + +#[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, Debug, TypeInfo, +)] +pub enum HoldIdentifier { + Nis, } impl pallet_balances::Config for Test { type Balance = u128; type DustRemoval = (); type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = frame_support::traits::ConstU128<1>; + type ExistentialDeposit = ConstU128<1>; type AccountStore = StorageMapShim< pallet_balances::Account, - frame_system::Provider, u64, pallet_balances::AccountData, >; @@ -103,16 +120,20 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { - pub IgnoredIssuance: u64 = Balances::total_balance(&0); // Account zero is ignored. + pub IgnoredIssuance: Balance = Balances::total_balance(&0); // Account zero is ignored. pub const NisPalletId: PalletId = PalletId(*b"py/nis "); pub static Target: Perquintill = Perquintill::zero(); pub const MinReceipt: Perquintill = Perquintill::from_percent(1); pub const ThawThrottle: (Perquintill, u64) = (Perquintill::from_percent(25), 5); pub static MaxIntakeWeight: Weight = Weight::from_parts(2_000_000_000_000, 0); - pub const ReserveId: [u8; 8] = *b"py/nis "; + pub const HoldReason: HoldIdentifier = HoldIdentifier::Nis; } ord_parameter_types! { @@ -140,7 +161,7 @@ impl pallet_nis::Config for Test { type MaxIntakeWeight = MaxIntakeWeight; type MinReceipt = MinReceipt; type ThawThrottle = ThawThrottle; - type ReserveId = ReserveId; + type HoldReason = HoldReason; } // This function basically just builds a genesis storage key/value store according to @@ -155,6 +176,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities { t.into() } +// This function basically just builds a genesis storage key/value store according to +// our desired mockup, but without any balances. 
+#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext_empty() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default().build_storage::().unwrap().into() +} + pub fn run_to_block(n: u64) { while System::block_number() < n { Nis::on_finalize(System::block_number()); diff --git a/frame/nis/src/tests.rs b/frame/nis/src/tests.rs index d0808367ee08c..7350da97dc60a 100644 --- a/frame/nis/src/tests.rs +++ b/frame/nis/src/tests.rs @@ -22,19 +22,22 @@ use crate::{mock::*, Error}; use frame_support::{ assert_noop, assert_ok, traits::{ + fungible::{hold::Inspect as InspectHold, Inspect as FunInspect, Mutate as FunMutate}, nonfungible::{Inspect, Transfer}, - Currency, + tokens::{Fortitude::Force, Precision::Exact}, }, }; -use pallet_balances::{Error as BalancesError, Instance1}; use sp_arithmetic::Perquintill; -use sp_runtime::{Saturating, TokenError}; +use sp_runtime::{ + Saturating, + TokenError::{self, FundsUnavailable}, +}; -fn pot() -> u64 { +fn pot() -> Balance { Balances::free_balance(&Nis::account_id()) } -fn holdings() -> u64 { +fn holdings() -> Balance { Nis::issuance().holdings } @@ -42,8 +45,8 @@ fn signed(who: u64) -> RuntimeOrigin { RuntimeOrigin::signed(who) } -fn enlarge(amount: u64, max_bids: u32) { - let summary: SummaryRecord = Summary::::get(); +fn enlarge(amount: Balance, max_bids: u32) { + let summary: SummaryRecord = Summary::::get(); let increase_in_proportion_owed = Perquintill::from_rational(amount, Nis::issuance().effective); let target = summary.proportion_owed.saturating_add(increase_in_proportion_owed); Nis::process_queues(target, u32::max_value(), max_bids, &mut WeightCounter::unlimited()); @@ -75,10 +78,7 @@ fn place_bid_works() { new_test_ext().execute_with(|| { run_to_block(1); assert_noop!(Nis::place_bid(signed(1), 1, 2), Error::::AmountTooSmall); - assert_noop!( - Nis::place_bid(signed(1), 101, 2), - BalancesError::::InsufficientBalance - ); + assert_noop!(Nis::place_bid(signed(1), 101, 2), FundsUnavailable); assert_noop!(Nis::place_bid(signed(1), 10, 4), Error::::DurationTooBig); assert_ok!(Nis::place_bid(signed(1), 10, 2)); assert_eq!(Balances::reserved_balance(1), 10); @@ -451,16 +451,16 @@ fn communify_works() { assert_noop!(Nis::thaw_communal(signed(1), 1), Error::::UnknownReceipt); // Transfer some of the fungibles away. - assert_ok!(NisBalances::transfer(signed(1), 2, 100_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(1), 2, 100_000)); assert_eq!(NisBalances::free_balance(&1), 2_000_000); assert_eq!(NisBalances::free_balance(&2), 100_000); // Communal thawing with the correct index is not possible now. - assert_noop!(Nis::thaw_communal(signed(1), 0), TokenError::NoFunds); - assert_noop!(Nis::thaw_communal(signed(2), 0), TokenError::NoFunds); + assert_noop!(Nis::thaw_communal(signed(1), 0), TokenError::FundsUnavailable); + assert_noop!(Nis::thaw_communal(signed(2), 0), TokenError::FundsUnavailable); // Transfer the rest to 2... 
- assert_ok!(NisBalances::transfer(signed(1), 2, 2_000_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(1), 2, 2_000_000)); assert_eq!(NisBalances::free_balance(&1), 0); assert_eq!(NisBalances::free_balance(&2), 2_100_000); @@ -487,8 +487,8 @@ fn privatize_works() { assert_ok!(Nis::communify(signed(1), 0)); // Transfer the fungibles to #2 - assert_ok!(NisBalances::transfer(signed(1), 2, 2_100_000)); - assert_noop!(Nis::privatize(signed(1), 0), TokenError::NoFunds); + assert_ok!(NisBalances::transfer_allow_death(signed(1), 2, 2_100_000)); + assert_noop!(Nis::privatize(signed(1), 0), TokenError::FundsUnavailable); // Privatize assert_ok!(Nis::privatize(signed(2), 0)); @@ -513,16 +513,16 @@ fn privatize_and_thaw_with_another_receipt_works() { assert_ok!(Nis::communify(signed(2), 1)); // Transfer half of fungibles to #3 from each of #1 and #2, and the other half from #2 to #4 - assert_ok!(NisBalances::transfer(signed(1), 3, 1_050_000)); - assert_ok!(NisBalances::transfer(signed(2), 3, 1_050_000)); - assert_ok!(NisBalances::transfer(signed(2), 4, 1_050_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(1), 3, 1_050_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(2), 3, 1_050_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(2), 4, 1_050_000)); // #3 privatizes, partially thaws, then re-communifies with #0, then transfers the fungibles // to #2 assert_ok!(Nis::privatize(signed(3), 0)); assert_ok!(Nis::thaw_private(signed(3), 0, Some(Perquintill::from_percent(5)))); assert_ok!(Nis::communify(signed(3), 0)); - assert_ok!(NisBalances::transfer(signed(3), 1, 1_050_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(3), 1, 1_050_000)); // #1 now has enough to thaw using receipt 1 assert_ok!(Nis::thaw_communal(signed(1), 1)); @@ -536,17 +536,21 @@ fn privatize_and_thaw_with_another_receipt_works() { fn communal_thaw_when_issuance_higher_works() { new_test_ext().execute_with(|| { run_to_block(1); + assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 100, 1)); enlarge(100, 1); + assert_eq!(Balances::total_balance(&1), 101); assert_ok!(Nis::communify(signed(1), 0)); + assert_eq!(Balances::total_balance_on_hold(&1), 0); + assert_eq!(Balances::total_balance(&1), 1); - assert_eq!(NisBalances::free_balance(1), 5_250_000); // (25% of 21m) + assert_eq!(NisBalances::free_balance(1), 5_250_000); // (12.5% of 21m) // Everybody else's balances goes up by 50% - Balances::make_free_balance_be(&2, 150); - Balances::make_free_balance_be(&3, 150); - Balances::make_free_balance_be(&4, 150); + assert_ok!(Balances::mint_into(&2, 50)); + assert_ok!(Balances::mint_into(&3, 50)); + assert_ok!(Balances::mint_into(&4, 50)); run_to_block(4); @@ -556,16 +560,20 @@ fn communal_thaw_when_issuance_higher_works() { assert_ok!(Nis::fund_deficit(signed(1))); // Transfer counterparts away... - assert_ok!(NisBalances::transfer(signed(1), 2, 250_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(1), 2, 125_000)); // ...and it's not thawable. - assert_noop!(Nis::thaw_communal(signed(1), 0), TokenError::NoFunds); + assert_noop!(Nis::thaw_communal(signed(1), 0), TokenError::FundsUnavailable); // Transfer counterparts back... - assert_ok!(NisBalances::transfer(signed(2), 1, 250_000)); + assert_ok!(NisBalances::transfer_allow_death(signed(2), 1, 125_000)); // ...and it is. 
assert_ok!(Nis::thaw_communal(signed(1), 0)); + assert_eq!(Balances::total_balance(&1), 151); + assert_ok!(Balances::transfer_allow_death(signed(1), 2, 1)); + assert_eq!(Balances::total_balance(&1), 150); assert_eq!(Balances::free_balance(1), 150); + assert_eq!(Balances::total_balance_on_hold(&1), 0); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -574,13 +582,14 @@ fn communal_thaw_when_issuance_higher_works() { fn private_thaw_when_issuance_higher_works() { new_test_ext().execute_with(|| { run_to_block(1); + assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 100, 1)); enlarge(100, 1); // Everybody else's balances goes up by 50% - Balances::make_free_balance_be(&2, 150); - Balances::make_free_balance_be(&3, 150); - Balances::make_free_balance_be(&4, 150); + assert_ok!(Balances::mint_into(&2, 50)); + assert_ok!(Balances::mint_into(&3, 50)); + assert_ok!(Balances::mint_into(&4, 50)); run_to_block(4); @@ -591,6 +600,7 @@ fn private_thaw_when_issuance_higher_works() { assert_ok!(Nis::thaw_private(signed(1), 0, None)); + assert_ok!(Balances::transfer_allow_death(signed(1), 2, 1)); assert_eq!(Balances::free_balance(1), 150); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -601,15 +611,16 @@ fn thaw_with_ignored_issuance_works() { new_test_ext().execute_with(|| { run_to_block(1); // Give account zero some balance. - Balances::make_free_balance_be(&0, 200); + assert_ok!(Balances::mint_into(&0, 200)); + assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 100, 1)); enlarge(100, 1); // Account zero transfers 50 into everyone else's accounts. - assert_ok!(Balances::transfer(signed(0), 2, 50)); - assert_ok!(Balances::transfer(signed(0), 3, 50)); - assert_ok!(Balances::transfer(signed(0), 4, 50)); + assert_ok!(Balances::transfer_allow_death(signed(0), 2, 50)); + assert_ok!(Balances::transfer_allow_death(signed(0), 3, 50)); + assert_ok!(Balances::transfer_allow_death(signed(0), 4, 50)); run_to_block(4); // Unfunded initially... @@ -620,6 +631,7 @@ fn thaw_with_ignored_issuance_works() { assert_ok!(Nis::thaw_private(signed(1), 0, None)); // Account zero changes have been ignored. 
+ assert_ok!(Balances::transfer_allow_death(signed(1), 2, 1)); assert_eq!(Balances::free_balance(1), 150); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -629,17 +641,19 @@ fn thaw_with_ignored_issuance_works() { fn thaw_when_issuance_lower_works() { new_test_ext().execute_with(|| { run_to_block(1); + assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 100, 1)); enlarge(100, 1); // Everybody else's balances goes down by 25% - Balances::make_free_balance_be(&2, 75); - Balances::make_free_balance_be(&3, 75); - Balances::make_free_balance_be(&4, 75); + assert_ok!(Balances::burn_from(&2, 25, Exact, Force)); + assert_ok!(Balances::burn_from(&3, 25, Exact, Force)); + assert_ok!(Balances::burn_from(&4, 25, Exact, Force)); run_to_block(4); assert_ok!(Nis::thaw_private(signed(1), 0, None)); + assert_ok!(Balances::transfer_allow_death(signed(1), 2, 1)); assert_eq!(Balances::free_balance(1), 75); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -649,15 +663,16 @@ fn thaw_when_issuance_lower_works() { fn multiple_thaws_works() { new_test_ext().execute_with(|| { run_to_block(1); + assert_ok!(Balances::transfer_allow_death(signed(3), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_ok!(Nis::place_bid(signed(1), 60, 1)); assert_ok!(Nis::place_bid(signed(2), 50, 1)); enlarge(200, 3); // Double everyone's free balances. - Balances::make_free_balance_be(&2, 100); - Balances::make_free_balance_be(&3, 200); - Balances::make_free_balance_be(&4, 200); + assert_ok!(Balances::mint_into(&2, 50)); + assert_ok!(Balances::mint_into(&3, 100)); + assert_ok!(Balances::mint_into(&4, 100)); assert_ok!(Nis::fund_deficit(signed(1))); run_to_block(4); @@ -667,8 +682,11 @@ fn multiple_thaws_works() { run_to_block(5); assert_ok!(Nis::thaw_private(signed(2), 2, None)); + assert_ok!(Balances::transfer_allow_death(signed(1), 3, 1)); assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(2), 200); + assert_eq!(Balances::total_balance(&1), 200); + assert_eq!(Balances::total_balance(&2), 200); }); } @@ -676,15 +694,16 @@ fn multiple_thaws_works() { fn multiple_thaws_works_in_alternative_thaw_order() { new_test_ext().execute_with(|| { run_to_block(1); + assert_ok!(Balances::transfer_allow_death(signed(3), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_ok!(Nis::place_bid(signed(1), 60, 1)); assert_ok!(Nis::place_bid(signed(2), 50, 1)); enlarge(200, 3); // Double everyone's free balances. - Balances::make_free_balance_be(&2, 100); - Balances::make_free_balance_be(&3, 200); - Balances::make_free_balance_be(&4, 200); + assert_ok!(Balances::mint_into(&2, 50)); + assert_ok!(Balances::mint_into(&3, 100)); + assert_ok!(Balances::mint_into(&4, 100)); assert_ok!(Nis::fund_deficit(signed(1))); run_to_block(4); @@ -695,8 +714,11 @@ fn multiple_thaws_works_in_alternative_thaw_order() { run_to_block(5); assert_ok!(Nis::thaw_private(signed(1), 1, None)); + assert_ok!(Balances::transfer_allow_death(signed(1), 3, 1)); assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(2), 200); + assert_eq!(Balances::total_balance(&1), 200); + assert_eq!(Balances::total_balance(&2), 200); }); } diff --git a/frame/nis/src/weights.rs b/frame/nis/src/weights.rs index 27c2f23e0e9ec..4f92da874b5a2 100644 --- a/frame/nis/src/weights.rs +++ b/frame/nis/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_nis //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -52,10 +52,10 @@ pub trait WeightInfo { fn place_bid_max() -> Weight; fn retract_bid(l: u32, ) -> Weight; fn fund_deficit() -> Weight; + fn communify() -> Weight; + fn privatize() -> Weight; fn thaw_private() -> Weight; fn thaw_communal() -> Weight; - fn privatize() -> Weight; - fn communify() -> Weight; fn process_queues() -> Weight; fn process_queue() -> Weight; fn process_bid() -> Weight; @@ -66,52 +66,52 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6245 + l * (48 ±0)` - // Estimated: `60718` - // Minimum execution time: 28_814 nanoseconds. - Weight::from_parts(35_245_917, 60718) - // Standard Error: 189 - .saturating_add(Weight::from_parts(45_322, 0).saturating_mul(l.into())) + // Measured: `6175 + l * (48 ±0)` + // Estimated: `51487` + // Minimum execution time: 49_132_000 picoseconds. + Weight::from_parts(55_373_619, 51487) + // Standard Error: 198 + .saturating_add(Weight::from_parts(44_421, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) fn place_bid_max() -> Weight { // Proof Size summary in bytes: - // Measured: `54247` - // Estimated: `60718` - // Minimum execution time: 80_332 nanoseconds. - Weight::from_parts(81_050_000, 60718) + // Measured: `54177` + // Estimated: `51487` + // Minimum execution time: 111_471_000 picoseconds. 
+ Weight::from_parts(112_287_000, 51487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6245 + l * (48 ±0)` - // Estimated: `60718` - // Minimum execution time: 34_426 nanoseconds. - Weight::from_parts(36_434_166, 60718) - // Standard Error: 135 - .saturating_add(Weight::from_parts(33_923, 0).saturating_mul(l.into())) + // Measured: `6175 + l * (48 ±0)` + // Estimated: `51487` + // Minimum execution time: 51_134_000 picoseconds. + Weight::from_parts(52_353_883, 51487) + // Standard Error: 161 + .saturating_add(Weight::from_parts(62_171, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -121,48 +121,33 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn fund_deficit() -> Weight { // Proof Size summary in bytes: - // Measured: `222` - // Estimated: `3138` - // Minimum execution time: 32_566 nanoseconds. - Weight::from_parts(32_880_000, 3138) + // Measured: `191` + // Estimated: `3593` + // Minimum execution time: 41_421_000 picoseconds. + Weight::from_parts(41_762_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) - fn thaw_private() -> Weight { - // Proof Size summary in bytes: - // Measured: `423` - // Estimated: `9418` - // Minimum execution time: 46_212 nanoseconds. 
- Weight::from_parts(46_748_000, 9418) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn thaw_communal() -> Weight { + fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `868` - // Estimated: `10956` - // Minimum execution time: 68_791 nanoseconds. - Weight::from_parts(69_504_000, 10956) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Measured: `667` + // Estimated: `3675` + // Minimum execution time: 74_179_000 picoseconds. + Weight::from_parts(74_795_000, 3675) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) @@ -174,37 +159,52 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `930` - // Estimated: `14680` - // Minimum execution time: 76_784 nanoseconds. - Weight::from_parts(77_575_000, 14680) + // Measured: `828` + // Estimated: `3675` + // Minimum execution time: 85_252_000 picoseconds. + Weight::from_parts(85_949_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Nis Summary (r:1 w:1) + /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + fn thaw_private() -> Weight { + // Proof Size summary in bytes: + // Measured: `456` + // Estimated: `3593` + // Minimum execution time: 82_100_000 picoseconds. 
+ Weight::from_parts(82_563_000, 3593) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: Nis Receipts (r:1 w:1) + /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) - fn communify() -> Weight { + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn thaw_communal() -> Weight { // Proof Size summary in bytes: - // Measured: `769` - // Estimated: `14680` - // Minimum execution time: 64_543 nanoseconds. - Weight::from_parts(65_258_000, 14680) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Measured: `773` + // Estimated: `3675` + // Minimum execution time: 86_498_000 picoseconds. + Weight::from_parts(87_175_000, 3675) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) @@ -214,10 +214,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) fn process_queues() -> Weight { // Proof Size summary in bytes: - // Measured: `6655` - // Estimated: `9635` - // Minimum execution time: 21_379 nanoseconds. - Weight::from_parts(21_736_000, 9635) + // Measured: `6624` + // Estimated: `7487` + // Minimum execution time: 22_507_000 picoseconds. + Weight::from_parts(22_788_000, 7487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -226,9 +226,9 @@ impl WeightInfo for SubstrateWeight { fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `50497` - // Minimum execution time: 4_302 nanoseconds. - Weight::from_parts(4_440_000, 50497) + // Estimated: `51487` + // Minimum execution time: 4_692_000 picoseconds. + Weight::from_parts(4_862_000, 51487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -238,8 +238,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_758 nanoseconds. - Weight::from_parts(6_911_000, 0) + // Minimum execution time: 8_031_000 picoseconds. 
+ Weight::from_parts(8_183_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -248,52 +248,52 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6245 + l * (48 ±0)` - // Estimated: `60718` - // Minimum execution time: 28_814 nanoseconds. - Weight::from_parts(35_245_917, 60718) - // Standard Error: 189 - .saturating_add(Weight::from_parts(45_322, 0).saturating_mul(l.into())) + // Measured: `6175 + l * (48 ±0)` + // Estimated: `51487` + // Minimum execution time: 49_132_000 picoseconds. + Weight::from_parts(55_373_619, 51487) + // Standard Error: 198 + .saturating_add(Weight::from_parts(44_421, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) fn place_bid_max() -> Weight { // Proof Size summary in bytes: - // Measured: `54247` - // Estimated: `60718` - // Minimum execution time: 80_332 nanoseconds. - Weight::from_parts(81_050_000, 60718) + // Measured: `54177` + // Estimated: `51487` + // Minimum execution time: 111_471_000 picoseconds. + Weight::from_parts(112_287_000, 51487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6245 + l * (48 ±0)` - // Estimated: `60718` - // Minimum execution time: 34_426 nanoseconds. - Weight::from_parts(36_434_166, 60718) - // Standard Error: 135 - .saturating_add(Weight::from_parts(33_923, 0).saturating_mul(l.into())) + // Measured: `6175 + l * (48 ±0)` + // Estimated: `51487` + // Minimum execution time: 51_134_000 picoseconds. 
+ Weight::from_parts(52_353_883, 51487) + // Standard Error: 161 + .saturating_add(Weight::from_parts(62_171, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -303,48 +303,33 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn fund_deficit() -> Weight { // Proof Size summary in bytes: - // Measured: `222` - // Estimated: `3138` - // Minimum execution time: 32_566 nanoseconds. - Weight::from_parts(32_880_000, 3138) + // Measured: `191` + // Estimated: `3593` + // Minimum execution time: 41_421_000 picoseconds. + Weight::from_parts(41_762_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) - fn thaw_private() -> Weight { - // Proof Size summary in bytes: - // Measured: `423` - // Estimated: `9418` - // Minimum execution time: 46_212 nanoseconds. - Weight::from_parts(46_748_000, 9418) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn thaw_communal() -> Weight { + fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `868` - // Estimated: `10956` - // Minimum execution time: 68_791 nanoseconds. - Weight::from_parts(69_504_000, 10956) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Measured: `667` + // Estimated: `3675` + // Minimum execution time: 74_179_000 picoseconds. 
+ Weight::from_parts(74_795_000, 3675) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) @@ -356,37 +341,52 @@ impl WeightInfo for () { /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `930` - // Estimated: `14680` - // Minimum execution time: 76_784 nanoseconds. - Weight::from_parts(77_575_000, 14680) + // Measured: `828` + // Estimated: `3675` + // Minimum execution time: 85_252_000 picoseconds. + Weight::from_parts(85_949_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Reserves (r:1 w:1) - /// Proof: Balances Reserves (max_values: None, max_size: Some(1249), added: 3724, mode: MaxEncodedLen) + /// Storage: Nis Summary (r:1 w:1) + /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + fn thaw_private() -> Weight { + // Proof Size summary in bytes: + // Measured: `456` + // Estimated: `3593` + // Minimum execution time: 82_100_000 picoseconds. + Weight::from_parts(82_563_000, 3593) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: Nis Receipts (r:1 w:1) + /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) - fn communify() -> Weight { + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn thaw_communal() -> Weight { // Proof Size summary in bytes: - // Measured: `769` - // Estimated: `14680` - // Minimum execution time: 64_543 nanoseconds. - Weight::from_parts(65_258_000, 14680) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Measured: `773` + // Estimated: `3675` + // Minimum execution time: 86_498_000 picoseconds. 
+ Weight::from_parts(87_175_000, 3675) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) @@ -396,10 +396,10 @@ impl WeightInfo for () { /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) fn process_queues() -> Weight { // Proof Size summary in bytes: - // Measured: `6655` - // Estimated: `9635` - // Minimum execution time: 21_379 nanoseconds. - Weight::from_parts(21_736_000, 9635) + // Measured: `6624` + // Estimated: `7487` + // Minimum execution time: 22_507_000 picoseconds. + Weight::from_parts(22_788_000, 7487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -408,9 +408,9 @@ impl WeightInfo for () { fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `50497` - // Minimum execution time: 4_302 nanoseconds. - Weight::from_parts(4_440_000, 50497) + // Estimated: `51487` + // Minimum execution time: 4_692_000 picoseconds. + Weight::from_parts(4_862_000, 51487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -420,8 +420,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_758 nanoseconds. - Weight::from_parts(6_911_000, 0) + // Minimum execution time: 8_031_000 picoseconds. + Weight::from_parts(8_183_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index c60ce1d12ce09..ed3f59a9a1e24 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index d51bcec51f478..c92d9b5124697 100644 --- a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } # FRAME frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/nomination-pools/benchmarking/Cargo.toml b/frame/nomination-pools/benchmarking/Cargo.toml index b96024af7d3c5..4f757cafa2fd1 100644 --- a/frame/nomination-pools/benchmarking/Cargo.toml +++ b/frame/nomination-pools/benchmarking/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # 
parity codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } # FRAME frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index d58bbaf3d117c..137b9e9af63e3 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -684,12 +684,12 @@ frame_benchmarking::benchmarks! { .collect(); assert_ok!(T::Staking::nominate(&pool_account, validators)); - assert!(T::Staking::nominations(Pools::::create_bonded_account(1)).is_some()); + assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_some()); whitelist_account!(depositor); }:_(RuntimeOrigin::Signed(depositor.clone()), 1) verify { - assert!(T::Staking::nominations(Pools::::create_bonded_account(1)).is_none()); + assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_none()); } set_commission { diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index 4a1a52868e367..cffb712ea2ae5 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -75,6 +75,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } pallet_staking_reward_curve::build! { diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index cfde05ffeeabe..78f0c730ce5dd 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -133,7 +133,7 @@ //! * Root: can change the nominator, bouncer, or itself, manage and claim commission, and can //! perform any of the actions the nominator or bouncer can. //! -//! ## Commission +//! ### Commission //! //! A pool can optionally have a commission configuration, via the `root` role, set with //! [`Call::set_commission`] and claimed with [`Call::claim_commission`]. A payee account must be @@ -2617,7 +2617,7 @@ pub mod pallet { /// commission is paid out and added to total claimed commission`. Total pending commission /// is reset to zero. the current. 
#[pallet::call_index(20)] - #[pallet::weight(0)] + #[pallet::weight(T::WeightInfo::claim_commission())] pub fn claim_commission(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; Self::do_claim_commission(who, pool_id) diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index c6b094f0aa0f1..3ab9be516fdb9 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -67,6 +67,14 @@ impl sp_staking::StakingInterface for StakingMock { BondingDuration::get() } + fn status( + _: &Self::AccountId, + ) -> Result, DispatchError> { + Nominations::get() + .map(|noms| sp_staking::StakerStatus::Nominator(noms)) + .ok_or(DispatchError::Other("NotStash")) + } + fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { let mut x = BondedBalanceMap::get(); x.get_mut(who).map(|v| *v += extra); @@ -108,7 +116,7 @@ impl sp_staking::StakingInterface for StakingMock { } #[cfg(feature = "runtime-benchmarks")] - fn nominations(_: Self::AccountId) -> Option> { + fn nominations(_: &Self::AccountId) -> Option> { Nominations::get() } @@ -116,15 +124,15 @@ impl sp_staking::StakingInterface for StakingMock { unimplemented!("method currently not used in testing") } - fn stake(who: &Self::AccountId) -> Result, DispatchError> { + fn stake(who: &Self::AccountId) -> Result, DispatchError> { match ( UnbondingBalanceMap::get().get(who).copied(), BondedBalanceMap::get().get(who).copied(), ) { (None, None) => Err(DispatchError::Other("balance not found")), - (Some(v), None) => Ok(Stake { total: v, active: 0, stash: *who }), - (None, Some(v)) => Ok(Stake { total: v, active: v, stash: *who }), - (Some(a), Some(b)) => Ok(Stake { total: a + b, active: b, stash: *who }), + (Some(v), None) => Ok(Stake { total: v, active: 0 }), + (None, Some(v)) => Ok(Stake { total: v, active: v }), + (Some(a), Some(b)) => Ok(Stake { total: a + b, active: b }), } } @@ -196,6 +204,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } pub struct BalanceToU256; diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index a4fda2e5d91b3..4cb255e23b4b8 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -39,6 +39,16 @@ macro_rules! 
member_unbonding_eras { pub const DEFAULT_ROLES: PoolRoles = PoolRoles { depositor: 10, root: Some(900), nominator: Some(901), bouncer: Some(902) }; +fn deposit_rewards(r: u128) { + let b = Balances::free_balance(&default_reward_account()).checked_add(r).unwrap(); + Balances::make_free_balance_be(&default_reward_account(), b); +} + +fn remove_rewards(r: u128) { + let b = Balances::free_balance(&default_reward_account()).checked_sub(r).unwrap(); + Balances::make_free_balance_be(&default_reward_account(), b); +} + #[test] fn test_setup_works() { ExtBuilder::default().build_and_execute(|| { @@ -469,6 +479,8 @@ mod sub_pools { } mod join { + use sp_runtime::TokenError; + use super::*; #[test] @@ -592,7 +604,7 @@ mod join { // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` assert_noop!( Pools::join(RuntimeOrigin::signed(11), 5, 123), - pallet_balances::Error::::InsufficientBalance, + TokenError::FundsUnavailable, ); StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); @@ -749,7 +761,7 @@ mod claim_payout { // and the reward pool has earned 100 in rewards assert_eq!(Balances::free_balance(default_reward_account()), ed); - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + deposit_rewards(100); let _ = pool_events_since_last_call(); @@ -796,7 +808,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed); // Given the reward pool has some new rewards - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); + deposit_rewards(50); // When assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -825,7 +837,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25); // Given del 50 hasn't claimed and the reward pools has just earned 50 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); + deposit_rewards(50); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 75); // When @@ -855,7 +867,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 20); // Given del 40 hasn't claimed and the reward pool has just earned 400 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 400)); + deposit_rewards(400); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 420); // When @@ -874,7 +886,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 380); // Given del 40 + del 50 haven't claimed and the reward pool has earned 20 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 20)); + deposit_rewards(20); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 400); // When @@ -974,7 +986,7 @@ mod claim_payout { ); // The pool earns 10 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10)); + deposit_rewards(10); assert_ok!(Pools::do_reward_payout( &10, @@ -1010,7 +1022,7 @@ mod claim_payout { assert_eq!(reward_pool, rew(0, 0, 0)); // Given the pool has earned some rewards for the first time - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 5)); + deposit_rewards(5); // When let payout = @@ -1031,7 +1043,7 @@ mod claim_payout { assert_eq!(member, del(0.5)); // Given the pool has earned rewards again - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10)); + deposit_rewards(10); // When let payout = @@ -1090,7 +1102,7 @@ mod claim_payout { 
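The two helpers introduced here, deposit_rewards and remove_rewards, replace the direct Balances::mutate_account calls used throughout the old tests. A hedged sketch of how they slot into a test in this module — the test name is invented for illustration, while every call mirrors ones already present in this file:

    // Sketch only; assumes the default ExtBuilder setup where account 10 is the
    // sole pool member, as in the surrounding tests.
    #[test]
    fn deposit_rewards_sketch() {
        ExtBuilder::default().build_and_execute(|| {
            // Credit the pool's reward account, replacing the previous
            // `Balances::mutate_account(&default_reward_account(), |a| a.free += 30)` pattern.
            deposit_rewards(30);
            // The sole member (10) can then claim the full amount.
            assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10)));
        });
    }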
assert_eq!(bonded_pool.points, 100); // and the reward pool has earned 100 in rewards - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + deposit_rewards(100); // When let payout = @@ -1135,7 +1147,7 @@ mod claim_payout { assert_eq!(reward_pool, rew(0, 0, 100)); // Given the reward pool has some new rewards - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); + deposit_rewards(50); // When let payout = @@ -1166,7 +1178,7 @@ mod claim_payout { assert_eq!(reward_pool, rew(0, 0, 125)); // Given del_50 hasn't claimed and the reward pools has just earned 50 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); + deposit_rewards(50); // When let payout = @@ -1197,7 +1209,7 @@ mod claim_payout { assert_eq!(reward_pool, rew(0, 0, 180)); // Given del_40 hasn't claimed and the reward pool has just earned 400 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 400)); + deposit_rewards(400); // When let payout = @@ -1214,7 +1226,7 @@ mod claim_payout { assert_eq!(reward_pool, rew(0, 0, 220)); // Given del_40 + del_50 haven't claimed and the reward pool has earned 20 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 20)); + deposit_rewards(20); // When let payout = @@ -1252,14 +1264,14 @@ mod claim_payout { fn rewards_distribution_is_fair_basic() { ExtBuilder::default().build_and_execute(|| { // reward pool by 10. - Balances::mutate_account(&default_reward_account(), |f| f.free += 10).unwrap(); + deposit_rewards(10); // 20 joins afterwards. Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10); assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); // reward by another 20 - Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap(); + deposit_rewards(20); // 10 should claim 10 + 10, 20 should claim 20 / 2. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1276,7 +1288,7 @@ mod claim_payout { ); // any upcoming rewards are shared equally. - Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap(); + deposit_rewards(20); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); @@ -1296,12 +1308,12 @@ mod claim_payout { // basically checks the case where the amount of rewards is less than the pool shares. for // this, we have to rely on fixed point arithmetic. ExtBuilder::default().build_and_execute(|| { - Balances::mutate_account(&default_reward_account(), |f| f.free += 3).unwrap(); + deposit_rewards(3); Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10); assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 6).unwrap(); + deposit_rewards(6); // 10 should claim 3, 20 should claim 3 + 3. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1319,7 +1331,7 @@ mod claim_payout { ); // any upcoming rewards are shared equally. - Balances::mutate_account(&default_reward_account(), |f| f.free += 8).unwrap(); + deposit_rewards(8); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); @@ -1333,7 +1345,7 @@ mod claim_payout { ); // uneven upcoming rewards are shared equally, rounded down. 
- Balances::mutate_account(&default_reward_account(), |f| f.free += 7).unwrap(); + deposit_rewards(7); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); @@ -1353,17 +1365,17 @@ mod claim_payout { ExtBuilder::default().build_and_execute(|| { let ed = Balances::minimum_balance(); - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); Balances::make_free_balance_be(&20, ed + 10); assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); + deposit_rewards(100); Balances::make_free_balance_be(&30, ed + 10); assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); + deposit_rewards(60); // 10 should claim 10, 20 should claim nothing. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1384,7 +1396,7 @@ mod claim_payout { ); // any upcoming rewards are shared equally. - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); @@ -1407,7 +1419,7 @@ mod claim_payout { let ed = Balances::minimum_balance(); assert_eq!(Pools::api_pending_rewards(10), Some(0)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); assert_eq!(Pools::api_pending_rewards(10), Some(30)); assert_eq!(Pools::api_pending_rewards(20), None); @@ -1417,7 +1429,7 @@ mod claim_payout { assert_eq!(Pools::api_pending_rewards(10), Some(30)); assert_eq!(Pools::api_pending_rewards(20), Some(0)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); + deposit_rewards(100); assert_eq!(Pools::api_pending_rewards(10), Some(30 + 50)); assert_eq!(Pools::api_pending_rewards(20), Some(50)); @@ -1430,7 +1442,7 @@ mod claim_payout { assert_eq!(Pools::api_pending_rewards(20), Some(50)); assert_eq!(Pools::api_pending_rewards(30), Some(0)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); + deposit_rewards(60); assert_eq!(Pools::api_pending_rewards(10), Some(30 + 50 + 20)); assert_eq!(Pools::api_pending_rewards(20), Some(50 + 20)); @@ -1464,7 +1476,7 @@ mod claim_payout { Balances::make_free_balance_be(&30, ed + 20); assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); + deposit_rewards(40); // everyone claims. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1488,7 +1500,7 @@ mod claim_payout { assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(30), BondExtra::FreeBalance(10))); // more rewards come in. - Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); + deposit_rewards(100); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); @@ -1514,7 +1526,7 @@ mod claim_payout { Balances::make_free_balance_be(&20, ed + 20); assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); // everyone claims. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1535,7 +1547,7 @@ mod claim_payout { assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); // more rewards come in. 
- Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); + deposit_rewards(100); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); @@ -1562,7 +1574,7 @@ mod claim_payout { assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); // 10 gets 10, 20 gets 20, 30 gets 10 - Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); + deposit_rewards(40); // some claim. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1581,7 +1593,7 @@ mod claim_payout { ); // 10 gets 20, 20 gets 40, 30 gets 20 - Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); + deposit_rewards(80); // some claim. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1596,7 +1608,7 @@ mod claim_payout { ); // 10 gets 20, 20 gets 40, 30 gets 20 - Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); + deposit_rewards(80); // some claim. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1629,7 +1641,7 @@ mod claim_payout { assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); // 10 gets 10, 20 gets 20, 30 gets 10 - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); // some claim. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1645,7 +1657,7 @@ mod claim_payout { ); // 20 has not claimed yet, more reward comes - Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); + deposit_rewards(60); // and 20 bonds more -- they should not have more share of this reward. assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::FreeBalance(10))); @@ -1665,7 +1677,7 @@ mod claim_payout { ); // but in the next round of rewards, the extra10 they bonded has an impact. - Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); + deposit_rewards(60); // everyone claim. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -1695,7 +1707,7 @@ mod claim_payout { assert_eq!(member_10.last_recorded_reward_counter, 0.into()); // transfer some reward to pool 1. - Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); + deposit_rewards(60); // create pool 2 Balances::make_free_balance_be(&20, 100); @@ -1779,7 +1791,7 @@ mod claim_payout { } // transfer some reward to pool 1. - Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); + deposit_rewards(60); { join(30, 10); @@ -1849,7 +1861,7 @@ mod claim_payout { // 10 bonds extra again with some rewards. This reward should be split equally between // 10 and 20, as they both have equal points now. - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); { assert_ok!(Pools::bond_extra( @@ -1905,7 +1917,7 @@ mod claim_payout { MaxPoolMembersPerPool::::set(None); // pool receives some rewards. - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); System::reset_events(); // 10 cashes it out, and bonds it. @@ -1975,7 +1987,7 @@ mod claim_payout { } // some rewards come in. - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + deposit_rewards(30); // and 30 also unbonds half. { @@ -2058,7 +2070,7 @@ mod claim_payout { ); // some rewards come in. 
- Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); + deposit_rewards(40); // everyone claims assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -2120,8 +2132,7 @@ mod claim_payout { .build_and_execute(|| { // some rewards come in. assert_eq!(Balances::free_balance(&default_reward_account()), unit); - Balances::mutate_account(&default_reward_account(), |f| f.free += unit / 1000) - .unwrap(); + deposit_rewards(unit / 1000); // everyone claims assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -3329,11 +3340,7 @@ mod withdraw_unbonded { ); assert_eq!( balances_events_since_last_call(), - vec![BEvent::BalanceSet { - who: default_bonded_account(), - free: 300, - reserved: 0 - }] + vec![BEvent::BalanceSet { who: default_bonded_account(), free: 300 }] ); // When @@ -3447,11 +3454,7 @@ mod withdraw_unbonded { ); assert_eq!( balances_events_since_last_call(), - vec![BEvent::BalanceSet { - who: default_bonded_account(), - free: 300, - reserved: 0 - },] + vec![BEvent::BalanceSet { who: default_bonded_account(), free: 300 },] ); CurrentEra::set(StakingMock::bonding_duration()); @@ -5078,18 +5081,15 @@ mod reward_counter_precision { let expected_smallest_reward = inflation(50) / 10u128.pow(18); // tad bit less. cannot be paid out. - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += - expected_smallest_reward - 1)); + deposit_rewards(expected_smallest_reward - 1); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!(pool_events_since_last_call(), vec![]); // revert it. - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= - expected_smallest_reward - 1)); + remove_rewards(expected_smallest_reward - 1); // tad bit more. can be claimed. - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += - expected_smallest_reward + 1)); + deposit_rewards(expected_smallest_reward + 1); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!( pool_events_since_last_call(), @@ -5114,9 +5114,7 @@ mod reward_counter_precision { assert_ok!(Pools::join(RuntimeOrigin::signed(20), tiny_bond / 2, 1)); // Suddenly, add a shit ton of rewards. - assert_ok!( - Balances::mutate_account(&default_reward_account(), |a| a.free += inflation(1)) - ); + deposit_rewards(inflation(1)); // now claim. assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -5155,8 +5153,7 @@ mod reward_counter_precision { // is earning all of the inflation per year (which is really unrealistic, but worse // case), that will be: let pool_total_earnings_10_years = inflation(10) - POLKADOT_TOTAL_ISSUANCE_GENESIS; - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += - pool_total_earnings_10_years)); + deposit_rewards(pool_total_earnings_10_years); // some whale now joins with the other half ot the total issuance. This will bloat all // the calculation regarding current reward counter. @@ -5186,7 +5183,7 @@ mod reward_counter_precision { assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10 * DOT, 1)); // and give a reasonably small reward to the pool. 
- assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += DOT)); + deposit_rewards(DOT); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( @@ -5258,9 +5255,7 @@ mod reward_counter_precision { assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10 * DOT, 1)); // earn some small rewards - assert_ok!( - Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) - ); + deposit_rewards(DOT / 1000); // no point in claiming for 20 (nonetheless, it should be harmless) assert!(pending_rewards(20).unwrap().is_zero()); @@ -5280,9 +5275,7 @@ mod reward_counter_precision { // earn some small more, still nothing can be claimed for 20, but 10 claims their // share. - assert_ok!( - Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) - ); + deposit_rewards(DOT / 1000); assert!(pending_rewards(20).unwrap().is_zero()); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!( @@ -5291,9 +5284,7 @@ mod reward_counter_precision { ); // earn some more rewards, this time 20 can also claim. - assert_ok!( - Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) - ); + deposit_rewards(DOT / 1000); assert_eq!(pending_rewards(20).unwrap(), 1); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); @@ -5332,9 +5323,7 @@ mod reward_counter_precision { assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10 * DOT, 1)); // earn some small rewards - assert_ok!( - Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) - ); + deposit_rewards(DOT / 1000); // if 20 claims now, their reward counter should stay the same, so that they have a // chance of claiming this if they let it accumulate. Also see @@ -5442,7 +5431,7 @@ mod commission { // Pool earns 80 points and a payout is triggered. // Given: - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 80)); + deposit_rewards(80); assert_eq!( PoolMembers::::get(10).unwrap(), PoolMember:: { pool_id, points: 10, ..Default::default() } @@ -5489,7 +5478,7 @@ mod commission { // is next called, which is not done in this test segment.. 
// Given: - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + deposit_rewards(100); // When: assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -5625,7 +5614,7 @@ mod commission { 1, Some((Perbill::from_percent(10), root)), )); - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 40)); + deposit_rewards(40); // When: assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -6377,7 +6366,7 @@ mod commission { ); // The pool earns 10 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10)); + deposit_rewards(10); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then: @@ -6387,7 +6376,7 @@ mod commission { ); // The pool earns 17 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 17)); + deposit_rewards(17); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then: @@ -6397,7 +6386,7 @@ mod commission { ); // The pool earns 50 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); + deposit_rewards(50); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then: @@ -6407,7 +6396,7 @@ mod commission { ); // The pool earns 10439 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10439)); + deposit_rewards(10439); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then: @@ -6427,7 +6416,7 @@ mod commission { )); // Given: - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 200)); + deposit_rewards(200); assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then: @@ -6469,7 +6458,7 @@ mod commission { // When: // The pool earns 100 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + deposit_rewards(100); // Change commission to 20% assert_ok!(Pools::set_commission( @@ -6486,7 +6475,7 @@ mod commission { ); // The pool earns 100 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + deposit_rewards(100); // Then: @@ -6535,7 +6524,7 @@ mod commission { // When: // The pool earns 100 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + deposit_rewards(100); // Claim payout: assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); @@ -6592,7 +6581,7 @@ mod commission { ); // The pool earns 10 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10)); + deposit_rewards(10); // execute the payout assert_ok!(Pools::do_reward_payout( @@ -6634,7 +6623,7 @@ mod commission { ); // The pool earns 10 points - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10)); + deposit_rewards(10); // execute the payout assert_ok!(Pools::do_reward_payout( @@ -6677,7 +6666,7 @@ mod commission { ); // Pool earns 80 points, payout is triggered. - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 80)); + deposit_rewards(80); assert_eq!( PoolMembers::::get(10).unwrap(), PoolMember:: { pool_id, points: 10, ..Default::default() } diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs index cf0048fa48dd9..0ac50d2f745ed 100644 --- a/frame/nomination-pools/src/weights.rs +++ b/frame/nomination-pools/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_nomination_pools //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-03-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_nomination_pools // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_nomination_pools -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/nomination-pools/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -98,17 +97,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: NominationPools CounterForPoolMembers (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3650` - // Estimated: `52435` - // Minimum execution time: 160_401_000 picoseconds. - Weight::from_parts(161_798_000, 52435) - .saturating_add(T::DbWeight::get().reads(18_u64)) + // Measured: `3300` + // Estimated: `8877` + // Minimum execution time: 186_523_000 picoseconds. + Weight::from_parts(187_817_000, 8877) + .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) } /// Storage: NominationPools PoolMembers (r:1 w:1) @@ -127,17 +128,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3692` - // Estimated: `49070` - // Minimum execution time: 157_668_000 picoseconds. - Weight::from_parts(161_129_000, 49070) - .saturating_add(T::DbWeight::get().reads(15_u64)) + // Measured: `3310` + // Estimated: `8877` + // Minimum execution time: 183_120_000 picoseconds. 
+ Weight::from_parts(184_749_000, 8877) + .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) } /// Storage: NominationPools ClaimPermissions (r:1 w:0) @@ -158,17 +161,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3757` - // Estimated: `52576` - // Minimum execution time: 176_034_000 picoseconds. - Weight::from_parts(176_956_000, 52576) - .saturating_add(T::DbWeight::get().reads(16_u64)) + // Measured: `3375` + // Estimated: `8877` + // Minimum execution time: 218_799_000 picoseconds. + Weight::from_parts(220_139_000, 8877) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: NominationPools ClaimPermissions (r:1 w:0) @@ -185,10 +190,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1331` - // Estimated: `19532` - // Minimum execution time: 61_551_000 picoseconds. - Weight::from_parts(62_201_000, 19532) + // Measured: `1171` + // Estimated: `3702` + // Minimum execution time: 79_493_000 picoseconds. + Weight::from_parts(80_266_000, 3702) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -214,6 +219,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) @@ -224,11 +231,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: NominationPools CounterForSubPoolsStorage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3935` - // Estimated: `82816` - // Minimum execution time: 162_755_000 picoseconds. - Weight::from_parts(163_518_000, 82816) - .saturating_add(T::DbWeight::get().reads(19_u64)) + // Measured: `3586` + // Estimated: `27847` + // Minimum execution time: 168_592_000 picoseconds. 
+ Weight::from_parts(170_130_000, 27847) + .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: NominationPools BondedPools (r:1 w:0) @@ -241,16 +248,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1783` - // Estimated: `18031` - // Minimum execution time: 54_752_000 picoseconds. - Weight::from_parts(56_248_171, 18031) - // Standard Error: 1_891 - .saturating_add(Weight::from_parts(4_767, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `1687` + // Estimated: `4764` + // Minimum execution time: 63_254_000 picoseconds. + Weight::from_parts(64_154_755, 4764) + // Standard Error: 344 + .saturating_add(Weight::from_parts(8_798, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: NominationPools PoolMembers (r:1 w:1) @@ -267,6 +276,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: NominationPools CounterForPoolMembers (r:1 w:1) @@ -276,13 +287,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2307` - // Estimated: `54662` - // Minimum execution time: 106_166_000 picoseconds. - Weight::from_parts(107_806_373, 54662) - // Standard Error: 6_985 - .saturating_add(Weight::from_parts(18_449, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `2115` + // Estimated: `27847` + // Minimum execution time: 131_339_000 picoseconds. 
+ Weight::from_parts(133_590_267, 27847) + // Standard Error: 1_058 + .saturating_add(Weight::from_parts(9_932, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: NominationPools PoolMembers (r:1 w:1) @@ -307,6 +318,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// Proof: NominationPools CounterForPoolMembers (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: NominationPools ReversePoolIdLookup (r:1 w:1) @@ -328,15 +341,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: NominationPools ClaimPermissions (r:0 w:1) /// Proof: NominationPools ClaimPermissions (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(s: u32, ) -> Weight { + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2694` - // Estimated: `87714` - // Minimum execution time: 170_047_000 picoseconds. - Weight::from_parts(172_125_770, 87714) - // Standard Error: 2_599 - .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(20_u64)) + // Measured: `2470` + // Estimated: `27847` + // Minimum execution time: 219_026_000 picoseconds. + Weight::from_parts(223_230_356, 27847) + .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().writes(18_u64)) } /// Storage: NominationPools LastPoolId (r:1 w:1) @@ -369,6 +380,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: NominationPools RewardPools (r:1 w:1) /// Proof: NominationPools RewardPools (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) /// Storage: NominationPools CounterForRewardPools (r:1 w:1) @@ -383,11 +396,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1321` - // Estimated: `51410` - // Minimum execution time: 149_672_000 picoseconds. - Weight::from_parts(153_613_000, 51410) - .saturating_add(T::DbWeight::get().reads(21_u64)) + // Measured: `1289` + // Estimated: `6196` + // Minimum execution time: 189_710_000 picoseconds. + Weight::from_parts(190_251_000, 6196) + .saturating_add(T::DbWeight::get().reads(22_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } /// Storage: NominationPools BondedPools (r:1 w:0) @@ -417,12 +430,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1913` - // Estimated: `33934 + n * (2520 ±0)` - // Minimum execution time: 68_892_000 picoseconds. - Weight::from_parts(69_062_946, 33934) - // Standard Error: 6_448 - .saturating_add(Weight::from_parts(1_422_774, 0).saturating_mul(n.into())) + // Measured: `1849` + // Estimated: `4556 + n * (2520 ±0)` + // Minimum execution time: 70_044_000 picoseconds. + Weight::from_parts(69_307_256, 4556) + // Standard Error: 6_066 + .saturating_add(Weight::from_parts(1_517_942, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -436,10 +449,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `11778` - // Minimum execution time: 36_447_000 picoseconds. - Weight::from_parts(36_837_000, 11778) + // Measured: `1438` + // Estimated: `4556` + // Minimum execution time: 36_610_000 picoseconds. + Weight::from_parts(37_212_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -452,12 +465,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` - // Estimated: `8909` - // Minimum execution time: 15_221_000 picoseconds. - Weight::from_parts(15_632_286, 8909) - // Standard Error: 343 - .saturating_add(Weight::from_parts(2_299, 0).saturating_mul(n.into())) + // Measured: `531` + // Estimated: `3735` + // Minimum execution time: 15_334_000 picoseconds. + Weight::from_parts(15_753_107, 3735) + // Standard Error: 62 + .saturating_add(Weight::from_parts(1_365, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -477,18 +490,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_409_000 picoseconds. - Weight::from_parts(7_702_000, 0) + // Minimum execution time: 7_156_000 picoseconds. + Weight::from_parts(7_596_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: NominationPools BondedPools (r:1 w:1) /// Proof: NominationPools BondedPools (max_values: None, max_size: Some(220), added: 2695, mode: MaxEncodedLen) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_451_000 picoseconds. - Weight::from_parts(20_703_000, 3685) + // Minimum execution time: 20_342_000 picoseconds. + Weight::from_parts(20_699_000, 3685) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -512,10 +525,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2140` - // Estimated: `29455` - // Minimum execution time: 66_001_000 picoseconds. - Weight::from_parts(66_894_000, 29455) + // Measured: `2012` + // Estimated: `4556` + // Minimum execution time: 66_608_000 picoseconds. 
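For component-parameterised entries such as nominate above, the charged weight is the fixed part plus the per-component terms multiplied out. A worked sketch, assuming the standard Weight arithmetic and evaluating the regenerated figures at the benchmarked upper bound of 16 nomination targets (illustrative only; the per-target proof-size term is omitted for brevity):

    use frame_support::weights::{constants::RocksDbWeight, Weight};

    // Illustration: the regenerated `nominate` weight evaluated at n = 16.
    fn nominate_weight_at_upper_bound() -> Weight {
        let n: u32 = 16;
        Weight::from_parts(69_307_256, 4556)
            // per-nomination ref_time term from the generated entry
            .saturating_add(Weight::from_parts(1_517_942, 0).saturating_mul(n.into()))
            // fixed storage accesses plus one extra read per nomination target
            .saturating_add(RocksDbWeight::get().reads(12_u64))
            .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into())))
            .saturating_add(RocksDbWeight::get().writes(5_u64))
    }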
+ Weight::from_parts(67_416_000, 4556) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -529,10 +542,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `866` - // Estimated: `12324` - // Minimum execution time: 34_011_000 picoseconds. - Weight::from_parts(34_521_000, 12324) + // Measured: `770` + // Estimated: `3685` + // Minimum execution time: 34_619_000 picoseconds. + Weight::from_parts(34_894_000, 3685) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -540,10 +553,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: NominationPools BondedPools (max_values: None, max_size: Some(220), added: 2695, mode: MaxEncodedLen) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `603` + // Measured: `571` // Estimated: `3685` - // Minimum execution time: 19_524_000 picoseconds. - Weight::from_parts(19_855_000, 3685) + // Minimum execution time: 19_676_000 picoseconds. + Weight::from_parts(19_958_000, 3685) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -551,10 +564,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: NominationPools BondedPools (max_values: None, max_size: Some(220), added: 2695, mode: MaxEncodedLen) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_457_000 picoseconds. - Weight::from_parts(20_698_000, 3685) + // Minimum execution time: 20_404_000 picoseconds. + Weight::from_parts(20_859_000, 3685) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -565,9 +578,9 @@ impl WeightInfo for SubstrateWeight { fn set_claim_permission() -> Weight { // Proof Size summary in bytes: // Measured: `542` - // Estimated: `7208` - // Minimum execution time: 15_183_000 picoseconds. - Weight::from_parts(15_597_000, 7208) + // Estimated: `3702` + // Minimum execution time: 15_401_000 picoseconds. + Weight::from_parts(15_706_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -581,10 +594,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `1096` - // Estimated: `12324` - // Minimum execution time: 48_957_000 picoseconds. - Weight::from_parts(50_207_000, 12324) + // Measured: `968` + // Estimated: `3685` + // Minimum execution time: 66_775_000 picoseconds. 
+ Weight::from_parts(67_242_000, 3685) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -616,17 +629,19 @@ impl WeightInfo for () { /// Proof: NominationPools CounterForPoolMembers (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3650` - // Estimated: `52435` - // Minimum execution time: 160_401_000 picoseconds. - Weight::from_parts(161_798_000, 52435) - .saturating_add(RocksDbWeight::get().reads(18_u64)) + // Measured: `3300` + // Estimated: `8877` + // Minimum execution time: 186_523_000 picoseconds. + Weight::from_parts(187_817_000, 8877) + .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) } /// Storage: NominationPools PoolMembers (r:1 w:1) @@ -645,17 +660,19 @@ impl WeightInfo for () { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3692` - // Estimated: `49070` - // Minimum execution time: 157_668_000 picoseconds. - Weight::from_parts(161_129_000, 49070) - .saturating_add(RocksDbWeight::get().reads(15_u64)) + // Measured: `3310` + // Estimated: `8877` + // Minimum execution time: 183_120_000 picoseconds. 
+ Weight::from_parts(184_749_000, 8877) + .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) } /// Storage: NominationPools ClaimPermissions (r:1 w:0) @@ -676,17 +693,19 @@ impl WeightInfo for () { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3757` - // Estimated: `52576` - // Minimum execution time: 176_034_000 picoseconds. - Weight::from_parts(176_956_000, 52576) - .saturating_add(RocksDbWeight::get().reads(16_u64)) + // Measured: `3375` + // Estimated: `8877` + // Minimum execution time: 218_799_000 picoseconds. + Weight::from_parts(220_139_000, 8877) + .saturating_add(RocksDbWeight::get().reads(17_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: NominationPools ClaimPermissions (r:1 w:0) @@ -703,10 +722,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1331` - // Estimated: `19532` - // Minimum execution time: 61_551_000 picoseconds. - Weight::from_parts(62_201_000, 19532) + // Measured: `1171` + // Estimated: `3702` + // Minimum execution time: 79_493_000 picoseconds. + Weight::from_parts(80_266_000, 3702) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -732,6 +751,8 @@ impl WeightInfo for () { /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) @@ -742,11 +763,11 @@ impl WeightInfo for () { /// Proof: NominationPools CounterForSubPoolsStorage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3935` - // Estimated: `82816` - // Minimum execution time: 162_755_000 picoseconds. - Weight::from_parts(163_518_000, 82816) - .saturating_add(RocksDbWeight::get().reads(19_u64)) + // Measured: `3586` + // Estimated: `27847` + // Minimum execution time: 168_592_000 picoseconds. 
+ Weight::from_parts(170_130_000, 27847) + .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: NominationPools BondedPools (r:1 w:0) @@ -759,16 +780,18 @@ impl WeightInfo for () { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1783` - // Estimated: `18031` - // Minimum execution time: 54_752_000 picoseconds. - Weight::from_parts(56_248_171, 18031) - // Standard Error: 1_891 - .saturating_add(Weight::from_parts(4_767, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `1687` + // Estimated: `4764` + // Minimum execution time: 63_254_000 picoseconds. + Weight::from_parts(64_154_755, 4764) + // Standard Error: 344 + .saturating_add(Weight::from_parts(8_798, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: NominationPools PoolMembers (r:1 w:1) @@ -785,6 +808,8 @@ impl WeightInfo for () { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: NominationPools CounterForPoolMembers (r:1 w:1) @@ -794,13 +819,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2307` - // Estimated: `54662` - // Minimum execution time: 106_166_000 picoseconds. - Weight::from_parts(107_806_373, 54662) - // Standard Error: 6_985 - .saturating_add(Weight::from_parts(18_449, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `2115` + // Estimated: `27847` + // Minimum execution time: 131_339_000 picoseconds. 
+ Weight::from_parts(133_590_267, 27847) + // Standard Error: 1_058 + .saturating_add(Weight::from_parts(9_932, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: NominationPools PoolMembers (r:1 w:1) @@ -825,6 +850,8 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// Proof: NominationPools CounterForPoolMembers (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: NominationPools ReversePoolIdLookup (r:1 w:1) @@ -846,15 +873,13 @@ impl WeightInfo for () { /// Storage: NominationPools ClaimPermissions (r:0 w:1) /// Proof: NominationPools ClaimPermissions (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(s: u32, ) -> Weight { + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2694` - // Estimated: `87714` - // Minimum execution time: 170_047_000 picoseconds. - Weight::from_parts(172_125_770, 87714) - // Standard Error: 2_599 - .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(20_u64)) + // Measured: `2470` + // Estimated: `27847` + // Minimum execution time: 219_026_000 picoseconds. + Weight::from_parts(223_230_356, 27847) + .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().writes(18_u64)) } /// Storage: NominationPools LastPoolId (r:1 w:1) @@ -887,6 +912,8 @@ impl WeightInfo for () { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: NominationPools RewardPools (r:1 w:1) /// Proof: NominationPools RewardPools (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) /// Storage: NominationPools CounterForRewardPools (r:1 w:1) @@ -901,11 +928,11 @@ impl WeightInfo for () { /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1321` - // Estimated: `51410` - // Minimum execution time: 149_672_000 picoseconds. - Weight::from_parts(153_613_000, 51410) - .saturating_add(RocksDbWeight::get().reads(21_u64)) + // Measured: `1289` + // Estimated: `6196` + // Minimum execution time: 189_710_000 picoseconds. + Weight::from_parts(190_251_000, 6196) + .saturating_add(RocksDbWeight::get().reads(22_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } /// Storage: NominationPools BondedPools (r:1 w:0) @@ -935,12 +962,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. 
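Two providers are generated from the same benchmarks: `SubstrateWeight<T>`, which prices database access through the runtime's configured `T::DbWeight`, and the `()` implementation in this block, which falls back to the constant `RocksDbWeight`. A minimal sketch of how they are consumed, assuming a host runtime named `Runtime` (not part of the patch):

use frame_support::weights::Weight;
use pallet_nomination_pools::weights::WeightInfo;

// A production runtime would normally select the benchmarked provider in its
// pallet configuration, e.g.
//     type WeightInfo = pallet_nomination_pools::weights::SubstrateWeight<Runtime>;
// while tests and mocks can keep using the unit provider:
fn chill_weight_for_tests() -> Weight {
	<() as WeightInfo>::chill()
}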
fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1913` - // Estimated: `33934 + n * (2520 ±0)` - // Minimum execution time: 68_892_000 picoseconds. - Weight::from_parts(69_062_946, 33934) - // Standard Error: 6_448 - .saturating_add(Weight::from_parts(1_422_774, 0).saturating_mul(n.into())) + // Measured: `1849` + // Estimated: `4556 + n * (2520 ±0)` + // Minimum execution time: 70_044_000 picoseconds. + Weight::from_parts(69_307_256, 4556) + // Standard Error: 6_066 + .saturating_add(Weight::from_parts(1_517_942, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -954,10 +981,10 @@ impl WeightInfo for () { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `11778` - // Minimum execution time: 36_447_000 picoseconds. - Weight::from_parts(36_837_000, 11778) + // Measured: `1438` + // Estimated: `4556` + // Minimum execution time: 36_610_000 picoseconds. + Weight::from_parts(37_212_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -970,12 +997,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` - // Estimated: `8909` - // Minimum execution time: 15_221_000 picoseconds. - Weight::from_parts(15_632_286, 8909) - // Standard Error: 343 - .saturating_add(Weight::from_parts(2_299, 0).saturating_mul(n.into())) + // Measured: `531` + // Estimated: `3735` + // Minimum execution time: 15_334_000 picoseconds. + Weight::from_parts(15_753_107, 3735) + // Standard Error: 62 + .saturating_add(Weight::from_parts(1_365, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -995,18 +1022,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_409_000 picoseconds. - Weight::from_parts(7_702_000, 0) + // Minimum execution time: 7_156_000 picoseconds. + Weight::from_parts(7_596_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: NominationPools BondedPools (r:1 w:1) /// Proof: NominationPools BondedPools (max_values: None, max_size: Some(220), added: 2695, mode: MaxEncodedLen) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_451_000 picoseconds. - Weight::from_parts(20_703_000, 3685) + // Minimum execution time: 20_342_000 picoseconds. + Weight::from_parts(20_699_000, 3685) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1030,10 +1057,10 @@ impl WeightInfo for () { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2140` - // Estimated: `29455` - // Minimum execution time: 66_001_000 picoseconds. - Weight::from_parts(66_894_000, 29455) + // Measured: `2012` + // Estimated: `4556` + // Minimum execution time: 66_608_000 picoseconds. 
+ Weight::from_parts(67_416_000, 4556) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1047,10 +1074,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `866` - // Estimated: `12324` - // Minimum execution time: 34_011_000 picoseconds. - Weight::from_parts(34_521_000, 12324) + // Measured: `770` + // Estimated: `3685` + // Minimum execution time: 34_619_000 picoseconds. + Weight::from_parts(34_894_000, 3685) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1058,10 +1085,10 @@ impl WeightInfo for () { /// Proof: NominationPools BondedPools (max_values: None, max_size: Some(220), added: 2695, mode: MaxEncodedLen) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `603` + // Measured: `571` // Estimated: `3685` - // Minimum execution time: 19_524_000 picoseconds. - Weight::from_parts(19_855_000, 3685) + // Minimum execution time: 19_676_000 picoseconds. + Weight::from_parts(19_958_000, 3685) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1069,10 +1096,10 @@ impl WeightInfo for () { /// Proof: NominationPools BondedPools (max_values: None, max_size: Some(220), added: 2695, mode: MaxEncodedLen) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_457_000 picoseconds. - Weight::from_parts(20_698_000, 3685) + // Minimum execution time: 20_404_000 picoseconds. + Weight::from_parts(20_859_000, 3685) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1083,9 +1110,9 @@ impl WeightInfo for () { fn set_claim_permission() -> Weight { // Proof Size summary in bytes: // Measured: `542` - // Estimated: `7208` - // Minimum execution time: 15_183_000 picoseconds. - Weight::from_parts(15_597_000, 7208) + // Estimated: `3702` + // Minimum execution time: 15_401_000 picoseconds. + Weight::from_parts(15_706_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1099,10 +1126,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `1096` - // Estimated: `12324` - // Minimum execution time: 48_957_000 picoseconds. - Weight::from_parts(50_207_000, 12324) + // Measured: `968` + // Estimated: `3685` + // Minimum execution time: 66_775_000 picoseconds. + Weight::from_parts(67_242_000, 3685) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 0550c8e0ca708..9726f5e6dad27 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -86,6 +86,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } pallet_staking_reward_curve::build! 
{ diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index 47ae264a69cf8..6ebe870a78c9b 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 0efbdcd48ecf3..894a725b5ce2f 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -335,6 +335,12 @@ benchmarks! { let balance_slash = |id| core::iter::once( ::RuntimeEvent::from(pallet_balances::Event::::Slashed{ who: id, amount: slash_amount.into() }) ); + let balance_locked = |id| core::iter::once( + ::RuntimeEvent::from(pallet_balances::Event::::Locked{ who: id, amount: slash_amount.into() }) + ); + let balance_unlocked = |id| core::iter::once( + ::RuntimeEvent::from(pallet_balances::Event::::Unlocked{ who: id, amount: slash_amount.into() }) + ); let chill = |id| core::iter::once( ::RuntimeEvent::from(StakingEvent::::Chilled{ stash: id }) ); @@ -349,12 +355,15 @@ benchmarks! { let slash_events = raw_offenders.into_iter() .flat_map(|offender| { let nom_slashes = offender.nominator_stashes.into_iter().flat_map(|nom| { - balance_slash(nom.clone()).map(Into::into).chain(slash(nom).map(Into::into)).map(Box::new) + balance_slash(nom.clone()).map(Into::into) + .chain(balance_unlocked(nom.clone()).map(Into::into)) + .chain(slash(nom).map(Into::into)).map(Box::new) }); let events = chill(offender.stash.clone()).map(Into::into).map(Box::new) .chain(slash_report(offender.stash.clone()).map(Into::into).map(Box::new)) .chain(balance_slash(offender.stash.clone()).map(Into::into).map(Box::new)) + .chain(balance_unlocked(offender.stash.clone()).map(Into::into).map(Box::new)) .chain(slash(offender.stash).map(Into::into).map(Box::new)) .chain(nom_slashes) .collect::>(); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 233aa449d391c..668d88e0bf3d0 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -65,7 +65,7 @@ impl frame_system::Config for Test { } impl pallet_balances::Config for Test { - type MaxLocks = (); + type MaxLocks = ConstU32<128>; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; @@ -74,6 +74,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<10>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_timestamp::Config for Test { diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index c89d93c12bd95..1c7ffeca71983 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -26,7 +26,9 @@ pub mod migration; mod mock; mod tests; -use codec::{Decode, Encode}; +use core::marker::PhantomData; + +use codec::Encode; use frame_support::weights::Weight; use 
sp_runtime::{traits::Hash, Perbill}; use sp_staking::{ @@ -43,12 +45,17 @@ type OpaqueTimeSlot = Vec; /// A type alias for a report identifier. type ReportIdOf = ::Hash; +const LOG_TARGET: &str = "runtime::offences"; + #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] #[pallet::without_storage_info] pub struct Pallet(_); @@ -85,21 +92,6 @@ pub mod pallet { ValueQuery, >; - /// Enumerates all reports of a kind along with the time they happened. - /// - /// All reports are sorted by the time of offence. - /// - /// Note that the actual type of this mapping is `Vec`, this is because values of - /// different types are not supported at the moment so we are doing the manual serialization. - #[pallet::storage] - pub type ReportsByKindIndex = StorageMap< - _, - Twox64Concat, - Kind, - Vec, // (O::TimeSlot, ReportIdOf) - ValueQuery, - >; - /// Events type. #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -190,7 +182,7 @@ impl Pallet { OffenceDetails { offender, reporters: reporters.clone() }, ); - storage.insert(time_slot, report_id); + storage.insert(report_id); } } @@ -225,7 +217,7 @@ struct TriageOutcome { struct ReportIndexStorage> { opaque_time_slot: OpaqueTimeSlot, concurrent_reports: Vec>, - same_kind_reports: Vec<(O::TimeSlot, ReportIdOf)>, + _phantom: PhantomData, } impl> ReportIndexStorage { @@ -233,30 +225,19 @@ impl> ReportIndexStorage { fn load(time_slot: &O::TimeSlot) -> Self { let opaque_time_slot = time_slot.encode(); - let same_kind_reports = ReportsByKindIndex::::get(&O::ID); - let same_kind_reports = - Vec::<(O::TimeSlot, ReportIdOf)>::decode(&mut &same_kind_reports[..]) - .unwrap_or_default(); - let concurrent_reports = >::get(&O::ID, &opaque_time_slot); - Self { opaque_time_slot, concurrent_reports, same_kind_reports } + Self { opaque_time_slot, concurrent_reports, _phantom: Default::default() } } /// Insert a new report to the index. - fn insert(&mut self, time_slot: &O::TimeSlot, report_id: ReportIdOf) { - // Insert the report id into the list while maintaining the ordering by the time - // slot. - let pos = self.same_kind_reports.partition_point(|&(ref when, _)| when <= time_slot); - self.same_kind_reports.insert(pos, (time_slot.clone(), report_id)); - + fn insert(&mut self, report_id: ReportIdOf) { // Update the list of concurrent reports. self.concurrent_reports.push(report_id); } /// Dump the indexes to the storage. fn save(self) { - ReportsByKindIndex::::insert(&O::ID, self.same_kind_reports.encode()); >::insert( &O::ID, &self.opaque_time_slot, diff --git a/frame/offences/src/migration.rs b/frame/offences/src/migration.rs index 95b94ba87cc9c..07bd68407d378 100644 --- a/frame/offences/src/migration.rs +++ b/frame/offences/src/migration.rs @@ -15,11 +15,84 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::{Config, OffenceDetails, Perbill, SessionIndex};
-use frame_support::{pallet_prelude::ValueQuery, storage_alias, traits::Get, weights::Weight};
+use super::{Config, Kind, OffenceDetails, Pallet, Perbill, SessionIndex, LOG_TARGET};
+use frame_support::{
+	dispatch::GetStorageVersion,
+	pallet_prelude::ValueQuery,
+	storage_alias,
+	traits::{Get, OnRuntimeUpgrade},
+	weights::Weight,
+	Twox64Concat,
+};
 use sp_staking::offence::{DisableStrategy, OnOffenceHandler};
 use sp_std::vec::Vec;
 
+#[cfg(feature = "try-runtime")]
+use frame_support::ensure;
+
+mod v0 {
+	use super::*;
+
+	#[storage_alias]
+	pub type ReportsByKindIndex<T: Config> = StorageMap<
+		Pallet<T>,
+		Twox64Concat,
+		Kind,
+		Vec<u8>, // (O::TimeSlot, ReportIdOf<T>)
+		ValueQuery,
+	>;
+}
+
+pub mod v1 {
+	use frame_support::traits::StorageVersion;
+
+	use super::*;
+
+	pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>);
+	impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, &'static str> {
+			let onchain = Pallet::<T>::on_chain_storage_version();
+			ensure!(onchain < 1, "pallet_offences::MigrateToV1 migration can be deleted");
+
+			log::info!(
+				target: LOG_TARGET,
+				"Number of reports to refund and delete: {}",
+				v0::ReportsByKindIndex::<T>::iter_keys().count()
+			);
+
+			Ok(Vec::new())
+		}
+
+		fn on_runtime_upgrade() -> Weight {
+			let onchain = Pallet::<T>::on_chain_storage_version();
+
+			if onchain > 0 {
+				log::info!(target: LOG_TARGET, "pallet_offences::MigrateToV1 should be removed");
+				return T::DbWeight::get().reads(1)
+			}
+
+			let keys_removed = v0::ReportsByKindIndex::<T>::clear(u32::MAX, None).unique as u64;
+			let weight = T::DbWeight::get().reads_writes(keys_removed, keys_removed);
+
+			StorageVersion::new(1).put::<Pallet<T>>();
+
+			weight
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(_state: Vec<u8>) -> Result<(), &'static str> {
+			let onchain = Pallet::<T>::on_chain_storage_version();
+			ensure!(onchain == 1, "pallet_offences::MigrateToV1 needs to be run");
+			ensure!(
+				v0::ReportsByKindIndex::<T>::iter_keys().count() == 0,
+				"there are some dangling reports that need to be destroyed and refunded"
+			);
+			Ok(())
+		}
+	}
+}
+
 /// Type of data stored as a deferred offence
 type DeferredOffenceOf<T> = (
 	Vec<OffenceDetails<<T as frame_system::Config>::AccountId, <T as Config>::IdentificationTuple>>,
@@ -36,7 +109,7 @@ type DeferredOffences<T> =
 pub fn remove_deferred_storage<T: Config>() -> Weight {
 	let mut weight = T::DbWeight::get().reads_writes(1, 1);
 	let deferred = <DeferredOffences<T>>::take();
-	log::info!(target: "runtime::offences", "have {} deferred offences, applying.", deferred.len());
+	log::info!(target: LOG_TARGET, "have {} deferred offences, applying.", deferred.len());
 	for (offences, perbill, session) in deferred.iter() {
 		let consumed = T::OnOffenceHandler::on_offence(
 			offences,
@@ -53,10 +126,32 @@ pub fn remove_deferred_storage<T: Config>() -> Weight {
 #[cfg(test)]
 mod test {
 	use super::*;
-	use crate::mock::{new_test_ext, with_on_offence_fractions, Runtime as T};
+	use crate::mock::{new_test_ext, with_on_offence_fractions, Runtime as T, KIND};
+	use codec::Encode;
 	use sp_runtime::Perbill;
 	use sp_staking::offence::OffenceDetails;
 
+	#[test]
+	fn migration_to_v1_works() {
+		let mut ext = new_test_ext();
+
+		ext.execute_with(|| {
+			<v0::ReportsByKindIndex<T>>::insert(KIND, 2u32.encode());
+			assert!(<v0::ReportsByKindIndex<T>>::iter_values().count() > 0);
+		});
+
+		ext.commit_all().unwrap();
+
+		ext.execute_with(|| {
+			assert_eq!(
+				v1::MigrateToV1::<T>::on_runtime_upgrade(),
+				<T as frame_system::Config>::DbWeight::get().reads_writes(1, 1),
+			);
+
+			assert!(<v0::ReportsByKindIndex<T>>::iter_values().count() == 0);
+		})
+	}
+
 	#[test]
 	fn should_resubmit_deferred_offences() {
 		new_test_ext().execute_with(||
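For context, a minimal sketch (not part of the patch) of how a runtime would schedule the new offences migration; `Runtime`, `Block`, and `AllPalletsWithSystem` are placeholders for the embedding runtime's own items:

// Hypothetical runtime wiring; only `MigrateToV1` itself comes from this patch.
pub type Migrations = (pallet_offences::migration::v1::MigrateToV1<Runtime>,);

pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllPalletsWithSystem,
	Migrations,
>;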
{ diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 27e1da8c4e636..17480be76c1d8 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -164,8 +164,3 @@ impl offence::Offence for Offence { Perbill::from_percent(5 + offenders_count * 100 / self.validator_set_count) } } - -/// Create the report id for the given `offender` and `time_slot` combination. -pub fn report_id(time_slot: u128, offender: u64) -> H256 { - Offences::report_id::(&time_slot, &offender) -} diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index 81f0f44f1b939..d525c7c3ab1d9 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -21,8 +21,8 @@ use super::*; use crate::mock::{ - new_test_ext, offence_reports, report_id, with_on_offence_fractions, Offence, Offences, - RuntimeEvent, System, KIND, + new_test_ext, offence_reports, with_on_offence_fractions, Offence, Offences, RuntimeEvent, + System, KIND, }; use frame_system::{EventRecord, Phase}; use sp_runtime::Perbill; @@ -245,48 +245,3 @@ fn should_properly_count_offences() { ); }); } - -/// We insert offences in sorted order using the time slot in the `same_kind_reports`. -/// This test ensures that it works as expected. -#[test] -fn should_properly_sort_offences() { - new_test_ext().execute_with(|| { - // given - let time_slot = 42; - assert_eq!(offence_reports(KIND, time_slot), vec![]); - - let offence1 = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; - let offence2 = Offence { validator_set_count: 5, time_slot, offenders: vec![4] }; - let offence3 = - Offence { validator_set_count: 5, time_slot: time_slot + 1, offenders: vec![6, 7] }; - let offence4 = - Offence { validator_set_count: 5, time_slot: time_slot - 1, offenders: vec![3] }; - Offences::report_offence(vec![], offence1).unwrap(); - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); - f.clear(); - }); - - // when - // report for the second time - Offences::report_offence(vec![], offence2).unwrap(); - Offences::report_offence(vec![], offence3).unwrap(); - Offences::report_offence(vec![], offence4).unwrap(); - - // then - let same_kind_reports = Vec::<(u128, sp_core::H256)>::decode( - &mut &crate::ReportsByKindIndex::::get(KIND)[..], - ) - .unwrap(); - assert_eq!( - same_kind_reports, - vec![ - (time_slot - 1, report_id(time_slot - 1, 3)), - (time_slot, report_id(time_slot, 5)), - (time_slot, report_id(time_slot, 4)), - (time_slot + 1, report_id(time_slot + 1, 6)), - (time_slot + 1, report_id(time_slot + 1, 7)), - ] - ); - }); -} diff --git a/frame/preimage/Cargo.toml b/frame/preimage/Cargo.toml index 6bc6c8c53c012..57dda88b1e70a 100644 --- a/frame/preimage/Cargo.toml +++ b/frame/preimage/Cargo.toml @@ -10,7 +10,7 @@ description = "FRAME pallet for storing preimages of hashes" [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index 23875ccb0ef7a..5054a77a8123f 100644 --- 
a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -84,6 +84,10 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } ord_parameter_types! { diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index fa6bdc972fa47..2177309db1612 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_preimage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -72,12 +72,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `175` - // Estimated: `2566` - // Minimum execution time: 23_484 nanoseconds. - Weight::from_parts(23_828_000, 2566) + // Measured: `143` + // Estimated: `3556` + // Minimum execution time: 31_578_000 picoseconds. + Weight::from_parts(31_955_000, 3556) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_705, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -89,11 +89,11 @@ impl WeightInfo for SubstrateWeight { fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 14_812 nanoseconds. - Weight::from_parts(14_949_000, 2566) + // Estimated: `3556` + // Minimum execution time: 17_017_000 picoseconds. + Weight::from_parts(17_549_000, 3556) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_707, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -105,11 +105,11 @@ impl WeightInfo for SubstrateWeight { fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 14_185 nanoseconds. - Weight::from_parts(14_398_000, 2566) + // Estimated: `3556` + // Minimum execution time: 16_507_000 picoseconds. + Weight::from_parts(16_624_000, 3556) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_709, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -119,10 +119,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `353` - // Estimated: `2566` - // Minimum execution time: 29_917 nanoseconds. - Weight::from_parts(30_691_000, 2566) + // Measured: `289` + // Estimated: `3556` + // Minimum execution time: 38_016_000 picoseconds. 
+ Weight::from_parts(38_909_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -133,9 +133,9 @@ impl WeightInfo for SubstrateWeight { fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 19_281 nanoseconds. - Weight::from_parts(20_121_000, 2566) + // Estimated: `3556` + // Minimum execution time: 21_408_000 picoseconds. + Weight::from_parts(22_343_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -143,10 +143,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `2566` - // Minimum execution time: 17_192 nanoseconds. - Weight::from_parts(18_637_000, 2566) + // Measured: `188` + // Estimated: `3556` + // Minimum execution time: 20_035_000 picoseconds. + Weight::from_parts(20_639_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -155,9 +155,9 @@ impl WeightInfo for SubstrateWeight { fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 10_139 nanoseconds. - Weight::from_parts(10_773_000, 2566) + // Estimated: `3556` + // Minimum execution time: 12_028_000 picoseconds. + Weight::from_parts(12_509_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -166,9 +166,9 @@ impl WeightInfo for SubstrateWeight { fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `2566` - // Minimum execution time: 11_721 nanoseconds. - Weight::from_parts(12_313_000, 2566) + // Estimated: `3556` + // Minimum execution time: 13_568_000 picoseconds. + Weight::from_parts(14_161_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -177,9 +177,9 @@ impl WeightInfo for SubstrateWeight { fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 7_263 nanoseconds. - Weight::from_parts(7_547_000, 2566) + // Estimated: `3556` + // Minimum execution time: 8_538_000 picoseconds. + Weight::from_parts(8_933_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -190,9 +190,9 @@ impl WeightInfo for SubstrateWeight { fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 17_785 nanoseconds. - Weight::from_parts(18_501_000, 2566) + // Estimated: `3556` + // Minimum execution time: 20_692_000 picoseconds. + Weight::from_parts(21_770_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -201,9 +201,9 @@ impl WeightInfo for SubstrateWeight { fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 7_090 nanoseconds. - Weight::from_parts(7_319_000, 2566) + // Estimated: `3556` + // Minimum execution time: 8_572_000 picoseconds. 
+ Weight::from_parts(8_795_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -212,9 +212,9 @@ impl WeightInfo for SubstrateWeight { fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 7_253 nanoseconds. - Weight::from_parts(7_508_000, 2566) + // Estimated: `3556` + // Minimum execution time: 8_266_000 picoseconds. + Weight::from_parts(8_721_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -229,12 +229,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `175` - // Estimated: `2566` - // Minimum execution time: 23_484 nanoseconds. - Weight::from_parts(23_828_000, 2566) + // Measured: `143` + // Estimated: `3556` + // Minimum execution time: 31_578_000 picoseconds. + Weight::from_parts(31_955_000, 3556) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_705, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -246,11 +246,11 @@ impl WeightInfo for () { fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 14_812 nanoseconds. - Weight::from_parts(14_949_000, 2566) + // Estimated: `3556` + // Minimum execution time: 17_017_000 picoseconds. + Weight::from_parts(17_549_000, 3556) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_707, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -262,11 +262,11 @@ impl WeightInfo for () { fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 14_185 nanoseconds. - Weight::from_parts(14_398_000, 2566) + // Estimated: `3556` + // Minimum execution time: 16_507_000 picoseconds. + Weight::from_parts(16_624_000, 3556) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_709, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -276,10 +276,10 @@ impl WeightInfo for () { /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `353` - // Estimated: `2566` - // Minimum execution time: 29_917 nanoseconds. - Weight::from_parts(30_691_000, 2566) + // Measured: `289` + // Estimated: `3556` + // Minimum execution time: 38_016_000 picoseconds. + Weight::from_parts(38_909_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -290,9 +290,9 @@ impl WeightInfo for () { fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 19_281 nanoseconds. - Weight::from_parts(20_121_000, 2566) + // Estimated: `3556` + // Minimum execution time: 21_408_000 picoseconds. 
+ Weight::from_parts(22_343_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -300,10 +300,10 @@ impl WeightInfo for () { /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `2566` - // Minimum execution time: 17_192 nanoseconds. - Weight::from_parts(18_637_000, 2566) + // Measured: `188` + // Estimated: `3556` + // Minimum execution time: 20_035_000 picoseconds. + Weight::from_parts(20_639_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -312,9 +312,9 @@ impl WeightInfo for () { fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 10_139 nanoseconds. - Weight::from_parts(10_773_000, 2566) + // Estimated: `3556` + // Minimum execution time: 12_028_000 picoseconds. + Weight::from_parts(12_509_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -323,9 +323,9 @@ impl WeightInfo for () { fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `2566` - // Minimum execution time: 11_721 nanoseconds. - Weight::from_parts(12_313_000, 2566) + // Estimated: `3556` + // Minimum execution time: 13_568_000 picoseconds. + Weight::from_parts(14_161_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -334,9 +334,9 @@ impl WeightInfo for () { fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 7_263 nanoseconds. - Weight::from_parts(7_547_000, 2566) + // Estimated: `3556` + // Minimum execution time: 8_538_000 picoseconds. + Weight::from_parts(8_933_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -347,9 +347,9 @@ impl WeightInfo for () { fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `2566` - // Minimum execution time: 17_785 nanoseconds. - Weight::from_parts(18_501_000, 2566) + // Estimated: `3556` + // Minimum execution time: 20_692_000 picoseconds. + Weight::from_parts(21_770_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -358,9 +358,9 @@ impl WeightInfo for () { fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 7_090 nanoseconds. - Weight::from_parts(7_319_000, 2566) + // Estimated: `3556` + // Minimum execution time: 8_572_000 picoseconds. + Weight::from_parts(8_795_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -369,9 +369,9 @@ impl WeightInfo for () { fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `2566` - // Minimum execution time: 7_253 nanoseconds. - Weight::from_parts(7_508_000, 2566) + // Estimated: `3556` + // Minimum execution time: 8_266_000 picoseconds. 
+ Weight::from_parts(8_721_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 5c69fcfb9f55e..065cbe5eba252 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index c49c344aca107..f3771083c4dd4 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -88,6 +88,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_utility::Config for Test { type RuntimeEvent = RuntimeEvent; @@ -124,7 +128,10 @@ impl InstanceFilter for ProxyType { match self { ProxyType::Any => true, ProxyType::JustTransfer => { - matches!(c, RuntimeCall::Balances(pallet_balances::Call::transfer { .. })) + matches!( + c, + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) + ) }, ProxyType::JustUtility => matches!(c, RuntimeCall::Utility { .. 
}), } @@ -161,7 +168,7 @@ impl Config for Test { use super::{Call as ProxyCall, Event as ProxyEvent}; use frame_system::Call as SystemCall; -use pallet_balances::{Call as BalancesCall, Error as BalancesError, Event as BalancesEvent}; +use pallet_balances::{Call as BalancesCall, Event as BalancesEvent}; use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; type SystemError = frame_system::Error; @@ -169,7 +176,7 @@ type SystemError = frame_system::Error; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { - balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], + balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 3)], } .assimilate_storage(&mut t) .unwrap(); @@ -193,7 +200,7 @@ fn expect_events(e: Vec) { } fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) } #[test] @@ -345,7 +352,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { #[test] fn filtering_works() { new_test_ext().execute_with(|| { - assert!(Balances::mutate_account(&1, |a| a.free = 1000).is_ok()); + Balances::make_free_balance_be(&1, 1000); assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::JustTransfer, 0)); assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::JustUtility, 0)); @@ -361,7 +368,7 @@ fn filtering_works() { ); let derivative_id = Utility::derivative_account_id(1, 0); - assert!(Balances::mutate_account(&derivative_id, |a| a.free = 1000).is_ok()); + Balances::make_free_balance_be(&derivative_id, 1000); let inner = Box::new(call_transfer(6, 1)); let call = Box::new(RuntimeCall::Utility(UtilityCall::as_derivative { @@ -516,7 +523,7 @@ fn cannot_add_proxy_without_balance() { assert_eq!(Balances::reserved_balance(5), 2); assert_noop!( Proxy::add_proxy(RuntimeOrigin::signed(5), 4, ProxyType::Any, 0), - BalancesError::::InsufficientBalance + DispatchError::ConsumerRemaining, ); }); } @@ -564,6 +571,7 @@ fn proxying_works() { #[test] fn pure_works() { new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 11); // An extra one for the ED. 
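A minimal sketch (not part of the patch) of exercising the updated `JustTransfer` filter with the renamed balances call; it assumes the mock items from these tests (`Test`, `ProxyType`, `RuntimeCall`) and uses a hypothetical test name:

use frame_support::traits::InstanceFilter;

#[test]
fn just_transfer_matches_the_renamed_call() {
	// `transfer` was renamed to `transfer_allow_death`; the filter follows suit.
	let call = RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death {
		dest: 6,
		value: 1,
	});
	assert!(ProxyType::JustTransfer.filter(&call));
	assert!(!ProxyType::JustUtility.filter(&call));
}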
assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); let anon = Proxy::pure_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( @@ -592,7 +600,7 @@ fn pure_works() { assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); let call = Box::new(call_transfer(6, 1)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), anon, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), anon, 5)); assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call)); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); @@ -611,9 +619,9 @@ fn pure_works() { Proxy::kill_pure(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), Error::::NoPermission ); - assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(1), 1); assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone())); - assert_eq!(Balances::free_balance(1), 2); + assert_eq!(Balances::free_balance(1), 3); assert_noop!( Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone()), Error::::NotProxy diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 5b70b61e569b4..5a6352fc7ed7f 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_proxy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -68,12 +68,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 14_461 nanoseconds. - Weight::from_parts(14_913_927, 3716) - // Standard Error: 1_174 - .saturating_add(Weight::from_parts(36_087, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 16_542_000 picoseconds. + Weight::from_parts(17_131_651, 4706) + // Standard Error: 1_279 + .saturating_add(Weight::from_parts(31_622, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Proxy Proxies (r:1 w:0) @@ -86,14 +86,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `584 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `11027` - // Minimum execution time: 31_523 nanoseconds. - Weight::from_parts(31_116_270, 11027) - // Standard Error: 1_789 - .saturating_add(Weight::from_parts(135_656, 0).saturating_mul(a.into())) - // Standard Error: 1_849 - .saturating_add(Weight::from_parts(53_893, 0).saturating_mul(p.into())) + // Measured: `488 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 41_702_000 picoseconds. 
+ Weight::from_parts(41_868_091, 5698) + // Standard Error: 3_771 + .saturating_add(Weight::from_parts(135_604, 0).saturating_mul(a.into())) + // Standard Error: 3_896 + .saturating_add(Weight::from_parts(32_615, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -103,16 +103,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, p: u32, ) -> Weight { + fn remove_announcement(a: u32, _p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `467 + a * (68 ±0)` - // Estimated: `7311` - // Minimum execution time: 19_363 nanoseconds. - Weight::from_parts(20_282_191, 7311) - // Standard Error: 1_084 - .saturating_add(Weight::from_parts(133_825, 0).saturating_mul(a.into())) - // Standard Error: 1_120 - .saturating_add(Weight::from_parts(3_434, 0).saturating_mul(p.into())) + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 25_432_000 picoseconds. + Weight::from_parts(26_301_674, 5698) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(167_176, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -124,14 +122,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `467 + a * (68 ±0)` - // Estimated: `7311` - // Minimum execution time: 19_363 nanoseconds. - Weight::from_parts(20_211_584, 7311) - // Standard Error: 1_171 - .saturating_add(Weight::from_parts(136_984, 0).saturating_mul(a.into())) - // Standard Error: 1_210 - .saturating_add(Weight::from_parts(3_686, 0).saturating_mul(p.into())) + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 25_280_000 picoseconds. + Weight::from_parts(26_099_549, 5698) + // Standard Error: 1_458 + .saturating_add(Weight::from_parts(168_724, 0).saturating_mul(a.into())) + // Standard Error: 1_507 + .saturating_add(Weight::from_parts(2_212, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -145,14 +143,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `516 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `11027` - // Minimum execution time: 27_811 nanoseconds. - Weight::from_parts(27_965_813, 11027) - // Standard Error: 1_987 - .saturating_add(Weight::from_parts(124_133, 0).saturating_mul(a.into())) - // Standard Error: 2_053 - .saturating_add(Weight::from_parts(54_692, 0).saturating_mul(p.into())) + // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 35_889_000 picoseconds. + Weight::from_parts(37_535_424, 5698) + // Standard Error: 3_899 + .saturating_add(Weight::from_parts(138_757, 0).saturating_mul(a.into())) + // Standard Error: 4_028 + .saturating_add(Weight::from_parts(46_196, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -161,12 +159,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. 
fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 20_922 nanoseconds. - Weight::from_parts(21_551_797, 3716) - // Standard Error: 1_425 - .saturating_add(Weight::from_parts(58_434, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 26_876_000 picoseconds. + Weight::from_parts(27_356_694, 4706) + // Standard Error: 1_437 + .saturating_add(Weight::from_parts(68_994, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -175,12 +173,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 20_812 nanoseconds. - Weight::from_parts(21_660_732, 3716) - // Standard Error: 1_438 - .saturating_add(Weight::from_parts(68_740, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 26_655_000 picoseconds. + Weight::from_parts(27_726_692, 4706) + // Standard Error: 1_980 + .saturating_add(Weight::from_parts(55_932, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -189,26 +187,24 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 16_786 nanoseconds. - Weight::from_parts(17_249_958, 3716) - // Standard Error: 1_007 - .saturating_add(Weight::from_parts(37_546, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 23_716_000 picoseconds. + Weight::from_parts(24_660_737, 4706) + // Standard Error: 1_400 + .saturating_add(Weight::from_parts(31_679, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Proxy Proxies (r:1 w:1) /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { + fn create_pure(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `3716` - // Minimum execution time: 22_764 nanoseconds. - Weight::from_parts(23_539_039, 3716) - // Standard Error: 814 - .saturating_add(Weight::from_parts(144, 0).saturating_mul(p.into())) + // Estimated: `4706` + // Minimum execution time: 28_233_000 picoseconds. + Weight::from_parts(29_602_422, 4706) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -217,12 +213,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `230 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 17_720 nanoseconds. - Weight::from_parts(18_428_849, 3716) - // Standard Error: 1_093 - .saturating_add(Weight::from_parts(34_600, 0).saturating_mul(p.into())) + // Measured: `198 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_759_000 picoseconds. 
+ Weight::from_parts(25_533_053, 4706) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(36_331, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -235,12 +231,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 14_461 nanoseconds. - Weight::from_parts(14_913_927, 3716) - // Standard Error: 1_174 - .saturating_add(Weight::from_parts(36_087, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 16_542_000 picoseconds. + Weight::from_parts(17_131_651, 4706) + // Standard Error: 1_279 + .saturating_add(Weight::from_parts(31_622, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Proxy Proxies (r:1 w:0) @@ -253,14 +249,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `584 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `11027` - // Minimum execution time: 31_523 nanoseconds. - Weight::from_parts(31_116_270, 11027) - // Standard Error: 1_789 - .saturating_add(Weight::from_parts(135_656, 0).saturating_mul(a.into())) - // Standard Error: 1_849 - .saturating_add(Weight::from_parts(53_893, 0).saturating_mul(p.into())) + // Measured: `488 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 41_702_000 picoseconds. + Weight::from_parts(41_868_091, 5698) + // Standard Error: 3_771 + .saturating_add(Weight::from_parts(135_604, 0).saturating_mul(a.into())) + // Standard Error: 3_896 + .saturating_add(Weight::from_parts(32_615, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -270,16 +266,14 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, p: u32, ) -> Weight { + fn remove_announcement(a: u32, _p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `467 + a * (68 ±0)` - // Estimated: `7311` - // Minimum execution time: 19_363 nanoseconds. - Weight::from_parts(20_282_191, 7311) - // Standard Error: 1_084 - .saturating_add(Weight::from_parts(133_825, 0).saturating_mul(a.into())) - // Standard Error: 1_120 - .saturating_add(Weight::from_parts(3_434, 0).saturating_mul(p.into())) + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 25_432_000 picoseconds. + Weight::from_parts(26_301_674, 5698) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(167_176, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -291,14 +285,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `467 + a * (68 ±0)` - // Estimated: `7311` - // Minimum execution time: 19_363 nanoseconds. 
- Weight::from_parts(20_211_584, 7311) - // Standard Error: 1_171 - .saturating_add(Weight::from_parts(136_984, 0).saturating_mul(a.into())) - // Standard Error: 1_210 - .saturating_add(Weight::from_parts(3_686, 0).saturating_mul(p.into())) + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 25_280_000 picoseconds. + Weight::from_parts(26_099_549, 5698) + // Standard Error: 1_458 + .saturating_add(Weight::from_parts(168_724, 0).saturating_mul(a.into())) + // Standard Error: 1_507 + .saturating_add(Weight::from_parts(2_212, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -312,14 +306,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `516 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `11027` - // Minimum execution time: 27_811 nanoseconds. - Weight::from_parts(27_965_813, 11027) - // Standard Error: 1_987 - .saturating_add(Weight::from_parts(124_133, 0).saturating_mul(a.into())) - // Standard Error: 2_053 - .saturating_add(Weight::from_parts(54_692, 0).saturating_mul(p.into())) + // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 35_889_000 picoseconds. + Weight::from_parts(37_535_424, 5698) + // Standard Error: 3_899 + .saturating_add(Weight::from_parts(138_757, 0).saturating_mul(a.into())) + // Standard Error: 4_028 + .saturating_add(Weight::from_parts(46_196, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -328,12 +322,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 20_922 nanoseconds. - Weight::from_parts(21_551_797, 3716) - // Standard Error: 1_425 - .saturating_add(Weight::from_parts(58_434, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 26_876_000 picoseconds. + Weight::from_parts(27_356_694, 4706) + // Standard Error: 1_437 + .saturating_add(Weight::from_parts(68_994, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -342,12 +336,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 20_812 nanoseconds. - Weight::from_parts(21_660_732, 3716) - // Standard Error: 1_438 - .saturating_add(Weight::from_parts(68_740, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 26_655_000 picoseconds. + Weight::from_parts(27_726_692, 4706) + // Standard Error: 1_980 + .saturating_add(Weight::from_parts(55_932, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -356,26 +350,24 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 16_786 nanoseconds. 
- Weight::from_parts(17_249_958, 3716) - // Standard Error: 1_007 - .saturating_add(Weight::from_parts(37_546, 0).saturating_mul(p.into())) + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 23_716_000 picoseconds. + Weight::from_parts(24_660_737, 4706) + // Standard Error: 1_400 + .saturating_add(Weight::from_parts(31_679, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Proxy Proxies (r:1 w:1) /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { + fn create_pure(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `3716` - // Minimum execution time: 22_764 nanoseconds. - Weight::from_parts(23_539_039, 3716) - // Standard Error: 814 - .saturating_add(Weight::from_parts(144, 0).saturating_mul(p.into())) + // Estimated: `4706` + // Minimum execution time: 28_233_000 picoseconds. + Weight::from_parts(29_602_422, 4706) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -384,12 +376,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `230 + p * (37 ±0)` - // Estimated: `3716` - // Minimum execution time: 17_720 nanoseconds. - Weight::from_parts(18_428_849, 3716) - // Standard Error: 1_093 - .saturating_add(Weight::from_parts(34_600, 0).saturating_mul(p.into())) + // Measured: `198 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_759_000 picoseconds. + Weight::from_parts(25_533_053, 4706) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(36_331, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index 8d6c1620d81f8..754fcf70aaafc 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_ranked_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -70,9 +70,9 @@ impl WeightInfo for SubstrateWeight { fn add_member() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `5006` - // Minimum execution time: 16_334 nanoseconds. - Weight::from_parts(16_784_000, 5006) + // Estimated: `3507` + // Minimum execution time: 18_480_000 picoseconds. + Weight::from_parts(18_769_000, 3507) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -88,16 +88,16 @@ impl WeightInfo for SubstrateWeight { fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `583 + r * (281 ±0)` - // Estimated: `10064 + r * (7547 ±0)` - // Minimum execution time: 26_177 nanoseconds. 
- Weight::from_parts(29_245_248, 10064) - // Standard Error: 18_611 - .saturating_add(Weight::from_parts(10_916_516, 0).saturating_mul(r.into())) + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 30_087_000 picoseconds. + Weight::from_parts(33_646_239, 3519) + // Standard Error: 22_498 + .saturating_add(Weight::from_parts(12_524_289, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 7547).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) } /// Storage: RankedCollective Members (r:1 w:1) /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) @@ -111,11 +111,11 @@ impl WeightInfo for SubstrateWeight { fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `281 + r * (17 ±0)` - // Estimated: `5006` - // Minimum execution time: 18_953 nanoseconds. - Weight::from_parts(19_570_567, 5006) - // Standard Error: 4_156 - .saturating_add(Weight::from_parts(263_843, 0).saturating_mul(r.into())) + // Estimated: `3507` + // Minimum execution time: 20_974_000 picoseconds. + Weight::from_parts(21_582_135, 3507) + // Standard Error: 4_965 + .saturating_add(Weight::from_parts(294_566, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -131,11 +131,11 @@ impl WeightInfo for SubstrateWeight { fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `599 + r * (72 ±0)` - // Estimated: `10064` - // Minimum execution time: 26_243 nanoseconds. - Weight::from_parts(28_532_816, 10064) - // Standard Error: 22_689 - .saturating_add(Weight::from_parts(614_464, 0).saturating_mul(r.into())) + // Estimated: `3519` + // Minimum execution time: 29_621_000 picoseconds. + Weight::from_parts(32_118_301, 3519) + // Standard Error: 27_596 + .saturating_add(Weight::from_parts(647_979, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote() -> Weight { // Proof Size summary in bytes: - // Measured: `626` - // Estimated: `226856` - // Minimum execution time: 41_121 nanoseconds. - Weight::from_parts(41_606_000, 226856) + // Measured: `595` + // Estimated: `219984` + // Minimum execution time: 46_360_000 picoseconds. + Weight::from_parts(46_793_000, 219984) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -160,19 +160,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) /// Storage: RankedCollective VotingCleanup (r:1 w:0) /// Proof: RankedCollective VotingCleanup (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:0 w:100) + /// Storage: RankedCollective Voting (r:100 w:100) /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) /// The range of component `n` is `[0, 100]`. 
fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `461 + n * (50 ±0)` - // Estimated: `5394` - // Minimum execution time: 13_245 nanoseconds. - Weight::from_parts(17_420_271, 5394) - // Standard Error: 1_503 - .saturating_add(Weight::from_parts(952_500, 0).saturating_mul(n.into())) + // Measured: `429 + n * (50 ±0)` + // Estimated: `3795 + n * (2540 ±0)` + // Minimum execution time: 14_869_000 picoseconds. + Weight::from_parts(18_545_013, 3795) + // Standard Error: 1_376 + .saturating_add(Weight::from_parts(1_005_397, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) } } @@ -189,9 +191,9 @@ impl WeightInfo for () { fn add_member() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `5006` - // Minimum execution time: 16_334 nanoseconds. - Weight::from_parts(16_784_000, 5006) + // Estimated: `3507` + // Minimum execution time: 18_480_000 picoseconds. + Weight::from_parts(18_769_000, 3507) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -207,16 +209,16 @@ impl WeightInfo for () { fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `583 + r * (281 ±0)` - // Estimated: `10064 + r * (7547 ±0)` - // Minimum execution time: 26_177 nanoseconds. - Weight::from_parts(29_245_248, 10064) - // Standard Error: 18_611 - .saturating_add(Weight::from_parts(10_916_516, 0).saturating_mul(r.into())) + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 30_087_000 picoseconds. + Weight::from_parts(33_646_239, 3519) + // Standard Error: 22_498 + .saturating_add(Weight::from_parts(12_524_289, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 7547).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) } /// Storage: RankedCollective Members (r:1 w:1) /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) @@ -230,11 +232,11 @@ impl WeightInfo for () { fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `281 + r * (17 ±0)` - // Estimated: `5006` - // Minimum execution time: 18_953 nanoseconds. - Weight::from_parts(19_570_567, 5006) - // Standard Error: 4_156 - .saturating_add(Weight::from_parts(263_843, 0).saturating_mul(r.into())) + // Estimated: `3507` + // Minimum execution time: 20_974_000 picoseconds. + Weight::from_parts(21_582_135, 3507) + // Standard Error: 4_965 + .saturating_add(Weight::from_parts(294_566, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -250,11 +252,11 @@ impl WeightInfo for () { fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `599 + r * (72 ±0)` - // Estimated: `10064` - // Minimum execution time: 26_243 nanoseconds. 
- Weight::from_parts(28_532_816, 10064) - // Standard Error: 22_689 - .saturating_add(Weight::from_parts(614_464, 0).saturating_mul(r.into())) + // Estimated: `3519` + // Minimum execution time: 29_621_000 picoseconds. + Weight::from_parts(32_118_301, 3519) + // Standard Error: 27_596 + .saturating_add(Weight::from_parts(647_979, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -268,10 +270,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote() -> Weight { // Proof Size summary in bytes: - // Measured: `626` - // Estimated: `226856` - // Minimum execution time: 41_121 nanoseconds. - Weight::from_parts(41_606_000, 226856) + // Measured: `595` + // Estimated: `219984` + // Minimum execution time: 46_360_000 picoseconds. + Weight::from_parts(46_793_000, 219984) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -279,18 +281,20 @@ impl WeightInfo for () { /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) /// Storage: RankedCollective VotingCleanup (r:1 w:0) /// Proof: RankedCollective VotingCleanup (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:0 w:100) + /// Storage: RankedCollective Voting (r:100 w:100) /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `461 + n * (50 ±0)` - // Estimated: `5394` - // Minimum execution time: 13_245 nanoseconds. - Weight::from_parts(17_420_271, 5394) - // Standard Error: 1_503 - .saturating_add(Weight::from_parts(952_500, 0).saturating_mul(n.into())) + // Measured: `429 + n * (50 ±0)` + // Estimated: `3795 + n * (2540 ±0)` + // Minimum execution time: 14_869_000 picoseconds. 
+ Weight::from_parts(18_545_013, 3795) + // Standard Error: 1_376 + .saturating_add(Weight::from_parts(1_005_397, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) } } diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 3c25c0002b709..b2e3236e4dfc5 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 1d21be801c85c..5c190e2a241a5 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -86,6 +86,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index 9381d6a881ef1..85024d0bc4a9d 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -45,7 +45,10 @@ fn set_recovered_works() { // Root can set a recovered account though assert_ok!(Recovery::set_recovered(RuntimeOrigin::root(), 5, 1)); // Account 1 should now be able to make a call through account 5 - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 100 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 1, + value: 100, + })); assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); // Account 1 has successfully drained the funds from account 5 assert_eq!(Balances::free_balance(1), 200); @@ -93,7 +96,10 @@ fn recovery_life_cycle_works() { assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); // Account 1 should now be able to make a call through account 5 to get all of their funds assert_eq!(Balances::free_balance(5), 110); - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 110 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: 1, + value: 110, + })); assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); // All funds have been fully recovered! assert_eq!(Balances::free_balance(1), 200); diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index 6f8818f819bad..97d4c8b87aa53 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_recovery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -67,9 +67,9 @@ impl WeightInfo for SubstrateWeight { fn as_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `281` - // Estimated: `2555` - // Minimum execution time: 8_866 nanoseconds. - Weight::from_parts(9_065_000, 2555) + // Estimated: `3545` + // Minimum execution time: 10_405_000 picoseconds. + Weight::from_parts(10_807_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Recovery Proxy (r:0 w:1) @@ -78,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_893 nanoseconds. - Weight::from_parts(9_177_000, 0) + // Minimum execution time: 11_198_000 picoseconds. + Weight::from_parts(11_459_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Recovery Recoverable (r:1 w:1) @@ -88,11 +88,11 @@ impl WeightInfo for SubstrateWeight { fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `175` - // Estimated: `2826` - // Minimum execution time: 20_662 nanoseconds. - Weight::from_parts(21_378_064, 2826) - // Standard Error: 3_350 - .saturating_add(Weight::from_parts(83_738, 0).saturating_mul(n.into())) + // Estimated: `3816` + // Minimum execution time: 28_009_000 picoseconds. + Weight::from_parts(28_755_652, 3816) + // Standard Error: 3_536 + .saturating_add(Weight::from_parts(78_348, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -103,9 +103,9 @@ impl WeightInfo for SubstrateWeight { fn initiate_recovery() -> Weight { // Proof Size summary in bytes: // Measured: `272` - // Estimated: `5690` - // Minimum execution time: 24_805 nanoseconds. - Weight::from_parts(25_273_000, 5690) + // Estimated: `3854` + // Minimum execution time: 31_233_000 picoseconds. + Weight::from_parts(31_508_000, 3854) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -116,12 +116,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `423 + n * (64 ±0)` - // Estimated: `5690` - // Minimum execution time: 17_837 nanoseconds. - Weight::from_parts(18_429_664, 5690) - // Standard Error: 3_187 - .saturating_add(Weight::from_parts(143_648, 0).saturating_mul(n.into())) + // Measured: `360 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 20_542_000 picoseconds. + Weight::from_parts(21_224_065, 3854) + // Standard Error: 3_018 + .saturating_add(Weight::from_parts(171_994, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -134,12 +134,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `455 + n * (64 ±0)` - // Estimated: `8245` - // Minimum execution time: 21_960 nanoseconds. 
- Weight::from_parts(22_529_644, 8245) - // Standard Error: 2_945 - .saturating_add(Weight::from_parts(85_604, 0).saturating_mul(n.into())) + // Measured: `392 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 25_141_000 picoseconds. + Weight::from_parts(25_880_238, 3854) + // Standard Error: 3_156 + .saturating_add(Weight::from_parts(54_405, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -150,12 +150,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `576 + n * (32 ±0)` - // Estimated: `5467` - // Minimum execution time: 26_054 nanoseconds. - Weight::from_parts(26_724_866, 5467) - // Standard Error: 2_645 - .saturating_add(Weight::from_parts(104_301, 0).saturating_mul(n.into())) + // Measured: `513 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 35_314_000 picoseconds. + Weight::from_parts(36_380_338, 3854) + // Standard Error: 7_396 + .saturating_add(Weight::from_parts(3_861, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -166,12 +166,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `302 + n * (32 ±0)` - // Estimated: `5690` - // Minimum execution time: 25_110 nanoseconds. - Weight::from_parts(25_805_837, 5690) - // Standard Error: 2_732 - .saturating_add(Weight::from_parts(73_458, 0).saturating_mul(n.into())) + // Measured: `270 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 33_453_000 picoseconds. + Weight::from_parts(34_078_626, 3854) + // Standard Error: 2_563 + .saturating_add(Weight::from_parts(78_179, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,9 +180,9 @@ impl WeightInfo for SubstrateWeight { fn cancel_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `281` - // Estimated: `2555` - // Minimum execution time: 11_061 nanoseconds. - Weight::from_parts(11_291_000, 2555) + // Estimated: `3545` + // Minimum execution time: 12_196_000 picoseconds. + Weight::from_parts(12_580_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -195,9 +195,9 @@ impl WeightInfo for () { fn as_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `281` - // Estimated: `2555` - // Minimum execution time: 8_866 nanoseconds. - Weight::from_parts(9_065_000, 2555) + // Estimated: `3545` + // Minimum execution time: 10_405_000 picoseconds. + Weight::from_parts(10_807_000, 3545) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Recovery Proxy (r:0 w:1) @@ -206,8 +206,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_893 nanoseconds. - Weight::from_parts(9_177_000, 0) + // Minimum execution time: 11_198_000 picoseconds. + Weight::from_parts(11_459_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Recovery Recoverable (r:1 w:1) @@ -216,11 +216,11 @@ impl WeightInfo for () { fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `175` - // Estimated: `2826` - // Minimum execution time: 20_662 nanoseconds. 
- Weight::from_parts(21_378_064, 2826) - // Standard Error: 3_350 - .saturating_add(Weight::from_parts(83_738, 0).saturating_mul(n.into())) + // Estimated: `3816` + // Minimum execution time: 28_009_000 picoseconds. + Weight::from_parts(28_755_652, 3816) + // Standard Error: 3_536 + .saturating_add(Weight::from_parts(78_348, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -231,9 +231,9 @@ impl WeightInfo for () { fn initiate_recovery() -> Weight { // Proof Size summary in bytes: // Measured: `272` - // Estimated: `5690` - // Minimum execution time: 24_805 nanoseconds. - Weight::from_parts(25_273_000, 5690) + // Estimated: `3854` + // Minimum execution time: 31_233_000 picoseconds. + Weight::from_parts(31_508_000, 3854) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -244,12 +244,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `423 + n * (64 ±0)` - // Estimated: `5690` - // Minimum execution time: 17_837 nanoseconds. - Weight::from_parts(18_429_664, 5690) - // Standard Error: 3_187 - .saturating_add(Weight::from_parts(143_648, 0).saturating_mul(n.into())) + // Measured: `360 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 20_542_000 picoseconds. + Weight::from_parts(21_224_065, 3854) + // Standard Error: 3_018 + .saturating_add(Weight::from_parts(171_994, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -262,12 +262,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `455 + n * (64 ±0)` - // Estimated: `8245` - // Minimum execution time: 21_960 nanoseconds. - Weight::from_parts(22_529_644, 8245) - // Standard Error: 2_945 - .saturating_add(Weight::from_parts(85_604, 0).saturating_mul(n.into())) + // Measured: `392 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 25_141_000 picoseconds. + Weight::from_parts(25_880_238, 3854) + // Standard Error: 3_156 + .saturating_add(Weight::from_parts(54_405, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -278,12 +278,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `576 + n * (32 ±0)` - // Estimated: `5467` - // Minimum execution time: 26_054 nanoseconds. - Weight::from_parts(26_724_866, 5467) - // Standard Error: 2_645 - .saturating_add(Weight::from_parts(104_301, 0).saturating_mul(n.into())) + // Measured: `513 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 35_314_000 picoseconds. + Weight::from_parts(36_380_338, 3854) + // Standard Error: 7_396 + .saturating_add(Weight::from_parts(3_861, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -294,12 +294,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `302 + n * (32 ±0)` - // Estimated: `5690` - // Minimum execution time: 25_110 nanoseconds. 
- Weight::from_parts(25_805_837, 5690) - // Standard Error: 2_732 - .saturating_add(Weight::from_parts(73_458, 0).saturating_mul(n.into())) + // Measured: `270 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 33_453_000 picoseconds. + Weight::from_parts(34_078_626, 3854) + // Standard Error: 2_563 + .saturating_add(Weight::from_parts(78_179, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -308,9 +308,9 @@ impl WeightInfo for () { fn cancel_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `281` - // Estimated: `2555` - // Minimum execution time: 11_061 nanoseconds. - Weight::from_parts(11_291_000, 2555) + // Estimated: `3545` + // Minimum execution time: 12_196_000 picoseconds. + Weight::from_parts(12_580_000, 3545) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/referenda/Cargo.toml b/frame/referenda/Cargo.toml index ac4c8044da359..5a092c63beec0 100644 --- a/frame/referenda/Cargo.toml +++ b/frame/referenda/Cargo.toml @@ -17,7 +17,7 @@ assert_matches = { version = "1.5", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } diff --git a/frame/referenda/README.md b/frame/referenda/README.md index 85031a0113033..b9a8b022cbdb7 100644 --- a/frame/referenda/README.md +++ b/frame/referenda/README.md @@ -1,8 +1,8 @@ # Referenda Pallet -- [`assembly::Config`](https://docs.rs/pallet-assembly/latest/pallet_assembly/trait.Config.html) -- [`Call`](https://docs.rs/pallet-assembly/latest/pallet_assembly/enum.Call.html) +- [`Config`](https://docs.rs/pallet-referenda/latest/pallet_referenda/pallet/trait.Config.html) +- [`Call`](https://docs.rs/pallet-referenda/latest/pallet_referenda/pallet/enum.Call.html) ## Overview -The Assembly pallet handles the administration of general stakeholder voting. +The Referenda pallet handles the administration of general stakeholder voting. diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index f699850062267..68837376c5b33 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -874,22 +874,21 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { let when = (when.saturating_add(alarm_interval.saturating_sub(One::one())) / alarm_interval) .saturating_mul(alarm_interval); - let maybe_result = T::Scheduler::schedule( + let result = T::Scheduler::schedule( DispatchTime::At(when), None, 128u8, frame_system::RawOrigin::Root.into(), call, - ) - .ok() - .map(|x| (when, x)); + ); debug_assert!( - maybe_result.is_some(), - "Unable to schedule a new alarm at #{:?} (now: #{:?})?!", + result.is_ok(), + "Unable to schedule a new alarm at #{:?} (now: #{:?}), scheduler error: `{:?}`", when, - frame_system::Pallet::<T>::block_number() + frame_system::Pallet::<T>::block_number(), + result.unwrap_err(), ); - maybe_result + result.ok().map(|x| (when, x)) } /// Mutate a referendum's `status` into the correct deciding state.
@@ -1206,7 +1205,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { let until_approval = track.min_approval.delay(approval); let until_support = track.min_support.delay(support); let offset = until_support.max(until_approval); - deciding.since.saturating_add(offset * track.decision_period) + deciding.since.saturating_add(offset.mul_ceil(track.decision_period)) }) } diff --git a/frame/referenda/src/migration.rs b/frame/referenda/src/migration.rs index e15eb499b42fc..c27ab452ac637 100644 --- a/frame/referenda/src/migration.rs +++ b/frame/referenda/src/migration.rs @@ -191,7 +191,7 @@ pub mod test { #[test] pub fn referendum_status_v0() { // make sure the bytes of the encoded referendum v0 is decodable. - let ongoing_encoded = sp_core::Bytes::from_str("0x00000000013001012a000000000000000400000100000000000000010000000000000001000000000000000a00000000000000000000000000000000000100").unwrap(); + let ongoing_encoded = sp_core::Bytes::from_str("0x00000000012c01082a0000000000000004000100000000000000010000000000000001000000000000000a00000000000000000000000000000000000100").unwrap(); let ongoing_dec = v0::ReferendumInfoOf::<T, ()>::decode(&mut &*ongoing_encoded).unwrap(); let ongoing = v0::ReferendumInfoOf::<T, ()>::Ongoing(create_status_v0()); assert_eq!(ongoing, ongoing_dec); diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index ade2a09a9f9a4..cdedb79556f35 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -57,7 +57,7 @@ frame_support::construct_runtime!( pub struct BaseFilter; impl Contains<RuntimeCall> for BaseFilter { fn contains(call: &RuntimeCall) -> bool { - !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) + !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::force_set_balance { .. })) } } @@ -120,6 +120,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types!
{ pub static AlarmInterval: u64 = 1; @@ -295,19 +299,14 @@ impl<Class> VoteTally<u32, Class> for Tally { } pub fn set_balance_proposal(value: u64) -> Vec<u8> { - RuntimeCall::Balances(pallet_balances::Call::set_balance { - who: 42, - new_free: value, - new_reserved: 0, - }) - .encode() + RuntimeCall::Balances(pallet_balances::Call::force_set_balance { who: 42, new_free: value }) + .encode() } pub fn set_balance_proposal_bounded(value: u64) -> BoundedCallOf<Test, ()> { - let c = RuntimeCall::Balances(pallet_balances::Call::set_balance { + let c = RuntimeCall::Balances(pallet_balances::Call::force_set_balance { who: 42, new_free: value, - new_reserved: 0, }); <Preimage as StorePreimage>::bound(c).unwrap() } diff --git a/frame/referenda/src/tests.rs b/frame/referenda/src/tests.rs index f728350c37e6f..0a1561d001a7d 100644 --- a/frame/referenda/src/tests.rs +++ b/frame/referenda/src/tests.rs @@ -286,6 +286,24 @@ fn alarm_interval_works() { }); } +#[test] +fn decision_time_is_correct() { + new_test_ext().execute_with(|| { + let decision_time = |since: u64| { + Pallet::<Test>::decision_time( + &DecidingStatus { since: since.into(), confirming: None }, + &Tally { ayes: 100, nays: 5 }, + TestTracksInfo::tracks()[0].0, + &TestTracksInfo::tracks()[0].1, + ) + }; + + for i in 0u64..=100 { + assert!(decision_time(i) > i, "The decision time should be delayed by the curve"); + } + }); +} + #[test] fn auto_timeout_should_happen_with_nothing_but_submit() { new_test_ext().execute_with(|| { diff --git a/frame/referenda/src/weights.rs b/frame/referenda/src/weights.rs index 817e0d88d329d..464e60dc581e6 100644 --- a/frame/referenda/src/weights.rs +++ b/frame/referenda/src/weights.rs @@ -18,26 +18,25 @@ //! Autogenerated weights for pallet_referenda //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-b3zmxxc-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/referenda/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -92,10 +91,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `251` - // Estimated: `109996` - // Minimum execution time: 34_540 nanoseconds. - Weight::from_parts(36_144_000, 109996) + // Measured: `220` + // Estimated: `110487` + // Minimum execution time: 42_285_000 picoseconds.
+ Weight::from_parts(42_646_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -105,10 +104,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `536` - // Estimated: `221835` - // Minimum execution time: 46_963 nanoseconds. - Weight::from_parts(48_459_000, 221835) + // Measured: `473` + // Estimated: `219984` + // Minimum execution time: 53_455_000 picoseconds. + Weight::from_parts(54_034_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -120,10 +119,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3203` - // Estimated: `9817` - // Minimum execution time: 55_798 nanoseconds. - Weight::from_parts(58_260_000, 9817) + // Measured: `3107` + // Estimated: `5477` + // Minimum execution time: 50_138_000 picoseconds. + Weight::from_parts(50_449_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -135,10 +134,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3223` - // Estimated: `9817` - // Minimum execution time: 53_888 nanoseconds. - Weight::from_parts(57_919_000, 9817) + // Measured: `3127` + // Estimated: `5477` + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(50_246_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -150,10 +149,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `536` - // Estimated: `224324` - // Minimum execution time: 56_121 nanoseconds. - Weight::from_parts(58_301_000, 224324) + // Measured: `473` + // Estimated: `219984` + // Minimum execution time: 62_880_000 picoseconds. + Weight::from_parts(63_579_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -165,10 +164,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `536` - // Estimated: `224324` - // Minimum execution time: 54_237 nanoseconds. - Weight::from_parts(55_681_000, 224324) + // Measured: `473` + // Estimated: `219984` + // Minimum execution time: 60_827_000 picoseconds. 
+ Weight::from_parts(61_392_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -176,10 +175,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `415` - // Estimated: `2841` - // Minimum execution time: 25_734 nanoseconds. - Weight::from_parts(26_429_000, 2841) + // Measured: `351` + // Estimated: `3831` + // Minimum execution time: 31_991_000 picoseconds. + Weight::from_parts(32_474_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -187,10 +186,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `405` - // Estimated: `2841` - // Minimum execution time: 26_000 nanoseconds. - Weight::from_parts(26_786_000, 2841) + // Measured: `341` + // Estimated: `3831` + // Minimum execution time: 32_162_000 picoseconds. + Weight::from_parts(32_776_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -200,10 +199,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `221835` - // Minimum execution time: 34_567 nanoseconds. - Weight::from_parts(35_939_000, 221835) + // Measured: `381` + // Estimated: `219984` + // Minimum execution time: 37_493_000 picoseconds. + Weight::from_parts(37_979_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -215,10 +214,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `717` - // Estimated: `224362` - // Minimum execution time: 67_744 nanoseconds. - Weight::from_parts(70_047_000, 224362) + // Measured: `622` + // Estimated: `219984` + // Minimum execution time: 80_095_000 picoseconds. + Weight::from_parts(80_831_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -229,9 +228,9 @@ impl WeightInfo for SubstrateWeight { fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: // Measured: `174` - // Estimated: `6976` - // Minimum execution time: 9_886 nanoseconds. - Weight::from_parts(10_406_000, 6976) + // Estimated: `5477` + // Minimum execution time: 10_906_000 picoseconds. + Weight::from_parts(11_055_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -243,10 +242,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `4661` - // Estimated: `226322` - // Minimum execution time: 100_449 nanoseconds. - Weight::from_parts(101_812_000, 226322) + // Measured: `4567` + // Estimated: `219984` + // Minimum execution time: 90_747_000 picoseconds. 
+ Weight::from_parts(91_407_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -258,10 +257,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `4661` - // Estimated: `226322` - // Minimum execution time: 101_430 nanoseconds. - Weight::from_parts(103_704_000, 226322) + // Measured: `4567` + // Estimated: `219984` + // Minimum execution time: 93_615_000 picoseconds. + Weight::from_parts(94_245_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -273,10 +272,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `4682` - // Estimated: `116825` - // Minimum execution time: 67_224 nanoseconds. - Weight::from_parts(70_596_000, 116825) + // Measured: `4588` + // Estimated: `110487` + // Minimum execution time: 60_945_000 picoseconds. + Weight::from_parts(61_246_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -288,10 +287,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `4668` - // Estimated: `116825` - // Minimum execution time: 65_461 nanoseconds. - Weight::from_parts(69_624_000, 116825) + // Measured: `4574` + // Estimated: `110487` + // Minimum execution time: 60_105_000 picoseconds. + Weight::from_parts(60_544_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -305,10 +304,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4642` - // Estimated: `119314` - // Minimum execution time: 69_848 nanoseconds. - Weight::from_parts(74_480_000, 119314) + // Measured: `4548` + // Estimated: `110487` + // Minimum execution time: 62_251_000 picoseconds. + Weight::from_parts(62_952_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -322,10 +321,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4676` - // Estimated: `119314` - // Minimum execution time: 70_042 nanoseconds. - Weight::from_parts(72_912_000, 119314) + // Measured: `4582` + // Estimated: `110487` + // Minimum execution time: 61_527_000 picoseconds. 
+ Weight::from_parts(62_082_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -335,10 +334,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `112338` - // Minimum execution time: 23_008 nanoseconds. - Weight::from_parts(23_767_000, 112338) + // Measured: `333` + // Estimated: `110487` + // Minimum execution time: 24_897_000 picoseconds. + Weight::from_parts(25_213_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -348,10 +347,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `112338` - // Minimum execution time: 23_550 nanoseconds. - Weight::from_parts(24_081_000, 112338) + // Measured: `381` + // Estimated: `110487` + // Minimum execution time: 25_077_000 picoseconds. + Weight::from_parts(25_385_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -359,10 +358,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `310` - // Estimated: `2841` - // Minimum execution time: 15_850 nanoseconds. - Weight::from_parts(16_773_000, 2841) + // Measured: `278` + // Estimated: `3831` + // Minimum execution time: 17_930_000 picoseconds. + Weight::from_parts(18_112_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -374,10 +373,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `114827` - // Minimum execution time: 32_126 nanoseconds. - Weight::from_parts(33_313_000, 114827) + // Measured: `381` + // Estimated: `110487` + // Minimum execution time: 34_405_000 picoseconds. + Weight::from_parts(34_698_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -389,10 +388,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `114827` - // Minimum execution time: 34_698 nanoseconds. - Weight::from_parts(35_802_000, 114827) + // Measured: `381` + // Estimated: `110487` + // Minimum execution time: 37_313_000 picoseconds. 
+ Weight::from_parts(37_807_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -402,10 +401,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `112338` - // Minimum execution time: 28_710 nanoseconds. - Weight::from_parts(29_574_000, 112338) + // Measured: `434` + // Estimated: `110487` + // Minimum execution time: 30_552_000 picoseconds. + Weight::from_parts(30_817_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -415,10 +414,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `112338` - // Minimum execution time: 29_030 nanoseconds. - Weight::from_parts(30_308_000, 112338) + // Measured: `417` + // Estimated: `110487` + // Minimum execution time: 31_100_000 picoseconds. + Weight::from_parts(31_696_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -428,10 +427,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `112338` - // Minimum execution time: 26_382 nanoseconds. - Weight::from_parts(27_219_000, 112338) + // Measured: `434` + // Estimated: `110487` + // Minimum execution time: 28_777_000 picoseconds. + Weight::from_parts(29_188_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -441,10 +440,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `469` - // Estimated: `112338` - // Minimum execution time: 25_445 nanoseconds. - Weight::from_parts(26_010_000, 112338) + // Measured: `438` + // Estimated: `110487` + // Minimum execution time: 26_986_000 picoseconds. + Weight::from_parts(27_283_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -456,10 +455,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `469` - // Estimated: `224358` - // Minimum execution time: 41_064 nanoseconds. - Weight::from_parts(42_895_000, 224358) + // Measured: `438` + // Estimated: `219984` + // Minimum execution time: 43_538_000 picoseconds. 
+ Weight::from_parts(44_671_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -469,10 +468,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `112338` - // Minimum execution time: 29_472 nanoseconds. - Weight::from_parts(30_011_000, 112338) + // Measured: `434` + // Estimated: `110487` + // Minimum execution time: 30_559_000 picoseconds. + Weight::from_parts(31_294_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -484,10 +483,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `454` - // Estimated: `5407` - // Minimum execution time: 19_389 nanoseconds. - Weight::from_parts(20_490_000, 5407) + // Measured: `422` + // Estimated: `3831` + // Minimum execution time: 21_196_000 picoseconds. + Weight::from_parts(21_593_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -497,10 +496,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `387` - // Estimated: `5368` - // Minimum execution time: 18_195 nanoseconds. - Weight::from_parts(19_917_000, 5368) + // Measured: `355` + // Estimated: `3831` + // Minimum execution time: 18_827_000 picoseconds. + Weight::from_parts(19_171_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -516,10 +515,10 @@ impl WeightInfo for () { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `251` - // Estimated: `109996` - // Minimum execution time: 34_540 nanoseconds. - Weight::from_parts(36_144_000, 109996) + // Measured: `220` + // Estimated: `110487` + // Minimum execution time: 42_285_000 picoseconds. + Weight::from_parts(42_646_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -529,10 +528,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `536` - // Estimated: `221835` - // Minimum execution time: 46_963 nanoseconds. - Weight::from_parts(48_459_000, 221835) + // Measured: `473` + // Estimated: `219984` + // Minimum execution time: 53_455_000 picoseconds. + Weight::from_parts(54_034_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -544,10 +543,10 @@ impl WeightInfo for () { /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3203` - // Estimated: `9817` - // Minimum execution time: 55_798 nanoseconds. 
- Weight::from_parts(58_260_000, 9817) + // Measured: `3107` + // Estimated: `5477` + // Minimum execution time: 50_138_000 picoseconds. + Weight::from_parts(50_449_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -559,10 +558,10 @@ impl WeightInfo for () { /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3223` - // Estimated: `9817` - // Minimum execution time: 53_888 nanoseconds. - Weight::from_parts(57_919_000, 9817) + // Measured: `3127` + // Estimated: `5477` + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(50_246_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -574,10 +573,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `536` - // Estimated: `224324` - // Minimum execution time: 56_121 nanoseconds. - Weight::from_parts(58_301_000, 224324) + // Measured: `473` + // Estimated: `219984` + // Minimum execution time: 62_880_000 picoseconds. + Weight::from_parts(63_579_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,10 +588,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `536` - // Estimated: `224324` - // Minimum execution time: 54_237 nanoseconds. - Weight::from_parts(55_681_000, 224324) + // Measured: `473` + // Estimated: `219984` + // Minimum execution time: 60_827_000 picoseconds. + Weight::from_parts(61_392_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -600,10 +599,10 @@ impl WeightInfo for () { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `415` - // Estimated: `2841` - // Minimum execution time: 25_734 nanoseconds. - Weight::from_parts(26_429_000, 2841) + // Measured: `351` + // Estimated: `3831` + // Minimum execution time: 31_991_000 picoseconds. + Weight::from_parts(32_474_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -611,10 +610,10 @@ impl WeightInfo for () { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `405` - // Estimated: `2841` - // Minimum execution time: 26_000 nanoseconds. - Weight::from_parts(26_786_000, 2841) + // Measured: `341` + // Estimated: `3831` + // Minimum execution time: 32_162_000 picoseconds. 
+ Weight::from_parts(32_776_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -624,10 +623,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `221835` - // Minimum execution time: 34_567 nanoseconds. - Weight::from_parts(35_939_000, 221835) + // Measured: `381` + // Estimated: `219984` + // Minimum execution time: 37_493_000 picoseconds. + Weight::from_parts(37_979_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -639,10 +638,10 @@ impl WeightInfo for () { /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `717` - // Estimated: `224362` - // Minimum execution time: 67_744 nanoseconds. - Weight::from_parts(70_047_000, 224362) + // Measured: `622` + // Estimated: `219984` + // Minimum execution time: 80_095_000 picoseconds. + Weight::from_parts(80_831_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -653,9 +652,9 @@ impl WeightInfo for () { fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: // Measured: `174` - // Estimated: `6976` - // Minimum execution time: 9_886 nanoseconds. - Weight::from_parts(10_406_000, 6976) + // Estimated: `5477` + // Minimum execution time: 10_906_000 picoseconds. + Weight::from_parts(11_055_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -667,10 +666,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `4661` - // Estimated: `226322` - // Minimum execution time: 100_449 nanoseconds. - Weight::from_parts(101_812_000, 226322) + // Measured: `4567` + // Estimated: `219984` + // Minimum execution time: 90_747_000 picoseconds. + Weight::from_parts(91_407_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -682,10 +681,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `4661` - // Estimated: `226322` - // Minimum execution time: 101_430 nanoseconds. - Weight::from_parts(103_704_000, 226322) + // Measured: `4567` + // Estimated: `219984` + // Minimum execution time: 93_615_000 picoseconds. + Weight::from_parts(94_245_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -697,10 +696,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `4682` - // Estimated: `116825` - // Minimum execution time: 67_224 nanoseconds. - Weight::from_parts(70_596_000, 116825) + // Measured: `4588` + // Estimated: `110487` + // Minimum execution time: 60_945_000 picoseconds. 
+ Weight::from_parts(61_246_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -712,10 +711,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `4668` - // Estimated: `116825` - // Minimum execution time: 65_461 nanoseconds. - Weight::from_parts(69_624_000, 116825) + // Measured: `4574` + // Estimated: `110487` + // Minimum execution time: 60_105_000 picoseconds. + Weight::from_parts(60_544_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -729,10 +728,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4642` - // Estimated: `119314` - // Minimum execution time: 69_848 nanoseconds. - Weight::from_parts(74_480_000, 119314) + // Measured: `4548` + // Estimated: `110487` + // Minimum execution time: 62_251_000 picoseconds. + Weight::from_parts(62_952_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -746,10 +745,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4676` - // Estimated: `119314` - // Minimum execution time: 70_042 nanoseconds. - Weight::from_parts(72_912_000, 119314) + // Measured: `4582` + // Estimated: `110487` + // Minimum execution time: 61_527_000 picoseconds. + Weight::from_parts(62_082_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -759,10 +758,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `112338` - // Minimum execution time: 23_008 nanoseconds. - Weight::from_parts(23_767_000, 112338) + // Measured: `333` + // Estimated: `110487` + // Minimum execution time: 24_897_000 picoseconds. + Weight::from_parts(25_213_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -772,10 +771,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `112338` - // Minimum execution time: 23_550 nanoseconds. - Weight::from_parts(24_081_000, 112338) + // Measured: `381` + // Estimated: `110487` + // Minimum execution time: 25_077_000 picoseconds. + Weight::from_parts(25_385_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -783,10 +782,10 @@ impl WeightInfo for () { /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `310` - // Estimated: `2841` - // Minimum execution time: 15_850 nanoseconds. 
- Weight::from_parts(16_773_000, 2841) + // Measured: `278` + // Estimated: `3831` + // Minimum execution time: 17_930_000 picoseconds. + Weight::from_parts(18_112_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -798,10 +797,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `114827` - // Minimum execution time: 32_126 nanoseconds. - Weight::from_parts(33_313_000, 114827) + // Measured: `381` + // Estimated: `110487` + // Minimum execution time: 34_405_000 picoseconds. + Weight::from_parts(34_698_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -813,10 +812,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `412` - // Estimated: `114827` - // Minimum execution time: 34_698 nanoseconds. - Weight::from_parts(35_802_000, 114827) + // Measured: `381` + // Estimated: `110487` + // Minimum execution time: 37_313_000 picoseconds. + Weight::from_parts(37_807_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -826,10 +825,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `112338` - // Minimum execution time: 28_710 nanoseconds. - Weight::from_parts(29_574_000, 112338) + // Measured: `434` + // Estimated: `110487` + // Minimum execution time: 30_552_000 picoseconds. + Weight::from_parts(30_817_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -839,10 +838,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `112338` - // Minimum execution time: 29_030 nanoseconds. - Weight::from_parts(30_308_000, 112338) + // Measured: `417` + // Estimated: `110487` + // Minimum execution time: 31_100_000 picoseconds. + Weight::from_parts(31_696_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -852,10 +851,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `112338` - // Minimum execution time: 26_382 nanoseconds. - Weight::from_parts(27_219_000, 112338) + // Measured: `434` + // Estimated: `110487` + // Minimum execution time: 28_777_000 picoseconds. 
+ Weight::from_parts(29_188_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -865,10 +864,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `469` - // Estimated: `112338` - // Minimum execution time: 25_445 nanoseconds. - Weight::from_parts(26_010_000, 112338) + // Measured: `438` + // Estimated: `110487` + // Minimum execution time: 26_986_000 picoseconds. + Weight::from_parts(27_283_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -880,10 +879,10 @@ impl WeightInfo for () { /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `469` - // Estimated: `224358` - // Minimum execution time: 41_064 nanoseconds. - Weight::from_parts(42_895_000, 224358) + // Measured: `438` + // Estimated: `219984` + // Minimum execution time: 43_538_000 picoseconds. + Weight::from_parts(44_671_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -893,10 +892,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `112338` - // Minimum execution time: 29_472 nanoseconds. - Weight::from_parts(30_011_000, 112338) + // Measured: `434` + // Estimated: `110487` + // Minimum execution time: 30_559_000 picoseconds. + Weight::from_parts(31_294_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -908,10 +907,10 @@ impl WeightInfo for () { /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `454` - // Estimated: `5407` - // Minimum execution time: 19_389 nanoseconds. - Weight::from_parts(20_490_000, 5407) + // Measured: `422` + // Estimated: `3831` + // Minimum execution time: 21_196_000 picoseconds. + Weight::from_parts(21_593_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -921,10 +920,10 @@ impl WeightInfo for () { /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `387` - // Estimated: `5368` - // Minimum execution time: 18_195 nanoseconds. - Weight::from_parts(19_917_000, 5368) + // Measured: `355` + // Estimated: `3831` + // Minimum execution time: 18_827_000 picoseconds. 
+ Weight::from_parts(19_171_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/remark/Cargo.toml b/frame/remark/Cargo.toml index 151cc38884490..5fc595d710680 100644 --- a/frame/remark/Cargo.toml +++ b/frame/remark/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs index f9fcb73803716..9aa56eb339fcb 100644 --- a/frame/remark/src/weights.rs +++ b/frame/remark/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_remark //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -59,10 +59,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_404 nanoseconds. - Weight::from_parts(343_031, 0) + // Minimum execution time: 9_301_000 picoseconds. + Weight::from_parts(2_516_065, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(l.into())) } } @@ -73,9 +73,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_404 nanoseconds. - Weight::from_parts(343_031, 0) + // Minimum execution time: 9_301_000 picoseconds. 
+ Weight::from_parts(2_516_065, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(l.into())) } } diff --git a/frame/root-offences/Cargo.toml b/frame/root-offences/Cargo.toml index fd1f7c6325ea2..58fa281dc3d76 100644 --- a/frame/root-offences/Cargo.toml +++ b/frame/root-offences/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../frame/session", default-features = false } pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../frame/staking" } diff --git a/frame/root-offences/src/mock.rs b/frame/root-offences/src/mock.rs index 0937c43d6e519..828551e4d9c19 100644 --- a/frame/root-offences/src/mock.rs +++ b/frame/root-offences/src/mock.rs @@ -121,6 +121,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } pallet_staking_reward_curve::build! { diff --git a/frame/root-testing/Cargo.toml b/frame/root-testing/Cargo.toml index 09262b3d31db2..6e8c13f775de0 100644 --- a/frame/root-testing/Cargo.toml +++ b/frame/root-testing/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } diff --git a/frame/salary/src/lib.rs b/frame/salary/src/lib.rs index 6f9e63271cc2f..0b2b4b47d8b70 100644 --- a/frame/salary/src/lib.rs +++ b/frame/salary/src/lib.rs @@ -20,18 +20,17 @@ #![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "128"] -use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_arithmetic::traits::{Saturating, Zero}; -use sp_core::TypedGet; use sp_runtime::Perbill; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; +use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ dispatch::DispatchResultWithPostInfo, ensure, traits::{ - tokens::{fungible, Balance, GetSalary}, + tokens::{GetSalary, Pay, PaymentStatus}, RankedMembers, }, RuntimeDebug, @@ -50,67 +49,6 @@ pub use weights::WeightInfo; /// Payroll cycle. pub type Cycle = u32; -/// Status for making a payment via the `Pay::pay` trait function. -#[derive(Encode, Decode, Eq, PartialEq, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] -pub enum PaymentStatus { - /// Payment is in progress. Nothing to report yet. - InProgress, - /// Payment status is unknowable. It will never be reported successful or failed. - Unknown, - /// Payment happened successfully. 
- Success, - /// Payment failed. It may safely be retried. - Failure, -} - -/// Can be implemented by `PayFromAccount` using a `fungible` impl, but can also be implemented with -/// XCM/MultiAsset and made generic over assets. -pub trait Pay { - /// The type by which we measure units of the currency in which we make payments. - type Balance: Balance; - /// The type by which we identify the individuals to whom a payment may be made. - type AccountId; - /// An identifier given to an individual payment. - type Id: FullCodec + MaxEncodedLen + TypeInfo + Clone + Eq + PartialEq + Debug + Copy; - /// Make a payment and return an identifier for later evaluation of success in some off-chain - /// mechanism (likely an event, but possibly not on this chain). - fn pay(who: &Self::AccountId, amount: Self::Balance) -> Result; - /// Check how a payment has proceeded. `id` must have been a previously returned by `pay` for - /// the result of this call to be meaningful. - fn check_payment(id: Self::Id) -> PaymentStatus; - /// Ensure that a call to pay with the given parameters will be successful if done immediately - /// after this call. Used in benchmarking code. - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(who: &Self::AccountId, amount: Self::Balance); - /// Ensure that a call to `check_payment` with the given parameters will return either `Success` - /// or `Failure`. - #[cfg(feature = "runtime-benchmarks")] - fn ensure_concluded(id: Self::Id); -} - -/// Simple implementation of `Pay` which makes a payment from a "pot" - i.e. a single account. -pub struct PayFromAccount(sp_std::marker::PhantomData<(F, A)>); -impl + fungible::Mutate> Pay - for PayFromAccount -{ - type Balance = F::Balance; - type AccountId = A::Type; - type Id = (); - fn pay(who: &Self::AccountId, amount: Self::Balance) -> Result { - >::transfer(&A::get(), who, amount, false).map_err(|_| ())?; - Ok(()) - } - fn check_payment(_: ()) -> PaymentStatus { - PaymentStatus::Success - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(_: &Self::AccountId, amount: Self::Balance) { - >::mint_into(&A::get(), amount).unwrap(); - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_concluded(_: Self::Id) {} -} - /// The status of the pallet instance. #[derive(Encode, Decode, Eq, PartialEq, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] pub struct StatusType { @@ -168,7 +106,7 @@ pub mod pallet { /// Means by which we can make payments to accounts. This also defines the currency and the /// balance which we use to denote that currency. - type Paymaster: Pay::AccountId>; + type Paymaster: Pay::AccountId, AssetKind = ()>; /// The current membership of payees. 
type Members: RankedMembers::AccountId>; @@ -498,8 +436,8 @@ pub mod pallet { claimant.last_active = status.cycle_index; - let id = - T::Paymaster::pay(&beneficiary, payout).map_err(|()| Error::::PayError)?; + let id = T::Paymaster::pay(&beneficiary, (), payout) + .map_err(|()| Error::::PayError)?; claimant.status = Attempted { registered, id, amount: payout }; diff --git a/frame/salary/src/tests.rs b/frame/salary/src/tests.rs index e54b9612b152c..1b7bc6cbb6df5 100644 --- a/frame/salary/src/tests.rs +++ b/frame/salary/src/tests.rs @@ -99,11 +99,16 @@ fn set_status(id: u64, s: PaymentStatus) { pub struct TestPay; impl Pay for TestPay { - type AccountId = u64; + type Beneficiary = u64; type Balance = u64; type Id = u64; + type AssetKind = (); - fn pay(who: &Self::AccountId, amount: Self::Balance) -> Result { + fn pay( + who: &Self::Beneficiary, + _: Self::AssetKind, + amount: Self::Balance, + ) -> Result { PAID.with(|paid| *paid.borrow_mut().entry(*who).or_default() += amount); Ok(LAST_ID.with(|lid| { let x = *lid.borrow(); @@ -115,7 +120,7 @@ impl Pay for TestPay { STATUS.with(|s| s.borrow().get(&id).cloned().unwrap_or(PaymentStatus::Unknown)) } #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(_: &Self::AccountId, _: Self::Balance) {} + fn ensure_successful(_: &Self::Beneficiary, _: Self::Balance) {} #[cfg(feature = "runtime-benchmarks")] fn ensure_concluded(id: Self::Id) { set_status(id, PaymentStatus::Failure) diff --git a/frame/salary/src/weights.rs b/frame/salary/src/weights.rs index 6b43c58b6038c..074bff4da170d 100644 --- a/frame/salary/src/weights.rs +++ b/frame/salary/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_salary //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -27,17 +27,17 @@ // ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 -// --extrinsic=* // --pallet=pallet_salary +// --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --chain=dev -// --header=HEADER-APACHE2 -// --output=frame/salary/src/weights.rs -// --template=.maintain/frame-weight-template.hbs +// --output=./frame/salary/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,8 +66,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 11_793_000 picoseconds. - Weight::from_parts(12_080_000, 1541) + // Minimum execution time: 11_785_000 picoseconds. + Weight::from_parts(12_086_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,10 +75,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) fn bump() -> Weight { // Proof Size summary in bytes: - // Measured: `118` + // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_799_000 picoseconds. - Weight::from_parts(13_343_000, 1541) + // Minimum execution time: 12_721_000 picoseconds. 
+ Weight::from_parts(13_033_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -90,10 +90,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn induct() -> Weight { // Proof Size summary in bytes: - // Measured: `394` - // Estimated: `8591` - // Minimum execution time: 20_299_000 picoseconds. - Weight::from_parts(20_664_000, 8591) + // Measured: `362` + // Estimated: `3543` + // Minimum execution time: 19_516_000 picoseconds. + Weight::from_parts(19_938_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -105,10 +105,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `8591` - // Minimum execution time: 24_195_000 picoseconds. - Weight::from_parts(24_528_000, 8591) + // Measured: `429` + // Estimated: `3543` + // Minimum execution time: 23_145_000 picoseconds. + Weight::from_parts(23_804_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -120,10 +120,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `8591` - // Minimum execution time: 47_609_000 picoseconds. - Weight::from_parts(48_462_000, 8591) + // Measured: `429` + // Estimated: `3543` + // Minimum execution time: 62_187_000 picoseconds. + Weight::from_parts(63_016_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -137,10 +137,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `12184` - // Minimum execution time: 48_513_000 picoseconds. - Weight::from_parts(49_053_000, 12184) + // Measured: `429` + // Estimated: `3593` + // Minimum execution time: 63_828_000 picoseconds. + Weight::from_parts(64_791_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -150,10 +150,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn check_payment() -> Weight { // Proof Size summary in bytes: - // Measured: `202` - // Estimated: `5084` - // Minimum execution time: 12_663_000 picoseconds. - Weight::from_parts(12_858_000, 5084) + // Measured: `170` + // Estimated: `3543` + // Minimum execution time: 12_911_000 picoseconds. + Weight::from_parts(13_079_000, 3543) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,8 +167,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 11_793_000 picoseconds. - Weight::from_parts(12_080_000, 1541) + // Minimum execution time: 11_785_000 picoseconds. 
+ Weight::from_parts(12_086_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -176,10 +176,10 @@ impl WeightInfo for () { /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) fn bump() -> Weight { // Proof Size summary in bytes: - // Measured: `118` + // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_799_000 picoseconds. - Weight::from_parts(13_343_000, 1541) + // Minimum execution time: 12_721_000 picoseconds. + Weight::from_parts(13_033_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -191,10 +191,10 @@ impl WeightInfo for () { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn induct() -> Weight { // Proof Size summary in bytes: - // Measured: `394` - // Estimated: `8591` - // Minimum execution time: 20_299_000 picoseconds. - Weight::from_parts(20_664_000, 8591) + // Measured: `362` + // Estimated: `3543` + // Minimum execution time: 19_516_000 picoseconds. + Weight::from_parts(19_938_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -206,10 +206,10 @@ impl WeightInfo for () { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `8591` - // Minimum execution time: 24_195_000 picoseconds. - Weight::from_parts(24_528_000, 8591) + // Measured: `429` + // Estimated: `3543` + // Minimum execution time: 23_145_000 picoseconds. + Weight::from_parts(23_804_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -221,10 +221,10 @@ impl WeightInfo for () { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `8591` - // Minimum execution time: 47_609_000 picoseconds. - Weight::from_parts(48_462_000, 8591) + // Measured: `429` + // Estimated: `3543` + // Minimum execution time: 62_187_000 picoseconds. + Weight::from_parts(63_016_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -238,10 +238,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `12184` - // Minimum execution time: 48_513_000 picoseconds. - Weight::from_parts(49_053_000, 12184) + // Measured: `429` + // Estimated: `3593` + // Minimum execution time: 63_828_000 picoseconds. + Weight::from_parts(64_791_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -251,10 +251,10 @@ impl WeightInfo for () { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn check_payment() -> Weight { // Proof Size summary in bytes: - // Measured: `202` - // Estimated: `5084` - // Minimum execution time: 12_663_000 picoseconds. - Weight::from_parts(12_858_000, 5084) + // Measured: `170` + // Estimated: `3543` + // Minimum execution time: 12_911_000 picoseconds. 
+ Weight::from_parts(13_079_000, 3543) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 9a67e68a01640..50afac933f119 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -12,7 +12,7 @@ readme = "README.md" [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/scheduler/README.md b/frame/scheduler/README.md index 9a209031d7402..bdd2c2226c88e 100644 --- a/frame/scheduler/README.md +++ b/frame/scheduler/README.md @@ -31,4 +31,4 @@ then those filter will not be used when dispatching the schedule call. `Vec` parameter that can be used for identification. * `cancel_named` - the named complement to the cancel function. -License: Unlicense +License: Apache 2.0 diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 09bb3561188f8..8194f286c8323 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -216,6 +216,10 @@ pub mod pallet { type OriginPrivilegeCmp: PrivilegeCmp; /// The maximum number of scheduled calls in the queue for a single block. + /// + /// NOTE: + /// + Dependent pallets' benchmarks might require a higher limit for the setting. Set a + /// higher limit under `runtime-benchmarks` feature. #[pallet::constant] type MaxScheduledPerBlock: Get; diff --git a/frame/scheduler/src/migration.rs b/frame/scheduler/src/migration.rs index e641d2895aa0c..5b3a7631eeac9 100644 --- a/frame/scheduler/src/migration.rs +++ b/frame/scheduler/src/migration.rs @@ -297,8 +297,9 @@ pub mod v4 { target: TARGET, "Did not clean up any agendas. v4::CleanupAgendas can be removed." ), - Some(n) => - log::info!(target: TARGET, "Cleaned up {} agendas, now {}", n, new_agendas), + Some(n) => { + log::info!(target: TARGET, "Cleaned up {} agendas, now {}", n, new_agendas) + }, None => unreachable!( "Number of agendas cannot increase, old {} new {}", old_agendas, new_agendas @@ -560,8 +561,9 @@ mod test { "Agenda {} should be removed", i ), - Some(new) => - assert_eq!(Agenda::::get(i as u64), new, "Agenda wrong {}", i), + Some(new) => { + assert_eq!(Agenda::::get(i as u64), new, "Agenda wrong {}", i) + }, } } }); diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index 577e5b0bc4b2d..897363f134e7b 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_scheduler //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -69,10 +69,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn service_agendas_base() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `499` - // Minimum execution time: 3_670 nanoseconds. - Weight::from_parts(3_838_000, 499) + // Measured: `31` + // Estimated: `1489` + // Minimum execution time: 3_776_000 picoseconds. + Weight::from_parts(3_992_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -81,12 +81,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 512]`. fn service_agenda_base(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112 + s * (177 ±0)` - // Estimated: `109497` - // Minimum execution time: 3_079 nanoseconds. - Weight::from_parts(7_087_647, 109497) - // Standard Error: 658 - .saturating_add(Weight::from_parts(279_320, 0).saturating_mul(s.into())) + // Measured: `81 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 3_418_000 picoseconds. + Weight::from_parts(8_606_012, 110487) + // Standard Error: 769 + .saturating_add(Weight::from_parts(309_376, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -94,8 +94,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_192 nanoseconds. - Weight::from_parts(5_528_000, 0) + // Minimum execution time: 5_624_000 picoseconds. + Weight::from_parts(5_758_000, 0) } /// Storage: Preimage PreimageFor (r:1 w:1) /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) @@ -104,12 +104,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211 + s * (1 ±0)` - // Estimated: `5252 + s * (1 ±0)` - // Minimum execution time: 17_284 nanoseconds. - Weight::from_parts(17_574_000, 5252) + // Measured: `179 + s * (1 ±0)` + // Estimated: `3644 + s * (1 ±0)` + // Minimum execution time: 20_150_000 picoseconds. + Weight::from_parts(20_271_000, 3644) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_126, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_132, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -120,42 +120,42 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_020 nanoseconds. - Weight::from_parts(7_262_000, 0) + // Minimum execution time: 7_451_000 picoseconds. + Weight::from_parts(7_693_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_187 nanoseconds. - Weight::from_parts(5_368_000, 0) + // Minimum execution time: 5_477_000 picoseconds. + Weight::from_parts(5_733_000, 0) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_313 nanoseconds. 
- Weight::from_parts(2_404_000, 0) + // Minimum execution time: 2_675_000 picoseconds. + Weight::from_parts(2_870_000, 0) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_187 nanoseconds. - Weight::from_parts(2_362_000, 0) + // Minimum execution time: 2_697_000 picoseconds. + Weight::from_parts(2_807_000, 0) } /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112 + s * (177 ±0)` - // Estimated: `109497` - // Minimum execution time: 11_971 nanoseconds. - Weight::from_parts(16_060_361, 109497) - // Standard Error: 665 - .saturating_add(Weight::from_parts(286_324, 0).saturating_mul(s.into())) + // Measured: `81 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 13_921_000 picoseconds. + Weight::from_parts(18_717_223, 110487) + // Standard Error: 771 + .saturating_add(Weight::from_parts(333_102, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -166,12 +166,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 512]`. fn cancel(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112 + s * (177 ±0)` - // Estimated: `109497` - // Minimum execution time: 15_594 nanoseconds. - Weight::from_parts(17_191_501, 109497) - // Standard Error: 626 - .saturating_add(Weight::from_parts(425_572, 0).saturating_mul(s.into())) + // Measured: `81 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 17_552_000 picoseconds. + Weight::from_parts(19_006_016, 110487) + // Standard Error: 1_115 + .saturating_add(Weight::from_parts(495_979, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -182,12 +182,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `627 + s * (178 ±0)` - // Estimated: `112020` - // Minimum execution time: 15_127 nanoseconds. - Weight::from_parts(20_932_642, 112020) - // Standard Error: 692 - .saturating_add(Weight::from_parts(288_344, 0).saturating_mul(s.into())) + // Measured: `596 + s * (178 ±0)` + // Estimated: `110487` + // Minimum execution time: 17_240_000 picoseconds. + Weight::from_parts(24_376_370, 110487) + // Standard Error: 928 + .saturating_add(Weight::from_parts(331_209, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -198,12 +198,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `740 + s * (177 ±0)` - // Estimated: `112020` - // Minimum execution time: 16_859 nanoseconds. - Weight::from_parts(19_736_937, 112020) - // Standard Error: 676 - .saturating_add(Weight::from_parts(429_770, 0).saturating_mul(s.into())) + // Measured: `709 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 19_731_000 picoseconds. 
+ Weight::from_parts(23_787_948, 110487) + // Standard Error: 1_133 + .saturating_add(Weight::from_parts(503_805, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -215,10 +215,10 @@ impl WeightInfo for () { /// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn service_agendas_base() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `499` - // Minimum execution time: 3_670 nanoseconds. - Weight::from_parts(3_838_000, 499) + // Measured: `31` + // Estimated: `1489` + // Minimum execution time: 3_776_000 picoseconds. + Weight::from_parts(3_992_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -227,12 +227,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 512]`. fn service_agenda_base(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112 + s * (177 ±0)` - // Estimated: `109497` - // Minimum execution time: 3_079 nanoseconds. - Weight::from_parts(7_087_647, 109497) - // Standard Error: 658 - .saturating_add(Weight::from_parts(279_320, 0).saturating_mul(s.into())) + // Measured: `81 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 3_418_000 picoseconds. + Weight::from_parts(8_606_012, 110487) + // Standard Error: 769 + .saturating_add(Weight::from_parts(309_376, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -240,8 +240,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_192 nanoseconds. - Weight::from_parts(5_528_000, 0) + // Minimum execution time: 5_624_000 picoseconds. + Weight::from_parts(5_758_000, 0) } /// Storage: Preimage PreimageFor (r:1 w:1) /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) @@ -250,12 +250,12 @@ impl WeightInfo for () { /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211 + s * (1 ±0)` - // Estimated: `5252 + s * (1 ±0)` - // Minimum execution time: 17_284 nanoseconds. - Weight::from_parts(17_574_000, 5252) + // Measured: `179 + s * (1 ±0)` + // Estimated: `3644 + s * (1 ±0)` + // Minimum execution time: 20_150_000 picoseconds. + Weight::from_parts(20_271_000, 3644) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_126, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_132, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -266,42 +266,42 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_020 nanoseconds. - Weight::from_parts(7_262_000, 0) + // Minimum execution time: 7_451_000 picoseconds. + Weight::from_parts(7_693_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_187 nanoseconds. - Weight::from_parts(5_368_000, 0) + // Minimum execution time: 5_477_000 picoseconds. 
+ Weight::from_parts(5_733_000, 0) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_313 nanoseconds. - Weight::from_parts(2_404_000, 0) + // Minimum execution time: 2_675_000 picoseconds. + Weight::from_parts(2_870_000, 0) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_187 nanoseconds. - Weight::from_parts(2_362_000, 0) + // Minimum execution time: 2_697_000 picoseconds. + Weight::from_parts(2_807_000, 0) } /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112 + s * (177 ±0)` - // Estimated: `109497` - // Minimum execution time: 11_971 nanoseconds. - Weight::from_parts(16_060_361, 109497) - // Standard Error: 665 - .saturating_add(Weight::from_parts(286_324, 0).saturating_mul(s.into())) + // Measured: `81 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 13_921_000 picoseconds. + Weight::from_parts(18_717_223, 110487) + // Standard Error: 771 + .saturating_add(Weight::from_parts(333_102, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -312,12 +312,12 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 512]`. fn cancel(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112 + s * (177 ±0)` - // Estimated: `109497` - // Minimum execution time: 15_594 nanoseconds. - Weight::from_parts(17_191_501, 109497) - // Standard Error: 626 - .saturating_add(Weight::from_parts(425_572, 0).saturating_mul(s.into())) + // Measured: `81 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 17_552_000 picoseconds. + Weight::from_parts(19_006_016, 110487) + // Standard Error: 1_115 + .saturating_add(Weight::from_parts(495_979, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -328,12 +328,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `627 + s * (178 ±0)` - // Estimated: `112020` - // Minimum execution time: 15_127 nanoseconds. - Weight::from_parts(20_932_642, 112020) - // Standard Error: 692 - .saturating_add(Weight::from_parts(288_344, 0).saturating_mul(s.into())) + // Measured: `596 + s * (178 ±0)` + // Estimated: `110487` + // Minimum execution time: 17_240_000 picoseconds. + Weight::from_parts(24_376_370, 110487) + // Standard Error: 928 + .saturating_add(Weight::from_parts(331_209, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -344,12 +344,12 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `740 + s * (177 ±0)` - // Estimated: `112020` - // Minimum execution time: 16_859 nanoseconds. - Weight::from_parts(19_736_937, 112020) - // Standard Error: 676 - .saturating_add(Weight::from_parts(429_770, 0).saturating_mul(s.into())) + // Measured: `709 + s * (177 ±0)` + // Estimated: `110487` + // Minimum execution time: 19_731_000 picoseconds. 
+ Weight::from_parts(23_787_948, 110487) + // Standard Error: 1_133 + .saturating_add(Weight::from_parts(503_805, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index d44fc1b2f1548..f38743e8b5f96 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index 336a69a798477..8bd44e2ffac8a 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -70,7 +70,7 @@ //! //! #[pallet::call] //! impl Pallet { -//! #[pallet::weight(0)] +//! #[pallet::weight({0})] //! pub fn candidate(origin: OriginFor) -> DispatchResult { //! let who = ensure_signed(origin)?; //! @@ -311,7 +311,7 @@ pub mod pallet { /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. #[pallet::call_index(0)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn submit_candidacy(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!>::contains_key(&who), Error::::AlreadyInPool); @@ -341,7 +341,7 @@ pub mod pallet { /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. #[pallet::call_index(1)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn withdraw_candidacy(origin: OriginFor, index: u32) -> DispatchResult { let who = ensure_signed(origin)?; @@ -360,7 +360,7 @@ pub mod pallet { /// The `index` parameter of this function must be set to /// the index of `dest` in the `Pool`. #[pallet::call_index(2)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn kick( origin: OriginFor, dest: AccountIdLookupOf, @@ -385,7 +385,7 @@ pub mod pallet { /// The `index` parameter of this function must be set to /// the index of the `dest` in the `Pool`. #[pallet::call_index(3)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn score( origin: OriginFor, dest: AccountIdLookupOf, @@ -425,7 +425,7 @@ pub mod pallet { /// /// May only be called from root. #[pallet::call_index(4)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn change_member_count(origin: OriginFor, count: u32) -> DispatchResult { ensure_root(origin)?; Self::update_member_count(count).map_err(Into::into) diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index a91b1a60f4974..f10a1320ef83c 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -91,6 +91,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! 
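// Sketch, not part of the patch: with the freeze/hold work reflected above, a mock
// `pallet_balances::Config` now also has to name the four new associated types.
// Everything else here (`Test`, `System`, `RuntimeEvent`, `ConstU64`, the lock/reserve
// items) is assumed to exist in the mock runtime, as in the configurations in this diff.
impl pallet_balances::Config for Test {
    type Balance = u64;
    type RuntimeEvent = RuntimeEvent;
    type DustRemoval = ();
    type ExistentialDeposit = ConstU64<1>;
    type AccountStore = System;
    type WeightInfo = ();
    type MaxLocks = ();               // assumed defaults, mirroring the existing mocks
    type MaxReserves = ();
    type ReserveIdentifier = [u8; 8];
    // Newly required items: `()` leaves freezes and holds effectively disabled in tests.
    type FreezeIdentifier = ();
    type MaxFreezes = ();
    type HoldIdentifier = ();
    type MaxHolds = ();
}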
{ diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index bd28bffffd8d9..ccf3f7d86e497 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 4c4accbbfac8f..b7671255f68fb 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -84,6 +84,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<10>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_timestamp::Config for Test { diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index ed5f80e6099c1..a9f89412a71c4 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -678,7 +678,7 @@ impl Pallet { // since a new validator set always leads to `changed` starting // as true, we can ensure that `now_session_keys` and `next_validators` // have the same length. this function is called once per iteration. - if let Some(&(_, ref old_keys)) = now_session_keys.next() { + if let Some((_, old_keys)) = now_session_keys.next() { if old_keys != keys { changed = true; } diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index c01799ce53050..add7f333590e1 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_session //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -63,10 +63,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1955` - // Estimated: `19851` - // Minimum execution time: 40_867 nanoseconds. - Weight::from_parts(41_319_000, 19851) + // Measured: `1891` + // Estimated: `12781` + // Minimum execution time: 48_507_000 picoseconds. + Weight::from_parts(49_214_000, 12781) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -78,10 +78,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1854` - // Estimated: `9749` - // Minimum execution time: 30_286 nanoseconds. 
- Weight::from_parts(30_620_000, 9749) + // Measured: `1758` + // Estimated: `5223` + // Minimum execution time: 35_388_000 picoseconds. + Weight::from_parts(35_763_000, 5223) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -97,10 +97,10 @@ impl WeightInfo for () { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1955` - // Estimated: `19851` - // Minimum execution time: 40_867 nanoseconds. - Weight::from_parts(41_319_000, 19851) + // Measured: `1891` + // Estimated: `12781` + // Minimum execution time: 48_507_000 picoseconds. + Weight::from_parts(49_214_000, 12781) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -112,10 +112,10 @@ impl WeightInfo for () { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1854` - // Estimated: `9749` - // Minimum execution time: 30_286 nanoseconds. - Weight::from_parts(30_620_000, 9749) + // Measured: `1758` + // Estimated: `5223` + // Minimum execution time: 35_388_000 picoseconds. + Weight::from_parts(35_763_000, 5223) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index ddc6ea6aac7d7..ab74e39e4f83b 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } rand_chacha = { version = "0.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 73565dedc7cd1..9f72febc2106e 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -93,6 +93,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl Config for Test { diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 79c0bb5c2a32d..c20f003108d76 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -17,7 +17,7 @@ serde = { version = "1.0.136", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } diff --git 
a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index c761517ea6829..4a97d20a5f0af 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -16,9 +16,9 @@ proc-macro = true [dependencies] proc-macro-crate = "1.1.3" -proc-macro2 = "1.0.37" -quote = "1.0.10" -syn = { version = "1.0.98", features = ["full", "visit"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full", "visit"] } [dev-dependencies] sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index d6345c2161f73..28d970b9121e7 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -311,6 +311,7 @@ use sp_runtime::{ traits::{AtLeast32BitUnsigned, Convert, Saturating, StaticLookup, Zero}, Perbill, Perquintill, Rounding, RuntimeDebug, }; +pub use sp_staking::StakerStatus; use sp_staking::{ offence::{Offence, OffenceError, ReportOffence}, EraIndex, SessionIndex, @@ -381,18 +382,6 @@ impl Default for EraRewardPoints { } } -/// Indicates the initial status of the staker. -#[derive(RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize, Clone))] -pub enum StakerStatus { - /// Chilling. - Idle, - /// Declared desire in validating or already participating in it. - Validator, - /// Nominating for a group of other stakers. - Nominator(Vec), -} - /// A destination account for payment. #[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum RewardDestination { diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index e876940cb92d3..c2f559a9780eb 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -157,6 +157,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } sp_runtime::impl_opaque_keys! { diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 760345e8ddb28..984871f7f9813 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -22,6 +22,7 @@ use frame_election_provider_support::{ SortedListProvider, VoteWeight, VoterOf, }; use frame_support::{ + defensive, dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ @@ -1230,7 +1231,7 @@ impl historical::SessionManager pallet_authorship::EventHandler for Pallet where T: Config + pallet_authorship::Config + pallet_session::Config, @@ -1299,8 +1300,8 @@ where add_db_reads_writes(1, 0); // Reverse because it's more likely to find reports from recent eras. - match eras.iter().rev().find(|&&(_, ref sesh)| sesh <= &slash_session) { - Some(&(ref slash_era, _)) => *slash_era, + match eras.iter().rev().find(|&(_, sesh)| sesh <= &slash_session) { + Some((slash_era, _)) => *slash_era, // Before bonding period. defensive - should be filtered out. 
None => return consumed_weight, } @@ -1608,10 +1609,10 @@ impl StakingInterface for Pallet { Self::current_era().unwrap_or(Zero::zero()) } - fn stake(who: &Self::AccountId) -> Result, DispatchError> { + fn stake(who: &Self::AccountId) -> Result>, DispatchError> { Self::bonded(who) .and_then(|c| Self::ledger(c)) - .map(|l| Stake { stash: l.stash, total: l.total, active: l.active }) + .map(|l| Stake { total: l.total, active: l.active }) .ok_or(Error::::NotStash.into()) } @@ -1662,8 +1663,33 @@ impl StakingInterface for Pallet { Self::nominate(RawOrigin::Signed(ctrl).into(), targets) } + fn status( + who: &Self::AccountId, + ) -> Result, DispatchError> { + let is_bonded = Self::bonded(who).is_some(); + if !is_bonded { + return Err(Error::::NotStash.into()) + } + + let is_validator = Validators::::contains_key(&who); + let is_nominator = Nominators::::get(&who); + + use sp_staking::StakerStatus; + match (is_validator, is_nominator.is_some()) { + (false, false) => Ok(StakerStatus::Idle), + (true, false) => Ok(StakerStatus::Validator), + (false, true) => Ok(StakerStatus::Nominator( + is_nominator.expect("is checked above; qed").targets.into_inner(), + )), + (true, true) => { + defensive!("cannot be both validators and nominator"); + Err(Error::::BadState.into()) + }, + } + } + sp_staking::runtime_benchmarks_enabled! { - fn nominations(who: Self::AccountId) -> Option> { + fn nominations(who: &Self::AccountId) -> Option> { Nominators::::get(who).map(|n| n.targets.into_inner()) } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 966eae8c1da9a..affee60026500 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -30,7 +30,7 @@ use pallet_balances::Error as BalancesError; use sp_runtime::{ assert_eq_error_rate, traits::{BadOrigin, Dispatchable}, - Perbill, Percent, Rounding, + Perbill, Percent, Rounding, TokenError, }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, @@ -98,8 +98,8 @@ fn force_unstake_works() { add_slash(&11); // Cant transfer assert_noop!( - Balances::transfer(RuntimeOrigin::signed(11), 1, 10), - BalancesError::::LiquidityRestrictions + Balances::transfer_allow_death(RuntimeOrigin::signed(11), 1, 10), + TokenError::Frozen, ); // Force unstake requires root. assert_noop!(Staking::force_unstake(RuntimeOrigin::signed(11), 11, 2), BadOrigin); @@ -113,7 +113,7 @@ fn force_unstake_works() { // No longer bonded. assert_eq!(Staking::bonded(&11), None); // Transfer works. 
- assert_ok!(Balances::transfer(RuntimeOrigin::signed(11), 1, 10)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(11), 1, 10)); }); } @@ -960,14 +960,14 @@ fn cannot_transfer_staked_balance() { assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( - Balances::transfer(RuntimeOrigin::signed(11), 20, 1), - BalancesError::::LiquidityRestrictions + Balances::transfer_allow_death(RuntimeOrigin::signed(11), 20, 1), + TokenError::Frozen, ); // Give account 11 extra free balance let _ = Balances::make_free_balance_be(&11, 10000); // Confirm that account 11 can now transfer some balance - assert_ok!(Balances::transfer(RuntimeOrigin::signed(11), 20, 1)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(11), 20, 1)); }); } @@ -985,10 +985,10 @@ fn cannot_transfer_staked_balance_2() { assert_eq!(Staking::eras_stakers(active_era(), 21).total, 1000); // Confirm account 21 can transfer at most 1000 assert_noop!( - Balances::transfer(RuntimeOrigin::signed(21), 20, 1001), - BalancesError::::LiquidityRestrictions + Balances::transfer_allow_death(RuntimeOrigin::signed(21), 20, 1001), + TokenError::Frozen, ); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(21), 20, 1000)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(21), 20, 1000)); }); } @@ -4187,7 +4187,7 @@ fn payout_creates_controller() { bond_nominator(1234, 1337, 100, vec![11]); // kill controller - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1337), 1234, 100)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1337), 1234, 100)); assert_eq!(Balances::free_balance(1337), 0); mock::start_active_era(1); @@ -5461,7 +5461,7 @@ fn proportional_ledger_slash_works() { ledger.active = unit; ledger.total = unit * 4 + value; // When - assert_eq!(ledger.slash(slash, 0, 0), slash - 5); + assert_eq!(ledger.slash(slash, 0, 0), slash); // Then // The amount slashed out of `unit` let affected_balance = value + unit * 4; @@ -5477,12 +5477,12 @@ fn proportional_ledger_slash_works() { value - value_slash }; assert_eq!(ledger.active, unit_slashed); - assert_eq!(ledger.unlocking, vec![c(5, value_slashed)]); - assert_eq!(ledger.total, value_slashed); + assert_eq!(ledger.unlocking, vec![c(5, value_slashed), c(7, 32)]); + assert_eq!(ledger.total, value_slashed + 32); assert_eq!(LedgerSlashPerEra::get().0, 0); assert_eq!( LedgerSlashPerEra::get().1, - BTreeMap::from([(4, 0), (5, value_slashed), (6, 0), (7, 0)]) + BTreeMap::from([(4, 0), (5, value_slashed), (6, 0), (7, 32)]) ); } @@ -5824,4 +5824,27 @@ mod staking_interface { )); }); } + + #[test] + fn status() { + ExtBuilder::default().build_and_execute(|| { + // stash of a validator is identified as a validator + assert_eq!(Staking::status(&11).unwrap(), StakerStatus::Validator); + // .. but not the controller. + assert!(Staking::status(&10).is_err()); + + // stash of nominator is identified as a nominator + assert_eq!(Staking::status(&101).unwrap(), StakerStatus::Nominator(vec![11, 21])); + // .. but not the controller. + assert!(Staking::status(&100).is_err()); + + // stash of chilled is identified as a chilled + assert_eq!(Staking::status(&41).unwrap(), StakerStatus::Idle); + // .. but not the controller. + assert!(Staking::status(&40).is_err()); + + // random other account. 
+ assert!(Staking::status(&42).is_err()); + }) + } } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index d1814d89d4d89..34b01445d965a 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -91,15 +91,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `1079` - // Estimated: `10386` - // Minimum execution time: 40_015 nanoseconds. - Weight::from_parts(40_601_000, 10386) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 54_907_000 picoseconds. + Weight::from_parts(55_685_000, 4764) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: Staking Bonded (r:1 w:0) @@ -108,17 +110,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn bond_extra() -> Weight { // Proof Size summary in bytes: - // Measured: `2252` - // Estimated: `22888` - // Minimum execution time: 74_781 nanoseconds. - Weight::from_parts(75_188_000, 22888) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `2028` + // Estimated: `8877` + // Minimum execution time: 94_779_000 picoseconds. 
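// Sketch, not part of the patch: the `status` test above exercises the new
// `StakingInterface::status`, which reports the relocated `sp_staking::StakerStatus`
// for a stash account. A downstream consumer generic over the interface could use it
// roughly like this; the helper name and bound are illustrative only.
fn is_actively_staking<Staking: sp_staking::StakingInterface>(who: &Staking::AccountId) -> bool {
    use sp_staking::StakerStatus;
    match Staking::status(who) {
        Ok(StakerStatus::Validator) => true,
        Ok(StakerStatus::Nominator(_targets)) => true,
        // Bonded, but neither validating nor nominating.
        Ok(StakerStatus::Idle) => false,
        // Not a stash at all (e.g. a controller or a random account).
        Err(_) => false,
    }
}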
+ Weight::from_parts(95_455_000, 8877) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: Staking Ledger (r:1 w:1) @@ -131,6 +135,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) @@ -141,11 +147,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `2457` - // Estimated: `29534` - // Minimum execution time: 81_299 nanoseconds. - Weight::from_parts(82_242_000, 29534) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Measured: `2233` + // Estimated: `8877` + // Minimum execution time: 98_004_000 picoseconds. + Weight::from_parts(98_730_000, 8877) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: Staking Ledger (r:1 w:1) @@ -154,18 +160,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1085` - // Estimated: `10442` - // Minimum execution time: 31_479 nanoseconds. - Weight::from_parts(32_410_035, 10442) - // Standard Error: 313 - .saturating_add(Weight::from_parts(9_090, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `1021` + // Estimated: `4764` + // Minimum execution time: 45_888_000 picoseconds. 
+ Weight::from_parts(47_568_327, 4764) + // Standard Error: 402 + .saturating_add(Weight::from_parts(7_520, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Staking Ledger (r:1 w:1) @@ -192,6 +200,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Staking SpanSlash (r:0 w:100) @@ -199,13 +209,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2486 + s * (4 ±0)` - // Estimated: `32303 + s * (4 ±0)` - // Minimum execution time: 71_968 nanoseconds. - Weight::from_parts(76_631_804, 32303) - // Standard Error: 1_613 - .saturating_add(Weight::from_parts(1_058_968, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + // Measured: `2294 + s * (4 ±0)` + // Estimated: `6248 + s * (4 ±0)` + // Minimum execution time: 93_288_000 picoseconds. + Weight::from_parts(99_415_523, 6248) + // Standard Error: 3_291 + .saturating_add(Weight::from_parts(1_296_734, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -234,10 +244,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn validate() -> Weight { // Proof Size summary in bytes: - // Measured: `1446` - // Estimated: `19359` - // Minimum execution time: 51_963 nanoseconds. - Weight::from_parts(52_418_000, 19359) + // Measured: `1414` + // Estimated: `4556` + // Minimum execution time: 58_755_000 picoseconds. + Weight::from_parts(59_424_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -248,12 +258,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1292 + k * (601 ±0)` - // Estimated: `3566 + k * (3033 ±0)` - // Minimum execution time: 25_685 nanoseconds. - Weight::from_parts(25_290_286, 3566) - // Standard Error: 5_164 - .saturating_add(Weight::from_parts(6_445_608, 0).saturating_mul(k.into())) + // Measured: `1260 + k * (569 ±0)` + // Estimated: `4556 + k * (3033 ±0)` + // Minimum execution time: 29_399_000 picoseconds. + Weight::from_parts(30_443_621, 4556) + // Standard Error: 10_402 + .saturating_add(Weight::from_parts(7_890_220, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -284,12 +294,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1984 + n * (105 ±0)` - // Estimated: `21988 + n * (2520 ±0)` - // Minimum execution time: 59_542 nanoseconds. - Weight::from_parts(57_558_678, 21988) - // Standard Error: 10_364 - .saturating_add(Weight::from_parts(2_759_713, 0).saturating_mul(n.into())) + // Measured: `1888 + n * (105 ±0)` + // Estimated: `6248 + n * (2520 ±0)` + // Minimum execution time: 68_471_000 picoseconds. + Weight::from_parts(65_972_990, 6248) + // Standard Error: 13_983 + .saturating_add(Weight::from_parts(3_255_731, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -311,10 +321,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1876` - // Estimated: `17932` - // Minimum execution time: 52_132 nanoseconds. - Weight::from_parts(52_648_000, 17932) + // Measured: `1748` + // Estimated: `6248` + // Minimum execution time: 59_537_000 picoseconds. + Weight::from_parts(60_446_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -324,10 +334,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `840` - // Estimated: `3566` - // Minimum execution time: 13_399 nanoseconds. - Weight::from_parts(13_567_000, 3566) + // Measured: `808` + // Estimated: `4556` + // Minimum execution time: 15_403_000 picoseconds. + Weight::from_parts(15_676_000, 4556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -337,10 +347,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) fn set_controller() -> Weight { // Proof Size summary in bytes: - // Measured: `939` - // Estimated: `9679` - // Minimum execution time: 20_425 nanoseconds. - Weight::from_parts(20_713_000, 9679) + // Measured: `907` + // Estimated: `8122` + // Minimum execution time: 23_316_000 picoseconds. + Weight::from_parts(23_670_000, 8122) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -350,8 +360,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_069 nanoseconds. - Weight::from_parts(3_176_000, 0) + // Minimum execution time: 3_558_000 picoseconds. + Weight::from_parts(3_759_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -360,8 +370,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_386 nanoseconds. - Weight::from_parts(11_672_000, 0) + // Minimum execution time: 12_724_000 picoseconds. + Weight::from_parts(13_047_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -370,8 +380,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_591 nanoseconds. 
- Weight::from_parts(11_799_000, 0) + // Minimum execution time: 12_734_000 picoseconds. + Weight::from_parts(13_218_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -380,8 +390,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_553 nanoseconds. - Weight::from_parts(11_871_000, 0) + // Minimum execution time: 12_996_000 picoseconds. + Weight::from_parts(13_375_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking Invulnerables (r:0 w:1) @@ -391,10 +401,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_292 nanoseconds. - Weight::from_parts(3_754_352, 0) - // Standard Error: 40 - .saturating_add(Weight::from_parts(9_838, 0).saturating_mul(v.into())) + // Minimum execution time: 3_920_000 picoseconds. + Weight::from_parts(4_619_469, 0) + // Standard Error: 22 + .saturating_add(Weight::from_parts(10_108, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking Bonded (r:1 w:1) @@ -417,6 +427,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Ledger (r:0 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) @@ -426,13 +438,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2178 + s * (4 ±0)` - // Estimated: `27930 + s * (4 ±0)` - // Minimum execution time: 65_307 nanoseconds. - Weight::from_parts(70_227_980, 27930) - // Standard Error: 2_113 - .saturating_add(Weight::from_parts(1_059_856, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `2018 + s * (4 ±0)` + // Estimated: `6248 + s * (4 ±0)` + // Minimum execution time: 86_516_000 picoseconds. + Weight::from_parts(92_324_464, 6248) + // Standard Error: 2_925 + .saturating_add(Weight::from_parts(1_286_284, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -442,12 +454,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `66671` - // Estimated: `69146` - // Minimum execution time: 89_123 nanoseconds. - Weight::from_parts(890_989_741, 69146) - // Standard Error: 58_282 - .saturating_add(Weight::from_parts(4_920_413, 0).saturating_mul(s.into())) + // Measured: `66639` + // Estimated: `70104` + // Minimum execution time: 90_193_000 picoseconds. 
+ Weight::from_parts(821_522_318, 70104) + // Standard Error: 57_922 + .saturating_add(Weight::from_parts(4_554_659, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -472,17 +484,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `20345 + n * (143 ±0)` - // Estimated: `54756 + n * (8024 ±1)` - // Minimum execution time: 73_652 nanoseconds. - Weight::from_parts(127_839_483, 54756) - // Standard Error: 14_195 - .saturating_add(Weight::from_parts(21_932_079, 0).saturating_mul(n.into())) + // Measured: `20217 + n * (143 ±0)` + // Estimated: `19844 + n * (2603 ±1)` + // Minimum execution time: 80_329_000 picoseconds. + Weight::from_parts(97_340_643, 19844) + // Standard Error: 22_713 + .saturating_add(Weight::from_parts(29_087_425, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 8024).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) } /// Storage: Staking CurrentEra (r:1 w:0) /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -504,25 +516,29 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:257 w:257) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:257 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `35099 + n * (465 ±0)` - // Estimated: `83594 + n * (16026 ±0)` - // Minimum execution time: 94_560 nanoseconds. - Weight::from_parts(154_033_219, 83594) - // Standard Error: 26_663 - .saturating_add(Weight::from_parts(31_269_223, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into()))) + // Measured: `34971 + n * (401 ±0)` + // Estimated: `32376 + n * (3774 ±0)` + // Minimum execution time: 105_591_000 picoseconds. 
+ Weight::from_parts(111_587_915, 32376) + // Standard Error: 15_598 + .saturating_add(Weight::from_parts(48_948_195, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 16026).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into())) } /// Storage: Staking Ledger (r:1 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) @@ -534,13 +550,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2253 + l * (7 ±0)` - // Estimated: `25491` - // Minimum execution time: 74_764 nanoseconds. - Weight::from_parts(75_814_067, 25491) - // Standard Error: 1_217 - .saturating_add(Weight::from_parts(64_725, 0).saturating_mul(l.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `2029 + l * (7 ±0)` + // Estimated: `8877` + // Minimum execution time: 89_420_000 picoseconds. + Weight::from_parts(90_743_615, 8877) + // Standard Error: 1_260 + .saturating_add(Weight::from_parts(50_832, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: System Account (r:1 w:1) @@ -565,6 +581,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Staking SpanSlash (r:0 w:100) @@ -572,13 +590,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2486 + s * (4 ±0)` - // Estimated: `31810 + s * (4 ±0)` - // Minimum execution time: 77_611 nanoseconds. - Weight::from_parts(79_760_034, 31810) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_039_268, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Measured: `2294 + s * (4 ±0)` + // Estimated: `6248 + s * (4 ±0)` + // Minimum execution time: 100_911_000 picoseconds. 
+ Weight::from_parts(102_678_006, 6248) + // Standard Error: 2_349 + .saturating_add(Weight::from_parts(1_262_431, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -621,21 +639,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + v * (3662 ±0) + n * (816 ±0)` - // Estimated: `528203 + v * (16743 ±0) + n * (12947 ±0)` - // Minimum execution time: 489_824 nanoseconds. - Weight::from_parts(491_687_000, 528203) - // Standard Error: 1_787_577 - .saturating_add(Weight::from_parts(58_719_498, 0).saturating_mul(v.into())) - // Standard Error: 178_122 - .saturating_add(Weight::from_parts(13_273_555, 0).saturating_mul(n.into())) + // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` + // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` + // Minimum execution time: 554_712_000 picoseconds. + Weight::from_parts(556_603_000, 512390) + // Standard Error: 1_925_251 + .saturating_add(Weight::from_parts(62_627_196, 0).saturating_mul(v.into())) + // Standard Error: 191_840 + .saturating_add(Weight::from_parts(16_681_790, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 16743).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 12947).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } /// Storage: VoterList CounterForListNodes (r:1 w:0) /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -657,20 +675,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3167 + v * (459 ±0) + n * (1007 ±0)` - // Estimated: `511899 + v * (14295 ±0) + n * (11775 ±0)` - // Minimum execution time: 23_373_467 nanoseconds. - Weight::from_parts(23_497_257_000, 511899) - // Standard Error: 299_205 - .saturating_add(Weight::from_parts(3_434_000, 0).saturating_mul(v.into())) - // Standard Error: 299_205 - .saturating_add(Weight::from_parts(2_568_954, 0).saturating_mul(n.into())) + // Measured: `3135 + n * (911 ±0) + v * (395 ±0)` + // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` + // Minimum execution time: 31_770_670_000 picoseconds. 
+ Weight::from_parts(31_839_042_000, 512390) + // Standard Error: 355_382 + .saturating_add(Weight::from_parts(5_044_540, 0).saturating_mul(v.into())) + // Standard Error: 355_382 + .saturating_add(Weight::from_parts(3_205_722, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 14295).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 11775).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } /// Storage: Staking CounterForValidators (r:1 w:0) /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -680,11 +698,11 @@ impl WeightInfo for SubstrateWeight { fn get_npos_targets(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `983 + v * (50 ±0)` - // Estimated: `3019 + v * (2520 ±0)` - // Minimum execution time: 3_882_120 nanoseconds. - Weight::from_parts(3_951_993_000, 3019) - // Standard Error: 46_729 - .saturating_add(Weight::from_parts(2_856_043, 0).saturating_mul(v.into())) + // Estimated: `3510 + v * (2520 ±0)` + // Minimum execution time: 2_253_567_000 picoseconds. + Weight::from_parts(61_440_613, 3510) + // Standard Error: 5_276 + .saturating_add(Weight::from_parts(4_414_153, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -705,8 +723,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_427 nanoseconds. - Weight::from_parts(8_794_000, 0) + // Minimum execution time: 9_292_000 picoseconds. + Weight::from_parts(9_587_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Staking MinCommission (r:0 w:1) @@ -725,8 +743,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_620 nanoseconds. - Weight::from_parts(7_901_000, 0) + // Minimum execution time: 8_294_000 picoseconds. + Weight::from_parts(8_597_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Staking Ledger (r:1 w:0) @@ -751,10 +769,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `2031` - // Estimated: `19438` - // Minimum execution time: 66_188 nanoseconds. - Weight::from_parts(66_767_000, 19438) + // Measured: `1871` + // Estimated: `6248` + // Minimum execution time: 75_742_000 picoseconds. + Weight::from_parts(76_252_000, 6248) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -765,9 +783,9 @@ impl WeightInfo for SubstrateWeight { fn force_apply_min_commission() -> Weight { // Proof Size summary in bytes: // Measured: `694` - // Estimated: `3019` - // Minimum execution time: 14_703 nanoseconds. - Weight::from_parts(15_031_000, 3019) + // Estimated: `3510` + // Minimum execution time: 16_407_000 picoseconds. 
+ Weight::from_parts(16_726_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -777,8 +795,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_518 nanoseconds. - Weight::from_parts(4_656_000, 0) + // Minimum execution time: 4_977_000 picoseconds. + Weight::from_parts(5_224_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -793,15 +811,17 @@ impl WeightInfo for () { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `1079` - // Estimated: `10386` - // Minimum execution time: 40_015 nanoseconds. - Weight::from_parts(40_601_000, 10386) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 54_907_000 picoseconds. + Weight::from_parts(55_685_000, 4764) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: Staking Bonded (r:1 w:0) @@ -810,17 +830,19 @@ impl WeightInfo for () { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: VoterList ListBags (r:2 w:2) /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn bond_extra() -> Weight { // Proof Size summary in bytes: - // Measured: `2252` - // Estimated: `22888` - // Minimum execution time: 74_781 nanoseconds. - Weight::from_parts(75_188_000, 22888) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `2028` + // Estimated: `8877` + // Minimum execution time: 94_779_000 picoseconds. 
+ Weight::from_parts(95_455_000, 8877) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: Staking Ledger (r:1 w:1) @@ -833,6 +855,8 @@ impl WeightInfo for () { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) @@ -843,11 +867,11 @@ impl WeightInfo for () { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `2457` - // Estimated: `29534` - // Minimum execution time: 81_299 nanoseconds. - Weight::from_parts(82_242_000, 29534) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Measured: `2233` + // Estimated: `8877` + // Minimum execution time: 98_004_000 picoseconds. + Weight::from_parts(98_730_000, 8877) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: Staking Ledger (r:1 w:1) @@ -856,18 +880,20 @@ impl WeightInfo for () { /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1085` - // Estimated: `10442` - // Minimum execution time: 31_479 nanoseconds. - Weight::from_parts(32_410_035, 10442) - // Standard Error: 313 - .saturating_add(Weight::from_parts(9_090, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `1021` + // Estimated: `4764` + // Minimum execution time: 45_888_000 picoseconds. + Weight::from_parts(47_568_327, 4764) + // Standard Error: 402 + .saturating_add(Weight::from_parts(7_520, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Staking Ledger (r:1 w:1) @@ -894,6 +920,8 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Staking SpanSlash (r:0 w:100) @@ -901,13 +929,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. 
fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2486 + s * (4 ±0)` - // Estimated: `32303 + s * (4 ±0)` - // Minimum execution time: 71_968 nanoseconds. - Weight::from_parts(76_631_804, 32303) - // Standard Error: 1_613 - .saturating_add(Weight::from_parts(1_058_968, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + // Measured: `2294 + s * (4 ±0)` + // Estimated: `6248 + s * (4 ±0)` + // Minimum execution time: 93_288_000 picoseconds. + Weight::from_parts(99_415_523, 6248) + // Standard Error: 3_291 + .saturating_add(Weight::from_parts(1_296_734, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -936,10 +964,10 @@ impl WeightInfo for () { /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn validate() -> Weight { // Proof Size summary in bytes: - // Measured: `1446` - // Estimated: `19359` - // Minimum execution time: 51_963 nanoseconds. - Weight::from_parts(52_418_000, 19359) + // Measured: `1414` + // Estimated: `4556` + // Minimum execution time: 58_755_000 picoseconds. + Weight::from_parts(59_424_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -950,12 +978,12 @@ impl WeightInfo for () { /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1292 + k * (601 ±0)` - // Estimated: `3566 + k * (3033 ±0)` - // Minimum execution time: 25_685 nanoseconds. - Weight::from_parts(25_290_286, 3566) - // Standard Error: 5_164 - .saturating_add(Weight::from_parts(6_445_608, 0).saturating_mul(k.into())) + // Measured: `1260 + k * (569 ±0)` + // Estimated: `4556 + k * (3033 ±0)` + // Minimum execution time: 29_399_000 picoseconds. + Weight::from_parts(30_443_621, 4556) + // Standard Error: 10_402 + .saturating_add(Weight::from_parts(7_890_220, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -986,12 +1014,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1984 + n * (105 ±0)` - // Estimated: `21988 + n * (2520 ±0)` - // Minimum execution time: 59_542 nanoseconds. - Weight::from_parts(57_558_678, 21988) - // Standard Error: 10_364 - .saturating_add(Weight::from_parts(2_759_713, 0).saturating_mul(n.into())) + // Measured: `1888 + n * (105 ±0)` + // Estimated: `6248 + n * (2520 ±0)` + // Minimum execution time: 68_471_000 picoseconds. 
+ Weight::from_parts(65_972_990, 6248) + // Standard Error: 13_983 + .saturating_add(Weight::from_parts(3_255_731, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1013,10 +1041,10 @@ impl WeightInfo for () { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1876` - // Estimated: `17932` - // Minimum execution time: 52_132 nanoseconds. - Weight::from_parts(52_648_000, 17932) + // Measured: `1748` + // Estimated: `6248` + // Minimum execution time: 59_537_000 picoseconds. + Weight::from_parts(60_446_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1026,10 +1054,10 @@ impl WeightInfo for () { /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `840` - // Estimated: `3566` - // Minimum execution time: 13_399 nanoseconds. - Weight::from_parts(13_567_000, 3566) + // Measured: `808` + // Estimated: `4556` + // Minimum execution time: 15_403_000 picoseconds. + Weight::from_parts(15_676_000, 4556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1039,10 +1067,10 @@ impl WeightInfo for () { /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) fn set_controller() -> Weight { // Proof Size summary in bytes: - // Measured: `939` - // Estimated: `9679` - // Minimum execution time: 20_425 nanoseconds. - Weight::from_parts(20_713_000, 9679) + // Measured: `907` + // Estimated: `8122` + // Minimum execution time: 23_316_000 picoseconds. + Weight::from_parts(23_670_000, 8122) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1052,8 +1080,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_069 nanoseconds. - Weight::from_parts(3_176_000, 0) + // Minimum execution time: 3_558_000 picoseconds. + Weight::from_parts(3_759_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -1062,8 +1090,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_386 nanoseconds. - Weight::from_parts(11_672_000, 0) + // Minimum execution time: 12_724_000 picoseconds. + Weight::from_parts(13_047_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -1072,8 +1100,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_591 nanoseconds. - Weight::from_parts(11_799_000, 0) + // Minimum execution time: 12_734_000 picoseconds. + Weight::from_parts(13_218_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -1082,8 +1110,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_553 nanoseconds. - Weight::from_parts(11_871_000, 0) + // Minimum execution time: 12_996_000 picoseconds. 
+ Weight::from_parts(13_375_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking Invulnerables (r:0 w:1) @@ -1093,10 +1121,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_292 nanoseconds. - Weight::from_parts(3_754_352, 0) - // Standard Error: 40 - .saturating_add(Weight::from_parts(9_838, 0).saturating_mul(v.into())) + // Minimum execution time: 3_920_000 picoseconds. + Weight::from_parts(4_619_469, 0) + // Standard Error: 22 + .saturating_add(Weight::from_parts(10_108, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking Bonded (r:1 w:1) @@ -1119,6 +1147,8 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Ledger (r:0 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) @@ -1128,13 +1158,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2178 + s * (4 ±0)` - // Estimated: `27930 + s * (4 ±0)` - // Minimum execution time: 65_307 nanoseconds. - Weight::from_parts(70_227_980, 27930) - // Standard Error: 2_113 - .saturating_add(Weight::from_parts(1_059_856, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `2018 + s * (4 ±0)` + // Estimated: `6248 + s * (4 ±0)` + // Minimum execution time: 86_516_000 picoseconds. + Weight::from_parts(92_324_464, 6248) + // Standard Error: 2_925 + .saturating_add(Weight::from_parts(1_286_284, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -1144,12 +1174,12 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `66671` - // Estimated: `69146` - // Minimum execution time: 89_123 nanoseconds. - Weight::from_parts(890_989_741, 69146) - // Standard Error: 58_282 - .saturating_add(Weight::from_parts(4_920_413, 0).saturating_mul(s.into())) + // Measured: `66639` + // Estimated: `70104` + // Minimum execution time: 90_193_000 picoseconds. + Weight::from_parts(821_522_318, 70104) + // Standard Error: 57_922 + .saturating_add(Weight::from_parts(4_554_659, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1174,17 +1204,17 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `20345 + n * (143 ±0)` - // Estimated: `54756 + n * (8024 ±1)` - // Minimum execution time: 73_652 nanoseconds. 
- Weight::from_parts(127_839_483, 54756) - // Standard Error: 14_195 - .saturating_add(Weight::from_parts(21_932_079, 0).saturating_mul(n.into())) + // Measured: `20217 + n * (143 ±0)` + // Estimated: `19844 + n * (2603 ±1)` + // Minimum execution time: 80_329_000 picoseconds. + Weight::from_parts(97_340_643, 19844) + // Standard Error: 22_713 + .saturating_add(Weight::from_parts(29_087_425, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 8024).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) } /// Storage: Staking CurrentEra (r:1 w:0) /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -1206,25 +1236,29 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:257 w:257) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:257 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `35099 + n * (465 ±0)` - // Estimated: `83594 + n * (16026 ±0)` - // Minimum execution time: 94_560 nanoseconds. - Weight::from_parts(154_033_219, 83594) - // Standard Error: 26_663 - .saturating_add(Weight::from_parts(31_269_223, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(n.into()))) + // Measured: `34971 + n * (401 ±0)` + // Estimated: `32376 + n * (3774 ±0)` + // Minimum execution time: 105_591_000 picoseconds. + Weight::from_parts(111_587_915, 32376) + // Standard Error: 15_598 + .saturating_add(Weight::from_parts(48_948_195, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 16026).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into())) } /// Storage: Staking Ledger (r:1 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) @@ -1236,13 +1270,13 @@ impl WeightInfo for () { /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2253 + l * (7 ±0)` - // Estimated: `25491` - // Minimum execution time: 74_764 nanoseconds. 
- Weight::from_parts(75_814_067, 25491) - // Standard Error: 1_217 - .saturating_add(Weight::from_parts(64_725, 0).saturating_mul(l.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `2029 + l * (7 ±0)` + // Estimated: `8877` + // Minimum execution time: 89_420_000 picoseconds. + Weight::from_parts(90_743_615, 8877) + // Standard Error: 1_260 + .saturating_add(Weight::from_parts(50_832, 0).saturating_mul(l.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: System Account (r:1 w:1) @@ -1267,6 +1301,8 @@ impl WeightInfo for () { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: Staking Payee (r:0 w:1) /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Staking SpanSlash (r:0 w:100) @@ -1274,13 +1310,13 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2486 + s * (4 ±0)` - // Estimated: `31810 + s * (4 ±0)` - // Minimum execution time: 77_611 nanoseconds. - Weight::from_parts(79_760_034, 31810) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_039_268, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Measured: `2294 + s * (4 ±0)` + // Estimated: `6248 + s * (4 ±0)` + // Minimum execution time: 100_911_000 picoseconds. + Weight::from_parts(102_678_006, 6248) + // Standard Error: 2_349 + .saturating_add(Weight::from_parts(1_262_431, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -1323,21 +1359,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + v * (3662 ±0) + n * (816 ±0)` - // Estimated: `528203 + v * (16743 ±0) + n * (12947 ±0)` - // Minimum execution time: 489_824 nanoseconds. - Weight::from_parts(491_687_000, 528203) - // Standard Error: 1_787_577 - .saturating_add(Weight::from_parts(58_719_498, 0).saturating_mul(v.into())) - // Standard Error: 178_122 - .saturating_add(Weight::from_parts(13_273_555, 0).saturating_mul(n.into())) + // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` + // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` + // Minimum execution time: 554_712_000 picoseconds. 
+ Weight::from_parts(556_603_000, 512390) + // Standard Error: 1_925_251 + .saturating_add(Weight::from_parts(62_627_196, 0).saturating_mul(v.into())) + // Standard Error: 191_840 + .saturating_add(Weight::from_parts(16_681_790, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 16743).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 12947).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } /// Storage: VoterList CounterForListNodes (r:1 w:0) /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -1359,20 +1395,20 @@ impl WeightInfo for () { /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3167 + v * (459 ±0) + n * (1007 ±0)` - // Estimated: `511899 + v * (14295 ±0) + n * (11775 ±0)` - // Minimum execution time: 23_373_467 nanoseconds. - Weight::from_parts(23_497_257_000, 511899) - // Standard Error: 299_205 - .saturating_add(Weight::from_parts(3_434_000, 0).saturating_mul(v.into())) - // Standard Error: 299_205 - .saturating_add(Weight::from_parts(2_568_954, 0).saturating_mul(n.into())) + // Measured: `3135 + n * (911 ±0) + v * (395 ±0)` + // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` + // Minimum execution time: 31_770_670_000 picoseconds. + Weight::from_parts(31_839_042_000, 512390) + // Standard Error: 355_382 + .saturating_add(Weight::from_parts(5_044_540, 0).saturating_mul(v.into())) + // Standard Error: 355_382 + .saturating_add(Weight::from_parts(3_205_722, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 14295).saturating_mul(v.into())) - .saturating_add(Weight::from_parts(0, 11775).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } /// Storage: Staking CounterForValidators (r:1 w:0) /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -1382,11 +1418,11 @@ impl WeightInfo for () { fn get_npos_targets(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `983 + v * (50 ±0)` - // Estimated: `3019 + v * (2520 ±0)` - // Minimum execution time: 3_882_120 nanoseconds. - Weight::from_parts(3_951_993_000, 3019) - // Standard Error: 46_729 - .saturating_add(Weight::from_parts(2_856_043, 0).saturating_mul(v.into())) + // Estimated: `3510 + v * (2520 ±0)` + // Minimum execution time: 2_253_567_000 picoseconds. 
+ Weight::from_parts(61_440_613, 3510) + // Standard Error: 5_276 + .saturating_add(Weight::from_parts(4_414_153, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1407,8 +1443,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_427 nanoseconds. - Weight::from_parts(8_794_000, 0) + // Minimum execution time: 9_292_000 picoseconds. + Weight::from_parts(9_587_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Staking MinCommission (r:0 w:1) @@ -1427,8 +1463,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_620 nanoseconds. - Weight::from_parts(7_901_000, 0) + // Minimum execution time: 8_294_000 picoseconds. + Weight::from_parts(8_597_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Staking Ledger (r:1 w:0) @@ -1453,10 +1489,10 @@ impl WeightInfo for () { /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `2031` - // Estimated: `19438` - // Minimum execution time: 66_188 nanoseconds. - Weight::from_parts(66_767_000, 19438) + // Measured: `1871` + // Estimated: `6248` + // Minimum execution time: 75_742_000 picoseconds. + Weight::from_parts(76_252_000, 6248) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1467,9 +1503,9 @@ impl WeightInfo for () { fn force_apply_min_commission() -> Weight { // Proof Size summary in bytes: // Measured: `694` - // Estimated: `3019` - // Minimum execution time: 14_703 nanoseconds. - Weight::from_parts(15_031_000, 3019) + // Estimated: `3510` + // Minimum execution time: 16_407_000 picoseconds. + Weight::from_parts(16_726_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1479,8 +1515,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_518 nanoseconds. - Weight::from_parts(4_656_000, 0) + // Minimum execution time: 4_977_000 picoseconds. 
+ Weight::from_parts(5_224_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 36b5912a60302..9cd875932ceaf 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.133", optional = true } thousands = { version = "0.2.0", optional = true } -zstd = { version = "0.11.2", default-features = false, optional = true } +zstd = { version = "0.12.3", default-features = false, optional = true } frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 5385c6b5f4f46..1f6266d999825 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1128,6 +1128,10 @@ mod mock { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } /// Test only Weights for state migration. diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index e087ba185994f..8565dd73e0c46 100644 --- a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_state_trie_migration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -67,9 +67,9 @@ impl WeightInfo for SubstrateWeight { fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `2040` - // Minimum execution time: 15_563 nanoseconds. - Weight::from_parts(15_783_000, 2040) + // Estimated: `2527` + // Minimum execution time: 17_385_000 picoseconds. + Weight::from_parts(17_766_000, 2527) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -78,26 +78,26 @@ impl WeightInfo for SubstrateWeight { fn continue_migrate_wrong_witness() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `503` - // Minimum execution time: 4_347 nanoseconds. - Weight::from_parts(4_558_000, 503) + // Estimated: `1493` + // Minimum execution time: 4_537_000 picoseconds. + Weight::from_parts(4_734_000, 1493) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_817 nanoseconds. 
- Weight::from_parts(9_027_000, 0) + // Minimum execution time: 10_127_000 picoseconds. + Weight::from_parts(10_384_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `2619` - // Minimum execution time: 23_854 nanoseconds. - Weight::from_parts(24_850_000, 2619) + // Measured: `113` + // Estimated: `3578` + // Minimum execution time: 31_113_000 picoseconds. + Weight::from_parts(31_833_000, 3578) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -105,17 +105,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_020 nanoseconds. - Weight::from_parts(9_234_000, 0) + // Minimum execution time: 10_445_000 picoseconds. + Weight::from_parts(10_726_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `2611` - // Minimum execution time: 24_136 nanoseconds. - Weight::from_parts(24_810_000, 2611) + // Measured: `105` + // Estimated: `3570` + // Minimum execution time: 31_795_000 picoseconds. + Weight::from_parts(32_737_000, 3570) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,12 +124,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `228 + v * (1 ±0)` - // Estimated: `2700 + v * (1 ±0)` - // Minimum execution time: 5_279 nanoseconds. - Weight::from_parts(5_517_000, 2700) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_230, 0).saturating_mul(v.into())) + // Measured: `197 + v * (1 ±0)` + // Estimated: `3662 + v * (1 ±0)` + // Minimum execution time: 5_933_000 picoseconds. + Weight::from_parts(6_040_000, 3662) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) @@ -145,9 +145,9 @@ impl WeightInfo for () { fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `2040` - // Minimum execution time: 15_563 nanoseconds. - Weight::from_parts(15_783_000, 2040) + // Estimated: `2527` + // Minimum execution time: 17_385_000 picoseconds. + Weight::from_parts(17_766_000, 2527) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -156,26 +156,26 @@ impl WeightInfo for () { fn continue_migrate_wrong_witness() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `503` - // Minimum execution time: 4_347 nanoseconds. - Weight::from_parts(4_558_000, 503) + // Estimated: `1493` + // Minimum execution time: 4_537_000 picoseconds. + Weight::from_parts(4_734_000, 1493) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_817 nanoseconds. - Weight::from_parts(9_027_000, 0) + // Minimum execution time: 10_127_000 picoseconds. 
+ Weight::from_parts(10_384_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `2619` - // Minimum execution time: 23_854 nanoseconds. - Weight::from_parts(24_850_000, 2619) + // Measured: `113` + // Estimated: `3578` + // Minimum execution time: 31_113_000 picoseconds. + Weight::from_parts(31_833_000, 3578) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -183,17 +183,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_020 nanoseconds. - Weight::from_parts(9_234_000, 0) + // Minimum execution time: 10_445_000 picoseconds. + Weight::from_parts(10_726_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `2611` - // Minimum execution time: 24_136 nanoseconds. - Weight::from_parts(24_810_000, 2611) + // Measured: `105` + // Estimated: `3570` + // Minimum execution time: 31_795_000 picoseconds. + Weight::from_parts(32_737_000, 3570) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -202,12 +202,12 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `228 + v * (1 ±0)` - // Estimated: `2700 + v * (1 ±0)` - // Minimum execution time: 5_279 nanoseconds. - Weight::from_parts(5_517_000, 2700) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_230, 0).saturating_mul(v.into())) + // Measured: `197 + v * (1 ±0)` + // Estimated: `3662 + v * (1 ±0)` + // Minimum execution time: 5_933_000 picoseconds. + Weight::from_parts(6_040_000, 3662) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) diff --git a/frame/sudo-mangata/src/lib.rs b/frame/sudo-mangata/src/lib.rs index 6ecbbad33120f..f7293ac157a5b 100644 --- a/frame/sudo-mangata/src/lib.rs +++ b/frame/sudo-mangata/src/lib.rs @@ -220,7 +220,7 @@ pub mod pallet { /// - One DB change. 
/// # #[pallet::call_index(2)] - #[pallet::weight(0)] + #[pallet::weight({0})] pub fn set_key( origin: OriginFor, new: AccountIdLookupOf, diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index 56d04b172c268..e1161497b1744 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 47309833a0681..1bc206e0063e4 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -195,7 +195,7 @@ pub mod pallet { /// ## Complexity /// - O(1). #[pallet::call_index(2)] - #[pallet::weight(0)] + #[pallet::weight({0})] // FIXME pub fn set_key( origin: OriginFor, new: AccountIdLookupOf, diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index cffd38e3ce46f..4f13803b275a4 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] mangata-types = { version = "0.1.0", default-features = false, path = "../../primitives/mangata-types" } serde = { version = "1.0.136", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -frame-metadata = { version = "15.0.0", default-features = false, features = ["v14"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-metadata = { version = "15.1.0", default-features = false, features = ["v14", "v15-unstable"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } @@ -38,7 +38,7 @@ impl-trait-for-tuples = "0.2.2" smallvec = "1.8.0" log = { version = "0.4.17", default-features = false } sp-core-hashing-proc-macro = { version = "5.0.0", path = "../../primitives/core/hashing/proc-macro" } -k256 = { version = "0.11.5", default-features = false, features = ["ecdsa"] } +k256 = { version = "0.13.0", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } [dev-dependencies] @@ -75,11 +75,11 @@ runtime-benchmarks = [] try-runtime = [] # By default some types have documentation, `no-metadata-docs` allows to reduce the documentation # in the metadata. -no-metadata-docs = ["frame-support-procedural/no-metadata-docs"] +no-metadata-docs = ["frame-support-procedural/no-metadata-docs", "sp-api/no-metadata-docs"] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. full-metadata-docs = ["scale-info/docs"] # Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of # pallets in a runtime grows. 
Does increase the compile time! -tuples-96 = [] -tuples-128 = [] +tuples-96 = ["frame-support-procedural/tuples-96"] +tuples-128 = ["frame-support-procedural/tuples-128"] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index ee1ca4dff8873..1a17924b40e2e 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -19,12 +19,17 @@ derive-syn-parse = "0.1.5" Inflector = "0.11.4" cfg-expr = "0.10.3" itertools = "0.10.3" -proc-macro2 = "1.0.37" -quote = "1.0.10" -syn = { version = "1.0.98", features = ["full"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full"] } frame-support-procedural-tools = { version = "4.0.0-dev", path = "./tools" } +proc-macro-warning = { version = "0.3.0", default-features = false } [features] default = ["std"] std = [] no-metadata-docs = [] +# Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of +# pallets in a runtime grows. Does increase the compile time! +tuples-96 = [] +tuples-128 = [] diff --git a/frame/support/procedural/src/benchmark.rs b/frame/support/procedural/src/benchmark.rs index 4f9994b6cd073..28b5aa1b983b7 100644 --- a/frame/support/procedural/src/benchmark.rs +++ b/frame/support/procedural/src/benchmark.rs @@ -21,17 +21,16 @@ use derive_syn_parse::Parse; use frame_support_procedural_tools::generate_crate_access_2018; use proc_macro::TokenStream; use proc_macro2::{Ident, Span, TokenStream as TokenStream2}; -use quote::{quote, quote_spanned, ToTokens}; +use quote::{quote, ToTokens}; use syn::{ - parenthesized, parse::{Nothing, ParseStream}, parse_quote, punctuated::Punctuated, spanned::Spanned, - token::{Colon2, Comma, Gt, Lt, Paren}, - Attribute, Error, Expr, ExprBlock, ExprCall, ExprPath, FnArg, Item, ItemFn, ItemMod, LitInt, - Pat, Path, PathArguments, PathSegment, Result, ReturnType, Signature, Stmt, Token, Type, - TypePath, Visibility, WhereClause, + token::{Comma, Gt, Lt, PathSep}, + Attribute, Error, Expr, ExprBlock, ExprCall, ExprPath, FnArg, Item, ItemFn, ItemMod, Pat, Path, + PathArguments, PathSegment, Result, ReturnType, Signature, Stmt, Token, Type, TypePath, + Visibility, WhereClause, }; mod keywords { @@ -45,24 +44,27 @@ mod keywords { custom_keyword!(skip_meta); custom_keyword!(BenchmarkError); custom_keyword!(Result); + + pub const BENCHMARK_TOKEN: &str = stringify!(benchmark); + pub const BENCHMARKS_TOKEN: &str = stringify!(benchmarks); } /// This represents the raw parsed data for a param definition such as `x: Linear<10, 20>`. #[derive(Clone)] struct ParamDef { name: String, - typ: Type, - start: u32, - end: u32, + _typ: Type, + start: syn::GenericArgument, + end: syn::GenericArgument, } /// Allows easy parsing of the `<10, 20>` component of `x: Linear<10, 20>`. 
#[derive(Parse)] struct RangeArgs { _lt_token: Lt, - start: LitInt, + start: syn::GenericArgument, _comma: Comma, - end: LitInt, + end: syn::GenericArgument, _gt_token: Gt, } @@ -95,27 +97,20 @@ impl syn::parse::Parse for BenchmarkAttrKeyword { impl syn::parse::Parse for BenchmarkAttrs { fn parse(input: ParseStream) -> syn::Result { - let lookahead = input.lookahead1(); - if !lookahead.peek(Paren) { - let _nothing: Nothing = input.parse()?; - return Ok(BenchmarkAttrs { skip_meta: false, extra: false }) - } - let content; - let _paren: Paren = parenthesized!(content in input); let mut extra = false; let mut skip_meta = false; - let args = Punctuated::::parse_terminated(&content)?; + let args = Punctuated::::parse_terminated(&input)?; for arg in args.into_iter() { match arg { BenchmarkAttrKeyword::Extra => { if extra { - return Err(content.error("`extra` can only be specified once")) + return Err(input.error("`extra` can only be specified once")) } extra = true; }, BenchmarkAttrKeyword::SkipMeta => { if skip_meta { - return Err(content.error("`skip_meta` can only be specified once")) + return Err(input.error("`skip_meta` can only be specified once")) } skip_meta = true; }, @@ -150,8 +145,6 @@ struct BenchmarkDef { call_def: BenchmarkCallDef, verify_stmts: Vec, last_stmt: Option, - extra: bool, - skip_meta: bool, fn_sig: Signature, fn_vis: Visibility, fn_attrs: Vec, @@ -218,7 +211,7 @@ fn parse_params(item_fn: &ItemFn) -> Result> { return Err(Error::new( var_span, "Benchmark parameter names must consist of a single lowercase letter (a-z) and no other characters.", - )) + )); }; let name = ident.ident.to_token_stream().to_string(); if name.len() > 1 { @@ -235,17 +228,8 @@ fn parse_params(item_fn: &ItemFn) -> Result> { let Some(segment) = tpath.path.segments.last() else { return invalid_param(typ.span()) }; let args = segment.arguments.to_token_stream().into(); let Ok(args) = syn::parse::(args) else { return invalid_param(typ.span()) }; - let Ok(start) = args.start.base10_parse::() else { return invalid_param(args.start.span()) }; - let Ok(end) = args.end.base10_parse::() else { return invalid_param(args.end.span()) }; - if end < start { - return Err(Error::new( - args.start.span(), - "The start of a `ParamRange` must be less than or equal to the end", - )) - } - - params.push(ParamDef { name, typ: typ.clone(), start, end }); + params.push(ParamDef { name, _typ: typ.clone(), start: args.start, end: args.end }); } Ok(params) } @@ -253,9 +237,9 @@ fn parse_params(item_fn: &ItemFn) -> Result> { /// Used in several places where the `#[extrinsic_call]` or `#[block]` annotation is missing fn missing_call(item_fn: &ItemFn) -> Result { return Err(Error::new( - item_fn.block.brace_token.span, + item_fn.block.brace_token.span.join(), "No valid #[extrinsic_call] or #[block] annotation could be found in benchmark function body."
- )) + )); } /// Finds the `BenchmarkCallDef` and its index (within the list of stmts for the fn) and @@ -264,10 +248,10 @@ fn missing_call(item_fn: &ItemFn) -> Result { fn parse_call_def(item_fn: &ItemFn) -> Result<(usize, BenchmarkCallDef)> { // #[extrinsic_call] / #[block] handling let call_defs = item_fn.block.stmts.iter().enumerate().filter_map(|(i, child)| { - if let Stmt::Semi(Expr::Call(expr_call), _semi) = child { + if let Stmt::Expr(Expr::Call(expr_call), _semi) = child { // #[extrinsic_call] case expr_call.attrs.iter().enumerate().find_map(|(k, attr)| { - let segment = attr.path.segments.last()?; + let segment = attr.path().segments.last()?; let _: keywords::extrinsic_call = syn::parse(segment.ident.to_token_stream().into()).ok()?; let mut expr_call = expr_call.clone(); @@ -281,10 +265,10 @@ fn parse_call_def(item_fn: &ItemFn) -> Result<(usize, BenchmarkCallDef)> { Some(Ok((i, BenchmarkCallDef::ExtrinsicCall { origin, expr_call, attr_span: attr.span() }))) }) - } else if let Stmt::Expr(Expr::Block(block)) = child { + } else if let Stmt::Expr(Expr::Block(block), _) = child { // #[block] case block.attrs.iter().enumerate().find_map(|(k, attr)| { - let segment = attr.path.segments.last()?; + let segment = attr.path().segments.last()?; let _: keywords::block = syn::parse(segment.ident.to_token_stream().into()).ok()?; let mut block = block.clone(); @@ -310,7 +294,7 @@ fn parse_call_def(item_fn: &ItemFn) -> Result<(usize, BenchmarkCallDef)> { impl BenchmarkDef { /// Constructs a [`BenchmarkDef`] by traversing an existing [`ItemFn`] node. - pub fn from(item_fn: &ItemFn, extra: bool, skip_meta: bool) -> Result { + pub fn from(item_fn: &ItemFn) -> Result { let params = parse_params(item_fn)?; ensure_valid_return_type(item_fn)?; let (i, call_def) = parse_call_def(&item_fn)?; @@ -346,8 +330,6 @@ impl BenchmarkDef { call_def, verify_stmts, last_stmt, - extra, - skip_meta, fn_sig: item_fn.sig.clone(), fn_vis: item_fn.vis.clone(), fn_attrs: item_fn.attrs.clone(), @@ -375,7 +357,7 @@ pub fn benchmarks( let mod_attrs: Vec<&Attribute> = module .attrs .iter() - .filter(|attr| syn::parse2::(attr.to_token_stream()).is_err()) + .filter(|attr| !attr.path().is_ident(keywords::BENCHMARKS_TOKEN)) .collect(); let mut benchmark_names: Vec = Vec::new(); @@ -391,35 +373,32 @@ pub fn benchmarks( let Item::Fn(func) = stmt else { return None }; // find #[benchmark] attribute on function def - let benchmark_attr = func.attrs.iter().find_map(|attr| { - let seg = attr.path.segments.last()?; - syn::parse::(seg.ident.to_token_stream().into()).ok()?; - Some(attr) - })?; + let benchmark_attr = + func.attrs.iter().find(|attr| attr.path().is_ident(keywords::BENCHMARK_TOKEN))?; Some((benchmark_attr.clone(), func.clone(), stmt)) }); // parse individual benchmark defs and args for (benchmark_attr, func, stmt) in benchmark_fn_metas { - // parse any args provided to #[benchmark] - let attr_tokens = benchmark_attr.tokens.to_token_stream().into(); - let benchmark_args: BenchmarkAttrs = syn::parse(attr_tokens)?; - // parse benchmark def - let benchmark_def = - BenchmarkDef::from(&func, benchmark_args.extra, benchmark_args.skip_meta)?; + let benchmark_def = BenchmarkDef::from(&func)?; // record benchmark name let name = &func.sig.ident; benchmark_names.push(name.clone()); - // record name sets - if benchmark_def.extra { - extra_benchmark_names.push(name.clone()); - } - if benchmark_def.skip_meta { - skip_meta_benchmark_names.push(name.clone()) + // Check if we need to parse any args + if 
benchmark_attr.meta.require_path_only().is_err() { + // parse any args provided to #[benchmark] + let benchmark_attrs: BenchmarkAttrs = benchmark_attr.parse_args()?; + + // record name sets + if benchmark_attrs.extra { + extra_benchmark_names.push(name.clone()); + } else if benchmark_attrs.skip_meta { + skip_meta_benchmark_names.push(name.clone()); + } } // expand benchmark @@ -702,7 +681,6 @@ pub fn benchmarks( struct UnrolledParams { param_ranges: Vec, param_names: Vec, - param_types: Vec, } impl UnrolledParams { @@ -712,8 +690,8 @@ impl UnrolledParams { .iter() .map(|p| { let name = Ident::new(&p.name, Span::call_site()); - let start = p.start; - let end = p.end; + let start = &p.start; + let end = &p.end; quote!(#name, #start, #end) }) .collect(); @@ -724,14 +702,7 @@ impl UnrolledParams { quote!(#name) }) .collect(); - let param_types: Vec = params - .iter() - .map(|p| { - let typ = &p.typ; - quote!(#typ) - }) - .collect(); - UnrolledParams { param_ranges, param_names, param_types } + UnrolledParams { param_ranges, param_names } } } @@ -747,7 +718,6 @@ fn expand_benchmark( Ok(ident) => ident, Err(err) => return err.to_compile_error().into(), }; - let home = quote!(#krate::v2); let codec = quote!(#krate::frame_support::codec); let traits = quote!(#krate::frame_support::traits); let setup_stmts = benchmark_def.setup_stmts; @@ -759,7 +729,6 @@ fn expand_benchmark( let unrolled = UnrolledParams::from(&benchmark_def.params); let param_names = unrolled.param_names; let param_ranges = unrolled.param_ranges; - let param_types = unrolled.param_types; let type_use_generics = match is_instance { false => quote!(T), @@ -784,10 +753,23 @@ fn expand_benchmark( } expr_call.args = final_args; + let origin = match origin { + Expr::Cast(t) => { + let ty = t.ty.clone(); + quote! { + <::RuntimeOrigin as From<#ty>>::from(#origin); + } + }, + _ => quote! 
{ + #origin.into(); + }, + }; + // determine call name (handles `_` and normal call syntax) let expr_span = expr_call.span(); let call_err = || { - quote_spanned!(expr_span=> "Extrinsic call must be a function call or `_`".to_compile_error()).into() + syn::Error::new(expr_span, "Extrinsic call must be a function call or `_`") + .to_compile_error() }; let call_name = match *expr_call.func { Expr::Path(expr_path) => { @@ -795,10 +777,9 @@ fn expand_benchmark( let Some(segment) = expr_path.path.segments.last() else { return call_err(); }; segment.ident.to_string() }, - Expr::Verbatim(tokens) => { + Expr::Infer(_) => { // `_` style // replace `_` with fn name - let Ok(_) = syn::parse::(tokens.to_token_stream().into()) else { return call_err(); }; name.to_string() }, _ => return call_err(), @@ -806,7 +787,7 @@ fn expand_benchmark( // modify extrinsic call to be prefixed with "new_call_variant" let call_name = format!("new_call_variant_{}", call_name); - let mut punct: Punctuated = Punctuated::new(); + let mut punct: Punctuated = Punctuated::new(); punct.push(PathSegment { arguments: PathArguments::None, ident: Ident::new(call_name.as_str(), Span::call_site()), @@ -824,7 +805,7 @@ fn expand_benchmark( let __call_decoded = as #codec::Decode> ::decode(&mut &__benchmarked_call_encoded[..]) .expect("call is encoded above, encoding must be correct"); - let __origin = #origin.into(); + let __origin = #origin; as #traits::UnfilteredDispatchable>::dispatch_bypass_filter( __call_decoded, __origin, @@ -847,11 +828,10 @@ fn expand_benchmark( let vis = benchmark_def.fn_vis; // remove #[benchmark] attribute - let fn_attrs: Vec<&Attribute> = benchmark_def + let fn_attrs = benchmark_def .fn_attrs .iter() - .filter(|attr| !syn::parse2::(attr.path.to_token_stream()).is_ok()) - .collect(); + .filter(|attr| !attr.path().is_ident(keywords::BENCHMARK_TOKEN)); // modify signature generics, ident, and inputs, e.g: // before: `fn bench(u: Linear<1, 100>) -> Result<(), BenchmarkError>` @@ -874,10 +854,11 @@ fn expand_benchmark( Some(stmt) => quote!(#stmt), None => quote!(Ok(())), }; + let fn_attrs_clone = fn_attrs.clone(); let fn_def = quote! { #( - #fn_attrs + #fn_attrs_clone )* #vis #sig { #( @@ -898,11 +879,6 @@ fn expand_benchmark( // benchmark function definition #fn_def - // compile-time assertions that each referenced param type implements ParamRange - #( - #home::assert_impl_all!(#param_types: #home::ParamRange); - )* - #[allow(non_camel_case_types)] #( #fn_attrs @@ -999,6 +975,9 @@ fn expand_benchmark( // Test the lowest, highest (if its different from the lowest) // and up to num_values-2 more equidistant values in between. // For 0..10 and num_values=6 this would mean: [0, 2, 4, 6, 8, 10] + if high < low { + return Err("The start of a `ParamRange` must be less than or equal to the end".into()); + } let mut values = #krate::vec![low]; let diff = (high - low).min(num_values - 1); diff --git a/frame/support/procedural/src/construct_runtime/expand/freeze_reason.rs b/frame/support/procedural/src/construct_runtime/expand/freeze_reason.rs new file mode 100644 index 0000000000000..d357fab72115b --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/freeze_reason.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::{parse::PalletPath, Pallet}; +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +pub fn expand_outer_freeze_reason(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { + let mut conversion_fns = Vec::new(); + let mut freeze_reason_variants = Vec::new(); + for decl in pallet_decls { + if let Some(_) = decl.find_part("FreezeReason") { + let variant_name = &decl.name; + let path = &decl.path; + let index = decl.index; + + conversion_fns.push(expand_conversion_fn(path, variant_name)); + + freeze_reason_variants.push(expand_variant(index, path, variant_name)); + } + } + + quote! { + /// A reason for placing a freeze on funds. + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + #scrate::codec::Encode, #scrate::codec::Decode, #scrate::codec::MaxEncodedLen, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + pub enum RuntimeFreezeReason { + #( #freeze_reason_variants )* + } + + #( #conversion_fns )* + } +} + +fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! { + impl From<#path::FreezeReason> for RuntimeFreezeReason { + fn from(hr: #path::FreezeReason) -> Self { + RuntimeFreezeReason::#variant_name(hr) + } + } + } +} + +fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! { + #[codec(index = #index)] + #variant_name(#path::FreezeReason), + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/hold_reason.rs b/frame/support/procedural/src/construct_runtime/expand/hold_reason.rs new file mode 100644 index 0000000000000..6acfe5382f18a --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/hold_reason.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::{parse::PalletPath, Pallet}; +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +pub fn expand_outer_hold_reason(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { + let mut conversion_fns = Vec::new(); + let mut hold_reason_variants = Vec::new(); + for decl in pallet_decls { + if let Some(_) = decl.find_part("HoldReason") { + let variant_name = &decl.name; + let path = &decl.path; + let index = decl.index; + + conversion_fns.push(expand_conversion_fn(path, variant_name)); + + hold_reason_variants.push(expand_variant(index, path, variant_name)); + } + } + + quote! { + /// A reason for placing a hold on funds. 
+ #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + #scrate::codec::Encode, #scrate::codec::Decode, #scrate::codec::MaxEncodedLen, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + pub enum RuntimeHoldReason { + #( #hold_reason_variants )* + } + + #( #conversion_fns )* + } +} + +fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! { + impl From<#path::HoldReason> for RuntimeHoldReason { + fn from(hr: #path::HoldReason) -> Self { + RuntimeHoldReason::#variant_name(hr) + } + } + } +} + +fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! { + #[codec(index = #index)] + #variant_name(#path::HoldReason), + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/lock_id.rs b/frame/support/procedural/src/construct_runtime/expand/lock_id.rs new file mode 100644 index 0000000000000..d7031dcaf552b --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/lock_id.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::{parse::PalletPath, Pallet}; +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +pub fn expand_outer_lock_id(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { + let mut conversion_fns = Vec::new(); + let mut lock_id_variants = Vec::new(); + for decl in pallet_decls { + if let Some(_) = decl.find_part("LockId") { + let variant_name = &decl.name; + let path = &decl.path; + let index = decl.index; + + conversion_fns.push(expand_conversion_fn(path, variant_name)); + + lock_id_variants.push(expand_variant(index, path, variant_name)); + } + } + + quote! { + /// An identifier for each lock placed on funds. + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + #scrate::codec::Encode, #scrate::codec::Decode, #scrate::codec::MaxEncodedLen, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + pub enum RuntimeLockId { + #( #lock_id_variants )* + } + + #( #conversion_fns )* + } +} + +fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! { + impl From<#path::LockId> for RuntimeLockId { + fn from(hr: #path::LockId) -> Self { + RuntimeLockId::#variant_name(hr) + } + } + } +} + +fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! 
{ + #[codec(index = #index)] + #variant_name(#path::LockId), + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/frame/support/procedural/src/construct_runtime/expand/metadata.rs index e9996121757d7..81fc93ba3c9ef 100644 --- a/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -48,6 +48,7 @@ pub fn expand_runtime_metadata( let event = expand_pallet_metadata_events(&filtered_names, runtime, scrate, decl); let constants = expand_pallet_metadata_constants(runtime, decl); let errors = expand_pallet_metadata_errors(runtime, decl); + let docs = expand_pallet_metadata_docs(runtime, decl); let attr = decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) .expect("was successfully parsed before; qed"); @@ -59,7 +60,7 @@ pub fn expand_runtime_metadata( quote! { #attr - #scrate::metadata::PalletMetadata { + #scrate::metadata_ir::PalletMetadataIR { name: stringify!(#name), index: #index, storage: #storage, @@ -67,6 +68,7 @@ pub fn expand_runtime_metadata( event: #event, constants: #constants, error: #errors, + docs: #docs, } } }) @@ -74,10 +76,28 @@ pub fn expand_runtime_metadata( quote! { impl #runtime { - pub fn metadata() -> #scrate::metadata::RuntimeMetadataPrefixed { - #scrate::metadata::RuntimeMetadataLastVersion::new( - #scrate::sp_std::vec![ #(#pallets),* ], - #scrate::metadata::ExtrinsicMetadata { + fn metadata_ir() -> #scrate::metadata_ir::MetadataIR { + // Each runtime must expose the `runtime_metadata()` to fetch the runtime API metadata. + // The function is implemented by calling `impl_runtime_apis!`. + // + // However, the `construct_runtime!` may be called without calling `impl_runtime_apis!`. + // Rely on the `Deref` trait to differentiate between a runtime that implements + // APIs (by macro impl_runtime_apis!) and a runtime that is simply created (by macro construct_runtime!). + // + // Both `InternalConstructRuntime` and `InternalImplRuntimeApis` expose a `runtime_metadata()` function. + // `InternalConstructRuntime` is implemented by the `construct_runtime!` for Runtime references (`& Runtime`), + // while `InternalImplRuntimeApis` is implemented by the `impl_runtime_apis!` for Runtime (`Runtime`). + // + // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` + // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` + // is called. + // + // `Deref` needs a reference for resolving the function call. 
+ let rt = #runtime; + + #scrate::metadata_ir::MetadataIR { + pallets: #scrate::sp_std::vec![ #(#pallets),* ], + extrinsic: #scrate::metadata_ir::ExtrinsicMetadataIR { ty: #scrate::scale_info::meta_type::<#extrinsic>(), version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, signed_extensions: < @@ -86,15 +106,30 @@ pub fn expand_runtime_metadata( >::SignedExtensions as #scrate::sp_runtime::traits::SignedExtension >::metadata() .into_iter() - .map(|meta| #scrate::metadata::SignedExtensionMetadata { + .map(|meta| #scrate::metadata_ir::SignedExtensionMetadataIR { identifier: meta.identifier, ty: meta.ty, additional_signed: meta.additional_signed, }) .collect(), }, - #scrate::scale_info::meta_type::<#runtime>() - ).into() + ty: #scrate::scale_info::meta_type::<#runtime>(), + apis: (&rt).runtime_metadata(), + } + } + + pub fn metadata() -> #scrate::metadata::RuntimeMetadataPrefixed { + #scrate::metadata_ir::into_latest(#runtime::metadata_ir()) + } + + pub fn metadata_at_version(version: u32) -> Option<#scrate::OpaqueMetadata> { + #scrate::metadata_ir::into_version(#runtime::metadata_ir(), version).map(|prefixed| { + #scrate::OpaqueMetadata::new(prefixed.into()) + }) + } + + pub fn metadata_versions() -> #scrate::sp_std::vec::Vec { + #scrate::metadata_ir::supported_versions() } } } @@ -157,7 +192,7 @@ fn expand_pallet_metadata_events( quote! { Some( - #scrate::metadata::PalletEventMetadata { + #scrate::metadata_ir::PalletEventMetadataIR { ty: #scrate::scale_info::meta_type::<#pallet_event>() } ) @@ -184,3 +219,12 @@ fn expand_pallet_metadata_errors(runtime: &Ident, decl: &Pallet) -> TokenStream #path::Pallet::<#runtime #(, #path::#instance)*>::error_metadata() } } + +fn expand_pallet_metadata_docs(runtime: &Ident, decl: &Pallet) -> TokenStream { + let path = &decl.path; + let instance = decl.instance.as_ref().into_iter(); + + quote! { + #path::Pallet::<#runtime #(, #path::#instance)*>::pallet_documentation_metadata() + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/mod.rs b/frame/support/procedural/src/construct_runtime/expand/mod.rs index ace0b23bd7f85..0fd98bb4dda13 100644 --- a/frame/support/procedural/src/construct_runtime/expand/mod.rs +++ b/frame/support/procedural/src/construct_runtime/expand/mod.rs @@ -18,15 +18,23 @@ mod call; mod config; mod event; +mod freeze_reason; +mod hold_reason; mod inherent; +mod lock_id; mod metadata; mod origin; +mod slash_reason; mod unsigned; pub use call::expand_outer_dispatch; pub use config::expand_outer_config; pub use event::expand_outer_event; +pub use freeze_reason::expand_outer_freeze_reason; +pub use hold_reason::expand_outer_hold_reason; pub use inherent::expand_outer_inherent; +pub use lock_id::expand_outer_lock_id; pub use metadata::expand_runtime_metadata; pub use origin::expand_outer_origin; +pub use slash_reason::expand_outer_slash_reason; pub use unsigned::expand_outer_validate_unsigned; diff --git a/frame/support/procedural/src/construct_runtime/expand/slash_reason.rs b/frame/support/procedural/src/construct_runtime/expand/slash_reason.rs new file mode 100644 index 0000000000000..abffb97d43981 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/slash_reason.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::{parse::PalletPath, Pallet}; +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +pub fn expand_outer_slash_reason(pallet_decls: &[Pallet], scrate: &TokenStream) -> TokenStream { + let mut conversion_fns = Vec::new(); + let mut slash_reason_variants = Vec::new(); + for decl in pallet_decls { + if let Some(_) = decl.find_part("SlashReason") { + let variant_name = &decl.name; + let path = &decl.path; + let index = decl.index; + + conversion_fns.push(expand_conversion_fn(path, variant_name)); + + slash_reason_variants.push(expand_variant(index, path, variant_name)); + } + } + + quote! { + /// A reason for slashing funds. + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + #scrate::codec::Encode, #scrate::codec::Decode, #scrate::codec::MaxEncodedLen, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + pub enum RuntimeSlashReason { + #( #slash_reason_variants )* + } + + #( #conversion_fns )* + } +} + +fn expand_conversion_fn(path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! { + impl From<#path::SlashReason> for RuntimeSlashReason { + fn from(hr: #path::SlashReason) -> Self { + RuntimeSlashReason::#variant_name(hr) + } + } + } +} + +fn expand_variant(index: u8, path: &PalletPath, variant_name: &Ident) -> TokenStream { + quote! { + #[codec(index = #index)] + #variant_name(#path::SlashReason), + } +} diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 37f23efed36c1..1af44fc00a0ec 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -157,7 +157,7 @@ use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; use std::{collections::HashSet, str::FromStr}; -use syn::{Ident, Result}; +use syn::{spanned::Spanned, Ident, Result}; /// The fixed name of the system pallet. const SYSTEM_PALLET_NAME: &str = "System"; @@ -170,9 +170,12 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let res = match definition { RuntimeDeclaration::Implicit(implicit_def) => - construct_runtime_intermediary_expansion(input_copy.into(), implicit_def), + check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()).and_then( + |_| construct_runtime_intermediary_expansion(input_copy.into(), implicit_def), + ), RuntimeDeclaration::Explicit(explicit_decl) => - construct_runtime_final_expansion(explicit_decl), + check_pallet_number(input_copy.into(), explicit_decl.pallets.len()) + .and_then(|_| construct_runtime_final_expansion(explicit_decl)), }; res.unwrap_or_else(|e| e.to_compile_error()).into() @@ -221,7 +224,7 @@ fn construct_runtime_final_expansion( let system_pallet = pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| { syn::Error::new( - pallets_token.span, + pallets_token.span.join(), "`System` pallet declaration is missing. 
\ Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ) @@ -265,6 +268,10 @@ fn construct_runtime_final_expansion( let inherent = expand::expand_outer_inherent(&name, &block, &unchecked_extrinsic, &pallets, &scrate); let validate_unsigned = expand::expand_outer_validate_unsigned(&name, &pallets, &scrate); + let freeze_reason = expand::expand_outer_freeze_reason(&pallets, &scrate); + let hold_reason = expand::expand_outer_hold_reason(&pallets, &scrate); + let lock_id = expand::expand_outer_lock_id(&pallets, &scrate); + let slash_reason = expand::expand_outer_slash_reason(&pallets, &scrate); let integrity_test = decl_integrity_test(&scrate); let static_assertions = decl_static_assertions(&name, &pallets, &scrate); @@ -289,6 +296,31 @@ fn construct_runtime_final_expansion( type RuntimeBlock = #block; } + // Each runtime must expose the `runtime_metadata()` to fetch the runtime API metadata. + // The function is implemented by calling `impl_runtime_apis!`. + // + // However, the `construct_runtime!` may be called without calling `impl_runtime_apis!`. + // Rely on the `Deref` trait to differentiate between a runtime that implements + // APIs (by macro impl_runtime_apis!) and a runtime that is simply created (by macro construct_runtime!). + // + // Both `InternalConstructRuntime` and `InternalImplRuntimeApis` expose a `runtime_metadata()` function. + // `InternalConstructRuntime` is implemented by the `construct_runtime!` for Runtime references (`& Runtime`), + // while `InternalImplRuntimeApis` is implemented by the `impl_runtime_apis!` for Runtime (`Runtime`). + // + // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` + // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` + // is called. + + #[doc(hidden)] + trait InternalConstructRuntime { + #[inline(always)] + fn runtime_metadata(&self) -> #scrate::sp_std::vec::Vec<#scrate::metadata_ir::RuntimeApiMetadataIR> { + Default::default() + } + } + #[doc(hidden)] + impl InternalConstructRuntime for &#name {} + #outer_event #outer_origin @@ -307,6 +339,14 @@ fn construct_runtime_final_expansion( #validate_unsigned + #freeze_reason + + #hold_reason + + #lock_id + + #slash_reason + #integrity_test #static_assertions @@ -616,3 +656,34 @@ fn decl_static_assertions( #(#error_encoded_size_check)* } } + +fn check_pallet_number(input: TokenStream2, pallet_num: usize) -> Result<()> { + let max_pallet_num = { + if cfg!(feature = "tuples-96") { + 96 + } else if cfg!(feature = "tuples-128") { + 128 + } else { + 64 + } + }; + + if pallet_num > max_pallet_num { + let no_feature = max_pallet_num == 128; + return Err(syn::Error::new( + input.span(), + format!( + "{} To increase this limit, enable the tuples-{} feature of [frame_support]. {}", + "The number of pallets exceeds the maximum number of tuple elements.", + max_pallet_num + 32, + if no_feature { + "If the feature does not exist - it needs to be implemented." 
+ } else { + "" + }, + ), + )) + } + + Ok(()) +} diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index b0bebcd9e0f21..f819a90d1b5cd 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -17,6 +17,7 @@ use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::{Span, TokenStream}; +use quote::ToTokens; use std::collections::{HashMap, HashSet}; use syn::{ ext::IdentExt, @@ -38,6 +39,10 @@ mod keyword { syn::custom_keyword!(Origin); syn::custom_keyword!(Inherent); syn::custom_keyword!(ValidateUnsigned); + syn::custom_keyword!(FreezeReason); + syn::custom_keyword!(HoldReason); + syn::custom_keyword!(LockId); + syn::custom_keyword!(SlashReason); syn::custom_keyword!(exclude_parts); syn::custom_keyword!(use_parts); } @@ -370,6 +375,10 @@ pub enum PalletPartKeyword { Origin(keyword::Origin), Inherent(keyword::Inherent), ValidateUnsigned(keyword::ValidateUnsigned), + FreezeReason(keyword::FreezeReason), + HoldReason(keyword::HoldReason), + LockId(keyword::LockId), + SlashReason(keyword::SlashReason), } impl Parse for PalletPartKeyword { @@ -392,6 +401,14 @@ impl Parse for PalletPartKeyword { Ok(Self::Inherent(input.parse()?)) } else if lookahead.peek(keyword::ValidateUnsigned) { Ok(Self::ValidateUnsigned(input.parse()?)) + } else if lookahead.peek(keyword::FreezeReason) { + Ok(Self::FreezeReason(input.parse()?)) + } else if lookahead.peek(keyword::HoldReason) { + Ok(Self::HoldReason(input.parse()?)) + } else if lookahead.peek(keyword::LockId) { + Ok(Self::LockId(input.parse()?)) + } else if lookahead.peek(keyword::SlashReason) { + Ok(Self::SlashReason(input.parse()?)) } else { Err(lookahead.error()) } @@ -410,6 +427,10 @@ impl PalletPartKeyword { Self::Origin(_) => "Origin", Self::Inherent(_) => "Inherent", Self::ValidateUnsigned(_) => "ValidateUnsigned", + Self::FreezeReason(_) => "FreezeReason", + Self::HoldReason(_) => "HoldReason", + Self::LockId(_) => "LockId", + Self::SlashReason(_) => "SlashReason", } } @@ -424,17 +445,21 @@ impl PalletPartKeyword { } } -impl Spanned for PalletPartKeyword { - fn span(&self) -> Span { +impl ToTokens for PalletPartKeyword { + fn to_tokens(&self, tokens: &mut TokenStream) { match self { - Self::Pallet(inner) => inner.span(), - Self::Call(inner) => inner.span(), - Self::Storage(inner) => inner.span(), - Self::Event(inner) => inner.span(), - Self::Config(inner) => inner.span(), - Self::Origin(inner) => inner.span(), - Self::Inherent(inner) => inner.span(), - Self::ValidateUnsigned(inner) => inner.span(), + Self::Pallet(inner) => inner.to_tokens(tokens), + Self::Call(inner) => inner.to_tokens(tokens), + Self::Storage(inner) => inner.to_tokens(tokens), + Self::Event(inner) => inner.to_tokens(tokens), + Self::Config(inner) => inner.to_tokens(tokens), + Self::Origin(inner) => inner.to_tokens(tokens), + Self::Inherent(inner) => inner.to_tokens(tokens), + Self::ValidateUnsigned(inner) => inner.to_tokens(tokens), + Self::FreezeReason(inner) => inner.to_tokens(tokens), + Self::HoldReason(inner) => inner.to_tokens(tokens), + Self::LockId(inner) => inner.to_tokens(tokens), + Self::SlashReason(inner) => inner.to_tokens(tokens), } } } @@ -657,7 +682,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result proc_macro::To let default_variants = enum_ .variants .into_iter() - .filter(|variant| variant.attrs.iter().any(|attr| attr.path.is_ident("default"))) + .filter(|variant| variant.attrs.iter().any(|attr| 
attr.path().is_ident("default"))) .collect::>(); match &*default_variants { @@ -80,7 +80,7 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To let variant_attrs = default_variant .attrs .iter() - .filter(|a| a.path.is_ident("default")) + .filter(|a| a.path().is_ident("default")) .collect::>(); // check that there is only one #[default] attribute on the variant diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 6df2db8303da0..c49afbc243be6 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -465,6 +465,8 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// * All storages are marked as unbounded, meaning you do not need to implement `MaxEncodedLen` on /// storage types. This is equivalent to specifying `#[pallet::unbounded]` on all storage type /// definitions. +/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, these +/// will be replaced by `Blake2_128Concat`. /// /// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or /// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument @@ -495,7 +497,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// #[doc = include_str!("../README.md")] /// #[pallet_doc("../doc1.md")] /// #[pallet_doc("../doc2.md")] -/// pub struct Pallet(_); +/// pub mod pallet {} /// ``` /// /// The runtime metadata for this pallet contains the following @@ -514,7 +516,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// /// Documentation for pallet 1 /// /// Documentation for pallet 2 /// /// Content of README.md -/// pub struct Pallet(_); +/// pub mod pallet {} /// ``` /// /// If you want to specify the file from which the documentation is loaded, you can use the @@ -531,7 +533,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// /// This approach is beneficial when you use the `include_str` macro at the beginning of the file /// and want that documentation to extend to the runtime metadata, without reiterating the -/// documentation on the module itself. +/// documentation on the pallet module itself. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { pallet::pallet(attr, item) @@ -983,7 +985,8 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { /// /// The macro creates an enum `Call` with one variant per dispatchable. This enum implements: /// [`Clone`], [`Eq`], [`PartialEq`], [`Debug`] (with stripped implementation in `not("std")`), -/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, and `UnfilteredDispatchable`. +/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, `GetCallIndex` and +/// `UnfilteredDispatchable`. /// /// The macro implements the `Callable` trait on `Pallet` and a function `call_functions` /// which returns the dispatchable metadata. @@ -1409,3 +1412,30 @@ pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } + +/// The `#[pallet::composite_enum]` attribute allows you to define an enum that gets composed as an +/// aggregate enum by `construct_runtime`. This is similar in principle with `#[pallet::event]` and +/// `#[pallet::error]`. +/// +/// The attribute currently only supports enum definitions, and identifiers that are named +/// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. 
Arbitrary identifiers for the enum are +/// not supported. The aggregate enum generated by `construct_runtime` will have the name of +/// `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeLockId` and `RuntimeSlashReason` +/// respectively. +/// +/// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion function from +/// the pallet enum to the aggregate enum, and automatically derives the following traits: +/// +/// ```ignore +/// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, +/// RuntimeDebug +/// ``` +/// +/// For ease of usage, when no `#[derive]` attributes are found for the enum under +/// `#[pallet::composite_enum]`, the aforementioned traits are automatically derived for it. The +/// inverse is also true: if there are any `#[derive]` attributes found for the enum, then no traits +/// will automatically be derived for it. +#[proc_macro_attribute] +pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 3db454eb6211b..f17fdc81a647c 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -15,8 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{pallet::Def, COUNTER}; -use quote::ToTokens; +use crate::{ + pallet::{parse::call::CallWeightDef, Def}, + COUNTER, +}; +use proc_macro2::TokenStream as TokenStream2; +use quote::{quote, ToTokens}; use syn::spanned::Spanned; /// @@ -54,30 +58,60 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { .map(|fn_name| format!("Create a call with the variant `{}`.", fn_name)) .collect::>(); - let mut warning_structs = Vec::new(); - let mut warning_names = Vec::new(); + let mut call_index_warnings = Vec::new(); // Emit a warning for each call that is missing `call_index` when not in dev-mode. for method in &methods { if method.explicit_call_index || def.dev_mode { continue } - let name = syn::Ident::new(&format!("{}", method.name), method.name.span()); - let warning: syn::ItemStruct = syn::parse_quote!( - #[deprecated(note = r" - Implicit call indices are deprecated in favour of explicit ones. - Please ensure that all calls have the `pallet::call_index` attribute or that the - `dev-mode` of the pallet is enabled. 
For more info see: - and - .")] - #[allow(non_camel_case_types)] - struct #name; - ); - warning_names.push(name); - warning_structs.push(warning); + let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex") + .index(call_index_warnings.len()) + .old("use implicit call indices") + .new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode") + .help_links(&[ + "https://github.com/paritytech/substrate/pull/12891", + "https://github.com/paritytech/substrate/pull/11381" + ]) + .span(method.name.span()) + .build(); + call_index_warnings.push(warning); } - let fn_weight = methods.iter().map(|method| &method.weight); + let mut fn_weight = Vec::::new(); + let mut weight_warnings = Vec::new(); + for method in &methods { + match &method.weight { + CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)), + CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => { + let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight") + .index(weight_warnings.len()) + .old("use hard-coded constant as call weight") + .new("benchmark all calls or put the pallet into `dev` mode") + .help_link("https://github.com/paritytech/substrate/pull/13798") + .span(lit.span()) + .build(); + weight_warnings.push(warning); + fn_weight.push(e.into_token_stream()); + }, + CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()), + CallWeightDef::Inherited => { + let pallet_weight = def + .call + .as_ref() + .expect("we have methods; we have calls; qed") + .inherited_call_weight + .as_ref() + .expect("the parser prevents this"); + + // Expand `<::WeightInfo>::call_name()`. + let t = &pallet_weight.typename; + let n = &method.name; + fn_weight.push(quote!({ < #t > :: #n () })); + }, + } + } + debug_assert_eq!(fn_weight.len(), methods.len()); let fn_doc = methods.iter().map(|method| &method.docs).collect::>(); @@ -171,7 +205,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { .map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index]) { item_impl.items.iter_mut().for_each(|i| { - if let syn::ImplItem::Method(method) = i { + if let syn::ImplItem::Fn(method) = i { let block = &method.block; method.block = syn::parse_quote! {{ // We execute all dispatchable in a new storage layer, allowing them @@ -189,13 +223,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { method .attrs .iter() - .find(|attr| { - if let Ok(syn::Meta::List(syn::MetaList { path, .. })) = attr.parse_meta() { - path.segments.last().map(|seg| seg.ident == "allow").unwrap_or(false) - } else { - false - } - }) + .find(|attr| attr.path().is_ident("allow")) .map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream()) }) .collect::>(); @@ -203,9 +231,10 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(span => mod warnings { #( - #warning_structs - // This triggers each deprecated warning once. - const _: Option<#warning_names> = None; + #call_index_warnings + )* + #( + #weight_warnings )* } @@ -324,6 +353,21 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> #frame_support::dispatch::GetCallIndex for #call_ident<#type_use_gen> + #where_clause + { + fn get_call_index(&self) -> u8 { + match *self { + #( Self::#fn_name { .. 
} => #call_index, )* + Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."), + } + } + + fn get_call_indices() -> &'static [u8] { + &[ #( #call_index, )* ] + } + } + impl<#type_impl_gen> #frame_support::traits::UnfilteredDispatchable for #call_ident<#type_use_gen> #where_clause @@ -362,7 +406,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { #[doc(hidden)] - pub fn call_functions() -> #frame_support::metadata::PalletCallMetadata { + pub fn call_functions() -> #frame_support::metadata_ir::PalletCallMetadataIR { #frame_support::scale_info::meta_type::<#call_ident<#type_use_gen>>().into() } } diff --git a/frame/support/procedural/src/pallet/expand/constants.rs b/frame/support/procedural/src/pallet/expand/constants.rs index 21ac1de3642d5..6e1cd3df627f3 100644 --- a/frame/support/procedural/src/pallet/expand/constants.rs +++ b/frame/support/procedural/src/pallet/expand/constants.rs @@ -23,7 +23,7 @@ struct ConstDef { /// The type in Get, e.g. `u32` in `type Foo: Get;`, but `Self` is replaced by `T` pub type_: syn::Type, /// The doc associated - pub doc: Vec, + pub doc: Vec, /// default_byte implementation pub default_byte_impl: proc_macro2::TokenStream, /// Constant name for Metadata (optional) @@ -85,7 +85,7 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { let default_byte_impl = &const_.default_byte_impl; quote::quote!({ - #frame_support::metadata::PalletConstantMetadata { + #frame_support::metadata_ir::PalletConstantMetadataIR { name: #ident_str, ty: #frame_support::scale_info::meta_type::<#const_type>(), value: { #default_byte_impl }, @@ -99,7 +99,7 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub fn pallet_constants_metadata() - -> #frame_support::sp_std::vec::Vec<#frame_support::metadata::PalletConstantMetadata> + -> #frame_support::sp_std::vec::Vec<#frame_support::metadata_ir::PalletConstantMetadataIR> { #frame_support::sp_std::vec![ #( #consts ),* ] } diff --git a/frame/support/procedural/src/pallet/expand/documentation.rs b/frame/support/procedural/src/pallet/expand/documentation.rs index e158448a89711..4e6347e83bb73 100644 --- a/frame/support/procedural/src/pallet/expand/documentation.rs +++ b/frame/support/procedural/src/pallet/expand/documentation.rs @@ -16,54 +16,24 @@ // limitations under the License. use crate::pallet::Def; -use derive_syn_parse::Parse; use proc_macro2::TokenStream; use quote::ToTokens; -use syn::{ - parse::{self, Parse, ParseStream}, - spanned::Spanned, - Attribute, Lit, -}; +use syn::{spanned::Spanned, Attribute, Lit, LitStr}; const DOC: &'static str = "doc"; const PALLET_DOC: &'static str = "pallet_doc"; -mod keywords { - syn::custom_keyword!(include_str); -} - /// Get the documentation file path from the `pallet_doc` attribute. /// /// Supported format: /// `#[pallet_doc(PATH)]`: The path of the file from which the documentation is loaded fn parse_pallet_doc_value(attr: &Attribute) -> syn::Result { - let span = attr.span(); - - let meta = attr.parse_meta()?; - let syn::Meta::List(metalist) = meta else { - let msg = "The `pallet_doc` attribute must receive arguments as a list. Supported format: `pallet_doc(PATH)`"; - return Err(syn::Error::new(span, msg)) - }; + let lit: syn::LitStr = attr.parse_args().map_err(|_| { + let msg = "The `pallet_doc` received an unsupported argument. 
Supported format: `pallet_doc(\"PATH\")`"; + syn::Error::new(attr.span(), msg) + })?; - let paths: Vec<_> = metalist - .nested - .into_iter() - .map(|nested| { - let syn::NestedMeta::Lit(lit) = nested else { - let msg = "The `pallet_doc` received an unsupported argument. Supported format: `pallet_doc(PATH)`"; - return Err(syn::Error::new(span, msg)) - }; - - Ok(lit) - }) - .collect::>()?; - - if paths.len() != 1 { - let msg = "The `pallet_doc` attribute must receive only one argument. Supported format: `pallet_doc(PATH)`"; - return Err(syn::Error::new(span, msg)) - } - - Ok(DocMetaValue::Path(paths[0].clone())) + Ok(DocMetaValue::Path(lit)) } /// Get the value from the `doc` comment attribute: @@ -71,47 +41,22 @@ fn parse_pallet_doc_value(attr: &Attribute) -> syn::Result { /// Supported formats: /// - `#[doc = "A doc string"]`: Documentation as a string literal /// - `#[doc = include_str!(PATH)]`: Documentation obtained from a path -fn parse_doc_value(attr: &Attribute) -> Option { - let Some(ident) = attr.path.get_ident() else { - return None - }; - if ident != DOC { - return None +fn parse_doc_value(attr: &Attribute) -> syn::Result> { + if !attr.path().is_ident(DOC) { + return Ok(None) } - let parser = |input: ParseStream| DocParser::parse(input); - let result = parse::Parser::parse2(parser, attr.tokens.clone()).ok()?; + let meta = attr.meta.require_name_value()?; - if let Some(lit) = result.lit { - Some(DocMetaValue::Lit(lit)) - } else if let Some(include_doc) = result.include_doc { - Some(DocMetaValue::Path(include_doc.lit)) - } else { - None + match &meta.value { + syn::Expr::Lit(lit) => Ok(Some(DocMetaValue::Lit(lit.lit.clone()))), + syn::Expr::Macro(mac) if mac.mac.path.is_ident("include_str") => + Ok(Some(DocMetaValue::Path(mac.mac.parse_body()?))), + _ => + Err(syn::Error::new(attr.span(), "Expected `= \"docs\"` or `= include_str!(\"PATH\")`")), } } -/// Parse the include_str attribute. -#[derive(Debug, Parse)] -struct IncludeDocParser { - _include_str: keywords::include_str, - _eq_token: syn::token::Bang, - #[paren] - _paren: syn::token::Paren, - #[inside(_paren)] - lit: Lit, -} - -/// Parse the doc literal. -#[derive(Debug, Parse)] -struct DocParser { - _eq_token: syn::token::Eq, - #[peek(Lit)] - lit: Option, - #[parse_if(lit.is_none())] - include_doc: Option, -} - /// Supported documentation tokens. #[derive(Debug)] enum DocMetaValue { @@ -124,7 +69,7 @@ enum DocMetaValue { /// The string literal represents the file `PATH`. /// /// `#[doc = include_str!(PATH)]` - Path(Lit), + Path(LitStr), } impl ToTokens for DocMetaValue { @@ -145,7 +90,7 @@ impl ToTokens for DocMetaValue { /// Implement a `pallet_documentation_metadata` function to fetch the /// documentation that is included in the metadata. /// -/// The documentation is placed at the top of the module similar to: +/// The documentation is placed on the pallet similar to: /// /// ```ignore /// #[pallet] @@ -163,7 +108,7 @@ impl ToTokens for DocMetaValue { /// which is the file path that holds the documentation to be added to the metadata. /// /// Unlike the `doc` attribute, the documentation provided to the `proc_macro` attribute is -/// not inserted at the beginning of the module. +/// not added to the pallet. 
pub fn expand_documentation(def: &mut Def) -> proc_macro2::TokenStream { let frame_support = &def.frame_support; let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); @@ -179,33 +124,39 @@ pub fn expand_documentation(def: &mut Def) -> proc_macro2::TokenStream { let mut index = 0; while index < def.item.attrs.len() { let attr = &def.item.attrs[index]; - if let Some(ident) = attr.path.get_ident() { - if ident == PALLET_DOC { - let elem = def.item.attrs.remove(index); - pallet_docs.push(elem); - // Do not increment the index, we have just removed the - // element from the attributes. - continue - } + if attr.path().get_ident().map_or(false, |i| *i == PALLET_DOC) { + pallet_docs.push(def.item.attrs.remove(index)); + // Do not increment the index, we have just removed the + // element from the attributes. + continue } index += 1; } // Capture the `#[doc = include_str!("../README.md")]` and `#[doc = "Documentation"]`. - let mut docs: Vec<_> = def.item.attrs.iter().filter_map(parse_doc_value).collect(); + let docs = match def + .item + .attrs + .iter() + .filter_map(|v| parse_doc_value(v).transpose()) + .collect::>>() + { + Ok(r) => r, + Err(err) => return err.into_compile_error(), + }; // Capture the `#[pallet_doc("../README.md")]`. - let pallet_docs: Vec<_> = match pallet_docs + let pallet_docs = match pallet_docs .into_iter() .map(|attr| parse_pallet_doc_value(&attr)) - .collect::>() + .collect::>>() { Ok(docs) => docs, Err(err) => return err.into_compile_error(), }; - docs.extend(pallet_docs); + let docs = docs.iter().chain(pallet_docs.iter()); quote::quote!( impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clauses{ diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 27fe9d3401ea2..7acfb9090698e 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -82,8 +82,8 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(def.pallet_struct.attr_span => impl<#type_impl_gen> #pallet_ident<#type_use_gen> #config_where_clause { #[doc(hidden)] - pub fn error_metadata() -> Option<#frame_support::metadata::PalletErrorMetadata> { - Some(#frame_support::metadata::PalletErrorMetadata { + pub fn error_metadata() -> Option<#frame_support::metadata_ir::PalletErrorMetadataIR> { + Some(#frame_support::metadata_ir::PalletErrorMetadataIR { ty: #frame_support::scale_info::meta_type::<#error_ident<#type_use_gen>>() }) } @@ -93,7 +93,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(def.pallet_struct.attr_span => impl<#type_impl_gen> #pallet_ident<#type_use_gen> #config_where_clause { #[doc(hidden)] - pub fn error_metadata() -> Option<#frame_support::metadata::PalletErrorMetadata> { + pub fn error_metadata() -> Option<#frame_support::metadata_ir::PalletErrorMetadataIR> { None } } diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 05d61bb522180..c742ddcd25fbc 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -269,6 +269,19 @@ pub fn process_generics(def: &mut Def) -> syn::Result (5, 6, 7), }; + if storage_def.use_default_hasher { + let hasher_indices: Vec = match storage_def.metadata { + Metadata::Map { .. } | Metadata::CountedMap { .. 
} => vec![1], + Metadata::DoubleMap { .. } => vec![1, 3], + _ => vec![], + }; + for hasher_idx in hasher_indices { + args.args[hasher_idx] = syn::GenericArgument::Type( + syn::parse_quote!(#frame_support::Blake2_128Concat), + ); + } + } + if query_idx < args.args.len() { if let syn::GenericArgument::Type(query_kind) = args.args.index_mut(query_idx) { set_result_query_type_parameter(query_kind)?; @@ -371,10 +384,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -394,10 +408,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -419,10 +434,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -444,10 +460,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -471,10 +488,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -639,8 +657,8 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #completed_where_clause { #[doc(hidden)] - pub fn storage_metadata() -> #frame_support::metadata::PalletStorageMetadata { - #frame_support::metadata::PalletStorageMetadata { + pub fn storage_metadata() -> #frame_support::metadata_ir::PalletStorageMetadataIR { + #frame_support::metadata_ir::PalletStorageMetadataIR { prefix: < ::PalletInfo as #frame_support::traits::PalletInfo diff --git a/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 564b526e93536..f36c765f7beb6 100644 --- a/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -15,7 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{pallet::Def, COUNTER}; +use crate::{ + pallet::{CompositeKeyword, Def}, + COUNTER, +}; use syn::spanned::Spanned; /// Generate the `tt_default_parts` macro. @@ -48,6 +51,30 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { let validate_unsigned_part = def.validate_unsigned.as_ref().map(|_| quote::quote!(ValidateUnsigned,)); + let freeze_reason_part = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::FreezeReason(_))) + .then_some(quote::quote!(FreezeReason,)); + + let hold_reason_part = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::HoldReason(_))) + .then_some(quote::quote!(HoldReason,)); + + let lock_id_part = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::LockId(_))) + .then_some(quote::quote!(LockId,)); + + let slash_reason_part = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::SlashReason(_))) + .then_some(quote::quote!(SlashReason,)); + quote::quote!( // This macro follows the conventions as laid out by the `tt-call` crate. It does not // accept any arguments and simply returns the pallet parts, separated by commas, then @@ -70,7 +97,8 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { tokens = [{ ::{ Pallet, #call_part #storage_part #event_part #origin_part #config_part - #inherent_part #validate_unsigned_part + #inherent_part #validate_unsigned_part #freeze_reason_part + #hold_reason_part #lock_id_part #slash_reason_part } }] } diff --git a/frame/support/procedural/src/pallet/mod.rs b/frame/support/procedural/src/pallet/mod.rs index 31048841c64e7..3618711051d7f 100644 --- a/frame/support/procedural/src/pallet/mod.rs +++ b/frame/support/procedural/src/pallet/mod.rs @@ -28,7 +28,7 @@ mod expand; mod parse; -pub use parse::Def; +pub use parse::{composite::keyword::CompositeKeyword, Def}; use syn::spanned::Spanned; mod keyword { diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 5b90c2d98b85e..90631f264b92a 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::helper; +use super::{helper, InheritedCallWeightAttr}; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; use std::collections::HashMap; @@ -45,7 +45,24 @@ pub struct CallDef { /// The span of the pallet::call attribute. pub attr_span: proc_macro2::Span, /// Docs, specified on the impl Block. - pub docs: Vec, + pub docs: Vec, + /// The optional `weight` attribute on the `pallet::call`. + pub inherited_call_weight: Option, +} + +/// The weight of a call. +#[derive(Clone)] +pub enum CallWeightDef { + /// Explicitly set on the call itself with `#[pallet::weight(…)]`. This value is used. + Immediate(syn::Expr), + + /// The default value that should be set for dev-mode pallets. Usually zero. + DevModeDefault, + + /// Inherits whatever value is configured on the pallet level. + /// + /// The concrete value is not known at this point. + Inherited, } /// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` @@ -55,14 +72,14 @@ pub struct CallVariantDef { pub name: syn::Ident, /// Information on args: `(is_compact, name, type)` pub args: Vec<(bool, syn::Ident, Box)>, - /// Weight formula. 
- pub weight: syn::Expr, + /// Weight for the call. + pub weight: CallWeightDef, /// Call index of the dispatchable. pub call_index: u8, /// Whether an explicit call index was specified. pub explicit_call_index: bool, /// Docs, used for metadata. - pub docs: Vec, + pub docs: Vec, /// Attributes annotated at the top of the dispatchable function. pub attrs: Vec, } @@ -151,6 +168,7 @@ impl CallDef { index: usize, item: &mut syn::Item, dev_mode: bool, + inherited_call_weight: Option, ) -> syn::Result { let item_impl = if let syn::Item::Impl(item) = item { item @@ -173,7 +191,7 @@ impl CallDef { let mut indices = HashMap::new(); let mut last_index: Option = None; for item in &mut item_impl.items { - if let syn::ImplItem::Method(method) = item { + if let syn::ImplItem::Fn(method) = item { if !matches!(method.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::call, dispatchable function must be public: \ `pub fn`"; @@ -228,17 +246,23 @@ impl CallDef { weight_attrs.push(FunctionAttr::Weight(empty_weight)); } - if weight_attrs.len() != 1 { - let msg = if weight_attrs.is_empty() { - "Invalid pallet::call, requires weight attribute i.e. `#[pallet::weight($expr)]`" - } else { - "Invalid pallet::call, too many weight attributes given" - }; - return Err(syn::Error::new(method.sig.span(), msg)) - } - let weight = match weight_attrs.pop().unwrap() { - FunctionAttr::Weight(w) => w, - _ => unreachable!("checked during creation of the let binding"), + let weight = match weight_attrs.len() { + 0 if inherited_call_weight.is_some() => CallWeightDef::Inherited, + 0 if dev_mode => CallWeightDef::DevModeDefault, + 0 => return Err(syn::Error::new( + method.sig.span(), + "A pallet::call requires either a concrete `#[pallet::weight($expr)]` or an + inherited weight from the `#[pallet:call(weight($type))]` attribute, but + none were given.", + )), + 1 => match weight_attrs.pop().unwrap() { + FunctionAttr::Weight(w) => CallWeightDef::Immediate(w), + _ => unreachable!("checked during creation of the let binding"), + }, + _ => { + let msg = "Invalid pallet::call, too many weight attributes given"; + return Err(syn::Error::new(method.sig.span(), msg)) + }, }; if call_idx_attrs.len() > 1 { @@ -321,6 +345,7 @@ impl CallDef { methods, where_clause: item_impl.generics.where_clause.clone(), docs: get_doc_literals(&item_impl.attrs), + inherited_call_weight, }) } } diff --git a/frame/support/procedural/src/pallet/parse/composite.rs b/frame/support/procedural/src/pallet/parse/composite.rs new file mode 100644 index 0000000000000..2bbfcd2e998ab --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/composite.rs @@ -0,0 +1,136 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
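+
+//! Parsing of the `#[pallet::composite_enum]` attribute. The annotated item must be a public
+//! `enum` whose identifier is one of `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`;
+//! if the enum has no `#[derive(..)]` attribute of its own, a default set of derives is injected.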
+ +use quote::ToTokens; +use syn::spanned::Spanned; + +pub mod keyword { + use super::*; + + syn::custom_keyword!(FreezeReason); + syn::custom_keyword!(HoldReason); + syn::custom_keyword!(LockId); + syn::custom_keyword!(SlashReason); + pub enum CompositeKeyword { + FreezeReason(FreezeReason), + HoldReason(HoldReason), + LockId(LockId), + SlashReason(SlashReason), + } + + impl ToTokens for CompositeKeyword { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + use CompositeKeyword::*; + match self { + FreezeReason(inner) => inner.to_tokens(tokens), + HoldReason(inner) => inner.to_tokens(tokens), + LockId(inner) => inner.to_tokens(tokens), + SlashReason(inner) => inner.to_tokens(tokens), + } + } + } + + impl syn::parse::Parse for CompositeKeyword { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let lookahead = input.lookahead1(); + if lookahead.peek(FreezeReason) { + Ok(Self::FreezeReason(input.parse()?)) + } else if lookahead.peek(HoldReason) { + Ok(Self::HoldReason(input.parse()?)) + } else if lookahead.peek(LockId) { + Ok(Self::LockId(input.parse()?)) + } else if lookahead.peek(SlashReason) { + Ok(Self::SlashReason(input.parse()?)) + } else { + Err(lookahead.error()) + } + } + } + + impl std::fmt::Display for CompositeKeyword { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use CompositeKeyword::*; + write!( + f, + "{}", + match self { + FreezeReason(_) => "FreezeReason", + HoldReason(_) => "HoldReason", + LockId(_) => "LockId", + SlashReason(_) => "SlashReason", + } + ) + } + } +} + +pub struct CompositeDef { + /// The index of the HoldReason item in the pallet module. + pub index: usize, + /// The composite keyword used (contains span). + pub composite_keyword: keyword::CompositeKeyword, + /// The span of the pallet::composite_enum attribute. + pub attr_span: proc_macro2::Span, +} + +impl CompositeDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + scrate: &proc_macro2::Ident, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Enum(item) = item { + item + } else { + return Err(syn::Error::new( + item.span(), + "Invalid pallet::composite_enum, expected enum item", + )) + }; + + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = format!("Invalid pallet::composite_enum, `{}` must be public", item.ident); + return Err(syn::Error::new(item.span(), msg)) + } + + let has_derive_attr = item.attrs.iter().any(|attr| { + if let syn::Meta::List(syn::MetaList { path, .. }) = &attr.meta { + path.get_ident().map(|ident| ident == "derive").unwrap_or(false) + } else { + false + } + }); + + if !has_derive_attr { + let derive_attr: syn::Attribute = syn::parse_quote! { + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + #scrate::codec::Encode, #scrate::codec::Decode, #scrate::codec::MaxEncodedLen, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + }; + item.attrs.push(derive_attr); + } + + let composite_keyword = + syn::parse2::(item.ident.to_token_stream())?; + + Ok(CompositeDef { index, composite_keyword, attr_span }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 4d22e50ef2bbf..7b52c1664ba45 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -61,7 +61,7 @@ pub struct ConstMetadataDef { /// The type in Get, e.g. 
`u32` in `type Foo: Get;`, but `Self` is replaced by `T` pub type_: syn::Type, /// The doc associated - pub doc: Vec, + pub doc: Vec, } impl TryFrom<&syn::TraitItemType> for ConstMetadataDef { @@ -124,23 +124,35 @@ impl syn::parse::Parse for DisableFrameSystemSupertraitCheck { } /// Parse for `#[pallet::constant]` -pub struct TypeAttrConst(proc_macro2::Span); - -impl Spanned for TypeAttrConst { - fn span(&self) -> proc_macro2::Span { - self.0 - } +pub struct TypeAttrConst { + pound_token: syn::Token![#], + bracket_token: syn::token::Bracket, + pallet_ident: syn::Ident, + path_sep_token: syn::token::PathSep, + constant_keyword: keyword::constant, } impl syn::parse::Parse for TypeAttrConst { fn parse(input: syn::parse::ParseStream) -> syn::Result { - input.parse::()?; + let pound_token = input.parse::()?; let content; - syn::bracketed!(content in input); - content.parse::()?; - content.parse::()?; + let bracket_token = syn::bracketed!(content in input); + let pallet_ident = content.parse::()?; + let path_sep_token = content.parse::()?; + let constant_keyword = content.parse::()?; - Ok(TypeAttrConst(content.parse::()?.span())) + Ok(Self { pound_token, bracket_token, pallet_ident, path_sep_token, constant_keyword }) + } +} + +impl ToTokens for TypeAttrConst { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + self.pound_token.to_tokens(tokens); + self.bracket_token.surround(tokens, |tokens| { + self.pallet_ident.to_tokens(tokens); + self.path_sep_token.to_tokens(tokens); + self.constant_keyword.to_tokens(tokens); + }) } } diff --git a/frame/support/procedural/src/pallet/parse/error.rs b/frame/support/procedural/src/pallet/parse/error.rs index f344c5d27a877..6f82ce61fc93f 100644 --- a/frame/support/procedural/src/pallet/parse/error.rs +++ b/frame/support/procedural/src/pallet/parse/error.rs @@ -37,7 +37,7 @@ pub struct ErrorDef { /// The index of error item in pallet module. pub index: usize, /// Variants ident, optional field and doc literals (ordered as declaration order) - pub variants: Vec<(syn::Ident, Option, Vec)>, + pub variants: Vec<(syn::Ident, Option, Vec)>, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, /// The keyword error used (contains span). 
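To make the effect of the `composite_enum` machinery above more concrete, here is a minimal, self-contained sketch of roughly what the aggregation amounts to for a single pallet exposing a `HoldReason`. The pallet name (`pallet_example`), the variant names and the reduced derive list are illustrative only; the real expansion also carries the codec/scale-info derives and the explicit `#[codec(index = ...)]` attributes shown in the `slash_reason.rs` expansion above.

```rust
// Illustrative sketch only: not the literal macro output.
mod pallet_example {
    // What a pallet author writes under `#[pallet::composite_enum]`
    // (after the macro has injected its default derives; simplified here).
    #[derive(Copy, Clone, Eq, PartialEq, Debug)]
    pub enum HoldReason {
        /// Funds are held as a deposit.
        Deposit,
    }
}

// The aggregate enum that `construct_runtime!` generates at the runtime level,
// with one variant per pallet that declares the `HoldReason` part.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum RuntimeHoldReason {
    Example(pallet_example::HoldReason),
}

// One conversion impl is emitted per contributing pallet, mirroring
// `expand_conversion_fn` in the expansion code above.
impl From<pallet_example::HoldReason> for RuntimeHoldReason {
    fn from(hr: pallet_example::HoldReason) -> Self {
        RuntimeHoldReason::Example(hr)
    }
}

fn main() {
    let aggregated: RuntimeHoldReason = pallet_example::HoldReason::Deposit.into();
    assert_eq!(
        aggregated,
        RuntimeHoldReason::Example(pallet_example::HoldReason::Deposit)
    );
}
```

The same shape is generated for the `FreezeReason`, `LockId` and `SlashReason` parts, yielding `RuntimeFreezeReason`, `RuntimeLockId` and `RuntimeSlashReason` respectively.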
diff --git a/frame/support/procedural/src/pallet/parse/event.rs b/frame/support/procedural/src/pallet/parse/event.rs index 64c3afd557335..0fb8ee4f54977 100644 --- a/frame/support/procedural/src/pallet/parse/event.rs +++ b/frame/support/procedural/src/pallet/parse/event.rs @@ -104,7 +104,7 @@ impl EventDef { let item = if let syn::Item::Enum(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")) + return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected enum item")) }; let event_attrs: Vec = diff --git a/frame/support/procedural/src/pallet/parse/extra_constants.rs b/frame/support/procedural/src/pallet/parse/extra_constants.rs index 8e85d2c2622d5..2ba6c44b7d158 100644 --- a/frame/support/procedural/src/pallet/parse/extra_constants.rs +++ b/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -50,7 +50,7 @@ pub struct ExtraConstantDef { /// The type returned by the function pub type_: syn::Type, /// The doc associated - pub doc: Vec, + pub doc: Vec, /// Optional MetaData Name pub metadata_name: Option, } @@ -100,7 +100,7 @@ impl ExtraConstantsDef { let mut extra_constants = vec![]; for impl_item in &mut item.items { - let method = if let syn::ImplItem::Method(method) = impl_item { + let method = if let syn::ImplItem::Fn(method) = impl_item { method } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs index 4814dce5a3dfa..3cdbfb1f591d9 100644 --- a/frame/support/procedural/src/pallet/parse/helper.rs +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -54,7 +54,7 @@ where let attrs = if let Some(attrs) = item.mut_item_attrs() { attrs } else { return Ok(None) }; if let Some(index) = attrs.iter().position(|attr| { - attr.path.segments.first().map_or(false, |segment| segment.ident == "pallet") + attr.path().segments.first().map_or(false, |segment| segment.ident == "pallet") }) { let pallet_attr = attrs.remove(index); Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) @@ -82,7 +82,7 @@ pub fn get_item_cfg_attrs(attrs: &[syn::Attribute]) -> Vec { attrs .iter() .filter_map(|attr| { - if attr.path.segments.first().map_or(false, |segment| segment.ident == "cfg") { + if attr.path().segments.first().map_or(false, |segment| segment.ident == "cfg") { Some(attr.clone()) } else { None @@ -101,7 +101,6 @@ impl MutItemAttrs for syn::Item { Self::ForeignMod(item) => Some(item.attrs.as_mut()), Self::Impl(item) => Some(item.attrs.as_mut()), Self::Macro(item) => Some(item.attrs.as_mut()), - Self::Macro2(item) => Some(item.attrs.as_mut()), Self::Mod(item) => Some(item.attrs.as_mut()), Self::Static(item) => Some(item.attrs.as_mut()), Self::Struct(item) => Some(item.attrs.as_mut()), @@ -119,7 +118,7 @@ impl MutItemAttrs for syn::TraitItem { fn mut_item_attrs(&mut self) -> Option<&mut Vec> { match self { Self::Const(item) => Some(item.attrs.as_mut()), - Self::Method(item) => Some(item.attrs.as_mut()), + Self::Fn(item) => Some(item.attrs.as_mut()), Self::Type(item) => Some(item.attrs.as_mut()), Self::Macro(item) => Some(item.attrs.as_mut()), _ => None, @@ -139,7 +138,7 @@ impl MutItemAttrs for syn::ItemMod { } } -impl MutItemAttrs for syn::ImplItemMethod { +impl MutItemAttrs for syn::ImplItemFn { fn mut_item_attrs(&mut self) -> Option<&mut Vec> { Some(&mut self.attrs) } diff --git a/frame/support/procedural/src/pallet/parse/hooks.rs b/frame/support/procedural/src/pallet/parse/hooks.rs 
index f941df5f29272..37d7d22f4b6bb 100644 --- a/frame/support/procedural/src/pallet/parse/hooks.rs +++ b/frame/support/procedural/src/pallet/parse/hooks.rs @@ -71,7 +71,7 @@ impl HooksDef { } let has_runtime_upgrade = item.items.iter().any(|i| match i { - syn::ImplItem::Method(method) => method.sig.ident == "on_runtime_upgrade", + syn::ImplItem::Fn(method) => method.sig.ident == "on_runtime_upgrade", _ => false, }); diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index 94e6f7a7c2f7d..770cba68c1aad 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -20,6 +20,7 @@ //! Parse the module into `Def` struct through `Def::try_from` function. pub mod call; +pub mod composite; pub mod config; pub mod error; pub mod event; @@ -35,6 +36,7 @@ pub mod storage; pub mod type_value; pub mod validate_unsigned; +use composite::{keyword::CompositeKeyword, CompositeDef}; use frame_support_procedural_tools::generate_crate_access_2018; use syn::spanned::Spanned; @@ -56,6 +58,7 @@ pub struct Def { pub genesis_build: Option, pub validate_unsigned: Option, pub extra_constants: Option, + pub composites: Vec, pub type_values: Vec, pub frame_system: syn::Ident, pub frame_support: syn::Ident, @@ -91,6 +94,7 @@ impl Def { let mut extra_constants = None; let mut storages = vec![]; let mut type_values = vec![]; + let mut composites: Vec = vec![]; for (index, item) in items.iter_mut().enumerate() { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; @@ -106,8 +110,8 @@ impl Def { let m = hooks::HooksDef::try_from(span, index, item)?; hooks = Some(m); }, - Some(PalletAttr::RuntimeCall(span)) if call.is_none() => - call = Some(call::CallDef::try_from(span, index, item, dev_mode)?), + Some(PalletAttr::RuntimeCall(cw, span)) if call.is_none() => + call = Some(call::CallDef::try_from(span, index, item, dev_mode, cw)?), Some(PalletAttr::Error(span)) if error.is_none() => error = Some(error::ErrorDef::try_from(span, index, item)?), Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => @@ -135,6 +139,32 @@ impl Def { Some(PalletAttr::ExtraConstants(_)) => extra_constants = Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), + Some(PalletAttr::Composite(span)) => { + let composite = + composite::CompositeDef::try_from(span, index, &frame_support, item)?; + if composites.iter().any(|def| { + match (&def.composite_keyword, &composite.composite_keyword) { + ( + CompositeKeyword::FreezeReason(_), + CompositeKeyword::FreezeReason(_), + ) | + (CompositeKeyword::HoldReason(_), CompositeKeyword::HoldReason(_)) | + (CompositeKeyword::LockId(_), CompositeKeyword::LockId(_)) | + ( + CompositeKeyword::SlashReason(_), + CompositeKeyword::SlashReason(_), + ) => true, + _ => false, + } + }) { + let msg = format!( + "Invalid duplicated `{}` definition", + composite.composite_keyword + ); + return Err(syn::Error::new(composite.composite_keyword.span(), &msg)) + } + composites.push(composite); + }, Some(attr) => { let msg = "Invalid duplicated attribute"; return Err(syn::Error::new(attr.span(), msg)) @@ -171,6 +201,7 @@ impl Def { origin, inherent, storages, + composites, type_values, frame_system, frame_support, @@ -371,6 +402,7 @@ impl GenericKind { mod keyword { syn::custom_keyword!(origin); syn::custom_keyword!(call); + syn::custom_keyword!(weight); syn::custom_keyword!(event); syn::custom_keyword!(config); syn::custom_keyword!(hooks); @@ -385,6 +417,7 @@ mod keyword { 
syn::custom_keyword!(generate_store); syn::custom_keyword!(Store); syn::custom_keyword!(extra_constants); + syn::custom_keyword!(composite_enum); } /// Parse attributes for item in pallet module @@ -393,7 +426,44 @@ enum PalletAttr { Config(proc_macro2::Span), Pallet(proc_macro2::Span), Hooks(proc_macro2::Span), - RuntimeCall(proc_macro2::Span), + /// A `#[pallet::call]` with optional attributes to specialize the behaviour. + /// + /// # Attributes + /// + /// Each attribute `attr` can take the form of `#[pallet::call(attr = …)]` or + /// `#[pallet::call(attr(…))]`. The possible attributes are: + /// + /// ## `weight` + /// + /// Can be used to reduce the repetitive weight annotation in the trivial case. It accepts one + /// argument that is expected to be an implementation of the `WeightInfo` trait or something that + /// behaves syntactically equivalently. This allows annotating a `WeightInfo` for all the calls. + /// Now each call does not need to specify its own `#[pallet::weight]` but can instead use the + /// one from the `#[pallet::call]` definition. So instead of having to write it on each call: + /// + /// ```ignore + /// #[pallet::call] + /// impl<T: Config> Pallet<T> { + /// #[pallet::weight(T::WeightInfo::create())] + /// pub fn create( + /// ``` + /// you can now omit it on the call itself, if the name of the weight function matches the call: + /// + /// ```ignore + /// #[pallet::call(weight = <T as Config>::WeightInfo)] + /// impl<T: Config> Pallet<T> { + /// pub fn create( + /// ``` + /// + /// It is possible to use this syntax together with instantiated pallets by using `Config<I>` + /// instead, as shown in the last example below. + /// + /// ### Dev Mode + /// + /// Normally the `dev_mode` sets all weights of calls without a `#[pallet::weight]` annotation + /// to zero. Now when there is a `weight` attribute on the `#[pallet::call]`, then that is used + /// instead of the zero weight. In other words, it works together with `dev_mode`.
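+ ///
+ /// For an instantiable pallet the same mechanism applies; the following (purely illustrative)
+ /// snippet uses the parenthesised form of the attribute and names the instance's config:
+ ///
+ /// ```ignore
+ /// #[pallet::call(weight(<T as Config<I>>::WeightInfo))]
+ /// impl<T: Config<I>, I: 'static> Pallet<T, I> {
+ /// pub fn create(
+ /// ```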
+ RuntimeCall(Option, proc_macro2::Span), Error(proc_macro2::Span), RuntimeEvent(proc_macro2::Span), RuntimeOrigin(proc_macro2::Span), @@ -404,6 +474,7 @@ enum PalletAttr { ValidateUnsigned(proc_macro2::Span), TypeValue(proc_macro2::Span), ExtraConstants(proc_macro2::Span), + Composite(proc_macro2::Span), } impl PalletAttr { @@ -412,7 +483,7 @@ impl PalletAttr { Self::Config(span) => *span, Self::Pallet(span) => *span, Self::Hooks(span) => *span, - Self::RuntimeCall(span) => *span, + Self::RuntimeCall(_, span) => *span, Self::Error(span) => *span, Self::RuntimeEvent(span) => *span, Self::RuntimeOrigin(span) => *span, @@ -423,6 +494,7 @@ impl PalletAttr { Self::ValidateUnsigned(span) => *span, Self::TypeValue(span) => *span, Self::ExtraConstants(span) => *span, + Self::Composite(span) => *span, } } } @@ -443,7 +515,12 @@ impl syn::parse::Parse for PalletAttr { } else if lookahead.peek(keyword::hooks) { Ok(PalletAttr::Hooks(content.parse::()?.span())) } else if lookahead.peek(keyword::call) { - Ok(PalletAttr::RuntimeCall(content.parse::()?.span())) + let span = content.parse::().expect("peeked").span(); + let attr = match content.is_empty() { + true => None, + false => Some(InheritedCallWeightAttr::parse(&content)?), + }; + Ok(PalletAttr::RuntimeCall(attr, span)) } else if lookahead.peek(keyword::error) { Ok(PalletAttr::Error(content.parse::()?.span())) } else if lookahead.peek(keyword::event) { @@ -464,8 +541,40 @@ impl syn::parse::Parse for PalletAttr { Ok(PalletAttr::TypeValue(content.parse::()?.span())) } else if lookahead.peek(keyword::extra_constants) { Ok(PalletAttr::ExtraConstants(content.parse::()?.span())) + } else if lookahead.peek(keyword::composite_enum) { + Ok(PalletAttr::Composite(content.parse::()?.span())) } else { Err(lookahead.error()) } } } + +/// The optional weight annotation on a `#[pallet::call]` like `#[pallet::call(weight($type))]`. +#[derive(Clone)] +pub struct InheritedCallWeightAttr { + pub typename: syn::Type, + pub span: proc_macro2::Span, +} + +impl syn::parse::Parse for InheritedCallWeightAttr { + // Parses `(weight($type))` or `(weight = $type)`. + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let content; + syn::parenthesized!(content in input); + content.parse::()?; + let lookahead = content.lookahead1(); + + let buffer = if lookahead.peek(syn::token::Paren) { + let inner; + syn::parenthesized!(inner in content); + inner + } else if lookahead.peek(syn::Token![=]) { + content.parse::().expect("peeked"); + content + } else { + return Err(lookahead.error()) + }; + + Ok(Self { typename: buffer.parse()?, span: input.span() }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index 98312d0652d8e..f4af86aa3e993 100644 --- a/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -61,8 +61,8 @@ pub enum PalletStructAttr { impl PalletStructAttr { fn span(&self) -> proc_macro2::Span { match self { - Self::GenerateStore { span, .. } => *span, - Self::WithoutStorageInfoTrait(span) => *span, + Self::GenerateStore { span, .. } | + Self::WithoutStorageInfoTrait(span) | Self::StorageVersion { span, .. 
} => *span, } } diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 3ed7ce54ebd64..16c0851ce7a04 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -159,7 +159,7 @@ pub struct StorageDef { /// The keys and value metadata of the storage. pub metadata: Metadata, /// The doc associated to the storage. - pub docs: Vec, + pub docs: Vec, /// A set of usage of instance, must be check for consistency with config. pub instances: Vec, /// Optional getter to generate. If some then query_kind is ensured to be some as well. @@ -185,6 +185,8 @@ pub struct StorageDef { pub unbounded: bool, /// Whether or not reads to this storage key will be ignored by benchmarking pub whitelisted: bool, + /// Whether or not a default hasher is allowed to replace `_` + pub use_default_hasher: bool, } /// The parsed generic from the @@ -268,7 +270,7 @@ enum StorageKind { /// Check the generics in the `map` contains the generics in `gen` may contains generics in /// `optional_gen`, and doesn't contains any other. fn check_generics( - map: &HashMap, + map: &HashMap, mandatory_generics: &[&str], optional_generics: &[&str], storage_type_name: &str, @@ -325,13 +327,13 @@ fn check_generics( } } -/// Returns `(named generics, metadata, query kind)` +/// Returns `(named generics, metadata, query kind, use_default_hasher)` fn process_named_generics( storage: &StorageKind, args_span: proc_macro2::Span, - args: &[syn::Binding], -) -> syn::Result<(Option, Metadata, Option)> { - let mut parsed = HashMap::::new(); + args: &[syn::AssocType], +) -> syn::Result<(Option, Metadata, Option, bool)> { + let mut parsed = HashMap::::new(); // Ensure no duplicate. for arg in args { @@ -480,15 +482,16 @@ fn process_named_generics( let metadata = generics.metadata()?; let query_kind = generics.query_kind(); - Ok((Some(generics), metadata, query_kind)) + Ok((Some(generics), metadata, query_kind, false)) } -/// Returns `(named generics, metadata, query kind)` +/// Returns `(named generics, metadata, query kind, use_default_hasher)` fn process_unnamed_generics( storage: &StorageKind, args_span: proc_macro2::Span, args: &[syn::Type], -) -> syn::Result<(Option, Metadata, Option)> { + dev_mode: bool, +) -> syn::Result<(Option, Metadata, Option, bool)> { let retrieve_arg = |arg_pos| { args.get(arg_pos).cloned().ok_or_else(|| { let msg = format!( @@ -510,18 +513,34 @@ fn process_unnamed_generics( err })?; + let use_default_hasher = |arg_pos| { + let arg = retrieve_arg(arg_pos)?; + if syn::parse2::(arg.to_token_stream()).is_ok() { + if dev_mode { + Ok(true) + } else { + let msg = "`_` can only be used in dev_mode. Please specify an appropriate hasher."; + Err(syn::Error::new(arg.span(), msg)) + } + } else { + Ok(false) + } + }; + let res = match storage { StorageKind::Value => - (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok()), + (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok(), false), StorageKind::Map => ( None, Metadata::Map { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, retrieve_arg(4).ok(), + use_default_hasher(1)?, ), StorageKind::CountedMap => ( None, Metadata::CountedMap { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, retrieve_arg(4).ok(), + use_default_hasher(1)?, ), StorageKind::DoubleMap => ( None, @@ -531,21 +550,28 @@ fn process_unnamed_generics( value: retrieve_arg(5)?, }, retrieve_arg(6).ok(), + use_default_hasher(1)? 
&& use_default_hasher(3)?, ), StorageKind::NMap => { let keygen = retrieve_arg(1)?; let keys = collect_keys(&keygen)?; - (None, Metadata::NMap { keys, keygen, value: retrieve_arg(2)? }, retrieve_arg(3).ok()) + ( + None, + Metadata::NMap { keys, keygen, value: retrieve_arg(2)? }, + retrieve_arg(3).ok(), + false, + ) }, }; Ok(res) } -/// Returns `(named generics, metadata, query kind)` +/// Returns `(named generics, metadata, query kind, use_default_hasher)` fn process_generics( segment: &syn::PathSegment, -) -> syn::Result<(Option, Metadata, Option)> { + dev_mode: bool, +) -> syn::Result<(Option, Metadata, Option, bool)> { let storage_kind = match &*segment.ident.to_string() { "StorageValue" => StorageKind::Value, "StorageMap" => StorageKind::Map, @@ -583,13 +609,13 @@ fn process_generics( _ => unreachable!("It is asserted above that all generics are types"), }) .collect::>(); - process_unnamed_generics(&storage_kind, args_span, &args) - } else if args.args.iter().all(|gen| matches!(gen, syn::GenericArgument::Binding(_))) { + process_unnamed_generics(&storage_kind, args_span, &args, dev_mode) + } else if args.args.iter().all(|gen| matches!(gen, syn::GenericArgument::AssocType(_))) { let args = args .args .iter() .map(|gen| match gen { - syn::GenericArgument::Binding(gen) => gen.clone(), + syn::GenericArgument::AssocType(gen) => gen.clone(), _ => unreachable!("It is asserted above that all generics are bindings"), }) .collect::>(); @@ -711,7 +737,8 @@ impl StorageDef { return Err(syn::Error::new(item.ty.span(), msg)) } - let (named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; + let (named_generics, metadata, query_kind, use_default_hasher) = + process_generics(&typ.path.segments[0], dev_mode)?; let query_kind = query_kind .map(|query_kind| { @@ -832,6 +859,7 @@ impl StorageDef { named_generics, unbounded, whitelisted, + use_default_hasher, }) } } diff --git a/frame/support/procedural/src/pallet/parse/type_value.rs b/frame/support/procedural/src/pallet/parse/type_value.rs index 4b80506889dfd..4d9db30b3a788 100644 --- a/frame/support/procedural/src/pallet/parse/type_value.rs +++ b/frame/support/procedural/src/pallet/parse/type_value.rs @@ -39,7 +39,7 @@ pub struct TypeValueDef { /// The span of the pallet::type_value attribute. pub attr_span: proc_macro2::Span, /// Docs on the item. - pub docs: Vec, + pub docs: Vec, } impl TypeValueDef { @@ -57,9 +57,9 @@ impl TypeValueDef { let mut docs = vec![]; for attr in &item.attrs { - if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() { + if let syn::Meta::NameValue(meta) = &attr.meta { if meta.path.get_ident().map_or(false, |ident| ident == "doc") { - docs.push(meta.lit); + docs.push(meta.value.clone()); continue } } diff --git a/frame/support/procedural/src/pallet_error.rs b/frame/support/procedural/src/pallet_error.rs index fb0bf52f69094..246a5bd4a219a 100644 --- a/frame/support/procedural/src/pallet_error.rs +++ b/frame/support/procedural/src/pallet_error.rs @@ -17,7 +17,6 @@ use frame_support_procedural_tools::generate_crate_access_2018; use quote::ToTokens; -use std::str::FromStr; // Derive `PalletError` pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenStream { @@ -117,38 +116,24 @@ fn generate_field_types( let attrs = &field.attrs; for attr in attrs { - if attr.path.is_ident("codec") { - match attr.parse_meta()? 
{ - syn::Meta::List(ref meta_list) if meta_list.nested.len() == 1 => { - match meta_list - .nested - .first() - .expect("Just checked that there is one item; qed") - { - syn::NestedMeta::Meta(syn::Meta::Path(path)) - if path.get_ident().map_or(false, |i| i == "skip") => - return Ok(None), - - syn::NestedMeta::Meta(syn::Meta::Path(path)) - if path.get_ident().map_or(false, |i| i == "compact") => - { - let field_ty = &field.ty; - return Ok(Some(quote::quote!(#scrate::codec::Compact<#field_ty>))) - }, - - syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue { - path, - lit: syn::Lit::Str(lit_str), - .. - })) if path.get_ident().map_or(false, |i| i == "encoded_as") => { - let ty = proc_macro2::TokenStream::from_str(&lit_str.value())?; - return Ok(Some(ty)) - }, - - _ => (), - } - }, - _ => (), + if attr.path().is_ident("codec") { + let mut res = None; + + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("skip") { + res = Some(None); + } else if meta.path.is_ident("compact") { + let field_ty = &field.ty; + res = Some(Some(quote::quote!(#scrate::codec::Compact<#field_ty>))); + } else if meta.path.is_ident("encoded_as") { + res = Some(Some(meta.value()?.parse()?)); + } + + Ok(()) + })?; + + if let Some(v) = res { + return Ok(v) + } } } @@ -163,22 +148,18 @@ fn generate_variant_field_types( let attrs = &variant.attrs; for attr in attrs { - if attr.path.is_ident("codec") { - match attr.parse_meta()? { - syn::Meta::List(ref meta_list) if meta_list.nested.len() == 1 => { - match meta_list - .nested - .first() - .expect("Just checked that there is one item; qed") - { - syn::NestedMeta::Meta(syn::Meta::Path(path)) - if path.get_ident().map_or(false, |i| i == "skip") => - return Ok(None), - - _ => (), - } - }, - _ => (), + if attr.path().is_ident("codec") { + let mut skip = false; + + // We ignore the error intentionally as this isn't `codec(skip)` when + // `parse_nested_meta` fails. + let _ = attr.parse_nested_meta(|meta| { + skip = meta.path.is_ident("skip"); + Ok(()) + }); + + if skip { + return Ok(None) + } } } diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index 491db59184fd9..31e9996ee51f4 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -133,14 +133,13 @@ impl GenesisConfigDef { .attrs .iter() .map(|attr| { - let meta = attr.parse_meta()?; - if meta.path().is_ident("cfg") { + if attr.meta.path().is_ident("cfg") { return Err(syn::Error::new( - meta.span(), + attr.meta.span(), "extra genesis config items do not support `cfg` attribute", )) } - Ok(meta) + Ok(attr.meta.clone()) }) .collect::>()?; diff --git a/frame/support/procedural/src/storage/metadata.rs b/frame/support/procedural/src/storage/metadata.rs index 3e3e0576f63b5..5561d0564597b 100644 --- a/frame/support/procedural/src/storage/metadata.rs +++ b/frame/support/procedural/src/storage/metadata.rs @@ -27,7 +27,7 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> match &line.storage_type { StorageLineTypeDef::Simple(_) => { quote! { - #scrate::metadata::StorageEntryType::Plain( + #scrate::metadata_ir::StorageEntryTypeIR::Plain( #scrate::scale_info::meta_type::<#value_type>() ) } @@ -36,8 +36,8 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> let hasher = map.hasher.into_metadata(); let key = &map.key; quote!
{ - #scrate::metadata::StorageEntryType::Map { - hashers: #scrate::sp_std::vec! [ #scrate::metadata::#hasher ], + #scrate::metadata_ir::StorageEntryTypeIR::Map { + hashers: #scrate::sp_std::vec! [ #scrate::metadata_ir::#hasher ], key: #scrate::scale_info::meta_type::<#key>(), value: #scrate::scale_info::meta_type::<#value_type>(), } @@ -49,10 +49,10 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> let key1 = &map.key1; let key2 = &map.key2; quote! { - #scrate::metadata::StorageEntryType::Map { + #scrate::metadata_ir::StorageEntryTypeIR::Map { hashers: #scrate::sp_std::vec! [ - #scrate::metadata::#hasher1, - #scrate::metadata::#hasher2, + #scrate::metadata_ir::#hasher1, + #scrate::metadata_ir::#hasher2, ], key: #scrate::scale_info::meta_type::<(#key1, #key2)>(), value: #scrate::scale_info::meta_type::<#value_type>(), @@ -67,9 +67,9 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> .map(|hasher| hasher.to_storage_hasher_struct()) .collect::>(); quote! { - #scrate::metadata::StorageEntryType::Map { + #scrate::metadata_ir::StorageEntryTypeIR::Map { hashers: #scrate::sp_std::vec! [ - #( #scrate::metadata::StorageHasher::#hashers, )* + #( #scrate::metadata_ir::StorageHasherIR::#hashers, )* ], key: #scrate::scale_info::meta_type::<#key_tuple>(), value: #scrate::scale_info::meta_type::<#value_type>(), @@ -159,9 +159,9 @@ pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { let str_name = line.name.to_string(); let modifier = if line.is_option { - quote!(#scrate::metadata::StorageEntryModifier::Optional) + quote!(#scrate::metadata_ir::StorageEntryModifierIR::Optional) } else { - quote!(#scrate::metadata::StorageEntryModifier::Default) + quote!(#scrate::metadata_ir::StorageEntryModifierIR::Default) }; let ty = storage_line_metadata_type(scrate, line); @@ -172,7 +172,7 @@ pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { let docs = get_doc_literals(&line.attrs); let entry = quote! 
{ - #scrate::metadata::StorageEntryMetadata { + #scrate::metadata_ir::StorageEntryMetadataIR { name: #str_name, modifier: #modifier, ty: #ty, @@ -194,7 +194,7 @@ pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { }; let store_metadata = quote!( - #scrate::metadata::PalletStorageMetadata { + #scrate::metadata_ir::PalletStorageMetadataIR { prefix: #prefix, entries: #scrate::sp_std::vec![ #entries ], } @@ -209,7 +209,7 @@ pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { impl #module_impl #module_struct #where_clause { #[doc(hidden)] - pub fn storage_metadata() -> #scrate::metadata::PalletStorageMetadata { + pub fn storage_metadata() -> #scrate::metadata_ir::PalletStorageMetadataIR { #store_metadata } } diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index 1e48a1fbb06d1..c04862256a7bc 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -332,8 +332,8 @@ impl StorageLineDefExt { let doc_attrs = storage_def .attrs .iter() - .filter_map(|a| a.parse_meta().ok()) - .filter(|m| m.path().is_ident("doc")) + .filter(|a| a.meta.path().is_ident("doc")) + .map(|a| a.meta.clone()) .collect(); Self { @@ -454,13 +454,13 @@ impl HasherKind { fn into_metadata(&self) -> proc_macro2::TokenStream { match self { - HasherKind::Blake2_256 => quote!(StorageHasher::Blake2_256), - HasherKind::Blake2_128 => quote!(StorageHasher::Blake2_128), - HasherKind::Blake2_128Concat => quote!(StorageHasher::Blake2_128Concat), - HasherKind::Twox256 => quote!(StorageHasher::Twox256), - HasherKind::Twox128 => quote!(StorageHasher::Twox128), - HasherKind::Twox64Concat => quote!(StorageHasher::Twox64Concat), - HasherKind::Identity => quote!(StorageHasher::Identity), + HasherKind::Blake2_256 => quote!(StorageHasherIR::Blake2_256), + HasherKind::Blake2_128 => quote!(StorageHasherIR::Blake2_128), + HasherKind::Blake2_128Concat => quote!(StorageHasherIR::Blake2_128Concat), + HasherKind::Twox256 => quote!(StorageHasherIR::Twox256), + HasherKind::Twox128 => quote!(StorageHasherIR::Twox128), + HasherKind::Twox64Concat => quote!(StorageHasherIR::Twox64Concat), + HasherKind::Identity => quote!(StorageHasherIR::Identity), } } } diff --git a/frame/support/procedural/src/storage/print_pallet_upgrade.rs b/frame/support/procedural/src/storage/print_pallet_upgrade.rs index 03f09a7edb48e..4e330fd4b85f2 100644 --- a/frame/support/procedural/src/storage/print_pallet_upgrade.rs +++ b/frame/support/procedural/src/storage/print_pallet_upgrade.rs @@ -273,14 +273,12 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { doc = line.doc_attrs.iter().fold(String::new(), |mut res, attr| { if let syn::Meta::NameValue(name_value) = attr { if name_value.path.is_ident("doc") { - if let syn::Lit::Str(string) = &name_value.lit { - res = format!( - "{} - ///{}", - res, - string.value(), - ); - } + res = format!( + "{} + ///{}", + res, + name_value.value.to_token_stream(), + ); } } res diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 755483f7db9aa..bf828b01b6f76 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] proc-macro-crate = "1.1.3" -proc-macro2 = "1.0.37" -quote = "1.0.10" -syn = { version = "1.0.98", features = ["full", "visit", "extra-traits"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = 
["full", "visit", "extra-traits"] } frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" } diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index a688ee13a7d17..81b5a8487449b 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -15,6 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = "1.0.37" -quote = { version = "1.0.10", features = ["proc-macro"] } -syn = { version = "1.0.98", features = ["proc-macro", "full", "extra-traits", "parsing"] } +proc-macro2 = "1.0.56" +quote = { version = "1.0.26", features = ["proc-macro"] } +syn = { version = "2.0.14", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/frame/support/procedural/tools/src/lib.rs b/frame/support/procedural/tools/src/lib.rs index 385b20ad05dc8..541accc8ab96a 100644 --- a/frame/support/procedural/tools/src/lib.rs +++ b/frame/support/procedural/tools/src/lib.rs @@ -102,16 +102,15 @@ pub fn clean_type_string(input: &str) -> String { } /// Return all doc attributes literals found. -pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { +pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { attrs .iter() .filter_map(|attr| { - if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() { - if meta.path.get_ident().map_or(false, |ident| ident == "doc") { - Some(meta.lit) - } else { - None - } + if let syn::Meta::NameValue(meta) = &attr.meta { + meta.path + .get_ident() + .filter(|ident| *ident == "doc") + .map(|_| meta.value.clone()) } else { None } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index c5f4cc60c1f1b..c75d75b41a292 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -29,7 +29,8 @@ pub use crate::{ result, }, traits::{ - CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, UnfilteredDispatchable, + CallMetadata, GetCallIndex, GetCallMetadata, GetCallName, GetStorageVersion, + UnfilteredDispatchable, }, }; #[cfg(feature = "std")] @@ -549,27 +550,6 @@ impl ClassifyDispatch for (Weight, DispatchClass, Pays) { // TODO: Eventually remove these -impl From> for PostDispatchInfo { - fn from(maybe_actual_computation: Option) -> Self { - let actual_weight = match maybe_actual_computation { - Some(actual_computation) => Some(Weight::from_parts(actual_computation, 0)), - None => None, - }; - Self { actual_weight, pays_fee: Default::default() } - } -} - -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { - let (maybe_actual_time, pays_fee) = post_weight_info; - let actual_weight = match maybe_actual_time { - Some(actual_time) => Some(Weight::from_parts(actual_time, 0)), - None => None, - }; - Self { actual_weight, pays_fee } - } -} - impl ClassifyDispatch for u64 { fn classify_dispatch(&self, _: T) -> DispatchClass { DispatchClass::Normal @@ -729,7 +709,7 @@ impl PaysFee for (u64, Pays) { /// ``` /// # #[macro_use] /// # extern crate frame_support; -/// # use frame_support::{weights::Weight, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}}; +/// # use frame_support::{weights::Weight, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo, PostDispatchInfo}}; /// # use frame_system::{Config, ensure_signed}; /// decl_module! 
{ /// pub struct Module for enum Call where origin: T::RuntimeOrigin { @@ -743,7 +723,7 @@ impl PaysFee for (u64, Pays) { /// return Ok(None::.into()); /// } /// // expensive calculation not executed: use only a portion of the weight -/// Ok(Some(100_000).into()) +/// Ok(PostDispatchInfo { actual_weight: Some(Weight::from_parts(100_000, 0)), ..Default::default() }) /// } /// } /// } @@ -2977,7 +2957,7 @@ macro_rules! __dispatch_impl_metadata { { #[doc(hidden)] #[allow(dead_code)] - pub fn call_functions() -> $crate::metadata::PalletCallMetadata { + pub fn call_functions() -> $crate::metadata_ir::PalletCallMetadataIR { $crate::scale_info::meta_type::<$call_type<$trait_instance $(, $instance)?>>().into() } } @@ -2998,7 +2978,7 @@ macro_rules! __impl_error_metadata { { #[doc(hidden)] #[allow(dead_code)] - pub fn error_metadata() -> Option<$crate::metadata::PalletErrorMetadata> { + pub fn error_metadata() -> Option<$crate::metadata_ir::PalletErrorMetadataIR> { None } } @@ -3013,8 +2993,8 @@ macro_rules! __impl_error_metadata { { #[doc(hidden)] #[allow(dead_code)] - pub fn error_metadata() -> Option<$crate::metadata::PalletErrorMetadata> { - Some($crate::metadata::PalletErrorMetadata { + pub fn error_metadata() -> Option<$crate::metadata_ir::PalletErrorMetadataIR> { + Some($crate::metadata_ir::PalletErrorMetadataIR { ty: $crate::scale_info::meta_type::<$( $error_type )*>() }) } @@ -3109,7 +3089,7 @@ macro_rules! __impl_module_constants_metadata { { #[doc(hidden)] #[allow(dead_code)] - pub fn pallet_constants_metadata() -> $crate::sp_std::vec::Vec<$crate::metadata::PalletConstantMetadata> { + pub fn pallet_constants_metadata() -> $crate::sp_std::vec::Vec<$crate::metadata_ir::PalletConstantMetadataIR> { // Create the `ByteGetter`s $( #[allow(non_upper_case_types)] @@ -3133,7 +3113,7 @@ macro_rules! __impl_module_constants_metadata { )* $crate::sp_std::vec![ $( - $crate::metadata::PalletConstantMetadata { + $crate::metadata_ir::PalletConstantMetadataIR { name: stringify!($name), ty: $crate::scale_info::meta_type::<$type>(), value: $default_byte_name::<$const_trait_instance $(, $const_instance)?>( @@ -3207,13 +3187,13 @@ mod tests { use super::*; use crate::{ dispatch::{DispatchClass, DispatchInfo, Pays}, - metadata::*, + metadata_ir::*, traits::{ CallerTrait, CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, PalletInfo, }, }; - use sp_weights::RuntimeDbWeight; + use sp_weights::{RuntimeDbWeight, Weight}; pub trait Config: system::Config + Sized where @@ -3405,7 +3385,7 @@ mod tests { fn module_json_metadata() { let metadata = Module::::call_functions(); let expected_metadata = - PalletCallMetadata { ty: scale_info::meta_type::>() }; + PalletCallMetadataIR { ty: scale_info::meta_type::>() }; assert_eq!(expected_metadata, metadata); } @@ -3534,119 +3514,195 @@ mod tests { fn test_new_call_variant() { Call::::new_call_variant_aux_0(); } + + pub fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { + PostDispatchInfo { + actual_weight: ref_time.map(|t| Weight::from_all(t)), + pays_fee: Default::default(), + } + } + + pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { + PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), pays_fee } + } } #[cfg(test)] // Do not complain about unused `dispatch` and `dispatch_aux`. 
#[allow(dead_code)] mod weight_tests { - use super::*; - use sp_core::{parameter_types, Get}; + use super::{tests::*, *}; + use sp_core::parameter_types; + use sp_runtime::{generic, traits::BlakeTwo256}; use sp_weights::RuntimeDbWeight; - pub trait Config: 'static { - type RuntimeOrigin; - type Balance; - type BlockNumber; - type DbWeight: Get; - type PalletInfo: crate::traits::PalletInfo; - } + pub use self::frame_system::{Call, Config, Pallet}; - pub struct TraitImpl {} + #[crate::pallet(dev_mode)] + pub mod frame_system { + use super::{frame_system, frame_system::pallet_prelude::*}; + pub use crate::dispatch::RawOrigin; + use crate::pallet_prelude::*; - parameter_types! { - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 100, - write: 1000, - }; - } + #[pallet::pallet] + pub struct Pallet(PhantomData); - impl Config for TraitImpl { - type RuntimeOrigin = u32; - type BlockNumber = u32; - type Balance = u32; - type DbWeight = DbWeight; - type PalletInfo = crate::tests::PanicPalletInfo; - } + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: 'static { + type BlockNumber: Parameter + Default + MaxEncodedLen; + type AccountId; + type Balance; + type BaseCallFilter: crate::traits::Contains; + type RuntimeOrigin; + type RuntimeCall; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: Get; + } - decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { + #[pallet::error] + pub enum Error { + /// Required by construct_runtime + CallFiltered, + } + + #[pallet::origin] + pub type Origin = RawOrigin<::AccountId>; + + #[pallet::call] + impl Pallet { // no arguments, fixed weight - #[weight = 1000] - fn f00(_origin) { unimplemented!(); } + #[pallet::weight(1000)] + pub fn f00(_origin: OriginFor) -> DispatchResult { + unimplemented!(); + } - #[weight = (1000, DispatchClass::Mandatory)] - fn f01(_origin) { unimplemented!(); } + #[pallet::weight((1000, DispatchClass::Mandatory))] + pub fn f01(_origin: OriginFor) -> DispatchResult { + unimplemented!(); + } - #[weight = (1000, Pays::No)] - fn f02(_origin) { unimplemented!(); } + #[pallet::weight((1000, Pays::No))] + pub fn f02(_origin: OriginFor) -> DispatchResult { + unimplemented!(); + } - #[weight = (1000, DispatchClass::Operational, Pays::No)] - fn f03(_origin) { unimplemented!(); } + #[pallet::weight((1000, DispatchClass::Operational, Pays::No))] + pub fn f03(_origin: OriginFor) -> DispatchResult { + unimplemented!(); + } // weight = a x 10 + b - #[weight = ((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes)] - fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } + #[pallet::weight(((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes))] + pub fn f11(_origin: OriginFor, _a: u32, _eb: u32) -> DispatchResult { + unimplemented!(); + } - #[weight = (0, DispatchClass::Operational, Pays::Yes)] - fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } + #[pallet::weight((0, DispatchClass::Operational, Pays::Yes))] + pub fn f12(_origin: OriginFor, _a: u32, _eb: u32) -> DispatchResult { + unimplemented!(); + } - #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + Weight::from_parts(10_000, 0)] - fn f20(_origin) { unimplemented!(); } + #[pallet::weight(T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + Weight::from_all(10_000))] + pub fn f20(_origin: OriginFor) -> DispatchResult { + unimplemented!(); + } - #[weight = T::DbWeight::get().reads_writes(6, 5) + Weight::from_parts(40_000, 0)] - fn f21(_origin) { 
unimplemented!(); } + #[pallet::weight(T::DbWeight::get().reads_writes(6, 5) + Weight::from_all(40_000))] + pub fn f21(_origin: OriginFor) -> DispatchResult { + unimplemented!(); + } + } + pub mod pallet_prelude { + pub type OriginFor = ::RuntimeOrigin; } } + type BlockNumber = u32; + type AccountId = u32; + type Balance = u32; + type Header = generic::Header; + type UncheckedExtrinsic = generic::UncheckedExtrinsic; + type Block = generic::Block; + + crate::construct_runtime!( + pub enum Runtime + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: self::frame_system, + } + ); + + parameter_types! { + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 100, + write: 1000, + }; + } + + impl Config for Runtime { + type BlockNumber = BlockNumber; + type AccountId = AccountId; + type Balance = Balance; + type BaseCallFilter = crate::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type DbWeight = DbWeight; + type PalletInfo = PalletInfo; + } + #[test] fn weights_are_correct() { - // #[weight = 1000] - let info = Call::::f00 {}.get_dispatch_info(); + // #[pallet::weight(1000)] + let info = Call::::f00 {}.get_dispatch_info(); assert_eq!(info.weight, Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); - // #[weight = (1000, DispatchClass::Mandatory)] - let info = Call::::f01 {}.get_dispatch_info(); + // #[pallet::weight((1000, DispatchClass::Mandatory))] + let info = Call::::f01 {}.get_dispatch_info(); assert_eq!(info.weight, Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Mandatory); assert_eq!(info.pays_fee, Pays::Yes); - // #[weight = (1000, Pays::No)] - let info = Call::::f02 {}.get_dispatch_info(); + // #[pallet::weight((1000, Pays::No))] + let info = Call::::f02 {}.get_dispatch_info(); assert_eq!(info.weight, Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::No); - // #[weight = (1000, DispatchClass::Operational, Pays::No)] - let info = Call::::f03 {}.get_dispatch_info(); + // #[pallet::weight((1000, DispatchClass::Operational, Pays::No))] + let info = Call::::f03 {}.get_dispatch_info(); assert_eq!(info.weight, Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::No); - // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] - let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); + // #[pallet::weight(((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes))] + let info = Call::::f11 { a: 13, eb: 20 }.get_dispatch_info(); assert_eq!(info.weight, Weight::from_parts(150, 0)); // 13*10 + 20 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); - // #[weight = (0, DispatchClass::Operational, Pays::Yes)] - let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(0, 0)); + // #[pallet::weight((0, DispatchClass::Operational, Pays::Yes))] + let info = Call::::f12 { a: 10, eb: 20 }.get_dispatch_info(); + assert_eq!(info.weight, Weight::zero()); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::Yes); - // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] - let info = Call::::f20 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(12300, 0)); // 100*3 + 1000*2 + 10_1000 + // 
#[pallet::weight(T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + + // Weight::from_all(10_000))] + let info = Call::::f20 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_parts(12300, 10000)); // 100*3 + 1000*2 + 10_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); - // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] - let info = Call::::f21 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(45600, 0)); // 100*6 + 1000*5 + 40_1000 + // #[pallet::weight(T::DbWeight::get().reads_writes(6, 5) + Weight::from_all(40_000))] + let info = Call::::f21 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_parts(45600, 40000)); // 100*6 + 1000*5 + 40_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); } @@ -3654,9 +3710,12 @@ mod weight_tests { #[test] fn extract_actual_weight_works() { let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; - assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), Weight::from_parts(7, 0)); assert_eq!( - extract_actual_weight(&Ok(Some(1000).into()), &pre), + extract_actual_weight(&Ok(from_actual_ref_time(Some(7))), &pre), + Weight::from_parts(7, 0) + ); + assert_eq!( + extract_actual_weight(&Ok(from_actual_ref_time(Some(1000))), &pre), Weight::from_parts(1000, 0) ); assert_eq!( @@ -3672,7 +3731,7 @@ mod weight_tests { fn extract_actual_weight_caps_at_pre_weight() { let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; assert_eq!( - extract_actual_weight(&Ok(Some(1250).into()), &pre), + extract_actual_weight(&Ok(from_actual_ref_time(Some(1250))), &pre), Weight::from_parts(1000, 0) ); assert_eq!( @@ -3687,10 +3746,19 @@ mod weight_tests { #[test] fn extract_actual_pays_fee_works() { let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; - assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(7))), &pre), Pays::Yes); + assert_eq!( + extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(1000)).into()), &pre), + Pays::Yes + ); + assert_eq!( + extract_actual_pays_fee(&Ok(from_post_weight_info(Some(1000), Pays::Yes)), &pre), + Pays::Yes + ); + assert_eq!( + extract_actual_pays_fee(&Ok(from_post_weight_info(Some(1000), Pays::No)), &pre), + Pays::No + ); assert_eq!( extract_actual_pays_fee( &Err(DispatchError::BadOrigin.with_weight(Weight::from_parts(9, 0))), @@ -3714,9 +3782,12 @@ mod weight_tests { pays_fee: Pays::No, ..Default::default() }; - assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); - assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(7))), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(1000))), &pre), Pays::No); + assert_eq!( + extract_actual_pays_fee(&Ok(from_post_weight_info(Some(1000), Pays::Yes)), &pre), + Pays::No + ); } } diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index bf9eb2f88b0b1..115ce605d542e 100644 --- 
a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -17,7 +17,7 @@ //! Hash utilities. -use crate::metadata; +use crate::metadata_ir; use codec::{Codec, MaxEncodedLen}; use sp_io::hashing::{blake2_128, blake2_256, twox_128, twox_256, twox_64}; use sp_std::prelude::Vec; @@ -59,7 +59,7 @@ impl Hashable for T { /// Hasher to use to hash keys to insert to storage. pub trait StorageHasher: 'static { - const METADATA: metadata::StorageHasher; + const METADATA: metadata_ir::StorageHasherIR; type Output: AsRef<[u8]>; fn hash(x: &[u8]) -> Self::Output; @@ -80,7 +80,7 @@ pub trait ReversibleStorageHasher: StorageHasher { /// Store the key directly. pub struct Identity; impl StorageHasher for Identity { - const METADATA: metadata::StorageHasher = metadata::StorageHasher::Identity; + const METADATA: metadata_ir::StorageHasherIR = metadata_ir::StorageHasherIR::Identity; type Output = Vec; fn hash(x: &[u8]) -> Vec { x.to_vec() @@ -98,7 +98,7 @@ impl ReversibleStorageHasher for Identity { /// Hash storage keys with `concat(twox64(key), key)` pub struct Twox64Concat; impl StorageHasher for Twox64Concat { - const METADATA: metadata::StorageHasher = metadata::StorageHasher::Twox64Concat; + const METADATA: metadata_ir::StorageHasherIR = metadata_ir::StorageHasherIR::Twox64Concat; type Output = Vec; fn hash(x: &[u8]) -> Vec { twox_64(x).iter().chain(x.iter()).cloned().collect::>() @@ -120,7 +120,7 @@ impl ReversibleStorageHasher for Twox64Concat { /// Hash storage keys with `concat(blake2_128(key), key)` pub struct Blake2_128Concat; impl StorageHasher for Blake2_128Concat { - const METADATA: metadata::StorageHasher = metadata::StorageHasher::Blake2_128Concat; + const METADATA: metadata_ir::StorageHasherIR = metadata_ir::StorageHasherIR::Blake2_128Concat; type Output = Vec; fn hash(x: &[u8]) -> Vec { blake2_128(x).iter().chain(x.iter()).cloned().collect::>() @@ -142,7 +142,7 @@ impl ReversibleStorageHasher for Blake2_128Concat { /// Hash storage keys with blake2 128 pub struct Blake2_128; impl StorageHasher for Blake2_128 { - const METADATA: metadata::StorageHasher = metadata::StorageHasher::Blake2_128; + const METADATA: metadata_ir::StorageHasherIR = metadata_ir::StorageHasherIR::Blake2_128; type Output = [u8; 16]; fn hash(x: &[u8]) -> [u8; 16] { blake2_128(x) @@ -155,7 +155,7 @@ impl StorageHasher for Blake2_128 { /// Hash storage keys with blake2 256 pub struct Blake2_256; impl StorageHasher for Blake2_256 { - const METADATA: metadata::StorageHasher = metadata::StorageHasher::Blake2_256; + const METADATA: metadata_ir::StorageHasherIR = metadata_ir::StorageHasherIR::Blake2_256; type Output = [u8; 32]; fn hash(x: &[u8]) -> [u8; 32] { blake2_256(x) @@ -168,7 +168,7 @@ impl StorageHasher for Blake2_256 { /// Hash storage keys with twox 128 pub struct Twox128; impl StorageHasher for Twox128 { - const METADATA: metadata::StorageHasher = metadata::StorageHasher::Twox128; + const METADATA: metadata_ir::StorageHasherIR = metadata_ir::StorageHasherIR::Twox128; type Output = [u8; 16]; fn hash(x: &[u8]) -> [u8; 16] { twox_128(x) @@ -181,7 +181,7 @@ impl StorageHasher for Twox128 { /// Hash storage keys with twox 256 pub struct Twox256; impl StorageHasher for Twox256 { - const METADATA: metadata::StorageHasher = metadata::StorageHasher::Twox256; + const METADATA: metadata_ir::StorageHasherIR = metadata_ir::StorageHasherIR::Twox256; type Output = [u8; 32]; fn hash(x: &[u8]) -> [u8; 32] { twox_256(x) diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 167c569f35d01..f74b5410e774d 100644 --- 
a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -50,7 +50,8 @@ pub use paste; pub use scale_info; #[cfg(feature = "std")] pub use serde; -pub use sp_core::Void; +pub use sp_api::metadata_ir; +pub use sp_core::{OpaqueMetadata, Void}; #[doc(hidden)] pub use sp_core_hashing_proc_macro; #[doc(hidden)] @@ -83,7 +84,6 @@ pub mod instances; pub mod migrations; pub mod traits; pub mod weights; - #[doc(hidden)] pub mod unsigned { #[doc(hidden)] @@ -371,16 +371,14 @@ macro_rules! parameter_types { /// Set the value of this parameter type in the storage. /// - /// This needs to be executed in an externalities provided - /// environment. + /// This needs to be executed in an externalities provided environment. pub fn set(value: &$type) { $crate::storage::unhashed::put(&Self::key(), value); } /// Returns the value of this parameter type. /// - /// This needs to be executed in an externalities provided - /// environment. + /// This needs to be executed in an externalities provided environment. #[allow(unused)] pub fn get() -> $type { $crate::storage::unhashed::get(&Self::key()).unwrap_or_else(|| $value) @@ -827,81 +825,157 @@ pub use serde::{Deserialize, Serialize}; #[cfg(test)] pub mod tests { use super::*; - use crate::metadata::{ - PalletStorageMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, - StorageHasher, + use crate::metadata_ir::{ + PalletStorageMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, + StorageEntryTypeIR, StorageHasherIR, }; - use codec::{Codec, EncodeLike}; - use frame_support::traits::CrateVersion; use sp_io::{MultiRemovalResults, TestExternalities}; + use sp_runtime::{generic, traits::BlakeTwo256, BuildStorage}; use sp_std::result; - /// A PalletInfo implementation which just panics. - pub struct PanicPalletInfo; + pub use self::frame_system::{Config, Pallet}; - impl crate::traits::PalletInfo for PanicPalletInfo { - fn index() -> Option { - unimplemented!("PanicPalletInfo mustn't be triggered by tests"); - } - fn name() -> Option<&'static str> { - unimplemented!("PanicPalletInfo mustn't be triggered by tests"); - } - fn module_name() -> Option<&'static str> { - unimplemented!("PanicPalletInfo mustn't be triggered by tests"); - } - fn crate_version() -> Option { - unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + #[pallet] + pub mod frame_system { + #[allow(unused)] + use super::{frame_system, frame_system::pallet_prelude::*}; + pub use crate::dispatch::RawOrigin; + use crate::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: 'static { + type BlockNumber: Parameter + Default + MaxEncodedLen; + type AccountId; + type BaseCallFilter: crate::traits::Contains; + type RuntimeOrigin; + type RuntimeCall; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: Get; } - } - pub trait Config: 'static { - type BlockNumber: Codec + EncodeLike + Default + TypeInfo; - type RuntimeOrigin; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: crate::traits::Get; - } + #[pallet::error] + pub enum Error { + /// Required by construct_runtime + CallFiltered, + } - mod module { - #![allow(dead_code)] + #[pallet::origin] + pub type Origin = RawOrigin<::AccountId>; + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + pub type Data = StorageMap<_, Twox64Concat, u32, u64, ValueQuery>; + + #[pallet::storage] + pub type OptionLinkedMap = StorageMap<_, Blake2_128Concat, u32, u32, OptionQuery>; + + 
#[pallet::storage] + #[pallet::getter(fn generic_data)] + pub type GenericData = + StorageMap<_, Identity, T::BlockNumber, T::BlockNumber, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn generic_data2)] + pub type GenericData2 = + StorageMap<_, Blake2_128Concat, T::BlockNumber, T::BlockNumber, OptionQuery>; + + #[pallet::storage] + pub type DataDM = + StorageDoubleMap<_, Twox64Concat, u32, Blake2_128Concat, u32, u64, ValueQuery>; + + #[pallet::storage] + pub type GenericDataDM = StorageDoubleMap< + _, + Blake2_128Concat, + T::BlockNumber, + Identity, + T::BlockNumber, + T::BlockNumber, + ValueQuery, + >; + + #[pallet::storage] + pub type GenericData2DM = StorageDoubleMap< + _, + Blake2_128Concat, + T::BlockNumber, + Twox64Concat, + T::BlockNumber, + T::BlockNumber, + OptionQuery, + >; + + #[pallet::storage] + #[pallet::unbounded] + pub type AppendableDM = StorageDoubleMap< + _, + Blake2_128Concat, + u32, + Blake2_128Concat, + T::BlockNumber, + Vec, + ValueQuery, + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub data: Vec<(u32, u64)>, + pub test_config: Vec<(u32, u32, u64)>, + } - use super::Config; + impl Default for GenesisConfig { + fn default() -> Self { + Self { data: vec![(15u32, 42u64)], test_config: vec![(15u32, 16u32, 42u64)] } + } + } - decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + for (k, v) in &self.data { + >::insert(k, v); + } + for (k1, k2, v) in &self.test_config { + >::insert(k1, k2, v); + } + } } - } - use self::module::Module; - - decl_storage! { - trait Store for Module as Test { - pub Value get(fn value): u64; - pub Data get(fn data) build(|_| vec![(15u32, 42u64)]): - map hasher(twox_64_concat) u32 => u64; - pub OptionLinkedMap: map hasher(blake2_128_concat) u32 => Option; - pub GenericData get(fn generic_data): - map hasher(identity) T::BlockNumber => T::BlockNumber; - pub GenericData2 get(fn generic_data2): - map hasher(blake2_128_concat) T::BlockNumber => Option; - pub DataDM config(test_config) build(|_| vec![(15u32, 16u32, 42u64)]): - double_map hasher(twox_64_concat) u32, hasher(blake2_128_concat) u32 => u64; - pub GenericDataDM: - double_map hasher(blake2_128_concat) T::BlockNumber, hasher(identity) T::BlockNumber - => T::BlockNumber; - pub GenericData2DM: - double_map hasher(blake2_128_concat) T::BlockNumber, hasher(twox_64_concat) T::BlockNumber - => Option; - pub AppendableDM: - double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Vec; + pub mod pallet_prelude { + pub type OriginFor = ::RuntimeOrigin; } } - struct Test; + type BlockNumber = u32; + type AccountId = u32; + type Header = generic::Header; + type UncheckedExtrinsic = generic::UncheckedExtrinsic; + type Block = generic::Block; + + crate::construct_runtime!( + pub enum Runtime + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: self::frame_system, + } + ); - impl Config for Test { - type BlockNumber = u32; - type RuntimeOrigin = u32; - type PalletInfo = PanicPalletInfo; + impl Config for Runtime { + type BlockNumber = BlockNumber; + type AccountId = AccountId; + type BaseCallFilter = crate::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type PalletInfo = PalletInfo; type DbWeight = (); } @@ -909,8 +983,6 @@ pub mod tests { GenesisConfig::default().build_storage().unwrap().into() } - type Map = Data; - 
trait Sorted { fn sorted(self) -> Self; } @@ -927,15 +999,15 @@ pub mod tests { new_test_ext().execute_with(|| { #[crate::storage_alias] type GenericData2 = StorageMap< - Test, + System, Blake2_128Concat, ::BlockNumber, ::BlockNumber, >; - assert_eq!(Module::::generic_data2(5), None); - GenericData2::::insert(5, 5); - assert_eq!(Module::::generic_data2(5), Some(5)); + assert_eq!(Pallet::::generic_data2(5), None); + GenericData2::::insert(5, 5); + assert_eq!(Pallet::::generic_data2(5), Some(5)); /// Some random docs that ensure that docs are accepted #[crate::storage_alias] @@ -1006,6 +1078,8 @@ pub mod tests { #[test] fn map_issue_3318() { new_test_ext().execute_with(|| { + type OptionLinkedMap = self::frame_system::OptionLinkedMap; + OptionLinkedMap::insert(1, 1); assert_eq!(OptionLinkedMap::get(1), Some(1)); OptionLinkedMap::insert(1, 2); @@ -1016,6 +1090,8 @@ pub mod tests { #[test] fn map_swap_works() { new_test_ext().execute_with(|| { + type OptionLinkedMap = self::frame_system::OptionLinkedMap; + OptionLinkedMap::insert(0, 0); OptionLinkedMap::insert(1, 1); OptionLinkedMap::insert(2, 2); @@ -1045,6 +1121,8 @@ pub mod tests { #[test] fn double_map_swap_works() { new_test_ext().execute_with(|| { + type DataDM = self::frame_system::DataDM; + DataDM::insert(0, 1, 1); DataDM::insert(1, 0, 2); DataDM::insert(1, 1, 3); @@ -1077,6 +1155,8 @@ pub mod tests { #[test] fn map_basic_insert_remove_should_work() { new_test_ext().execute_with(|| { + type Map = self::frame_system::Data; + // initialized during genesis assert_eq!(Map::get(&15u32), 42u64); @@ -1103,6 +1183,8 @@ pub mod tests { #[test] fn map_iteration_should_work() { new_test_ext().execute_with(|| { + type Map = self::frame_system::Data; + assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42)]); // insert / remove let key = 17u32; @@ -1156,7 +1238,7 @@ pub mod tests { fn double_map_basic_insert_remove_remove_prefix_with_commit_should_work() { let key1 = 17u32; let key2 = 18u32; - type DoubleMap = DataDM; + type DoubleMap = self::frame_system::DataDM; let mut e = new_test_ext(); e.execute_with(|| { // initialized during genesis @@ -1201,7 +1283,7 @@ pub mod tests { new_test_ext().execute_with(|| { let key1 = 17u32; let key2 = 18u32; - type DoubleMap = DataDM; + type DoubleMap = self::frame_system::DataDM; // initialized during genesis assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); @@ -1249,7 +1331,7 @@ pub mod tests { #[test] fn double_map_append_should_work() { new_test_ext().execute_with(|| { - type DoubleMap = AppendableDM; + type DoubleMap = self::frame_system::AppendableDM; let key1 = 17u32; let key2 = 18u32; @@ -1263,7 +1345,7 @@ pub mod tests { #[test] fn double_map_mutate_exists_should_work() { new_test_ext().execute_with(|| { - type DoubleMap = DataDM; + type DoubleMap = self::frame_system::DataDM; let (key1, key2) = (11, 13); @@ -1280,8 +1362,8 @@ pub mod tests { #[test] fn double_map_try_mutate_exists_should_work() { new_test_ext().execute_with(|| { - type DoubleMap = DataDM; - type TestResult = result::Result<(), &'static str>; + type DoubleMap = self::frame_system::DataDM; + type TestResult = Result<(), &'static str>; let (key1, key2) = (11, 13); @@ -1310,101 +1392,100 @@ pub mod tests { }); } - fn expected_metadata() -> PalletStorageMetadata { - PalletStorageMetadata { - prefix: "Test", + fn expected_metadata() -> PalletStorageMetadataIR { + PalletStorageMetadataIR { + prefix: "System", entries: vec![ - StorageEntryMetadata { - name: "Value", - modifier: StorageEntryModifier::Default, - ty: 
StorageEntryType::Plain(scale_info::meta_type::()), - default: vec![0, 0, 0, 0, 0, 0, 0, 0], - docs: vec![], - }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Data", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Twox64Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Twox64Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: vec![0, 0, 0, 0, 0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "OptionLinkedMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GenericData", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Identity], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Identity], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GenericData2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "DataDM", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Twox64Concat, StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![ + StorageHasherIR::Twox64Concat, + StorageHasherIR::Blake2_128Concat, + ], key: scale_info::meta_type::<(u32, u32)>(), value: scale_info::meta_type::(), }, default: vec![0, 0, 0, 0, 0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GenericDataDM", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Identity], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat, StorageHasherIR::Identity], key: scale_info::meta_type::<(u32, u32)>(), value: scale_info::meta_type::(), }, default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GenericData2DM", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![ + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat, + ], key: scale_info::meta_type::<(u32, u32)>(), value: scale_info::meta_type::(), }, default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "AppendableDM", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { + modifier: 
StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, ], key: scale_info::meta_type::<(u32, u32)>(), value: scale_info::meta_type::>(), @@ -1418,7 +1499,7 @@ pub mod tests { #[test] fn store_metadata() { - let metadata = Module::::storage_metadata(); + let metadata = Pallet::::storage_metadata(); pretty_assertions::assert_eq!(expected_metadata(), metadata); } @@ -1437,12 +1518,6 @@ pub mod tests { assert_eq!(300, StorageParameter::get()); }) } - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub static Members: Vec = vec![]; - pub const Foo: Option = None; - } } /// Prelude to be used alongside pallet macro, for ease of use. @@ -1546,6 +1621,7 @@ pub mod pallet_prelude { /// * [`pallet::inherent`](#inherent-palletinherent-optional) /// * [`pallet::validate_unsigned`](#validate-unsigned-palletvalidate_unsigned-optional) /// * [`pallet::origin`](#origin-palletorigin-optional) +/// * [`pallet::composite_enum`](#composite-enum-palletcomposite_enum-optional) /// /// Note that at compile-time, the `#[pallet]` macro will analyze and expand all of these /// attributes, ultimately removing their AST nodes before they can be parsed as real @@ -2270,6 +2346,29 @@ pub mod pallet_prelude { /// /// Also see [`pallet::origin`](`frame_support::pallet_macros::origin`) /// +/// # Composite enum `#[pallet::composite_enum]` (optional) +/// +/// The `#[pallet::composite_enum]` attribute allows you to define an enum on the pallet which +/// will then instruct `construct_runtime` to amalgamate all similarly-named enums from other +/// pallets into an aggregate enum. This is similar in principle with how the aggregate enum is +/// generated for `#[pallet::event]` or `#[pallet::error]`. +/// +/// The item tagged with `#[pallet::composite_enum]` MUST be an enum declaration, and can ONLY +/// be the following identifiers: `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. +/// Custom identifiers are not supported. +/// +/// NOTE: For ease of usage, when no `#[derive]` attributes are detected, the +/// `#[pallet::composite_enum]` attribute will automatically derive the following traits for +/// the enum: +/// +/// ```ignore +/// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, +/// RuntimeDebug +/// ``` +/// +/// The inverse is also true: if there are any #[derive] attributes present for the enum, then +/// the attribute will not automatically derive any of the traits described above. +/// /// # General notes on instantiable pallets /// /// An instantiable pallet is one where Config is generic, i.e. `Config`. 
This allows @@ -2499,7 +2598,7 @@ pub mod pallet_prelude { /// #[pallet::extra_constants] /// impl, I: 'static> Pallet { /// /// Some description -/// fn exra_constant_name() -> u128 { 4u128 } +/// fn extra_constant_name() -> u128 { 4u128 } /// } /// /// #[pallet::pallet] @@ -2799,10 +2898,11 @@ pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the pallet:: macros pub mod pallet_macros { pub use frame_support_procedural::{ - call_index, compact, config, constant, disable_frame_system_supertrait_check, error, event, - extra_constants, generate_deposit, generate_storage_info, generate_store, genesis_build, - genesis_config, getter, hooks, inherent, origin, storage, storage_prefix, storage_version, - type_value, unbounded, validate_unsigned, weight, whitelist_storage, + call_index, compact, composite_enum, config, constant, + disable_frame_system_supertrait_check, error, event, extra_constants, generate_deposit, + generate_storage_info, generate_store, genesis_build, genesis_config, getter, hooks, + inherent, origin, storage, storage_prefix, storage_version, type_value, unbounded, + validate_unsigned, weight, whitelist_storage, }; } diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index 645316042b38c..f54b73aad8889 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -15,11 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License. +#[cfg(feature = "try-runtime")] +use crate::storage::unhashed::contains_prefixed_key; use crate::{ traits::{GetStorageVersion, PalletInfoAccess}, weights::{RuntimeDbWeight, Weight}, }; use impl_trait_for_tuples::impl_for_tuples; +use sp_core::Get; +use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; +use sp_std::marker::PhantomData; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; /// Trait used by [`migrate_from_pallet_version_to_storage_version`] to do the actual migration. pub trait PalletVersionToStorageVersionHelper { @@ -67,3 +74,112 @@ pub fn migrate_from_pallet_version_to_storage_version< ) -> Weight { Pallets::migrate(db_weight) } + +/// `RemovePallet` is a utility struct used to remove all storage items associated with a specific +/// pallet. +/// +/// This struct is generic over two parameters: +/// - `P` is a type that implements the `Get` trait for a static string, representing the pallet's +/// name. +/// - `DbWeight` is a type that implements the `Get` trait for `RuntimeDbWeight`, providing the +/// weight for database operations. +/// +/// On runtime upgrade, the `on_runtime_upgrade` function will clear all storage items associated +/// with the specified pallet, logging the number of keys removed. If the `try-runtime` feature is +/// enabled, the `pre_upgrade` and `post_upgrade` functions can be used to verify the storage +/// removal before and after the upgrade. +/// +/// # Examples: +/// ```ignore +/// construct_runtime! { +/// pub enum Runtime where +/// Block = Block, +/// NodeBlock = primitives::Block, +/// UncheckedExtrinsic = UncheckedExtrinsic +/// { +/// System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, +/// +/// SomePalletToRemove: pallet_something::{Pallet, Call, Storage, Event} = 1, +/// AnotherPalletToRemove: pallet_something_else::{Pallet, Call, Storage, Event} = 2, +/// +/// YourOtherPallets... +/// } +/// }; +/// +/// parameter_types! 
{ +/// pub const SomePalletToRemoveStr: &'static str = "SomePalletToRemove"; +/// pub const AnotherPalletToRemoveStr: &'static str = "AnotherPalletToRemove"; +/// } +/// +/// pub type Migrations = ( +/// RemovePallet, +/// RemovePallet, +/// AnyOtherMigrations... +/// ); +/// +/// pub type Executive = frame_executive::Executive< +/// Runtime, +/// Block, +/// frame_system::ChainContext, +/// Runtime, +/// Migrations +/// >; +/// ``` +/// +/// WARNING: `RemovePallet` has no guard rails preventing it from bricking the chain if the +/// operation of removing storage for the given pallet would exceed the block weight limit. +/// +/// If your pallet has too many keys to be removed in a single block, it is advised to wait for +/// a multi-block scheduler currently under development which will allow for removal of storage +/// items (and performing other heavy migrations) over multiple blocks +/// (see ). +pub struct RemovePallet, DbWeight: Get>( + PhantomData<(P, DbWeight)>, +); +impl, DbWeight: Get> frame_support::traits::OnRuntimeUpgrade + for RemovePallet +{ + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let hashed_prefix = twox_128(P::get().as_bytes()); + let keys_removed = match clear_prefix(&hashed_prefix, None) { + KillStorageResult::AllRemoved(value) => value, + KillStorageResult::SomeRemaining(value) => { + log::error!( + "`clear_prefix` failed to remove all keys for {}. THIS SHOULD NEVER HAPPEN! 🚨", + P::get() + ); + value + }, + } as u64; + + log::info!("Removed {} {} keys 🧹", keys_removed, P::get()); + + DbWeight::get().reads_writes(keys_removed + 1, keys_removed) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + let hashed_prefix = twox_128(P::get().as_bytes()); + match contains_prefixed_key(&hashed_prefix) { + true => log::info!("Found {} keys pre-removal 👀", P::get()), + false => log::warn!( + "Migration RemovePallet<{}> can be removed (no keys found pre-removal).", + P::get() + ), + }; + Ok(Vec::new()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + let hashed_prefix = twox_128(P::get().as_bytes()); + match contains_prefixed_key(&hashed_prefix) { + true => { + log::error!("{} has keys remaining post-removal ❗", P::get()); + return Err("Keys remaining post-removal, this should never happen 🚨") + }, + false => log::info!("No {} keys found post-removal 🎉", P::get()), + }; + Ok(()) + } +} diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 5763a472cd570..5da68873b10e6 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -514,43 +514,12 @@ where mod test_iterators { use crate::{ hash::StorageHasher, - storage::{generator::StorageDoubleMap, unhashed, IterableStorageDoubleMap}, + storage::{ + generator::{tests::*, StorageDoubleMap}, + unhashed, + }, }; - use codec::{Decode, Encode}; - - pub trait Config: 'static { - type RuntimeOrigin; - type BlockNumber; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: crate::traits::Get; - } - - crate::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} - } - - #[derive(PartialEq, Eq, Clone, Encode, Decode)] - struct NoDef(u32); - - crate::decl_storage! 
{ - trait Store for Module as Test { - DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32 => u64; - } - } - - fn key_before_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 0, "mock function not implemented for this prefix"); - *last -= 1; - prefix - } - - fn key_after_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 255, "mock function not implemented for this prefix"); - *last += 1; - prefix - } + use codec::Encode; #[test] fn double_map_iter_from() { @@ -589,6 +558,8 @@ mod test_iterators { #[test] fn double_map_reversible_reversible_iteration() { sp_io::TestExternalities::default().execute_with(|| { + type DoubleMap = self::frame_system::DoubleMap; + // All map iterator let prefix = DoubleMap::prefix_hash(); diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index fb499384f1506..3b36b9bddb704 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -349,43 +349,12 @@ impl> storage::StorageMap mod test_iterators { use crate::{ hash::StorageHasher, - storage::{generator::StorageMap, unhashed, IterableStorageMap}, + storage::{ + generator::{tests::*, StorageMap}, + unhashed, + }, }; - use codec::{Decode, Encode}; - - pub trait Config: 'static { - type RuntimeOrigin; - type BlockNumber; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: crate::traits::Get; - } - - crate::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} - } - - #[derive(PartialEq, Eq, Clone, Encode, Decode)] - struct NoDef(u32); - - crate::decl_storage! { - trait Store for Module as Test { - Map: map hasher(blake2_128_concat) u16 => u64; - } - } - - fn key_before_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 0, "mock function not implemented for this prefix"); - *last -= 1; - prefix - } - - fn key_after_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 255, "mock function not implemented for this prefix"); - *last += 1; - prefix - } + use codec::Encode; #[test] fn map_iter_from() { @@ -413,6 +382,8 @@ mod test_iterators { #[test] fn map_reversible_reversible_iteration() { sp_io::TestExternalities::default().execute_with(|| { + type Map = self::frame_system::Map; + // All map iterator let prefix = Map::prefix_hash(); diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 2da612b24de8f..568d400129689 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -35,47 +35,123 @@ pub use nmap::StorageNMap; pub use value::StorageValue; #[cfg(test)] -#[allow(dead_code)] mod tests { + use codec::Encode; + use sp_io::TestExternalities; + use sp_runtime::{generic, traits::BlakeTwo256, BuildStorage}; + use crate::{ assert_noop, assert_ok, - storage::{generator::StorageValue, unhashed, IterableStorageMap}, + storage::{generator::StorageValue, unhashed}, }; - use codec::Encode; - use sp_io::TestExternalities; - struct Runtime; + #[crate::pallet] + pub mod frame_system { + #[allow(unused)] + use super::{frame_system, frame_system::pallet_prelude::*}; + pub use crate::dispatch::RawOrigin; + use crate::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub 
trait Config: 'static { + type BlockNumber; + type AccountId; + type BaseCallFilter: crate::traits::Contains; + type RuntimeOrigin; + type RuntimeCall; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: Get; + } + + #[pallet::origin] + pub type Origin = RawOrigin<::AccountId>; + + #[pallet::error] + pub enum Error { + /// Required by construct_runtime + CallFiltered, + } + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + pub type Value = StorageValue<_, (u64, u64), ValueQuery>; - pub trait Config: 'static { - type RuntimeOrigin; - type BlockNumber; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: crate::traits::Get; + #[pallet::storage] + pub type Map = StorageMap<_, Blake2_128Concat, u16, u64, ValueQuery>; + + #[pallet::storage] + pub type NumberMap = StorageMap<_, Identity, u32, u64, ValueQuery>; + + #[pallet::storage] + pub type DoubleMap = + StorageDoubleMap<_, Blake2_128Concat, u16, Twox64Concat, u32, u64, ValueQuery>; + + #[pallet::storage] + pub type NMap = StorageNMap< + _, + (storage::Key, storage::Key), + u64, + ValueQuery, + >; + + pub mod pallet_prelude { + pub type OriginFor = ::RuntimeOrigin; + } } - impl Config for Runtime { - type RuntimeOrigin = u32; - type BlockNumber = u32; - type PalletInfo = crate::tests::PanicPalletInfo; + type BlockNumber = u32; + type AccountId = u32; + type Header = generic::Header; + type UncheckedExtrinsic = generic::UncheckedExtrinsic; + type Block = generic::Block; + + crate::construct_runtime!( + pub enum Runtime + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: self::frame_system, + } + ); + + impl self::frame_system::Config for Runtime { + type BlockNumber = BlockNumber; + type AccountId = AccountId; + type BaseCallFilter = crate::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type PalletInfo = PalletInfo; type DbWeight = (); } - decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + pub fn key_before_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert_ne!(*last, 0, "mock function not implemented for this prefix"); + *last -= 1; + prefix } - crate::decl_storage! { - trait Store for Module as Runtime { - Value get(fn value) config(): (u64, u64); - NumberMap: map hasher(identity) u32 => u64; - DoubleMap: double_map hasher(identity) u32, hasher(identity) u32 => u64; - } + pub fn key_after_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert_ne!(*last, 255, "mock function not implemented for this prefix"); + *last += 1; + prefix } #[test] fn value_translate_works() { let t = GenesisConfig::default().build_storage().unwrap(); TestExternalities::new(t).execute_with(|| { + type Value = self::frame_system::Value; + // put the old value `1111u32` in the storage. let key = Value::storage_value_final_key(); unhashed::put_raw(&key, &1111u32.encode()); @@ -96,7 +172,9 @@ mod tests { fn map_translate_works() { let t = GenesisConfig::default().build_storage().unwrap(); TestExternalities::new(t).execute_with(|| { - // start with a map of u32 -> u32. + type NumberMap = self::frame_system::NumberMap; + + // start with a map of u32 -> u64. 
for i in 0u32..100u32 { unhashed::put(&NumberMap::hashed_key_for(&i), &(i as u64)); } @@ -125,6 +203,10 @@ mod tests { fn try_mutate_works() { let t = GenesisConfig::default().build_storage().unwrap(); TestExternalities::new(t).execute_with(|| { + type Value = self::frame_system::Value; + type NumberMap = self::frame_system::NumberMap; + type DoubleMap = self::frame_system::DoubleMap; + assert_eq!(Value::get(), (0, 0)); assert_eq!(NumberMap::get(0), 0); assert_eq!(DoubleMap::get(0, 0), 0); diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 21fe7b41eda99..5d3d689aa98a6 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -462,43 +462,12 @@ impl> mod test_iterators { use crate::{ hash::StorageHasher, - storage::{generator::StorageNMap, unhashed, IterableStorageNMap}, + storage::{ + generator::{tests::*, StorageNMap}, + unhashed, + }, }; - use codec::{Decode, Encode}; - - pub trait Config: 'static { - type RuntimeOrigin; - type BlockNumber; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: crate::traits::Get; - } - - crate::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} - } - - #[derive(PartialEq, Eq, Clone, Encode, Decode)] - struct NoDef(u32); - - crate::decl_storage! { - trait Store for Module as Test { - NMap: nmap hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32 => u64; - } - } - - fn key_before_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 0, "mock function not implemented for this prefix"); - *last -= 1; - prefix - } - - fn key_after_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 255, "mock function not implemented for this prefix"); - *last += 1; - prefix - } + use codec::Encode; #[test] fn n_map_iter_from() { @@ -545,22 +514,18 @@ mod test_iterators { #[test] fn n_map_double_map_identical_key() { sp_io::TestExternalities::default().execute_with(|| { + use crate::hash::{Blake2_128Concat, Twox64Concat}; + + type NMap = self::frame_system::NMap; + NMap::insert((1, 2), 50); let key_hash = NMap::hashed_key_for((1, 2)); { #[crate::storage_alias] - type NMap = StorageDoubleMap< - Test, - crate::Blake2_128Concat, - u16, - crate::Twox64Concat, - u32, - u64, - >; - - let value = NMap::get(1, 2).unwrap(); - assert_eq!(value, 50); + type NMap = StorageDoubleMap; + + assert_eq!(NMap::get(1, 2), Some(50)); assert_eq!(NMap::hashed_key_for(1, 2), key_hash); } }); @@ -569,6 +534,8 @@ mod test_iterators { #[test] fn n_map_reversible_reversible_iteration() { sp_io::TestExternalities::default().execute_with(|| { + type NMap = self::frame_system::NMap; + // All map iterator let prefix = NMap::prefix_hash(); diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index 24b00be485e49..e57942cbe0667 100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -18,7 +18,7 @@ //! Storage counted map type. 
use crate::{ - metadata::StorageEntryMetadata, + metadata_ir::StorageEntryMetadataIR, storage::{ generator::StorageMap as _, types::{ @@ -459,7 +459,7 @@ where OnEmpty: Get + 'static, MaxValues: Get>, { - fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { ::Map::build_metadata(docs, entries); CounterFor::::build_metadata( if cfg!(feature = "no-metadata-docs") { @@ -512,7 +512,7 @@ mod test { use super::*; use crate::{ hash::*, - metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, + metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}, storage::{bounded_vec::BoundedVec, types::ValueQuery}, traits::ConstU32, }; @@ -1147,21 +1147,21 @@ mod test { assert_eq!( entries, vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Twox64Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Twox64Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: 97u32.encode(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "counter_for_foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: if cfg!(feature = "no-metadata-docs") { vec![] diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 6a4bdc1e68d72..08ac1709c4b68 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -19,7 +19,7 @@ //! StoragePrefixedDoubleMap traits and their methods directly. 
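For readers following the counted-map metadata assertions above (the extra `counter_for_foo` entry), here is a rough, hypothetical sketch of a counted map and the counter it maintains; the item name and types are invented and this block is not part of the diff:

```rust
// Hypothetical fragment from inside a `#[frame_support::pallet]` module.
#[pallet::storage]
pub type Foo<T> = CountedStorageMap<_, Twox64Concat, u16, u32, ValueQuery>;

fn counted_map_sketch<T: Config>() {
	// Inserting through the counted map keeps a companion counter up to date;
	// that counter is what shows up in metadata as the `counter_for_foo` entry.
	Foo::<T>::insert(1u16, 97u32);
	assert_eq!(Foo::<T>::count(), 1);
}
```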
use crate::{ - metadata::{StorageEntryMetadata, StorageEntryType}, + metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}, storage::{ types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, @@ -656,13 +656,13 @@ where OnEmpty: Get + 'static, MaxValues: Get>, { - fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { let docs = if cfg!(feature = "no-metadata-docs") { vec![] } else { docs }; - let entry = StorageEntryMetadata { + let entry = StorageEntryMetadataIR { name: Prefix::STORAGE_PREFIX, modifier: QueryKind::METADATA, - ty: StorageEntryType::Map { + ty: StorageEntryTypeIR::Map { hashers: vec![Hasher1::METADATA, Hasher2::METADATA], key: scale_info::meta_type::<(Key1, Key2)>(), value: scale_info::meta_type::(), @@ -736,7 +736,7 @@ mod test { use super::*; use crate::{ hash::*, - metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, + metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}, storage::types::ValueQuery, }; use sp_io::{hashing::twox_128, TestExternalities}; @@ -916,13 +916,13 @@ mod test { assert_eq!( entries, vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "foo", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat ], key: scale_info::meta_type::<(u16, u8)>(), value: scale_info::meta_type::(), @@ -930,13 +930,13 @@ mod test { default: Option::::None.encode(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat ], key: scale_info::meta_type::<(u16, u8)>(), value: scale_info::meta_type::(), diff --git a/frame/support/src/storage/types/key.rs b/frame/support/src/storage/types/key.rs index 901bdb8b0514c..bf87e593b063a 100755 --- a/frame/support/src/storage/types/key.rs +++ b/frame/support/src/storage/types/key.rs @@ -41,7 +41,7 @@ pub trait KeyGenerator { type HashFn: FnOnce(&[u8]) -> Vec; type HArg; - const HASHER_METADATA: &'static [crate::metadata::StorageHasher]; + const HASHER_METADATA: &'static [crate::metadata_ir::StorageHasherIR]; /// Given a `key` tuple, calculate the final key by encoding each element individually and /// hashing them using the corresponding hasher in the `KeyGenerator`. 
@@ -74,7 +74,7 @@ impl KeyGenerator for Key type HashFn = Box Vec>; type HArg = (Self::HashFn,); - const HASHER_METADATA: &'static [crate::metadata::StorageHasher] = &[H::METADATA]; + const HASHER_METADATA: &'static [crate::metadata_ir::StorageHasherIR] = &[H::METADATA]; fn final_key + TupleToEncodedIter>(key: KArg) -> Vec { H::hash(&key.to_encoded_iter().next().expect("should have at least one element!")) @@ -114,7 +114,7 @@ impl KeyGenerator for Tuple { for_tuples!( type HArg = ( #(Tuple::HashFn),* ); ); type HashFn = Box Vec>; - const HASHER_METADATA: &'static [crate::metadata::StorageHasher] = + const HASHER_METADATA: &'static [crate::metadata_ir::StorageHasherIR] = &[for_tuples!( #(Tuple::Hasher::METADATA),* )]; fn final_key + TupleToEncodedIter>(key: KArg) -> Vec { diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index 53cf74d26f17c..2110732b2f69c 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -19,7 +19,7 @@ //! methods directly. use crate::{ - metadata::{StorageEntryMetadata, StorageEntryType}, + metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}, storage::{ types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, @@ -409,13 +409,13 @@ where OnEmpty: Get + 'static, MaxValues: Get>, { - fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { let docs = if cfg!(feature = "no-metadata-docs") { vec![] } else { docs }; - let entry = StorageEntryMetadata { + let entry = StorageEntryMetadataIR { name: Prefix::STORAGE_PREFIX, modifier: QueryKind::METADATA, - ty: StorageEntryType::Map { + ty: StorageEntryTypeIR::Map { hashers: vec![Hasher::METADATA], key: scale_info::meta_type::(), value: scale_info::meta_type::(), @@ -483,7 +483,7 @@ mod test { use super::*; use crate::{ hash::*, - metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, + metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}, storage::types::ValueQuery, }; use sp_io::{hashing::twox_128, TestExternalities}; @@ -706,22 +706,22 @@ mod test { assert_eq!( entries, vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "foo", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: Option::::None.encode(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs index 9a6f15d1ee820..3a5bae2e608b7 100644 --- a/frame/support/src/storage/types/mod.rs +++ b/frame/support/src/storage/types/mod.rs @@ -18,7 +18,7 @@ //! Storage types to build abstraction on storage, they implements storage traits such as //! StorageMap and others. 
-use crate::metadata::{StorageEntryMetadata, StorageEntryModifier}; +use crate::metadata_ir::{StorageEntryMetadataIR, StorageEntryModifierIR}; use codec::FullCodec; use sp_std::prelude::*; @@ -50,7 +50,7 @@ pub use value::StorageValue; /// value. pub trait QueryKindTrait { /// Metadata for the storage kind. - const METADATA: StorageEntryModifier; + const METADATA: StorageEntryModifierIR; /// Type returned on query type Query: FullCodec + 'static; @@ -73,7 +73,7 @@ impl QueryKindTrait for OptionQuery where Value: FullCodec + 'static, { - const METADATA: StorageEntryModifier = StorageEntryModifier::Optional; + const METADATA: StorageEntryModifierIR = StorageEntryModifierIR::Optional; type Query = Option; @@ -95,7 +95,7 @@ where Error: FullCodec + 'static, OnEmpty: crate::traits::Get>, { - const METADATA: StorageEntryModifier = StorageEntryModifier::Optional; + const METADATA: StorageEntryModifierIR = StorageEntryModifierIR::Optional; type Query = Result; @@ -118,7 +118,7 @@ where Value: FullCodec + 'static, OnEmpty: crate::traits::Get, { - const METADATA: StorageEntryModifier = StorageEntryModifier::Default; + const METADATA: StorageEntryModifierIR = StorageEntryModifierIR::Default; type Query = Value; @@ -136,5 +136,5 @@ where /// Implemented by each of the storage types: value, map, countedmap, doublemap and nmap. pub trait StorageEntryMetadataBuilder { /// Build into `entries` the storage metadata entries of a storage given some `docs`. - fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); } diff --git a/frame/support/src/storage/types/nmap.rs b/frame/support/src/storage/types/nmap.rs index d971035968ff2..9b63ca7b0f417 100755 --- a/frame/support/src/storage/types/nmap.rs +++ b/frame/support/src/storage/types/nmap.rs @@ -19,7 +19,7 @@ //! StoragePrefixedDoubleMap traits and their methods directly. 
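The `QueryKindTrait` changes above only swap the metadata types over to their `IR` counterparts; as a reminder of what the three query kinds mean for a storage item in practice, a short hypothetical fragment (not from this diff):

```rust
// OptionQuery: `get` returns `Option<u32>`; metadata modifier is `Optional`.
#[pallet::storage]
pub type A<T> = StorageValue<_, u32, OptionQuery>;

// ValueQuery: `get` returns `u32` (falling back to `OnEmpty`); modifier is `Default`.
#[pallet::storage]
pub type B<T> = StorageValue<_, u32, ValueQuery>;

// ResultQuery: `get` returns `Result<u32, Error<T>>`; modifier is `Optional`.
// Assumes a `NonExistent` variant is declared in this pallet's `#[pallet::error]`.
#[pallet::storage]
pub type C<T> = StorageValue<_, u32, ResultQuery<Error<T>::NonExistent>>;
```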
use crate::{ - metadata::{StorageEntryMetadata, StorageEntryType}, + metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}, storage::{ types::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, OptionQuery, QueryKindTrait, @@ -550,13 +550,13 @@ where OnEmpty: Get + 'static, MaxValues: Get>, { - fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { let docs = if cfg!(feature = "no-metadata-docs") { vec![] } else { docs }; - let entry = StorageEntryMetadata { + let entry = StorageEntryMetadataIR { name: Prefix::STORAGE_PREFIX, modifier: QueryKind::METADATA, - ty: StorageEntryType::Map { + ty: StorageEntryTypeIR::Map { key: scale_info::meta_type::(), hashers: Key::HASHER_METADATA.to_vec(), value: scale_info::meta_type::(), @@ -620,7 +620,7 @@ mod test { use super::*; use crate::{ hash::{StorageHasher as _, *}, - metadata::{StorageEntryModifier, StorageHasher}, + metadata_ir::{StorageEntryModifierIR, StorageHasherIR}, storage::types::{Key as NMapKey, ValueQuery}, }; use sp_io::{hashing::twox_128, TestExternalities}; @@ -791,22 +791,22 @@ mod test { assert_eq!( entries, vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Foo", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: Option::::None.encode(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, @@ -991,13 +991,13 @@ mod test { assert_eq!( entries, vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Foo", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat ], key: scale_info::meta_type::<(u16, u8)>(), value: scale_info::meta_type::(), @@ -1005,13 +1005,13 @@ mod test { default: Option::::None.encode(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat ], key: scale_info::meta_type::<(u16, u8)>(), value: scale_info::meta_type::(), @@ -1232,14 +1232,14 @@ mod test { assert_eq!( entries, vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Foo", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat ], key: 
scale_info::meta_type::<(u16, u16, u16)>(), value: scale_info::meta_type::(), @@ -1247,14 +1247,14 @@ mod test { default: Option::::None.encode(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat ], key: scale_info::meta_type::<(u16, u16, u16)>(), value: scale_info::meta_type::(), diff --git a/frame/support/src/storage/types/value.rs b/frame/support/src/storage/types/value.rs index 64fb432b2dab1..d5fbb0656bbd1 100644 --- a/frame/support/src/storage/types/value.rs +++ b/frame/support/src/storage/types/value.rs @@ -18,7 +18,7 @@ //! Storage value type. Implements StorageValue trait and its method directly. use crate::{ - metadata::{StorageEntryMetadata, StorageEntryType}, + metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}, storage::{ generator::StorageValue as StorageValueT, types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, @@ -221,13 +221,13 @@ where QueryKind: QueryKindTrait, OnEmpty: crate::traits::Get + 'static, { - fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { let docs = if cfg!(feature = "no-metadata-docs") { vec![] } else { docs }; - let entry = StorageEntryMetadata { + let entry = StorageEntryMetadataIR { name: Prefix::STORAGE_PREFIX, modifier: QueryKind::METADATA, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: OnEmpty::get().encode(), docs, }; @@ -278,7 +278,7 @@ where #[cfg(test)] mod test { use super::*; - use crate::{metadata::StorageEntryModifier, storage::types::ValueQuery}; + use crate::{metadata_ir::StorageEntryModifierIR, storage::types::ValueQuery}; use sp_io::{hashing::twox_128, TestExternalities}; struct Prefix; @@ -363,17 +363,17 @@ mod test { assert_eq!( entries, vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "foo", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: Option::::None.encode(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "foo", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: 97u32.encode(), docs: vec![], } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 4b92cfbaee69c..17d2fb06c8118 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -74,8 +74,8 @@ pub use randomness::Randomness; mod metadata; pub use metadata::{ - CallMetadata, CrateVersion, GetCallMetadata, GetCallName, GetStorageVersion, PalletInfo, - PalletInfoAccess, PalletInfoData, PalletsInfoAccess, StorageVersion, + CallMetadata, CrateVersion, GetCallIndex, GetCallMetadata, GetCallName, GetStorageVersion, + PalletInfo, PalletInfoAccess, PalletInfoData, PalletsInfoAccess, StorageVersion, STORAGE_VERSION_STORAGE_KEY_POSTFIX, }; diff --git a/frame/support/src/traits/hooks.rs 
b/frame/support/src/traits/hooks.rs index a3374d0bf537f..5fea98ee036b5 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -197,10 +197,10 @@ impl OnRuntimeUpgrade for Tuple { weight } - #[cfg(feature = "try-runtime")] /// We are executing pre- and post-checks sequentially in order to be able to test several /// consecutive migrations for the same pallet without errors. Therefore pre and post upgrade /// hooks for tuples are a noop. + #[cfg(feature = "try-runtime")] fn try_on_runtime_upgrade(checks: bool) -> Result { let mut weight = Weight::zero(); for_tuples!( #( weight = weight.saturating_add(Tuple::try_on_runtime_upgrade(checks)?); )* ); @@ -227,10 +227,15 @@ pub trait Hooks { fn on_finalize(_n: BlockNumber) {} /// This will be run when the block is being finalized (before `on_finalize`). - /// Implement to have something happen using the remaining weight. - /// Will not fire if the remaining weight is 0. - /// Return the weight used, the hook will subtract it from current weight used - /// and pass the result to the next `on_idle` hook if it exists. + /// + /// Implement to have something happen using the remaining weight. Will not fire if the + /// remaining weight is 0. + /// + /// Each pallet's `on_idle` is chosen to be the first to execute in a round-robin fashion + /// indexed by the block number. + /// + /// Return the weight used, the caller will use this to calculate the remaining weight and then + /// call the next pallet `on_idle` hook if there is still weight left. fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { Weight::zero() } @@ -359,10 +364,64 @@ pub trait OnTimestampSet { #[cfg(test)] mod tests { use super::*; + use sp_io::TestExternalities; + + #[cfg(feature = "try-runtime")] + #[test] + fn on_runtime_upgrade_pre_post_executed_tuple() { + crate::parameter_types! { + pub static Pre: Vec<&'static str> = Default::default(); + pub static Post: Vec<&'static str> = Default::default(); + } + + macro_rules! 
impl_test_type { + ($name:ident) => { + struct $name; + impl OnRuntimeUpgrade for $name { + fn on_runtime_upgrade() -> Weight { + Default::default() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + Pre::mutate(|s| s.push(stringify!($name))); + Ok(Vec::new()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), &'static str> { + Post::mutate(|s| s.push(stringify!($name))); + Ok(()) + } + } + }; + } + + impl_test_type!(Foo); + impl_test_type!(Bar); + impl_test_type!(Baz); + + TestExternalities::default().execute_with(|| { + Foo::try_on_runtime_upgrade(true).unwrap(); + assert_eq!(Pre::take(), vec!["Foo"]); + assert_eq!(Post::take(), vec!["Foo"]); + + <(Foo, Bar, Baz)>::try_on_runtime_upgrade(true).unwrap(); + assert_eq!(Pre::take(), vec!["Foo", "Bar", "Baz"]); + assert_eq!(Post::take(), vec!["Foo", "Bar", "Baz"]); + + <((Foo, Bar), Baz)>::try_on_runtime_upgrade(true).unwrap(); + assert_eq!(Pre::take(), vec!["Foo", "Bar", "Baz"]); + assert_eq!(Post::take(), vec!["Foo", "Bar", "Baz"]); + + <(Foo, (Bar, Baz))>::try_on_runtime_upgrade(true).unwrap(); + assert_eq!(Pre::take(), vec!["Foo", "Bar", "Baz"]); + assert_eq!(Post::take(), vec!["Foo", "Bar", "Baz"]); + }); + } #[test] fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { - use sp_io::TestExternalities; struct Test; impl OnInitialize for Test { diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index f3e4b955da4a9..20ddb1d34ca1a 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -101,12 +101,20 @@ pub struct CallMetadata { /// Gets the function name of the Call. pub trait GetCallName { - /// Return all function names. + /// Return all function names in the same order as [`GetCallIndex`]. fn get_call_names() -> &'static [&'static str]; /// Return the function name of the Call. fn get_call_name(&self) -> &'static str; } +/// Gets the function index of the Call. +pub trait GetCallIndex { + /// Return all call indices in the same order as [`GetCallName`]. + fn get_call_indices() -> &'static [u8]; + /// Return the index of this Call. + fn get_call_index(&self) -> u8; +} + /// Gets the metadata for the Call - function name and pallet name. pub trait GetCallMetadata { /// Return all module names. diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs index 4e17092ae329b..74a5951142d52 100644 --- a/frame/support/src/traits/schedule.rs +++ b/frame/support/src/traits/schedule.rs @@ -438,6 +438,8 @@ pub mod v3 { /// Schedule a dispatch to happen at the beginning of some block in the future. /// /// - `id`: The identity of the task. This must be unique and will return an error if not. + /// + /// NOTE: This will request `call` to be made available. fn schedule_named( id: TaskName, when: DispatchTime, diff --git a/frame/support/src/traits/stored_map.rs b/frame/support/src/traits/stored_map.rs index a073b268252ad..cbe70f2932349 100644 --- a/frame/support/src/traits/stored_map.rs +++ b/frame/support/src/traits/stored_map.rs @@ -17,7 +17,7 @@ //! Traits and associated datatypes for managing abstract stored values. 
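As a small illustration of the new `GetCallIndex` trait next to the existing `GetCallName` (the docs above guarantee both slices use the same order), a hypothetical sketch; `pallet_example` is an invented pallet name:

```rust
use frame_support::traits::{GetCallIndex, GetCallName};
use sp_std::vec::Vec;

// Hypothetical: `pallet_example::Call<T>` is the call enum of some pallet whose
// macro-generated impls provide both traits.
fn call_table<T: pallet_example::Config>() -> Vec<(u8, &'static str)> {
	let names = pallet_example::Call::<T>::get_call_names();
	let indices = pallet_example::Call::<T>::get_call_indices();
	// Both slices are documented to be in the same order, so zipping them pairs
	// each call index with its call name.
	indices.iter().copied().zip(names.iter().copied()).collect()
}
```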
-use crate::{storage::StorageMap, traits::misc::HandleLifetime}; +use crate::storage::StorageMap; use codec::FullCodec; use sp_runtime::DispatchError; @@ -81,48 +81,29 @@ pub trait StoredMap { /// be the default value), or where the account is being removed or reset back to the default value /// where previously it did exist (though may have been in a default state). This works well with /// system module's `CallOnCreatedAccount` and `CallKillAccount`. -pub struct StorageMapShim(sp_std::marker::PhantomData<(S, L, K, T)>); -impl< - S: StorageMap, - L: HandleLifetime, - K: FullCodec, - T: FullCodec + Default, - > StoredMap for StorageMapShim +pub struct StorageMapShim(sp_std::marker::PhantomData<(S, K, T)>); +impl, K: FullCodec, T: FullCodec + Default> StoredMap + for StorageMapShim { fn get(k: &K) -> T { S::get(k) } fn insert(k: &K, t: T) -> Result<(), DispatchError> { - if !S::contains_key(&k) { - L::created(k)?; - } S::insert(k, t); Ok(()) } fn remove(k: &K) -> Result<(), DispatchError> { if S::contains_key(&k) { - L::killed(k)?; S::remove(k); } Ok(()) } fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { - if !S::contains_key(&k) { - L::created(k)?; - } Ok(S::mutate(k, f)) } fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> Result { S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); let r = f(maybe_value); - let exists = maybe_value.is_some(); - - if !existed && exists { - L::created(k)?; - } else if existed && !exists { - L::killed(k)?; - } Ok(r) }) } @@ -131,15 +112,7 @@ impl< f: impl FnOnce(&mut Option) -> Result, ) -> Result { S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); let r = f(maybe_value)?; - let exists = maybe_value.is_some(); - - if !existed && exists { - L::created(k).map_err(E::from)?; - } else if existed && !exists { - L::killed(k).map_err(E::from)?; - } Ok(r) }) } diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs index e1a96f621bd4f..253b49c6671f8 100644 --- a/frame/support/src/traits/tokens.rs +++ b/frame/support/src/traits/tokens.rs @@ -27,7 +27,10 @@ pub mod nonfungible_v2; pub mod nonfungibles; pub mod nonfungibles_v2; pub use imbalance::Imbalance; +pub mod pay; pub use misc::{ - AssetId, Balance, BalanceConversion, BalanceStatus, ConvertRank, DepositConsequence, - ExistenceRequirement, GetSalary, Locker, WithdrawConsequence, WithdrawReasons, + AssetId, Balance, BalanceStatus, ConversionFromAssetBalance, ConversionToAssetBalance, + ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, Locker, Precision, + Preservation, Provenance, Restriction, WithdrawConsequence, WithdrawReasons, }; +pub use pay::{Pay, PayFromAccount, PaymentStatus}; diff --git a/frame/support/src/traits/tokens/fungible.rs b/frame/support/src/traits/tokens/fungible.rs deleted file mode 100644 index 12bac29727dd4..0000000000000 --- a/frame/support/src/traits/tokens/fungible.rs +++ /dev/null @@ -1,367 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The traits for dealing with a single fungible token class and any associated types. - -use super::{ - misc::{Balance, DepositConsequence, WithdrawConsequence}, - *, -}; -use crate::{ - dispatch::{DispatchError, DispatchResult}, - traits::misc::Get, -}; -use sp_runtime::traits::Saturating; - -mod balanced; -mod imbalance; -pub use balanced::{Balanced, Unbalanced}; -pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; - -/// Trait for providing balance-inspection access to a fungible asset. -pub trait Inspect { - /// Scalar type for representing balance of an account. - type Balance: Balance; - - /// The total amount of issuance in the system. - fn total_issuance() -> Self::Balance; - - /// The total amount of issuance in the system excluding those which are controlled by the - /// system. - fn active_issuance() -> Self::Balance { - Self::total_issuance() - } - - /// The minimum balance any single account may have. - fn minimum_balance() -> Self::Balance; - - /// Get the balance of `who`. - fn balance(who: &AccountId) -> Self::Balance; - - /// Get the maximum amount that `who` can withdraw/transfer successfully. - fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance; - - /// Returns `true` if the balance of `who` may be increased by `amount`. - /// - /// - `who`: The account of which the balance should be increased by `amount`. - /// - `amount`: How much should the balance be increased? - /// - `mint`: Will `amount` be minted to deposit it into `account`? - fn can_deposit(who: &AccountId, amount: Self::Balance, mint: bool) -> DepositConsequence; - - /// Returns `Failed` if the balance of `who` may not be decreased by `amount`, otherwise - /// the consequence. - fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence; -} - -/// Trait for providing an ERC-20 style fungible asset. -pub trait Mutate: Inspect { - /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't - /// possible then an `Err` is returned and nothing is changed. - fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult; - - /// Decrease the balance of `who` by at least `amount`, possibly slightly more in the case of - /// minimum_balance requirements, burning the tokens. If that isn't possible then an `Err` is - /// returned and nothing is changed. If successful, the amount of tokens reduced is returned. - fn burn_from(who: &AccountId, amount: Self::Balance) -> Result; - - // TODO: Remove. - /// Attempt to reduce the balance of `who` by as much as possible up to `amount`, and possibly - /// slightly more due to minimum_balance requirements. If no decrease is possible then an `Err` - /// is returned and nothing is changed. If successful, the amount of tokens reduced is returned. - /// - /// The default implementation just uses `withdraw` along with `reducible_balance` to ensure - /// that it doesn't fail. - fn slash(who: &AccountId, amount: Self::Balance) -> Result { - Self::burn_from(who, Self::reducible_balance(who, false).min(amount)) - } - - /// Transfer funds from one account into another. 
The default implementation uses `mint_into` - /// and `burn_from` and may generate unwanted events. - fn teleport( - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - ) -> Result { - let extra = Self::can_withdraw(&source, amount).into_result()?; - // As we first burn and then mint, we don't need to check if `mint` fits into the supply. - // If we can withdraw/burn it, we can also mint it again. - Self::can_deposit(dest, amount.saturating_add(extra), false).into_result()?; - let actual = Self::burn_from(source, amount)?; - debug_assert!( - actual == amount.saturating_add(extra), - "can_withdraw must agree with withdraw; qed" - ); - match Self::mint_into(dest, actual) { - Ok(_) => Ok(actual), - Err(err) => { - debug_assert!(false, "can_deposit returned true previously; qed"); - // attempt to return the funds back to source - let revert = Self::mint_into(source, actual); - debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); - Err(err) - }, - } - } -} - -/// Trait for providing a fungible asset which can only be transferred. -pub trait Transfer: Inspect { - /// Transfer funds from one account into another. - fn transfer( - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - keep_alive: bool, - ) -> Result; - - /// Reduce the active issuance by some amount. - fn deactivate(_: Self::Balance) {} - - /// Increase the active issuance by some amount, up to the outstanding amount reduced. - fn reactivate(_: Self::Balance) {} -} - -/// Trait for inspecting a fungible asset which can be reserved. -pub trait InspectHold: Inspect { - /// Amount of funds held in reserve by `who`. - fn balance_on_hold(who: &AccountId) -> Self::Balance; - - /// Check to see if some `amount` of funds of `who` may be placed on hold. - fn can_hold(who: &AccountId, amount: Self::Balance) -> bool; -} - -// TODO: Introduce `HoldReason`. -/// Trait for mutating a fungible asset which can be reserved. -pub trait MutateHold: InspectHold + Transfer { - /// Hold some funds in an account. - fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult; - - /// Release up to `amount` held funds in an account. - /// - /// The actual amount released is returned with `Ok`. - /// - /// If `best_effort` is `true`, then the amount actually unreserved and returned as the inner - /// value of `Ok` may be smaller than the `amount` passed. - fn release( - who: &AccountId, - amount: Self::Balance, - best_effort: bool, - ) -> Result; - - // TODO: Introduce repatriate_held - - /// Transfer held funds into a destination account. - /// - /// If `on_hold` is `true`, then the destination account must already exist and the assets - /// transferred will still be on hold in the destination account. If not, then the destination - /// account need not already exist, but must be creatable. - /// - /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without - /// error. - /// - /// The actual amount transferred is returned, or `Err` in the case of error and nothing is - /// changed. - fn transfer_held( - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - best_effort: bool, - on_held: bool, - ) -> Result; -} - -/// Trait for slashing a fungible asset which can be reserved. -pub trait BalancedHold: Balanced + MutateHold { - /// Reduce the balance of some funds on hold in an account. - /// - /// The resulting imbalance is the first item of the tuple returned. - /// - /// As much funds that are on hold up to `amount` will be deducted as possible. 
If this is less - /// than `amount`, then a non-zero second item will be returned. - fn slash_held( - who: &AccountId, - amount: Self::Balance, - ) -> (CreditOf, Self::Balance); -} - -impl + MutateHold> BalancedHold for T { - // TODO: This should be implemented properly, and `slash` should be removed. - fn slash_held( - who: &AccountId, - amount: Self::Balance, - ) -> (CreditOf, Self::Balance) { - let actual = match Self::release(who, amount, true) { - Ok(x) => x, - Err(_) => return (Imbalance::default(), amount), - }; - >::slash(who, actual) - } -} - -/// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying -/// a single item. -pub struct ItemOf< - F: fungibles::Inspect, - A: Get<>::AssetId>, - AccountId, ->(sp_std::marker::PhantomData<(F, A, AccountId)>); - -impl< - F: fungibles::Inspect, - A: Get<>::AssetId>, - AccountId, - > Inspect for ItemOf -{ - type Balance = >::Balance; - fn total_issuance() -> Self::Balance { - >::total_issuance(A::get()) - } - fn active_issuance() -> Self::Balance { - >::active_issuance(A::get()) - } - fn minimum_balance() -> Self::Balance { - >::minimum_balance(A::get()) - } - fn balance(who: &AccountId) -> Self::Balance { - >::balance(A::get(), who) - } - fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance { - >::reducible_balance(A::get(), who, keep_alive) - } - fn can_deposit(who: &AccountId, amount: Self::Balance, mint: bool) -> DepositConsequence { - >::can_deposit(A::get(), who, amount, mint) - } - fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence { - >::can_withdraw(A::get(), who, amount) - } -} - -impl< - F: fungibles::Mutate, - A: Get<>::AssetId>, - AccountId, - > Mutate for ItemOf -{ - fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult { - >::mint_into(A::get(), who, amount) - } - fn burn_from(who: &AccountId, amount: Self::Balance) -> Result { - >::burn_from(A::get(), who, amount) - } -} - -impl< - F: fungibles::Transfer, - A: Get<>::AssetId>, - AccountId, - > Transfer for ItemOf -{ - fn transfer( - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - keep_alive: bool, - ) -> Result { - >::transfer(A::get(), source, dest, amount, keep_alive) - } - fn deactivate(amount: Self::Balance) { - >::deactivate(A::get(), amount) - } - fn reactivate(amount: Self::Balance) { - >::reactivate(A::get(), amount) - } -} - -impl< - F: fungibles::InspectHold, - A: Get<>::AssetId>, - AccountId, - > InspectHold for ItemOf -{ - fn balance_on_hold(who: &AccountId) -> Self::Balance { - >::balance_on_hold(A::get(), who) - } - fn can_hold(who: &AccountId, amount: Self::Balance) -> bool { - >::can_hold(A::get(), who, amount) - } -} - -impl< - F: fungibles::MutateHold, - A: Get<>::AssetId>, - AccountId, - > MutateHold for ItemOf -{ - fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult { - >::hold(A::get(), who, amount) - } - fn release( - who: &AccountId, - amount: Self::Balance, - best_effort: bool, - ) -> Result { - >::release(A::get(), who, amount, best_effort) - } - fn transfer_held( - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - best_effort: bool, - on_hold: bool, - ) -> Result { - >::transfer_held( - A::get(), - source, - dest, - amount, - best_effort, - on_hold, - ) - } -} - -impl< - F: fungibles::Unbalanced, - A: Get<>::AssetId>, - AccountId, - > Unbalanced for ItemOf -{ - fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult { - >::set_balance(A::get(), who, amount) - } - fn 
set_total_issuance(amount: Self::Balance) -> () { - >::set_total_issuance(A::get(), amount) - } - fn decrease_balance( - who: &AccountId, - amount: Self::Balance, - ) -> Result { - >::decrease_balance(A::get(), who, amount) - } - fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { - >::decrease_balance_at_most(A::get(), who, amount) - } - fn increase_balance( - who: &AccountId, - amount: Self::Balance, - ) -> Result { - >::increase_balance(A::get(), who, amount) - } - fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { - >::increase_balance_at_most(A::get(), who, amount) - } -} diff --git a/frame/support/src/traits/tokens/fungible/freeze.rs b/frame/support/src/traits/tokens/fungible/freeze.rs new file mode 100644 index 0000000000000..1ec3a5fadf555 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/freeze.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for putting freezes within a single fungible token class. + +use scale_info::TypeInfo; +use sp_runtime::DispatchResult; + +/// Trait for inspecting a fungible asset which can be frozen. Freezing is essentially setting a +/// minimum balance below which the total balance (inclusive of any funds placed on hold) may not +/// be normally allowed to drop. Generally, freezers will provide an "update" function such that +/// if the total balance does drop below the limit, then the freezer can update their housekeeping +/// accordingly. +pub trait Inspect: super::Inspect { + /// An identifier for a freeze. + type Id: codec::Encode + TypeInfo + 'static; + + /// Amount of funds held in reserve by `who` for the given `id`. + fn balance_frozen(id: &Self::Id, who: &AccountId) -> Self::Balance; + + /// The amount of the balance which can become frozen. Defaults to `total_balance()`. + fn balance_freezable(who: &AccountId) -> Self::Balance { + Self::total_balance(who) + } + + /// Returns `true` if it's possible to introduce a freeze for the given `id` onto the + /// account of `who`. This will be true as long as the implementor supports as many + /// concurrent freeze locks as there are possible values of `id`. + fn can_freeze(id: &Self::Id, who: &AccountId) -> bool; +} + +/// Trait for introducing, altering and removing locks to freeze an account's funds so they never +/// go below a set minimum. +pub trait Mutate: Inspect { + /// Prevent actions which would reduce the balance of the account of `who` below the given + /// `amount` and identify this restriction through the given `id`. Unlike `extend_freeze`, any + /// outstanding freeze in place for `who` under the `id` is dropped. + /// + /// If `amount` is zero, it is equivalent to using `thaw`. + /// + /// Note that `amount` can be greater than the total balance, if desired.
+ fn set_freeze(id: &Self::Id, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Prevent the balance of the account of `who` from being reduced below the given `amount` and + /// identify this restriction through the given `id`. Unlike `set_freeze`, this does not + /// counteract any pre-existing freezes in place for `who` under the `id`. Also unlike + /// `set_freeze`, in the case that `amount` is zero, this is a no-op and never fails. + /// + /// Note that more funds can be locked than the total balance, if desired. + fn extend_freeze(id: &Self::Id, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Remove an existing lock. + fn thaw(id: &Self::Id, who: &AccountId) -> DispatchResult; +} diff --git a/frame/support/src/traits/tokens/fungible/hold.rs b/frame/support/src/traits/tokens/fungible/hold.rs new file mode 100644 index 0000000000000..ddcb8c6ac1da8 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/hold.rs @@ -0,0 +1,393 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for putting holds within a single fungible token class. + +use crate::{ + ensure, + traits::tokens::{ + DepositConsequence::Success, + Fortitude::{self, Force}, + Precision::{self, BestEffort, Exact}, + Preservation::{self, Protect}, + Provenance::Extant, + Restriction::{self, Free, OnHold}, + }, +}; +use scale_info::TypeInfo; +use sp_arithmetic::{ + traits::{CheckedAdd, CheckedSub, Zero}, + ArithmeticError, +}; +use sp_runtime::{DispatchError, DispatchResult, Saturating, TokenError}; + +use super::*; + +/// Trait for inspecting a fungible asset whose accounts support partitioning and slashing. +pub trait Inspect: super::Inspect { + /// An identifier for a hold. Used for disambiguating different holds so that + /// they can be individually replaced or removed and funds from one hold don't accidentally + /// become unreserved or slashed for another. + type Reason: codec::Encode + TypeInfo + 'static; + + /// Amount of funds on hold (for all hold reasons) of `who`. + fn total_balance_on_hold(who: &AccountId) -> Self::Balance; + + /// Get the maximum amount that the `total_balance_on_hold` of `who` can be reduced successfully + /// based on whether we are willing to force the reduction and potentially go below user-level + /// restrictions on the minimum amount of the account. Note: This cannot bring the account into + /// an inconsistent state with regard to any required existential deposit. + /// + /// Always less than `total_balance_on_hold()`. + fn reducible_total_balance_on_hold(who: &AccountId, force: Fortitude) -> Self::Balance; + + /// Amount of funds on hold (for the given reason) of `who`. + fn balance_on_hold(reason: &Self::Reason, who: &AccountId) -> Self::Balance; + + /// Returns `true` if it's possible to place (additional) funds under a hold of a given + /// `reason`.
This may fail if the account has exhausted a limited number of concurrent + /// holds or if it cannot be made to exist (e.g. there is no provider reference). + /// + /// NOTE: This does not take into account changes which could be made to the account of `who` + /// (such as removing a provider reference) after this call is made. Any usage of this should + /// therefore ensure the account is already in the appropriate state prior to calling it. + fn hold_available(reason: &Self::Reason, who: &AccountId) -> bool; + + /// Check to see if some `amount` of funds of `who` may be placed on hold with the given + /// `reason`. Reasons why this may not be true: + /// + /// - The implementor supports only a limited number of concurrent holds on an account which is + /// the possible values of `reason`; + /// - The total balance of the account is less than `amount`; + /// - Removing `amount` from the total balance would kill the account and remove the only + /// provider reference. + /// + /// Note: we pass `true` as the third argument to `reducible_balance` since we assume that if + /// needed the balance can be slashed. If we are using a simple non-forcing reserve-transfer, then + /// we really ought to check that we are not reducing the funds below the freeze-limit (if any). + /// + /// NOTE: This does not take into account changes which could be made to the account of `who` + /// (such as removing a provider reference) after this call is made. Any usage of this should + /// therefore ensure the account is already in the appropriate state prior to calling it. + fn ensure_can_hold( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + ensure!(Self::hold_available(reason, who), TokenError::CannotCreateHold); + ensure!( + amount <= Self::reducible_balance(who, Protect, Force), + TokenError::FundsUnavailable + ); + Ok(()) + } + + /// Check to see if some `amount` of funds of `who` may be placed on hold for the given + /// `reason`. Reasons why this may not be true: + /// + /// - The implementor supports only a limited number of concurrent holds on an account which is + /// the possible values of `reason`; + /// - The main balance of the account is less than `amount`; + /// - Removing `amount` from the main balance would kill the account and remove the only + /// provider reference. + /// + /// NOTE: This does not take into account changes which could be made to the account of `who` + /// (such as removing a provider reference) after this call is made. Any usage of this should + /// therefore ensure the account is already in the appropriate state prior to calling it. + fn can_hold(reason: &Self::Reason, who: &AccountId, amount: Self::Balance) -> bool { + Self::ensure_can_hold(reason, who, amount).is_ok() + } +} + +/// A fungible, holdable token class where the balance on hold can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental inflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Forcefully set the balance on hold of `who` to `amount`. This is independent of any other +/// balances on hold or the main ("free") balance.
+ /// + /// If this call executes successfully, you can `assert_eq!(Self::balance_on_hold(), amount);`. + /// + /// This function does its best to force the balance change through, but will not break system + /// invariants such as any Existential Deposits needed or overflows/underflows. + /// If this cannot be done for some reason (e.g. because the account doesn't exist) then an + /// `Err` is returned. + // Implementation note: This should increment the consumer refs if it moves total on hold from + // zero to non-zero and decrement in the opposite direction. + // + // Since this was not done in the previous logic, this will need either a migration or a + // state item which tracks whether the account is on the old logic or new. + fn set_balance_on_hold( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult; + + /// Reduce the balance on hold of `who` by `amount`. + /// + /// If `precision` is `Exact` and it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If `precision` is `BestEffort`, then + /// reduce the balance of `who` by the most that is possible, up to `amount`. + /// + /// In either case, if `Ok` is returned then the inner is the amount by which it was reduced. + fn decrease_balance_on_hold( + reason: &Self::Reason, + who: &AccountId, + mut amount: Self::Balance, + precision: Precision, + ) -> Result { + let old_balance = Self::balance_on_hold(reason, who); + if let BestEffort = precision { + amount = amount.min(old_balance); + } + let new_balance = old_balance.checked_sub(&amount).ok_or(TokenError::FundsUnavailable)?; + Self::set_balance_on_hold(reason, who, new_balance)?; + Ok(amount) + } + + /// Increase the balance on hold of `who` by `amount`. + /// + /// If it cannot be increased by that amount for some reason, return `Err` and don't increase + /// it at all. If Ok, return the imbalance. + fn increase_balance_on_hold( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + let old_balance = Self::balance_on_hold(reason, who); + let new_balance = if let BestEffort = precision { + old_balance.saturating_add(amount) + } else { + old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)? + }; + let amount = new_balance.saturating_sub(old_balance); + if !amount.is_zero() { + Self::set_balance_on_hold(reason, who, new_balance)?; + } + Ok(amount) + } +} + +/// Trait for mutating a fungible asset which can be placed on hold. +pub trait Mutate: + Inspect + super::Unbalanced + Unbalanced +{ + /// Hold some funds in an account. If a hold for `reason` is already in place, then this + /// will increase it. + fn hold(reason: &Self::Reason, who: &AccountId, amount: Self::Balance) -> DispatchResult { + // NOTE: This doesn't change the total balance of the account so there's no need to + // check liquidity. + + Self::ensure_can_hold(reason, who, amount)?; + // Should be infallible now, but we proceed softly anyway. + Self::decrease_balance(who, amount, Exact, Protect, Force)?; + Self::increase_balance_on_hold(reason, who, amount, BestEffort)?; + Self::done_hold(reason, who, amount); + Ok(()) + } + + /// Release up to `amount` held funds in an account. + /// + /// The actual amount released is returned with `Ok`. + /// + /// If `precision` is `BestEffort`, then the amount actually unreserved and returned as the + /// inner value of `Ok` may be smaller than the `amount` passed. + /// + /// NOTE!
The inner of the `Ok` result variant returns the *actual* amount released. This is the + /// opposite of the `ReservableCurrency::unreserve()` result, which gives the amount not able + /// to be released! + fn release( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + // NOTE: This doesn't change the total balance of the account so there's no need to + // check liquidity. + + // We want to make sure we can deposit the amount in advance. If we can't then something is + // very wrong. + ensure!(Self::can_deposit(who, amount, Extant) == Success, TokenError::CannotCreate); + // Get the amount we can actually take from the hold. This might be less than what we want + // if we're only doing a best-effort. + let amount = Self::decrease_balance_on_hold(reason, who, amount, precision)?; + // Increase the main balance by what we took. We always do a best-effort here because we + // already checked that we can deposit before. + let actual = Self::increase_balance(who, amount, BestEffort)?; + Self::done_release(reason, who, actual); + Ok(actual) + } + + /// Attempt to decrease the balance of `who` which is held for the given `reason` by `amount`. + /// + /// If `precision` is `BestEffort`, then as much as possible is reduced, up to `amount`, and the + /// amount of tokens reduced is returned. Otherwise, if the total amount can be reduced, then it + /// is and the amount returned, and if not, then nothing changes and `Err` is returned. + /// + /// If `force` is `Force`, then locks/freezes will be ignored. This should only be used when + /// conducting slashing or other activity which materially disadvantages the account holder + /// since it could provide a means of circumventing freezes. + fn burn_held( + reason: &Self::Reason, + who: &AccountId, + mut amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + // We must check total-balance requirements if `!force`. + let liquid = Self::reducible_total_balance_on_hold(who, force); + if let BestEffort = precision { + amount = amount.min(liquid); + } else { + ensure!(amount <= liquid, TokenError::Frozen); + } + let amount = Self::decrease_balance_on_hold(reason, who, amount, precision)?; + Self::set_total_issuance(Self::total_issuance().saturating_sub(amount)); + Self::done_burn_held(reason, who, amount); + Ok(amount) + } + + /// Transfer held funds into a destination account. + /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `precision` is `BestEffort`, then an amount less than `amount` may be transferred without + /// error. + /// + /// If `force` is `Force`, then other fund-locking mechanisms may be disregarded. It should be + /// left as `Polite` in most circumstances, but when you want the same power as a `slash`, it + /// may be `Force`. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_on_hold( + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + mut amount: Self::Balance, + precision: Precision, + mode: Restriction, + force: Fortitude, + ) -> Result { + // We must check total-balance requirements if `force` is `Fortitude::Polite`. 
+ let have = Self::balance_on_hold(reason, source); + let liquid = Self::reducible_total_balance_on_hold(source, force); + if let BestEffort = precision { + amount = amount.min(liquid).min(have); + } else { + ensure!(amount <= liquid, TokenError::Frozen); + ensure!(amount <= have, TokenError::FundsUnavailable); + } + + // We want to make sure we can deposit the amount in advance. If we can't then something is + // very wrong. + ensure!(Self::can_deposit(dest, amount, Extant) == Success, TokenError::CannotCreate); + ensure!(mode == Free || Self::hold_available(reason, dest), TokenError::CannotCreateHold); + + let amount = Self::decrease_balance_on_hold(reason, source, amount, precision)?; + let actual = if mode == OnHold { + Self::increase_balance_on_hold(reason, dest, amount, precision)? + } else { + Self::increase_balance(dest, amount, precision)? + }; + Self::done_transfer_on_hold(reason, source, dest, actual); + Ok(actual) + } + + /// Transfer some `amount` of free balance from `source` to become owned by `dest` but on hold + /// for `reason`. + /// + /// If `precision` is `BestEffort`, then an amount less than `amount` may be transferred without + /// error. + /// + /// `source` must obey the requirements of `expendability`. + /// + /// If `force` is `Force`, then other fund-locking mechanisms may be disregarded. It should be + /// left as `Polite` in most circumstances, but when you want the same power as a `slash`, it + /// may be `Force`. + /// + /// The amount placed on hold is returned or `Err` in the case of error and nothing is changed. + /// + /// WARNING: This may return an error after a partial storage mutation. It should be used only + /// inside a transactional storage context and an `Err` result must imply a storage rollback. + fn transfer_and_hold( + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + expendability: Preservation, + force: Fortitude, + ) -> Result { + ensure!(Self::hold_available(reason, dest), TokenError::CannotCreateHold); + ensure!(Self::can_deposit(dest, amount, Extant) == Success, TokenError::CannotCreate); + let actual = Self::decrease_balance(source, amount, precision, expendability, force)?; + Self::increase_balance_on_hold(reason, dest, actual, precision)?; + Self::done_transfer_on_hold(reason, source, dest, actual); + Ok(actual) + } + + fn done_hold(_reason: &Self::Reason, _who: &AccountId, _amount: Self::Balance) {} + fn done_release(_reason: &Self::Reason, _who: &AccountId, _amount: Self::Balance) {} + fn done_burn_held(_reason: &Self::Reason, _who: &AccountId, _amount: Self::Balance) {} + fn done_transfer_on_hold( + _reason: &Self::Reason, + _source: &AccountId, + _dest: &AccountId, + _amount: Self::Balance, + ) { + } + fn done_transfer_and_hold( + _reason: &Self::Reason, + _source: &AccountId, + _dest: &AccountId, + _transferred: Self::Balance, + ) { + } +} + +/// Trait for slashing a fungible asset which can be placed on hold. +pub trait Balanced: super::Balanced + Unbalanced { + /// Reduce the balance of some funds on hold in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much of the funds on hold as possible, up to `amount`, will be deducted. If this is less + /// than `amount`, then a non-zero second item will be returned.
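+ ///
+ /// A hedged usage sketch only: `Balances`, `reason`, `offender` and `penalty` are placeholder
+ /// names standing in for an arbitrary implementor, hold reason and call-site values.
+ ///
+ /// ```ignore
+ /// let (credit, shortfall) = Balances::slash(&reason, &offender, penalty);
+ /// // `shortfall` is the portion of `penalty` not covered by funds on hold. `credit` should be
+ /// // resolved into some account, otherwise it is handled by `OnDropCredit` when dropped.
+ /// ```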
+ fn slash( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> (Credit, Self::Balance) { + let decrease = Self::decrease_balance_on_hold(reason, who, amount, BestEffort) + .unwrap_or(Default::default()); + let credit = + Imbalance::::new(decrease); + Self::done_slash(reason, who, decrease); + (credit, amount.saturating_sub(decrease)) + } + + fn done_slash(_reason: &Self::Reason, _who: &AccountId, _amount: Self::Balance) {} +} diff --git a/frame/support/src/traits/tokens/fungible/imbalance.rs b/frame/support/src/traits/tokens/fungible/imbalance.rs index 1b3d16c62ddf4..de85924a4de7c 100644 --- a/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -18,8 +18,11 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. -use super::{super::Imbalance as ImbalanceT, balanced::Balanced, misc::Balance, *}; -use crate::traits::misc::{SameOrOther, TryDrop}; +use super::{super::Imbalance as ImbalanceT, Balanced, *}; +use crate::traits::{ + misc::{SameOrOther, TryDrop}, + tokens::Balance, +}; use sp_runtime::{traits::Zero, RuntimeDebug}; use sp_std::marker::PhantomData; @@ -30,6 +33,10 @@ pub trait HandleImbalanceDrop { fn handle(amount: Balance); } +impl HandleImbalanceDrop for () { + fn handle(_: Balance) {} +} + /// An imbalance in the system, representing a divergence of recorded token supply from the sum of /// the balances of all accounts. This is `must_use` in order to ensure it gets handled (placing /// into an account, settling from an account or altering the supply). @@ -135,7 +142,7 @@ impl, OppositeOnDrop: HandleImbalance } /// Imbalance implying that the total_issuance value is less than the sum of all account balances. -pub type DebtOf = Imbalance< +pub type Debt = Imbalance< >::Balance, // This will generally be implemented by increasing the total_issuance value. >::OnDropDebt, @@ -144,7 +151,7 @@ pub type DebtOf = Imbalance< /// Imbalance implying that the total_issuance value is greater than the sum of all account /// balances. -pub type CreditOf = Imbalance< +pub type Credit = Imbalance< >::Balance, // This will generally be implemented by decreasing the total_issuance value. >::OnDropCredit, diff --git a/frame/support/src/traits/tokens/fungible/item_of.rs b/frame/support/src/traits/tokens/fungible/item_of.rs new file mode 100644 index 0000000000000..cf2d96ef28791 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/item_of.rs @@ -0,0 +1,451 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Adapter to use `fungibles::*` implementations as `fungible::*`. 
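+//!
+//! A minimal sketch of the intended usage, where `Assets`, `NativeAssetId` and `AccountId` are
+//! placeholder names for whatever `fungibles` implementation, asset-id getter and account-id
+//! type a runtime provides:
+//!
+//! ```ignore
+//! // `NativeAssetId: Get<_>` selects one asset out of the `fungibles` implementation (for
+//! // example pallet-assets); `ItemOf` then exposes it through the `fungible::*` traits.
+//! type SingleAsset = ItemOf<Assets, NativeAssetId, AccountId>;
+//! ```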
+ +use sp_core::Get; +use sp_runtime::{DispatchError, DispatchResult}; + +use super::*; +use crate::traits::tokens::{ + fungibles, DepositConsequence, Fortitude, Imbalance as ImbalanceT, Precision, Preservation, + Provenance, Restriction, WithdrawConsequence, +}; + +/// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying +/// a single item. +pub struct ItemOf< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, +>(sp_std::marker::PhantomData<(F, A, AccountId)>); + +impl< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, + > Inspect for ItemOf +{ + type Balance = >::Balance; + fn total_issuance() -> Self::Balance { + >::total_issuance(A::get()) + } + fn active_issuance() -> Self::Balance { + >::active_issuance(A::get()) + } + fn minimum_balance() -> Self::Balance { + >::minimum_balance(A::get()) + } + fn balance(who: &AccountId) -> Self::Balance { + >::balance(A::get(), who) + } + fn total_balance(who: &AccountId) -> Self::Balance { + >::total_balance(A::get(), who) + } + fn reducible_balance( + who: &AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance { + >::reducible_balance(A::get(), who, preservation, force) + } + fn can_deposit( + who: &AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence { + >::can_deposit(A::get(), who, amount, provenance) + } + fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence { + >::can_withdraw(A::get(), who, amount) + } +} + +impl< + F: fungibles::InspectHold, + A: Get<>::AssetId>, + AccountId, + > InspectHold for ItemOf +{ + type Reason = F::Reason; + + fn reducible_total_balance_on_hold(who: &AccountId, force: Fortitude) -> Self::Balance { + >::reducible_total_balance_on_hold( + A::get(), + who, + force, + ) + } + fn hold_available(reason: &Self::Reason, who: &AccountId) -> bool { + >::hold_available(A::get(), reason, who) + } + fn total_balance_on_hold(who: &AccountId) -> Self::Balance { + >::total_balance_on_hold(A::get(), who) + } + fn balance_on_hold(reason: &Self::Reason, who: &AccountId) -> Self::Balance { + >::balance_on_hold(A::get(), reason, who) + } + fn can_hold(reason: &Self::Reason, who: &AccountId, amount: Self::Balance) -> bool { + >::can_hold(A::get(), reason, who, amount) + } +} + +impl< + F: fungibles::InspectFreeze, + A: Get<>::AssetId>, + AccountId, + > InspectFreeze for ItemOf +{ + type Id = F::Id; + fn balance_frozen(id: &Self::Id, who: &AccountId) -> Self::Balance { + >::balance_frozen(A::get(), id, who) + } + fn balance_freezable(who: &AccountId) -> Self::Balance { + >::balance_freezable(A::get(), who) + } + fn can_freeze(id: &Self::Id, who: &AccountId) -> bool { + >::can_freeze(A::get(), id, who) + } +} + +impl< + F: fungibles::Unbalanced, + A: Get<>::AssetId>, + AccountId, + > Unbalanced for ItemOf +{ + fn handle_dust(dust: regular::Dust) + where + Self: Sized, + { + >::handle_dust(fungibles::Dust(A::get(), dust.0)) + } + fn write_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result, DispatchError> { + >::write_balance(A::get(), who, amount) + } + fn set_total_issuance(amount: Self::Balance) -> () { + >::set_total_issuance(A::get(), amount) + } + fn decrease_balance( + who: &AccountId, + amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result { + >::decrease_balance( + A::get(), + who, + amount, + precision, + preservation, + force, + ) + } + fn increase_balance( + who: &AccountId, + amount: Self::Balance, + 
precision: Precision, + ) -> Result { + >::increase_balance(A::get(), who, amount, precision) + } +} + +impl< + F: fungibles::UnbalancedHold, + A: Get<>::AssetId>, + AccountId, + > UnbalancedHold for ItemOf +{ + fn set_balance_on_hold( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + >::set_balance_on_hold( + A::get(), + reason, + who, + amount, + ) + } + fn decrease_balance_on_hold( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + >::decrease_balance_on_hold( + A::get(), + reason, + who, + amount, + precision, + ) + } + fn increase_balance_on_hold( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + >::increase_balance_on_hold( + A::get(), + reason, + who, + amount, + precision, + ) + } +} + +impl< + F: fungibles::Mutate, + A: Get<>::AssetId>, + AccountId, + > Mutate for ItemOf +{ + fn mint_into(who: &AccountId, amount: Self::Balance) -> Result { + >::mint_into(A::get(), who, amount) + } + fn burn_from( + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + >::burn_from(A::get(), who, amount, precision, force) + } + fn shelve(who: &AccountId, amount: Self::Balance) -> Result { + >::shelve(A::get(), who, amount) + } + fn restore(who: &AccountId, amount: Self::Balance) -> Result { + >::restore(A::get(), who, amount) + } + fn transfer( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + preservation: Preservation, + ) -> Result { + >::transfer(A::get(), source, dest, amount, preservation) + } + + fn set_balance(who: &AccountId, amount: Self::Balance) -> Self::Balance { + >::set_balance(A::get(), who, amount) + } +} + +impl< + F: fungibles::MutateHold, + A: Get<>::AssetId>, + AccountId, + > MutateHold for ItemOf +{ + fn hold(reason: &Self::Reason, who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::hold(A::get(), reason, who, amount) + } + fn release( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + >::release(A::get(), reason, who, amount, precision) + } + fn burn_held( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + >::burn_held( + A::get(), + reason, + who, + amount, + precision, + force, + ) + } + fn transfer_on_hold( + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + mode: Restriction, + force: Fortitude, + ) -> Result { + >::transfer_on_hold( + A::get(), + reason, + source, + dest, + amount, + precision, + mode, + force, + ) + } + fn transfer_and_hold( + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result { + >::transfer_and_hold( + A::get(), + reason, + source, + dest, + amount, + precision, + preservation, + force, + ) + } +} + +impl< + F: fungibles::MutateFreeze, + A: Get<>::AssetId>, + AccountId, + > MutateFreeze for ItemOf +{ + fn set_freeze(id: &Self::Id, who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::set_freeze(A::get(), id, who, amount) + } + fn extend_freeze(id: &Self::Id, who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::extend_freeze(A::get(), id, who, amount) + } + fn thaw(id: &Self::Id, who: &AccountId) -> DispatchResult { + >::thaw(A::get(), id, who) + } +} + +pub 
struct ConvertImbalanceDropHandler( + sp_std::marker::PhantomData<(AccountId, Balance, AssetIdType, AssetId, Handler)>, +); + +impl< + AccountId, + Balance, + AssetIdType, + AssetId: Get, + Handler: crate::traits::tokens::fungibles::HandleImbalanceDrop, + > HandleImbalanceDrop + for ConvertImbalanceDropHandler +{ + fn handle(amount: Balance) { + Handler::handle(AssetId::get(), amount) + } +} + +impl< + F: fungibles::Inspect + + fungibles::Unbalanced + + fungibles::Balanced, + A: Get<>::AssetId>, + AccountId, + > Balanced for ItemOf +{ + type OnDropDebt = + ConvertImbalanceDropHandler; + type OnDropCredit = + ConvertImbalanceDropHandler; + fn deposit( + who: &AccountId, + value: Self::Balance, + precision: Precision, + ) -> Result, DispatchError> { + >::deposit(A::get(), who, value, precision) + .map(|debt| Imbalance::new(debt.peek())) + } + fn issue(amount: Self::Balance) -> Credit { + Imbalance::new(>::issue(A::get(), amount).peek()) + } + fn pair(amount: Self::Balance) -> (Debt, Credit) { + let (a, b) = >::pair(A::get(), amount); + (Imbalance::new(a.peek()), Imbalance::new(b.peek())) + } + fn rescind(amount: Self::Balance) -> Debt { + Imbalance::new(>::rescind(A::get(), amount).peek()) + } + fn resolve( + who: &AccountId, + credit: Credit, + ) -> Result<(), Credit> { + let credit = fungibles::Imbalance::new(A::get(), credit.peek()); + >::resolve(who, credit) + .map_err(|credit| Imbalance::new(credit.peek())) + } + fn settle( + who: &AccountId, + debt: Debt, + preservation: Preservation, + ) -> Result, Debt> { + let debt = fungibles::Imbalance::new(A::get(), debt.peek()); + >::settle(who, debt, preservation) + .map(|credit| Imbalance::new(credit.peek())) + .map_err(|debt| Imbalance::new(debt.peek())) + } + fn withdraw( + who: &AccountId, + value: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result, DispatchError> { + >::withdraw( + A::get(), + who, + value, + precision, + preservation, + force, + ) + .map(|credit| Imbalance::new(credit.peek())) + } +} + +impl< + F: fungibles::BalancedHold, + A: Get<>::AssetId>, + AccountId, + > BalancedHold for ItemOf +{ + fn slash( + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> (Credit, Self::Balance) { + let (credit, amount) = + >::slash(A::get(), reason, who, amount); + (Imbalance::new(credit.peek()), amount) + } +} + +#[test] +fn test() {} diff --git a/frame/support/src/traits/tokens/fungible/mod.rs b/frame/support/src/traits/tokens/fungible/mod.rs new file mode 100644 index 0000000000000..204b85ac29448 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/mod.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for dealing with a single fungible token class and any associated types. +//! +//! ### User-implememted traits +//! - `Inspect`: Regular balance inspector functions. +//! 
- `Unbalanced`: Low-level balance mutating functions. Does not guarantee proper book-keeping and +//! so should not be called into directly from application code. Other traits depend on this and +//! provide default implementations based on it. +//! - `UnbalancedHold`: Low-level balance mutating functions for balances placed on hold. Does not +//! guarantee proper book-keeping and so should not be called into directly from application code. +//! Other traits depend on this and provide default implementations based on it. +//! - `Mutate`: Regular balance mutator functions. Pre-implemented using `Unbalanced`, though the +//! `done_*` functions should likely be reimplemented in case you want to do something following +//! the operation such as emit events. +//! - `InspectHold`: Inspector functions for balances on hold. +//! - `MutateHold`: Mutator functions for balances on hold. Mostly pre-implemented using +//! `UnbalancedHold`. +//! - `InspectFreeze`: Inspector functions for frozen balance. +//! - `MutateFreeze`: Mutator functions for frozen balance. +//! - `Balanced`: One-sided mutator functions for regular balances, which return imbalance objects +//! which guarantee eventual book-keeping. May be useful for some sophisticated operations where +//! funds must be removed from an account before it is known precisely what should be done with +//! them. + +pub mod freeze; +pub mod hold; +mod imbalance; +mod item_of; +mod regular; + +pub use freeze::{Inspect as InspectFreeze, Mutate as MutateFreeze}; +pub use hold::{ + Balanced as BalancedHold, Inspect as InspectHold, Mutate as MutateHold, + Unbalanced as UnbalancedHold, +}; +pub use imbalance::{Credit, Debt, HandleImbalanceDrop, Imbalance}; +pub use item_of::ItemOf; +pub use regular::{ + Balanced, DecreaseIssuance, Dust, IncreaseIssuance, Inspect, Mutate, Unbalanced, +}; diff --git a/frame/support/src/traits/tokens/fungible/regular.rs b/frame/support/src/traits/tokens/fungible/regular.rs new file mode 100644 index 0000000000000..5a201b33b7cd9 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/regular.rs @@ -0,0 +1,506 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! `Inspect` and `Mutate` traits for working with regular balances. + +use crate::{ + dispatch::DispatchError, + ensure, + traits::{ + tokens::{ + misc::{ + Balance, DepositConsequence, + Fortitude::{self, Force, Polite}, + Precision::{self, BestEffort, Exact}, + Preservation::{self, Expendable}, + Provenance::{self, Extant}, + WithdrawConsequence, + }, + Imbalance as ImbalanceT, + }, + SameOrOther, TryDrop, + }, +}; +use sp_arithmetic::traits::{CheckedAdd, CheckedSub, One}; +use sp_runtime::{traits::Saturating, ArithmeticError, TokenError}; +use sp_std::marker::PhantomData; + +use super::{Credit, Debt, HandleImbalanceDrop, Imbalance}; + +/// Trait for providing balance-inspection access to a fungible asset. 
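+///
+/// As a hedged illustration only, with `Balances` standing for an arbitrary implementor of this
+/// trait, callers typically distinguish the two balance views like so:
+///
+/// ```ignore
+/// // Everything ultimately attributable to `who`, including funds on hold or frozen:
+/// let total = Balances::total_balance(&who);
+/// // What can actually be withdrawn right now while keeping the account intact:
+/// let spendable = Balances::reducible_balance(&who, Preservation::Protect, Fortitude::Polite);
+/// assert!(spendable <= total);
+/// ```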
+pub trait Inspect: Sized { + /// Scalar type for representing balance of an account. + type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The total amount of issuance in the system excluding those which are controlled by the + /// system. + fn active_issuance() -> Self::Balance { + Self::total_issuance() + } + + /// The minimum balance any single account may have. + fn minimum_balance() -> Self::Balance; + + /// Get the total amount of funds whose ultimate beneficial ownership can be determined as `who`. + /// + /// This may include funds which are wholly inaccessible to `who`, either temporarily or even + /// indefinitely. + /// + /// For the amount of the balance which is currently free to be removed from the account without + /// error, use `reducible_balance`. + /// + /// For the amount of the balance which may eventually be free to be removed from the account, + /// use `balance()`. + fn total_balance(who: &AccountId) -> Self::Balance; + + /// Get the balance of `who` which does not include funds which are exclusively allocated to + /// subsystems of the chain ("on hold" or "reserved"). + /// + /// In general this isn't especially useful outside of tests, and for practical purposes, you'll + /// want to use `reducible_balance()`. + fn balance(who: &AccountId) -> Self::Balance; + + /// Get the maximum amount that `who` can withdraw/transfer successfully based on whether the + /// account should be kept alive (`preservation`) or whether we are willing to force the + /// reduction and potentially go below user-level restrictions on the minimum amount of the + /// account. + /// + /// Always less than or equal to `balance()`. + fn reducible_balance( + who: &AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance; + + /// Returns `true` if the balance of `who` may be increased by `amount`. + /// + /// - `who`: The account of which the balance should be increased by `amount`. + /// - `amount`: How much should the balance be increased? + /// - `provenance`: Will `amount` be minted to deposit it into `account` or is it already in the + /// system? + fn can_deposit( + who: &AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence; + + /// Returns `Success` if the balance of `who` may be decreased by `amount`, otherwise + /// the consequence. + fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence; +} + +/// Special dust type which can be type-safely converted into a `Credit`. +#[must_use] +pub struct Dust>(pub(crate) T::Balance); + +impl> Dust { + /// Convert `Dust` into an instance of `Credit`. + pub fn into_credit(self) -> Credit { + Credit::::new(self.0) + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental inflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Create some dust and handle it with `Self::handle_dust`. This is an unbalanced operation +/// and it must only be used when an account is modified in a raw fashion, outside of the entire + /// fungibles API.
The `amount` is capped at `Self::minimum_balance() - 1`. + /// + /// This should not be reimplemented. + fn handle_raw_dust(amount: Self::Balance) { + Self::handle_dust(Dust(amount.min(Self::minimum_balance().saturating_sub(One::one())))) + } + + /// Do something with the dust which has been destroyed from the system. `Dust` can be converted + /// into a `Credit` with the `Balanced` trait impl. + fn handle_dust(dust: Dust); + + /// Forcefully set the balance of `who` to `amount`. + /// + /// If this call executes successfully, you can `assert_eq!(Self::balance(), amount);`. + /// + /// For implementations which include one or more balances on hold, then these are *not* + /// included in the `amount`. + /// + /// This function does its best to force the balance change through, but will not break system + /// invariants such as any Existential Deposits needed or overflows/underflows. + /// If this cannot be done for some reason (e.g. because the account cannot be created, deleted + /// or would overflow) then an `Err` is returned. + /// + /// If `Ok` is returned then its inner, if `Some`, is the amount which was discarded as dust due + /// to existential deposit requirements. The default implementation of `decrease_balance` and + /// `increase_balance` converts this into an `Imbalance` and then passes it into `handle_dust`. + fn write_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result, DispatchError>; + + /// Set the total issuance to `amount`. + fn set_total_issuance(amount: Self::Balance); + + /// Reduce the balance of `who` by `amount`. + /// + /// If `precision` is `Exact` and it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If `precision` is `BestEffort`, then + /// reduce the balance of `who` by the most that is possible, up to `amount`. + /// + /// In either case, if `Ok` is returned then the inner is the amount by which it was reduced. + /// Minimum balance will be respected and thus the returned amount may be up to + /// `Self::minimum_balance() - 1` greater than `amount` in the case that the reduction caused + /// the account to be deleted. + fn decrease_balance( + who: &AccountId, + mut amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result { + let old_balance = Self::balance(who); + let free = Self::reducible_balance(who, preservation, force); + if let BestEffort = precision { + amount = amount.min(free); + } + let new_balance = old_balance.checked_sub(&amount).ok_or(TokenError::FundsUnavailable)?; + if let Some(dust) = Self::write_balance(who, new_balance)? { + Self::handle_dust(Dust(dust)); + } + Ok(old_balance.saturating_sub(new_balance)) + } + + /// Increase the balance of `who` by `amount`. + /// + /// If it cannot be increased by that amount for some reason, return `Err` and don't increase + /// it at all. If Ok, return the imbalance. + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the account of `who` is zero. + fn increase_balance( + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + let old_balance = Self::balance(who); + let new_balance = if let BestEffort = precision { + old_balance.saturating_add(amount) + } else { + old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)? + }; + if new_balance < Self::minimum_balance() { + // Attempt to increase from 0 to below minimum -> stays at zero.
+ if let BestEffort = precision { + Ok(Default::default()) + } else { + Err(TokenError::BelowMinimum.into()) + } + } else { + if new_balance == old_balance { + Ok(Default::default()) + } else { + if let Some(dust) = Self::write_balance(who, new_balance)? { + Self::handle_dust(Dust(dust)); + } + Ok(new_balance.saturating_sub(old_balance)) + } + } + } + + /// Reduce the active issuance by some amount. + fn deactivate(_: Self::Balance) {} + + /// Increase the active issuance by some amount, up to the outstanding amount reduced. + fn reactivate(_: Self::Balance) {} +} + +/// Trait for providing a basic fungible asset. +pub trait Mutate: Inspect + Unbalanced { + /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't + /// possible then an `Err` is returned and nothing is changed. + fn mint_into(who: &AccountId, amount: Self::Balance) -> Result { + Self::total_issuance().checked_add(&amount).ok_or(ArithmeticError::Overflow)?; + let actual = Self::increase_balance(who, amount, Exact)?; + Self::set_total_issuance(Self::total_issuance().saturating_add(actual)); + Self::done_mint_into(who, amount); + Ok(actual) + } + + /// Decrease the balance of `who` by at least `amount`, possibly slightly more in the case of + /// minimum-balance requirements, burning the tokens. If that isn't possible then an `Err` is + /// returned and nothing is changed. If successful, the amount of tokens reduced is returned. + fn burn_from( + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + let actual = Self::reducible_balance(who, Expendable, force).min(amount); + ensure!(actual == amount || precision == BestEffort, TokenError::FundsUnavailable); + Self::total_issuance().checked_sub(&actual).ok_or(ArithmeticError::Overflow)?; + let actual = Self::decrease_balance(who, actual, BestEffort, Expendable, force)?; + Self::set_total_issuance(Self::total_issuance().saturating_sub(actual)); + Self::done_burn_from(who, actual); + Ok(actual) + } + + /// Attempt to decrease the `asset` balance of `who` by `amount`. + /// + /// Equivalent to `burn_from`, except with an expectation that within the bounds of some + /// universal issuance, the total assets `suspend`ed and `resume`d will be equivalent. The + /// implementation may be configured such that the total assets suspended may never be less than + /// the total assets resumed (which is the invariant for an issuing system), or the reverse + /// (which is the invariant in a non-issuing system). + /// + /// Because of this expectation, any metadata associated with the asset is expected to survive + /// the suspend-resume cycle. + fn shelve(who: &AccountId, amount: Self::Balance) -> Result { + let actual = Self::reducible_balance(who, Expendable, Polite).min(amount); + ensure!(actual == amount, TokenError::FundsUnavailable); + Self::total_issuance().checked_sub(&actual).ok_or(ArithmeticError::Overflow)?; + let actual = Self::decrease_balance(who, actual, BestEffort, Expendable, Polite)?; + Self::set_total_issuance(Self::total_issuance().saturating_sub(actual)); + Self::done_shelve(who, actual); + Ok(actual) + } + + /// Attempt to increase the `asset` balance of `who` by `amount`. + /// + /// Equivalent to `mint_into`, except with an expectation that within the bounds of some + /// universal issuance, the total assets `suspend`ed and `resume`d will be equivalent.
The + /// implementation may be configured such that the total assets suspended may never be less than + /// the total assets resumed (which is the invariant for an issuing system), or the reverse + /// (which is the invariant in a non-issuing system). + /// + /// Because of this expectation, any metadata associated with the asset is expected to survive + /// the suspend-resume cycle. + fn restore(who: &AccountId, amount: Self::Balance) -> Result { + Self::total_issuance().checked_add(&amount).ok_or(ArithmeticError::Overflow)?; + let actual = Self::increase_balance(who, amount, Exact)?; + Self::set_total_issuance(Self::total_issuance().saturating_add(actual)); + Self::done_restore(who, amount); + Ok(actual) + } + + /// Transfer funds from one account into another. + fn transfer( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + preservation: Preservation, + ) -> Result { + let _extra = Self::can_withdraw(source, amount).into_result(preservation != Expendable)?; + Self::can_deposit(dest, amount, Extant).into_result()?; + Self::decrease_balance(source, amount, BestEffort, preservation, Polite)?; + // This should never fail as we checked `can_deposit` earlier. But we do a best-effort + // anyway. + let _ = Self::increase_balance(dest, amount, BestEffort); + Self::done_transfer(source, dest, amount); + Ok(amount) + } + + /// Simple infallible function to force an account to have a particular balance, good for use + /// in tests and benchmarks but not recommended for production code owing to the lack of + /// error reporting. + /// + /// Returns the new balance. + fn set_balance(who: &AccountId, amount: Self::Balance) -> Self::Balance { + let b = Self::balance(who); + if b > amount { + Self::burn_from(who, b - amount, BestEffort, Force).map(|d| b.saturating_sub(d)) + } else { + Self::mint_into(who, amount - b).map(|d| b.saturating_add(d)) + } + .unwrap_or(b) + } + + fn done_mint_into(_who: &AccountId, _amount: Self::Balance) {} + fn done_burn_from(_who: &AccountId, _amount: Self::Balance) {} + fn done_shelve(_who: &AccountId, _amount: Self::Balance) {} + fn done_restore(_who: &AccountId, _amount: Self::Balance) {} + fn done_transfer(_source: &AccountId, _dest: &AccountId, _amount: Self::Balance) {} +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. +pub struct IncreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for IncreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_add(amount)) + } +} + +/// Simple handler for an imbalance drop which decreases the total issuance of the system by the +/// imbalance amount. Used for leftover credit. +pub struct DecreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for DecreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_sub(amount)) + } +} + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. +/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect + Unbalanced { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used.
+ type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(amount: Self::Balance) -> Debt { + let old = Self::total_issuance(); + let new = old.saturating_sub(amount); + Self::set_total_issuance(new); + let delta = old - new; + Self::done_rescind(delta); + Imbalance::::new(delta) + } + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(amount: Self::Balance) -> Credit { + let old = Self::total_issuance(); + let new = old.saturating_add(amount); + Self::set_total_issuance(new); + let delta = new - old; + Self::done_issue(delta); + Imbalance::::new(delta) + } + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(amount: Self::Balance) -> (Debt, Credit) { + (Self::rescind(amount), Self::issue(amount)) + } + + /// Mints `value` into the account of `who`, creating it as needed. + /// + /// If `precision` is `BestEffort` and `value` in full could not be minted (e.g. due to + /// overflow), then the maximum is minted, up to `value`. If `precision` is `Exact`, then + /// exactly `value` must be minted into the account of `who` or the operation will fail with an + /// `Err` and nothing will change. + /// + /// If the operation is successful, this will return `Ok` with a `Debt` of the total value + /// added to the account. + fn deposit( + who: &AccountId, + value: Self::Balance, + precision: Precision, + ) -> Result, DispatchError> { + let increase = Self::increase_balance(who, value, precision)?; + Self::done_deposit(who, increase); + Ok(Imbalance::::new(increase)) + } + + /// Removes `value` balance from `who` account if possible. + /// + /// If `precision` is `BestEffort` and `value` in full could not be removed (e.g. due to + /// underflow), then the maximum is removed, up to `value`. If `precision` is `Exact`, then + /// exactly `value` must be removed from the account of `who` or the operation will fail with an + /// `Err` and nothing will change. + /// + /// If the removal is needed but not possible, then it returns `Err` and nothing is changed. + /// If the account needed to be deleted, then slightly more than `value` may be removed from the + /// account owning since up to (but not including) minimum balance may also need to be removed. + /// + /// If the operation is successful, this will return `Ok` with a `Credit` of the total value + /// removed from the account. + fn withdraw( + who: &AccountId, + value: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result, DispatchError> { + let decrease = Self::decrease_balance(who, value, precision, preservation, force)?; + Self::done_withdraw(who, decrease); + Ok(Imbalance::::new(decrease)) + } + + /// The balance of `who` is increased in order to counter `credit`. 
If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. + /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: Credit, + ) -> Result<(), Credit> { + let v = credit.peek(); + let debt = match Self::deposit(who, v, Exact) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + let result = credit.offset(debt).try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: Debt, + preservation: Preservation, + ) -> Result, Debt> { + let amount = debt.peek(); + let credit = match Self::withdraw(who, amount, Exact, preservation, Polite) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + + match credit.offset(debt) { + SameOrOther::None => Ok(Credit::::zero()), + SameOrOther::Same(dust) => Ok(dust), + SameOrOther::Other(rest) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + }, + } + } + + fn done_rescind(_amount: Self::Balance) {} + fn done_issue(_amount: Self::Balance) {} + fn done_deposit(_who: &AccountId, _amount: Self::Balance) {} + fn done_withdraw(_who: &AccountId, _amount: Self::Balance) {} +} diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs deleted file mode 100644 index d146832f3b649..0000000000000 --- a/frame/support/src/traits/tokens/fungibles.rs +++ /dev/null @@ -1,332 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The traits for sets of fungible tokens and any associated types. - -use super::{ - misc::{AssetId, Balance}, - *, -}; -use crate::dispatch::{DispatchError, DispatchResult}; -use sp_runtime::traits::Saturating; -use sp_std::vec::Vec; - -pub mod approvals; -mod balanced; -pub mod enumerable; -pub use enumerable::InspectEnumerable; -pub mod metadata; -pub use balanced::{Balanced, Unbalanced}; -mod imbalance; -pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; -pub mod roles; - -/// Trait for providing balance-inspection access to a set of named fungible assets. -pub trait Inspect { - /// Means of identifying one asset class from another. - type AssetId: AssetId; - - /// Scalar type for representing balance of an account. - type Balance: Balance; - - /// The total amount of issuance in the system. - fn total_issuance(asset: Self::AssetId) -> Self::Balance; - - /// The total amount of issuance in the system excluding those which are controlled by the - /// system. 
- fn active_issuance(asset: Self::AssetId) -> Self::Balance { - Self::total_issuance(asset) - } - - /// The minimum balance any single account may have. - fn minimum_balance(asset: Self::AssetId) -> Self::Balance; - - /// Get the `asset` balance of `who`. - fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance; - - /// Get the maximum amount of `asset` that `who` can withdraw/transfer successfully. - fn reducible_balance(asset: Self::AssetId, who: &AccountId, keep_alive: bool) -> Self::Balance; - - /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. - /// - /// - `asset`: The asset that should be deposited. - /// - `who`: The account of which the balance should be increased by `amount`. - /// - `amount`: How much should the balance be increased? - /// - `mint`: Will `amount` be minted to deposit it into `account`? - fn can_deposit( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - mint: bool, - ) -> DepositConsequence; - - /// Returns `Failed` if the `asset` balance of `who` may not be decreased by `amount`, otherwise - /// the consequence. - fn can_withdraw( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - ) -> WithdrawConsequence; - - /// Returns `true` if an `asset` exists. - fn asset_exists(asset: Self::AssetId) -> bool; -} - -/// Trait for reading metadata from a fungible asset. -pub trait InspectMetadata: Inspect { - /// Return the name of an asset. - fn name(asset: &Self::AssetId) -> Vec; - - /// Return the symbol of an asset. - fn symbol(asset: &Self::AssetId) -> Vec; - - /// Return the decimals of an asset. - fn decimals(asset: &Self::AssetId) -> u8; -} - -/// Trait for providing a set of named fungible assets which can be created and destroyed. -pub trait Mutate: Inspect { - /// Attempt to increase the `asset` balance of `who` by `amount`. - /// - /// If not possible then don't do anything. Possible reasons for failure include: - /// - Minimum balance not met. - /// - Account cannot be created (e.g. because there is no provider reference and/or the asset - /// isn't considered worth anything). - /// - /// Since this is an operation which should be possible to take alone, if successful it will - /// increase the overall supply of the underlying token. - fn mint_into(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; - - /// Attempt to reduce the `asset` balance of `who` by `amount`. - /// - /// If not possible then don't do anything. Possible reasons for failure include: - /// - Less funds in the account than `amount` - /// - Liquidity requirements (locks, reservations) prevent the funds from being removed - /// - Operation would require destroying the account and it is required to stay alive (e.g. - /// because it's providing a needed provider reference). - /// - /// Since this is an operation which should be possible to take alone, if successful it will - /// reduce the overall supply of the underlying token. - /// - /// Due to minimum balance requirements, it's possible that the amount withdrawn could be up to - /// `Self::minimum_balance() - 1` more than the `amount`. The total amount withdrawn is returned - /// in an `Ok` result. This may be safely ignored if you don't mind the overall supply reducing. - fn burn_from( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - ) -> Result; - - /// Attempt to reduce the `asset` balance of `who` by as much as possible up to `amount`, and - /// possibly slightly more due to minimum_balance requirements. 
If no decrease is possible then - /// an `Err` is returned and nothing is changed. If successful, the amount of tokens reduced is - /// returned. - /// - /// The default implementation just uses `withdraw` along with `reducible_balance` to ensure - /// that is doesn't fail. - fn slash( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - ) -> Result { - Self::burn_from(asset, who, Self::reducible_balance(asset, who, false).min(amount)) - } - - /// Transfer funds from one account into another. The default implementation uses `mint_into` - /// and `burn_from` and may generate unwanted events. - fn teleport( - asset: Self::AssetId, - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - ) -> Result { - let extra = Self::can_withdraw(asset, &source, amount).into_result()?; - // As we first burn and then mint, we don't need to check if `mint` fits into the supply. - // If we can withdraw/burn it, we can also mint it again. - Self::can_deposit(asset, dest, amount.saturating_add(extra), false).into_result()?; - let actual = Self::burn_from(asset, source, amount)?; - debug_assert!( - actual == amount.saturating_add(extra), - "can_withdraw must agree with withdraw; qed" - ); - match Self::mint_into(asset, dest, actual) { - Ok(_) => Ok(actual), - Err(err) => { - debug_assert!(false, "can_deposit returned true previously; qed"); - // attempt to return the funds back to source - let revert = Self::mint_into(asset, source, actual); - debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); - Err(err) - }, - } - } -} - -/// Trait for providing a set of named fungible assets which can only be transferred. -pub trait Transfer: Inspect { - /// Transfer funds from one account into another. - fn transfer( - asset: Self::AssetId, - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - keep_alive: bool, - ) -> Result; - - /// Reduce the active issuance by some amount. - fn deactivate(_: Self::AssetId, _: Self::Balance) {} - - /// Increase the active issuance by some amount, up to the outstanding amount reduced. - fn reactivate(_: Self::AssetId, _: Self::Balance) {} -} - -/// Trait for inspecting a set of named fungible assets which can be placed on hold. -pub trait InspectHold: Inspect { - /// Amount of funds held in hold. - fn balance_on_hold(asset: Self::AssetId, who: &AccountId) -> Self::Balance; - - /// Check to see if some `amount` of `asset` may be held on the account of `who`. - fn can_hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> bool; -} - -/// Trait for mutating a set of named fungible assets which can be placed on hold. -pub trait MutateHold: InspectHold + Transfer { - /// Hold some funds in an account. - fn hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; - - /// Release some funds in an account from being on hold. - /// - /// If `best_effort` is `true`, then the amount actually released and returned as the inner - /// value of `Ok` may be smaller than the `amount` passed. - fn release( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - best_effort: bool, - ) -> Result; - - /// Transfer held funds into a destination account. - /// - /// If `on_hold` is `true`, then the destination account must already exist and the assets - /// transferred will still be on hold in the destination account. If not, then the destination - /// account need not already exist, but must be creatable. 
- /// - /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without - /// error. - /// - /// The actual amount transferred is returned, or `Err` in the case of error and nothing is - /// changed. - fn transfer_held( - asset: Self::AssetId, - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - best_effort: bool, - on_hold: bool, - ) -> Result; -} - -/// Trait for mutating one of several types of fungible assets which can be held. -pub trait BalancedHold: Balanced + MutateHold { - /// Release and slash some funds in an account. - /// - /// The resulting imbalance is the first item of the tuple returned. - /// - /// As much funds up to `amount` will be deducted as possible. If this is less than `amount`, - /// then a non-zero second item will be returned. - fn slash_held( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - ) -> (CreditOf, Self::Balance); -} - -impl + MutateHold> BalancedHold for T { - fn slash_held( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - ) -> (CreditOf, Self::Balance) { - let actual = match Self::release(asset, who, amount, true) { - Ok(x) => x, - Err(_) => return (Imbalance::zero(asset), amount), - }; - >::slash(asset, who, actual) - } -} - -/// Trait for providing the ability to create new fungible assets. -pub trait Create: Inspect { - /// Create a new fungible asset. - fn create( - id: Self::AssetId, - admin: AccountId, - is_sufficient: bool, - min_balance: Self::Balance, - ) -> DispatchResult; -} - -/// Trait for providing the ability to destroy existing fungible assets. -pub trait Destroy: Inspect { - /// Start the destruction an existing fungible asset. - /// * `id`: The `AssetId` to be destroyed. successfully. - /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy - /// command. If not provided, no authorization checks will be performed before destroying - /// asset. - fn start_destroy(id: Self::AssetId, maybe_check_owner: Option) -> DispatchResult; - - /// Destroy all accounts associated with a given asset. - /// `destroy_accounts` should only be called after `start_destroy` has been called, and the - /// asset is in a `Destroying` state - /// - /// * `id`: The identifier of the asset to be destroyed. This must identify an existing asset. - /// * `max_items`: The maximum number of accounts to be destroyed for a given call of the - /// function. This value should be small enough to allow the operation fit into a logical - /// block. - /// - /// Response: - /// * u32: Total number of approvals which were actually destroyed - /// - /// Due to weight restrictions, this function may need to be called multiple - /// times to fully destroy all approvals. It will destroy `max_items` approvals at a - /// time. - fn destroy_accounts(id: Self::AssetId, max_items: u32) -> Result; - /// Destroy all approvals associated with a given asset up to the `max_items` - /// `destroy_approvals` should only be called after `start_destroy` has been called, and the - /// asset is in a `Destroying` state - /// - /// * `id`: The identifier of the asset to be destroyed. This must identify an existing asset. - /// * `max_items`: The maximum number of accounts to be destroyed for a given call of the - /// function. This value should be small enough to allow the operation fit into a logical - /// block. 
- /// - /// Response: - /// * u32: Total number of approvals which were actually destroyed - /// - /// Due to weight restrictions, this function may need to be called multiple - /// times to fully destroy all approvals. It will destroy `max_items` approvals at a - /// time. - fn destroy_approvals(id: Self::AssetId, max_items: u32) -> Result; - - /// Complete destroying asset and unreserve currency. - /// `finish_destroy` should only be called after `start_destroy` has been called, and the - /// asset is in a `Destroying` state. All accounts or approvals should be destroyed before - /// hand. - /// - /// * `id`: The identifier of the asset to be destroyed. This must identify an existing asset. - fn finish_destroy(id: Self::AssetId) -> DispatchResult; -} diff --git a/frame/support/src/traits/tokens/fungibles/enumerable.rs b/frame/support/src/traits/tokens/fungibles/enumerable.rs index 5d7266d9f2c16..08bb784a7dbf6 100644 --- a/frame/support/src/traits/tokens/fungibles/enumerable.rs +++ b/frame/support/src/traits/tokens/fungibles/enumerable.rs @@ -15,10 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::traits::fungibles::Inspect; - /// Interface for enumerating assets in existence or owned by a given account. -pub trait InspectEnumerable: Inspect { +pub trait Inspect: super::Inspect { type AssetsIterator; /// Returns an iterator of the collections in existence. diff --git a/frame/support/src/traits/tokens/fungibles/freeze.rs b/frame/support/src/traits/tokens/fungibles/freeze.rs new file mode 100644 index 0000000000000..08549c2d4b744 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/freeze.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for putting freezes within a single fungible token class. + +use scale_info::TypeInfo; +use sp_runtime::DispatchResult; + +/// Trait for inspecting a fungible asset which can be frozen. Freezing is essentially setting a +/// minimum balance below which the total balance (inclusive of any funds placed on hold) may not +/// be normally allowed to drop. Generally, freezers will provide an "update" function such that +/// if the total balance does drop below the limit, then the freezer can update their housekeeping +/// accordingly. +pub trait Inspect: super::Inspect { + /// An identifier for a freeze. + type Id: codec::Encode + TypeInfo + 'static; + + /// Amount of funds held in reserve by `who` for the given `id`. + fn balance_frozen(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> Self::Balance; + + /// The amount of the balance which can become frozen. Defaults to `total_balance()`. 
+ fn balance_freezable(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + Self::total_balance(asset, who) + } + + /// Returns `true` if it's possible to introduce a freeze for the given `id` onto the + /// account of `who`. This will be true as long as the implementor supports as many + /// concurrent freeze locks as there are possible values of `id`. + fn can_freeze(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> bool; +} + +/// Trait for introducing, altering and removing locks to freeze an account's funds so they never +/// go below a set minimum. +pub trait Mutate: Inspect { + /// Prevent actions which would reduce the balance of the account of `who` below the given + /// `amount` and identify this restriction though the given `id`. Unlike `extend_freeze`, any + /// outstanding freeze in place for `who` under the `id` are dropped. + /// + /// If `amount` is zero, it is equivalent to using `thaw`. + /// + /// Note that `amount` can be greater than the total balance, if desired. + fn set_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult; + + /// Prevent the balance of the account of `who` from being reduced below the given `amount` and + /// identify this restriction though the given `id`. Unlike `set_freeze`, this does not + /// counteract any pre-existing freezes in place for `who` under the `id`. Also unlike + /// `set_freeze`, in the case that `amount` is zero, this is no-op and never fails. + /// + /// Note that more funds can be locked than the total balance, if desired. + fn extend_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult; + + /// Remove an existing lock. + fn thaw(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> DispatchResult; +} diff --git a/frame/support/src/traits/tokens/fungibles/hold.rs b/frame/support/src/traits/tokens/fungibles/hold.rs new file mode 100644 index 0000000000000..68580ebff4bce --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/hold.rs @@ -0,0 +1,457 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for putting holds within a single fungible token class. + +use crate::{ + ensure, + traits::tokens::{ + DepositConsequence::Success, + Fortitude::{self, Force}, + Precision::{self, BestEffort, Exact}, + Preservation::{self, Protect}, + Provenance::Extant, + Restriction::{self, Free, OnHold}, + }, +}; +use scale_info::TypeInfo; +use sp_arithmetic::{ + traits::{CheckedAdd, CheckedSub, Zero}, + ArithmeticError, +}; +use sp_runtime::{DispatchError, DispatchResult, Saturating, TokenError}; + +use super::*; + +/// Trait for inspecting a fungible asset whose accounts support partitioning and slashing. +pub trait Inspect: super::Inspect { + /// An identifier for a hold. 
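// Editor's illustrative sketch (not part of this patch): how a pallet might drive the
// `fungibles::freeze` traits above. The helper names and the idea of a "voting" freeze are
// hypothetical; generic parameters are written out here even though the flattened patch text
// elides them.
use frame_support::traits::tokens::fungibles::MutateFreeze;
use sp_runtime::DispatchResult;

/// Freeze `amount` of `asset` for `who` under a pallet-chosen freeze `id`.
fn lock_for_voting<AccountId, F: MutateFreeze<AccountId>>(
	asset: F::AssetId,
	id: &F::Id,
	who: &AccountId,
	amount: F::Balance,
) -> DispatchResult {
	// `set_freeze` replaces any previous freeze held under the same `id`; use `extend_freeze`
	// to only ever raise the frozen amount.
	F::set_freeze(asset, id, who, amount)
}

/// Remove the freeze once the vote no longer needs backing.
fn unlock_after_voting<AccountId, F: MutateFreeze<AccountId>>(
	asset: F::AssetId,
	id: &F::Id,
	who: &AccountId,
) -> DispatchResult {
	F::thaw(asset, id, who)
}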
Used for disambiguating different holds so that + /// they can be individually replaced or removed and funds from one hold don't accidentally + /// become unreserved or slashed for another. + type Reason: codec::Encode + TypeInfo + 'static; + + /// Amount of funds on hold (for all hold reasons) of `who`. + fn total_balance_on_hold(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Get the maximum amount that the `total_balance_on_hold` of `who` can be reduced successfully + /// based on whether we are willing to force the reduction and potentially go below user-level + /// restrictions on the minimum amount of the account. Note: This cannot bring the account into + /// an inconsistent state with regards any required existential deposit. + /// + /// Always less than `total_balance_on_hold()`. + fn reducible_total_balance_on_hold( + asset: Self::AssetId, + who: &AccountId, + force: Fortitude, + ) -> Self::Balance; + + /// Amount of funds on hold (for the given reason) of `who`. + fn balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + ) -> Self::Balance; + + /// Returns `true` if it's possible to place (additional) funds under a hold of a given + /// `reason`. This may fail if the account has exhausted a limited number of concurrent + /// holds or if it cannot be made to exist (e.g. there is no provider reference). + /// + /// NOTE: This does not take into account changes which could be made to the account of `who` + /// (such as removing a provider reference) after this call is made. Any usage of this should + /// therefore ensure the account is already in the appropriate state prior to calling it. + fn hold_available(asset: Self::AssetId, reason: &Self::Reason, who: &AccountId) -> bool; + + /// Check to see if some `amount` of funds of `who` may be placed on hold with the given + /// `reason`. Reasons why this may not be true: + /// + /// - The implementor supports only a limited number of concurrent holds on an account which is + /// the possible values of `reason`; + /// - The total balance of the account is less than `amount`; + /// - Removing `amount` from the total balance would kill the account and remove the only + /// provider reference. + /// + /// Note: we pass `Fortitude::Force` as the last argument to `reducible_balance` since we assume + /// that if needed the balance can slashed. If we are using a simple non-forcing + /// reserve-transfer, then we really ought to check that we are not reducing the funds below the + /// freeze-limit (if any). + /// + /// NOTE: This does not take into account changes which could be made to the account of `who` + /// (such as removing a provider reference) after this call is made. Any usage of this should + /// therefore ensure the account is already in the appropriate state prior to calling it. + fn ensure_can_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + ensure!(Self::hold_available(asset, reason, who), TokenError::CannotCreateHold); + ensure!( + amount <= Self::reducible_balance(asset, who, Protect, Force), + TokenError::FundsUnavailable + ); + Ok(()) + } + + /// Check to see if some `amount` of funds of `who` may be placed on hold for the given + /// `reason`. 
Reasons why this may not be true: + /// + /// - The implementor supports only a limited number of concurrent holds on an account, which is + /// bounded by the possible values of `reason`; + /// - The main balance of the account is less than `amount`; + /// - Removing `amount` from the main balance would kill the account and remove the only + /// provider reference. + /// + /// NOTE: This does not take into account changes which could be made to the account of `who` + /// (such as removing a provider reference) after this call is made. Any usage of this should + /// therefore ensure the account is already in the appropriate state prior to calling it. + fn can_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> bool { + Self::ensure_can_hold(asset, reason, who, amount).is_ok() + } +} + +/// A fungible, holdable token class where the balance on hold can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental inflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Forcefully set the balance on hold of `who` to `amount`. This is independent of any other + /// balances on hold or the main ("free") balance. + /// + /// If this call executes successfully, you can `assert_eq!(Self::balance_on_hold(), amount);`. + /// + /// This function does its best to force the balance change through, but will not break system + /// invariants such as any Existential Deposits needed or overflows/underflows. + /// If this cannot be done for some reason (e.g. because the account doesn't exist) then an + /// `Err` is returned. + // Implementation note: This should increment the consumer refs if it moves total on hold from + // zero to non-zero and decrement in the opposite direction. + // + // Since this was not done in the previous logic, this will need either a migration or a + // state item which tracks whether the account is on the old logic or new. + fn set_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult; + + /// Reduce the balance on hold of `who` by `amount`. + /// + /// If `precision` is `Precision::Exact` and it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If `precision` is + /// `Precision::BestEffort`, then reduce the balance of `who` by the most that is possible, up + /// to `amount`. + /// + /// In either case, if `Ok` is returned then the inner is the amount by which it was reduced. + fn decrease_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + mut amount: Self::Balance, + precision: Precision, + ) -> Result { + let old_balance = Self::balance_on_hold(asset, reason, who); + if let BestEffort = precision { + amount = amount.min(old_balance); + } + let new_balance = old_balance.checked_sub(&amount).ok_or(TokenError::FundsUnavailable)?; + Self::set_balance_on_hold(asset, reason, who, new_balance)?; + Ok(amount) + } + + /// Increase the balance on hold of `who` by `amount`. + /// + /// If it cannot be increased by that amount for some reason, return `Err` and don't increase + /// it at all. If Ok, return the imbalance.
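// Editor's illustrative sketch (not part of this patch): per-reason hold accounting with the
// `hold::Inspect` trait above. The helper is hypothetical; it only composes the inspection
// methods defined in this file.
use frame_support::traits::tokens::fungibles::InspectHold;
use sp_runtime::Saturating;

/// Amount held for `reason` and the amount held under every other reason combined.
fn split_holds<AccountId, F: InspectHold<AccountId>>(
	asset: F::AssetId,
	reason: &F::Reason,
	who: &AccountId,
) -> (F::Balance, F::Balance) {
	let for_reason = F::balance_on_hold(asset, reason, who);
	let total = F::total_balance_on_hold(asset, who);
	// Holds under distinct reasons are tracked independently, so the remainder is whatever is
	// held under other reasons.
	(for_reason, total.saturating_sub(for_reason))
}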
+ fn increase_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + let old_balance = Self::balance_on_hold(asset, reason, who); + let new_balance = if let BestEffort = precision { + old_balance.saturating_add(amount) + } else { + old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)? + }; + let amount = new_balance.saturating_sub(old_balance); + if !amount.is_zero() { + Self::set_balance_on_hold(asset, reason, who, new_balance)?; + } + Ok(amount) + } +} + +/// Trait for slashing a fungible asset which can be place on hold. +pub trait Balanced: super::Balanced + Unbalanced { + /// Reduce the balance of some funds on hold in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds that are on hold up to `amount` will be deducted as possible. If this is less + /// than `amount`, then a non-zero second item will be returned. + fn slash( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> (Credit, Self::Balance) { + let decrease = Self::decrease_balance_on_hold(asset, reason, who, amount, BestEffort) + .unwrap_or(Default::default()); + let credit = + Imbalance::::new( + asset, decrease, + ); + Self::done_slash(asset, reason, who, decrease); + (credit, amount.saturating_sub(decrease)) + } + + fn done_slash( + _asset: Self::AssetId, + _reason: &Self::Reason, + _who: &AccountId, + _amount: Self::Balance, + ) { + } +} + +/// Trait for mutating a fungible asset which can be placed on hold. +pub trait Mutate: + Inspect + super::Unbalanced + Unbalanced +{ + /// Hold some funds in an account. If a hold for `reason` is already in place, then this + /// will increase it. + fn hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + // NOTE: This doesn't change the total balance of the account so there's no need to + // check liquidity. + + Self::ensure_can_hold(asset, reason, who, amount)?; + // Should be infallible now, but we proceed softly anyway. + Self::decrease_balance(asset, who, amount, Exact, Protect, Force)?; + Self::increase_balance_on_hold(asset, reason, who, amount, BestEffort)?; + Self::done_hold(asset, reason, who, amount); + Ok(()) + } + + /// Release up to `amount` held funds in an account. + /// + /// The actual amount released is returned with `Ok`. + /// + /// If `precision` is `BestEffort`, then the amount actually unreserved and returned as the + /// inner value of `Ok` may be smaller than the `amount` passed. + fn release( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + // NOTE: This doesn't change the total balance of the account so there's no need to + // check liquidity. + + // We want to make sure we can deposit the amount in advance. If we can't then something is + // very wrong. + ensure!(Self::can_deposit(asset, who, amount, Extant) == Success, TokenError::CannotCreate); + // Get the amount we can actually take from the hold. This might be less than what we want + // if we're only doing a best-effort. + let amount = Self::decrease_balance_on_hold(asset, reason, who, amount, precision)?; + // Increase the main balance by what we took. We always do a best-effort here because we + // already checked that we can deposit before. 
+ let actual = Self::increase_balance(asset, who, amount, BestEffort)?; + Self::done_release(asset, reason, who, actual); + Ok(actual) + } + + /// Attempt to decrease the balance of `who` which is held for the given `reason` by `amount`. + /// + /// If `precision` is true, then as much as possible is reduced, up to `amount`, and the + /// amount of tokens reduced is returned. Otherwise, if the total amount can be reduced, then it + /// is and the amount returned, and if not, then nothing changes and `Err` is returned. + /// + /// If `force` is `Force`, then locks/freezes will be ignored. This should only be used when + /// conducting slashing or other activity which materially disadvantages the account holder + /// since it could provide a means of circumventing freezes. + fn burn_held( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + mut amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + // We must check total-balance requirements if `!force`. + let liquid = Self::reducible_total_balance_on_hold(asset, who, force); + if let BestEffort = precision { + amount = amount.min(liquid); + } else { + ensure!(amount <= liquid, TokenError::Frozen); + } + let amount = Self::decrease_balance_on_hold(asset, reason, who, amount, precision)?; + Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_sub(amount)); + Self::done_burn_held(asset, reason, who, amount); + Ok(amount) + } + + /// Transfer held funds into a destination account. + /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `precision` is `BestEffort`, then an amount less than `amount` may be transferred without + /// error. + /// + /// If `force` is `Force`, then other fund-locking mechanisms may be disregarded. It should be + /// left as `Regular` in most circumstances, but when you want the same power as a `slash`, it + /// may be `Force`. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + mut amount: Self::Balance, + precision: Precision, + mode: Restriction, + force: Fortitude, + ) -> Result { + // We must check total-balance requirements if `!force`. + let have = Self::balance_on_hold(asset, reason, source); + let liquid = Self::reducible_total_balance_on_hold(asset, source, force); + if let BestEffort = precision { + amount = amount.min(liquid).min(have); + } else { + ensure!(amount <= liquid, TokenError::Frozen); + ensure!(amount <= have, TokenError::FundsUnavailable); + } + + // We want to make sure we can deposit the amount in advance. If we can't then something is + // very wrong. + ensure!( + Self::can_deposit(asset, dest, amount, Extant) == Success, + TokenError::CannotCreate + ); + ensure!( + mode == Free || Self::hold_available(asset, reason, dest), + TokenError::CannotCreateHold + ); + + let amount = Self::decrease_balance_on_hold(asset, reason, source, amount, precision)?; + let actual = if mode == OnHold { + Self::increase_balance_on_hold(asset, reason, dest, amount, precision)? + } else { + Self::increase_balance(asset, dest, amount, precision)? 
+ }; + Self::done_transfer_on_hold(asset, reason, source, dest, actual); + Ok(actual) + } + + /// Transfer some `amount` of free balance from `source` to become owned by `dest` but on hold + /// for `reason`. + /// for `reason`. + /// + /// If `precision` is `BestEffort`, then an amount less than `amount` may be transferred without + /// error. + /// + /// `source` must obey the requirements of `keep_alive`. + /// + /// If `force` is `Force`, then other fund-locking mechanisms may be disregarded. It should be + /// left as `Regular` in most circumstances, but when you want the same power as a `slash`, it + /// may be `Force`. + /// + /// The amount placed on hold is returned or `Err` in the case of error and nothing is changed. + /// + /// WARNING: This may return an error after a partial storage mutation. It should be used only + /// inside a transactional storage context and an `Err` result must imply a storage rollback. + fn transfer_and_hold( + asset: Self::AssetId, + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + expendability: Preservation, + force: Fortitude, + ) -> Result { + ensure!(Self::hold_available(asset, reason, dest), TokenError::CannotCreateHold); + ensure!( + Self::can_deposit(asset, dest, amount, Extant) == Success, + TokenError::CannotCreate + ); + let actual = + Self::decrease_balance(asset, source, amount, precision, expendability, force)?; + Self::increase_balance_on_hold(asset, reason, dest, actual, precision)?; + Self::done_transfer_on_hold(asset, reason, source, dest, actual); + Ok(actual) + } + + fn done_hold( + _asset: Self::AssetId, + _reason: &Self::Reason, + _who: &AccountId, + _amount: Self::Balance, + ) { + } + fn done_release( + _asset: Self::AssetId, + _reason: &Self::Reason, + _who: &AccountId, + _amount: Self::Balance, + ) { + } + fn done_burn_held( + _asset: Self::AssetId, + _reason: &Self::Reason, + _who: &AccountId, + _amount: Self::Balance, + ) { + } + fn done_transfer_on_hold( + _asset: Self::AssetId, + _reason: &Self::Reason, + _source: &AccountId, + _dest: &AccountId, + _amount: Self::Balance, + ) { + } + fn done_transfer_and_hold( + _asset: Self::AssetId, + _reason: &Self::Reason, + _source: &AccountId, + _dest: &AccountId, + _transferred: Self::Balance, + ) { + } +} diff --git a/frame/support/src/traits/tokens/fungibles/imbalance.rs b/frame/support/src/traits/tokens/fungibles/imbalance.rs index 87445859cb5c7..ab18eec3811ff 100644 --- a/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -18,12 +18,11 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. -use super::{ - balanced::Balanced, - fungibles::{AssetId, Balance}, - *, +use super::*; +use crate::traits::{ + misc::{SameOrOther, TryDrop}, + tokens::{AssetId, Balance}, }; -use crate::traits::misc::{SameOrOther, TryDrop}; use sp_runtime::{traits::Zero, RuntimeDebug}; use sp_std::marker::PhantomData; @@ -160,7 +159,7 @@ impl< } /// Imbalance implying that the total_issuance value is less than the sum of all account balances. -pub type DebtOf = Imbalance< +pub type Debt = Imbalance< >::AssetId, >::Balance, // This will generally be implemented by increasing the total_issuance value. @@ -170,7 +169,7 @@ pub type DebtOf = Imbalance< /// Imbalance implying that the total_issuance value is greater than the sum of all account /// balances. 
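// Editor's illustrative sketch (not part of this patch): a typical lifecycle for the
// `hold::Mutate` trait defined in hold.rs above -- take a deposit by placing funds on hold,
// then either release it or slash it by burning the held amount. The helper names are
// hypothetical.
use frame_support::traits::tokens::{fungibles::MutateHold, Fortitude, Precision};
use sp_runtime::{DispatchError, DispatchResult};

fn take_deposit<AccountId, F: MutateHold<AccountId>>(
	asset: F::AssetId,
	reason: &F::Reason,
	who: &AccountId,
	amount: F::Balance,
) -> DispatchResult {
	// Fails if no hold is available for `reason` or the account cannot spare `amount`.
	F::hold(asset, reason, who, amount)
}

fn refund_deposit<AccountId, F: MutateHold<AccountId>>(
	asset: F::AssetId,
	reason: &F::Reason,
	who: &AccountId,
	amount: F::Balance,
) -> Result<F::Balance, DispatchError> {
	// `Exact`: insist that the full `amount` comes off hold or fail.
	F::release(asset, reason, who, amount, Precision::Exact)
}

fn slash_deposit<AccountId, F: MutateHold<AccountId>>(
	asset: F::AssetId,
	reason: &F::Reason,
	who: &AccountId,
	amount: F::Balance,
) -> Result<F::Balance, DispatchError> {
	// Burn whatever is actually held, up to `amount`; `Force` disregards freezes because this
	// is a punitive action.
	F::burn_held(asset, reason, who, amount, Precision::BestEffort, Fortitude::Force)
}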
-pub type CreditOf = Imbalance< +pub type Credit = Imbalance< >::AssetId, >::Balance, // This will generally be implemented by decreasing the total_issuance value. diff --git a/frame/support/src/traits/tokens/fungibles/lifetime.rs b/frame/support/src/traits/tokens/fungibles/lifetime.rs new file mode 100644 index 0000000000000..9e2c306f6f38a --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/lifetime.rs @@ -0,0 +1,84 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for creating and destroying assets. + +use sp_runtime::{DispatchError, DispatchResult}; + +use super::Inspect; + +/// Trait for providing the ability to create new fungible assets. +pub trait Create: Inspect { + /// Create a new fungible asset. + fn create( + id: Self::AssetId, + admin: AccountId, + is_sufficient: bool, + min_balance: Self::Balance, + ) -> DispatchResult; +} + +/// Trait for providing the ability to destroy existing fungible assets. +pub trait Destroy: Inspect { + /// Start the destruction an existing fungible asset. + /// * `id`: The `AssetId` to be destroyed. successfully. + /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy + /// command. If not provided, no authorization checks will be performed before destroying + /// asset. + fn start_destroy(id: Self::AssetId, maybe_check_owner: Option) -> DispatchResult; + + /// Destroy all accounts associated with a given asset. + /// `destroy_accounts` should only be called after `start_destroy` has been called, and the + /// asset is in a `Destroying` state + /// + /// * `id`: The identifier of the asset to be destroyed. This must identify an existing asset. + /// * `max_items`: The maximum number of accounts to be destroyed for a given call of the + /// function. This value should be small enough to allow the operation fit into a logical + /// block. + /// + /// Response: + /// * u32: Total number of approvals which were actually destroyed + /// + /// Due to weight restrictions, this function may need to be called multiple + /// times to fully destroy all approvals. It will destroy `max_items` approvals at a + /// time. + fn destroy_accounts(id: Self::AssetId, max_items: u32) -> Result; + /// Destroy all approvals associated with a given asset up to the `max_items` + /// `destroy_approvals` should only be called after `start_destroy` has been called, and the + /// asset is in a `Destroying` state + /// + /// * `id`: The identifier of the asset to be destroyed. This must identify an existing asset. + /// * `max_items`: The maximum number of accounts to be destroyed for a given call of the + /// function. This value should be small enough to allow the operation fit into a logical + /// block. 
+ /// + /// Response: + /// * u32: Total number of approvals which were actually destroyed + /// + /// Due to weight restrictions, this function may need to be called multiple + /// times to fully destroy all approvals. It will destroy `max_items` approvals at a + /// time. + fn destroy_approvals(id: Self::AssetId, max_items: u32) -> Result; + + /// Complete destroying asset and unreserve currency. + /// `finish_destroy` should only be called after `start_destroy` has been called, and the + /// asset is in a `Destroying` state. All accounts or approvals should be destroyed before + /// hand. + /// + /// * `id`: The identifier of the asset to be destroyed. This must identify an existing asset. + fn finish_destroy(id: Self::AssetId) -> DispatchResult; +} diff --git a/frame/support/src/traits/tokens/fungibles/mod.rs b/frame/support/src/traits/tokens/fungibles/mod.rs new file mode 100644 index 0000000000000..697eff39ff748 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/mod.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for sets of fungible tokens and any associated types. + +pub mod approvals; +mod enumerable; +pub mod freeze; +pub mod hold; +mod imbalance; +mod lifetime; +pub mod metadata; +mod regular; +pub mod roles; + +pub use enumerable::Inspect as InspectEnumerable; +pub use freeze::{Inspect as InspectFreeze, Mutate as MutateFreeze}; +pub use hold::{ + Balanced as BalancedHold, Inspect as InspectHold, Mutate as MutateHold, + Unbalanced as UnbalancedHold, +}; +pub use imbalance::{Credit, Debt, HandleImbalanceDrop, Imbalance}; +pub use lifetime::{Create, Destroy}; +pub use regular::{ + Balanced, DecreaseIssuance, Dust, IncreaseIssuance, Inspect, Mutate, Unbalanced, +}; diff --git a/frame/support/src/traits/tokens/fungibles/regular.rs b/frame/support/src/traits/tokens/fungibles/regular.rs new file mode 100644 index 0000000000000..27d1a50b34805 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/regular.rs @@ -0,0 +1,570 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! `Inspect` and `Mutate` traits for working with regular balances. 
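// Editor's illustrative sketch (not part of this patch): the multi-step teardown implied by
// the `Destroy` trait in lifetime.rs above. `BATCH` and the helper are hypothetical, and in a
// real runtime the repeated calls would be spread across blocks to respect weight limits
// rather than looped in one place.
use frame_support::traits::tokens::fungibles::Destroy;
use sp_runtime::DispatchResult;

fn destroy_asset_completely<AccountId, F: Destroy<AccountId>>(
	id: F::AssetId,
	maybe_check_owner: Option<AccountId>,
) -> DispatchResult {
	const BATCH: u32 = 100; // hypothetical per-call limit

	// Move the asset into the `Destroying` state.
	F::start_destroy(id, maybe_check_owner)?;
	// Remove accounts, then approvals, in bounded batches until no more work is reported.
	while F::destroy_accounts(id, BATCH)? > 0 {}
	while F::destroy_approvals(id, BATCH)? > 0 {}
	// Only once both are exhausted can the destruction be finalised.
	F::finish_destroy(id)
}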
+ +use sp_std::marker::PhantomData; + +use crate::{ + dispatch::DispatchError, + ensure, + traits::{ + tokens::{ + misc::{ + Balance, DepositConsequence, + Fortitude::{self, Force, Polite}, + Precision::{self, BestEffort, Exact}, + Preservation::{self, Expendable}, + Provenance::{self, Extant}, + WithdrawConsequence, + }, + AssetId, + }, + SameOrOther, TryDrop, + }, +}; +use sp_arithmetic::traits::{CheckedAdd, CheckedSub, One}; +use sp_runtime::{traits::Saturating, ArithmeticError, TokenError}; + +use super::{Credit, Debt, HandleImbalanceDrop, Imbalance}; + +/// Trait for providing balance-inspection access to a set of named fungible assets. +pub trait Inspect: Sized { + /// Means of identifying one asset class from another. + type AssetId: AssetId; + + /// Scalar type for representing balance of an account. + type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance(asset: Self::AssetId) -> Self::Balance; + + /// The total amount of issuance in the system excluding those which are controlled by the + /// system. + fn active_issuance(asset: Self::AssetId) -> Self::Balance { + Self::total_issuance(asset) + } + + /// The minimum balance any single account may have. + fn minimum_balance(asset: Self::AssetId) -> Self::Balance; + + /// Get the total amount of funds whose ultimate bneficial ownership can be determined as `who`. + /// + /// This may include funds which are wholly inaccessible to `who`, either temporarily or even + /// indefinitely. + /// + /// For the amount of the balance which is currently free to be removed from the account without + /// error, use `reducible_balance`. + /// + /// For the amount of the balance which may eventually be free to be removed from the account, + /// use `balance()`. + fn total_balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Get the balance of `who` which does not include funds which are exclusively allocated to + /// subsystems of the chain ("on hold" or "reserved"). + /// + /// In general this isn't especially useful outside of tests, and for practical purposes, you'll + /// want to use `reducible_balance()`. + fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Get the maximum amount that `who` can withdraw/transfer successfully based on whether the + /// account should be kept alive (`preservation`) or whether we are willing to force the + /// transfer and potentially go below user-level restrictions on the minimum amount of the + /// account. + /// + /// Always less than `free_balance()`. + fn reducible_balance( + asset: Self::AssetId, + who: &AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance; + + /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. + /// + /// - `asset`: The asset that should be deposited. + /// - `who`: The account of which the balance should be increased by `amount`. + /// - `amount`: How much should the balance be increased? + /// - `mint`: Will `amount` be minted to deposit it into `account`? + fn can_deposit( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence; + + /// Returns `Failed` if the `asset` balance of `who` may not be decreased by `amount`, otherwise + /// the consequence. + fn can_withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence; + + /// Returns `true` if an `asset` exists. 
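// Editor's illustrative sketch (not part of this patch): the `reducible_balance` query from
// the `Inspect` trait above is usually what callers want, rather than the raw `balance`,
// because it reflects preservation requirements and user-level restrictions such as freezes.
// The helper is hypothetical.
use frame_support::traits::tokens::{fungibles::Inspect, Fortitude::Polite, Preservation::Preserve};

/// How much `who` can transfer politely (no freezes overridden) while keeping the account alive.
fn spendable<AccountId, F: Inspect<AccountId>>(asset: F::AssetId, who: &AccountId) -> F::Balance {
	F::reducible_balance(asset, who, Preserve, Polite)
}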
+ fn asset_exists(asset: Self::AssetId) -> bool; +} + +/// Special dust type which can be type-safely converted into a `Credit`. +#[must_use] +pub struct Dust>(pub(crate) T::AssetId, pub(crate) T::Balance); + +impl> Dust { + /// Convert `Dust` into an instance of `Credit`. + pub fn into_credit(self) -> Credit { + Credit::::new(self.0, self.1) + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Create some dust and handle it with `Self::handle_dust`. This is an unbalanced operation + /// and it must only be used when an account is modified in a raw fashion, outside of the entire + /// fungibles API. The `amount` is capped at `Self::minimum_balance() - 1`. + /// + /// This should not be reimplemented. + fn handle_raw_dust(asset: Self::AssetId, amount: Self::Balance) { + Self::handle_dust(Dust( + asset, + amount.min(Self::minimum_balance(asset).saturating_sub(One::one())), + )) + } + + /// Do something with the dust which has been destroyed from the system. `Dust` can be converted + /// into a `Credit` with the `Balanced` trait impl. + fn handle_dust(dust: Dust); + + /// Forcefully set the balance of `who` to `amount`. + /// + /// If this call executes successfully, you can `assert_eq!(Self::balance(), amount);`. + /// + /// For implementations which include one or more balances on hold, then these are *not* + /// included in the `amount`. + /// + /// This function does its best to force the balance change through, but will not break system + /// invariants such as any Existential Deposits needed or overflows/underflows. + /// If this cannot be done for some reason (e.g. because the account cannot be created, deleted + /// or would overflow) then an `Err` is returned. + fn write_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result, DispatchError>; + + /// Set the total issuance to `amount`. + fn set_total_issuance(asset: Self::AssetId, amount: Self::Balance); + + /// Reduce the balance of `who` by `amount`. + /// + /// If `precision` is `Exact` and it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If `precision` is `BestEffort`, then + /// reduce the balance of `who` by the most that is possible, up to `amount`. + /// + /// In either case, if `Ok` is returned then the inner is the amount by which is was reduced. + /// Minimum balance will be respected and thus the returned amount may be up to + /// `Self::minimum_balance() - 1` greater than `amount` in the case that the reduction caused + /// the account to be deleted. 
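// Editor's illustrative sketch (not part of this patch): one way an implementation's
// `handle_dust` might dispose of dust -- convert it into a `Credit` and try to resolve it into
// a treasury account (using `Balanced::resolve`, defined later in this file). The helper and
// the treasury account are hypothetical.
use frame_support::traits::tokens::fungibles::{Balanced, Dust};

fn send_dust_to_treasury<AccountId, F: Balanced<AccountId>>(
	dust: Dust<AccountId, F>,
	treasury: &AccountId,
) {
	// If the treasury cannot take the credit, dropping it lets `OnDropCredit` reconcile the
	// total issuance instead.
	let _ = F::resolve(treasury, dust.into_credit());
}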
+ fn decrease_balance( + asset: Self::AssetId, + who: &AccountId, + mut amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result { + let old_balance = Self::balance(asset, who); + let free = Self::reducible_balance(asset, who, preservation, force); + if let BestEffort = precision { + amount = amount.min(free); + } + let new_balance = old_balance.checked_sub(&amount).ok_or(TokenError::FundsUnavailable)?; + if let Some(dust) = Self::write_balance(asset, who, new_balance)? { + Self::handle_dust(Dust(asset, dust)); + } + Ok(old_balance.saturating_sub(new_balance)) + } + + /// Increase the balance of `who` by `amount`. + /// + /// If it cannot be increased by that amount for some reason, return `Err` and don't increase + /// it at all. If Ok, return the imbalance. + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the account of `who` is zero. + fn increase_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + let old_balance = Self::balance(asset, who); + let new_balance = if let BestEffort = precision { + old_balance.saturating_add(amount) + } else { + old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)? + }; + if new_balance < Self::minimum_balance(asset) { + // Attempt to increase from 0 to below minimum -> stays at zero. + if let BestEffort = precision { + Ok(Self::Balance::default()) + } else { + Err(TokenError::BelowMinimum.into()) + } + } else { + if new_balance == old_balance { + Ok(Self::Balance::default()) + } else { + if let Some(dust) = Self::write_balance(asset, who, new_balance)? { + Self::handle_dust(Dust(asset, dust)); + } + Ok(new_balance.saturating_sub(old_balance)) + } + } + } + + /// Reduce the active issuance by some amount. + fn deactivate(_asset: Self::AssetId, _: Self::Balance) {} + + /// Increase the active issuance by some amount, up to the outstanding amount reduced. + fn reactivate(_asset: Self::AssetId, _: Self::Balance) {} +} + +/// Trait for providing a basic fungible asset. +pub trait Mutate: Inspect + Unbalanced { + /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't + /// possible then an `Err` is returned and nothing is changed. + fn mint_into( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + Self::total_issuance(asset) + .checked_add(&amount) + .ok_or(ArithmeticError::Overflow)?; + let actual = Self::increase_balance(asset, who, amount, Exact)?; + Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_add(actual)); + Self::done_mint_into(asset, who, amount); + Ok(actual) + } + + /// Decrease the balance of `who` by at least `amount`, possibly slightly more in the case of + /// minimum-balance requirements, burning the tokens. If that isn't possible then an `Err` is + /// returned and nothing is changed. If successful, the amount of tokens reduced is returned. 
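// Editor's illustrative sketch (not part of this patch): issuing new tokens with
// `Mutate::mint_into` from the trait above. The reward scenario is hypothetical; the point is
// that `mint_into` both credits the account and raises total issuance, returning the amount
// actually credited.
use frame_support::traits::tokens::fungibles::Mutate;
use sp_runtime::DispatchError;

fn pay_reward<AccountId, F: Mutate<AccountId>>(
	asset: F::AssetId,
	beneficiary: &AccountId,
	amount: F::Balance,
) -> Result<F::Balance, DispatchError> {
	// Mint exactly `amount` (or fail), then report the beneficiary's resulting balance.
	F::mint_into(asset, beneficiary, amount)?;
	Ok(F::balance(asset, beneficiary))
}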
+ fn burn_from( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + let actual = Self::reducible_balance(asset, who, Expendable, force).min(amount); + ensure!(actual == amount || precision == BestEffort, TokenError::FundsUnavailable); + Self::total_issuance(asset) + .checked_sub(&actual) + .ok_or(ArithmeticError::Overflow)?; + let actual = Self::decrease_balance(asset, who, actual, BestEffort, Expendable, force)?; + Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_sub(actual)); + Self::done_burn_from(asset, who, actual); + Ok(actual) + } + + /// Attempt to decrease the `asset` balance of `who` by `amount`. + /// + /// Equivalent to `burn_from`, except with an expectation that within the bounds of some + /// universal issuance, the total assets `suspend`ed and `resume`d will be equivalent. The + /// implementation may be configured such that the total assets suspended may never be less than + /// the total assets resumed (which is the invariant for an issuing system), or the reverse + /// (which is the invariant in a non-issuing system). + /// + /// Because of this expectation, any metadata associated with the asset is expected to survive + /// the suspend-resume cycle. + fn shelve( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + let actual = Self::reducible_balance(asset, who, Expendable, Polite).min(amount); + ensure!(actual == amount, TokenError::FundsUnavailable); + Self::total_issuance(asset) + .checked_sub(&actual) + .ok_or(ArithmeticError::Overflow)?; + let actual = Self::decrease_balance(asset, who, actual, BestEffort, Expendable, Polite)?; + Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_sub(actual)); + Self::done_shelve(asset, who, actual); + Ok(actual) + } + + /// Attempt to increase the `asset` balance of `who` by `amount`. + /// + /// Equivalent to `mint_into`, except with an expectation that within the bounds of some + /// universal issuance, the total assets `suspend`ed and `resume`d will be equivalent. The + /// implementation may be configured such that the total assets suspended may never be less than + /// the total assets resumed (which is the invariant for an issuing system), or the reverse + /// (which is the invariant in a non-issuing system). + /// + /// Because of this expectation, any metadata associated with the asset is expected to survive + /// the suspend-resume cycle. + fn restore( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + Self::total_issuance(asset) + .checked_add(&amount) + .ok_or(ArithmeticError::Overflow)?; + let actual = Self::increase_balance(asset, who, amount, Exact)?; + Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_add(actual)); + Self::done_restore(asset, who, amount); + Ok(actual) + } + + /// Transfer funds from one account into another. + fn transfer( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + preservation: Preservation, + ) -> Result { + let _extra = + Self::can_withdraw(asset, source, amount).into_result(preservation != Expendable)?; + Self::can_deposit(asset, dest, amount, Extant).into_result()?; + Self::decrease_balance(asset, source, amount, BestEffort, preservation, Polite)?; + // This should never fail as we checked `can_deposit` earlier. But we do a best-effort + // anyway.
+ let _ = Self::increase_balance(asset, dest, amount, BestEffort); + Self::done_transfer(asset, source, dest, amount); + Ok(amount) + } + + /// Simple infallible function to force an account to have a particular balance, good for use + /// in tests and benchmarks but not recommended for production code owing to the lack of + /// error reporting. + /// + /// Returns the new balance. + fn set_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> Self::Balance { + let b = Self::balance(asset, who); + if b > amount { + Self::burn_from(asset, who, b - amount, BestEffort, Force).map(|d| b.saturating_sub(d)) + } else { + Self::mint_into(asset, who, amount - b).map(|d| b.saturating_add(d)) + } + .unwrap_or(b) + } + fn done_mint_into(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} + fn done_burn_from(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} + fn done_shelve(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} + fn done_restore(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} + fn done_transfer( + _asset: Self::AssetId, + _source: &AccountId, + _dest: &AccountId, + _amount: Self::Balance, + ) { + } +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. +pub struct IncreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for IncreaseIssuance +{ + fn handle(asset: U::AssetId, amount: U::Balance) { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount)) + } +} + +/// Simple handler for an imbalance drop which decreases the total issuance of the system by the +/// imbalance amount. Used for leftover credit. +pub struct DecreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for DecreaseIssuance +{ + fn handle(asset: U::AssetId, amount: U::Balance) { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount)) + } +} + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. +/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect + Unbalanced { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used. + type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> Debt { + let old = Self::total_issuance(asset); + let new = old.saturating_sub(amount); + Self::set_total_issuance(asset, new); + let delta = old - new; + Self::done_rescind(asset, delta); + Imbalance::::new( + asset, delta, + ) + } + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. 
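// Editor's illustrative sketch (not part of this patch): keeping issuance consistent with the
// `Balanced` trait above. `issue` raises total issuance and hands back a `Credit`; `resolve`
// (defined further down in this trait) deposits that credit into an account, and if the credit
// is ever dropped unused, `OnDropCredit` winds the issuance back down. The helper is
// hypothetical.
use frame_support::traits::tokens::fungibles::Balanced;

fn reward_account<AccountId, F: Balanced<AccountId>>(
	asset: F::AssetId,
	who: &AccountId,
	amount: F::Balance,
) {
	let credit = F::issue(asset, amount);
	// Best effort: if `who` cannot receive the funds (e.g. a new account below the minimum
	// balance), the returned credit is dropped and issuance is restored automatically.
	let _ = F::resolve(who, credit);
}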
+ fn issue(asset: Self::AssetId, amount: Self::Balance) -> Credit { + let old = Self::total_issuance(asset); + let new = old.saturating_add(amount); + Self::set_total_issuance(asset, new); + let delta = new - old; + Self::done_issue(asset, delta); + Imbalance::::new( + asset, delta, + ) + } + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair( + asset: Self::AssetId, + amount: Self::Balance, + ) -> (Debt, Credit) { + (Self::rescind(asset, amount), Self::issue(asset, amount)) + } + + /// Mints `value` into the account of `who`, creating it as needed. + /// + /// If `precision` is `BestEffort` and `value` in full could not be minted (e.g. due to + /// overflow), then the maximum is minted, up to `value`. If `precision` is `Exact`, then + /// exactly `value` must be minted into the account of `who` or the operation will fail with an + /// `Err` and nothing will change. + /// + /// If the operation is successful, this will return `Ok` with a `Debt` of the total value + /// added to the account. + fn deposit( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + precision: Precision, + ) -> Result, DispatchError> { + let increase = Self::increase_balance(asset, who, value, precision)?; + Self::done_deposit(asset, who, increase); + Ok(Imbalance::::new( + asset, increase, + )) + } + + /// Removes `value` balance from `who` account if possible. + /// + /// If `precision` is `BestEffort` and `value` in full could not be removed (e.g. due to + /// underflow), then the maximum is removed, up to `value`. If `precision` is `Exact`, then + /// exactly `value` must be removed from the account of `who` or the operation will fail with an + /// `Err` and nothing will change. + /// + /// If the removal is needed but not possible, then it returns `Err` and nothing is changed. + /// If the account needed to be deleted, then slightly more than `value` may be removed from the + /// account owning since up to (but not including) minimum balance may also need to be removed. + /// + /// If the operation is successful, this will return `Ok` with a `Credit` of the total value + /// removed from the account. + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result, DispatchError> { + let decrease = Self::decrease_balance(asset, who, value, precision, preservation, force)?; + Self::done_withdraw(asset, who, decrease); + Ok(Imbalance::::new( + asset, decrease, + )) + } + + /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. + /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: Credit, + ) -> Result<(), Credit> { + let v = credit.peek(); + let debt = match Self::deposit(credit.asset(), who, v, Exact) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + if let Ok(result) = credit.offset(debt) { + let result = result.try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + } else { + debug_assert!(false, "debt.asset is credit.asset; qed"); + } + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. 
If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: Debt, + preservation: Preservation, + ) -> Result, Debt> { + let amount = debt.peek(); + let asset = debt.asset(); + let credit = match Self::withdraw(asset, who, amount, Exact, preservation, Polite) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + match credit.offset(debt) { + Ok(SameOrOther::None) => Ok(Credit::::zero(asset)), + Ok(SameOrOther::Same(dust)) => Ok(dust), + Ok(SameOrOther::Other(rest)) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + }, + Err(_) => { + debug_assert!(false, "debt.asset is credit.asset; qed"); + Ok(Credit::::zero(asset)) + }, + } + } + + fn done_rescind(_asset: Self::AssetId, _amount: Self::Balance) {} + fn done_issue(_asset: Self::AssetId, _amount: Self::Balance) {} + fn done_deposit(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} + fn done_withdraw(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} +} diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index 6113642c83460..75aef0e04ea65 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -23,12 +23,65 @@ use sp_core::RuntimeDebug; use sp_runtime::{traits::Convert, ArithmeticError, DispatchError, TokenError}; use sp_std::fmt::Debug; +/// The origin of funds to be used for a deposit operation. +#[derive(Copy, Clone, RuntimeDebug, Eq, PartialEq)] +pub enum Provenance { + /// The funds will be minted into the system, increasing total issuance (and potentially + /// causing an overflow there). + Minted, + /// The funds already exist in the system, therefore will not affect total issuance. + Extant, +} + +/// The mode under which usage of funds may be restricted. +#[derive(Copy, Clone, RuntimeDebug, Eq, PartialEq)] +pub enum Restriction { + /// Funds are under the normal conditions. + Free, + /// Funds are on hold. + OnHold, +} + +/// The mode by which we describe whether an operation should keep an account alive. +#[derive(Copy, Clone, RuntimeDebug, Eq, PartialEq)] +pub enum Preservation { + /// We don't care if the account gets killed by this operation. + Expendable, + /// The account may not be killed, but we don't care if the balance gets dusted. + Protect, + /// The account may not be killed and our provider reference must remain (in the context of + /// tokens, this means that the account may not be dusted). + Preserve, +} + +/// The privilege with which a withdraw operation is conducted. +#[derive(Copy, Clone, RuntimeDebug, Eq, PartialEq)] +pub enum Fortitude { + /// The operation should execute with regular privilege. + Polite, + /// The operation should be forced to succeed if possible. This is usually employed for system- + /// level security-critical events such as slashing. + Force, +} + +/// The precision required of an operation generally involving some aspect of quantitative fund +/// withdrawal or transfer. +#[derive(Copy, Clone, RuntimeDebug, Eq, PartialEq)] +pub enum Precision { + /// The operation should must either proceed either exactly according to the amounts involved + /// or not at all. + Exact, + /// The operation may be considered successful even if less than the specified amounts are + /// available to be used. In this case a best effort will be made. 
+ BestEffort, +} + /// One of a number of consequences of withdrawing a fungible from an account. #[derive(Copy, Clone, RuntimeDebug, Eq, PartialEq)] pub enum WithdrawConsequence { /// Withdraw could not happen since the amount to be withdrawn is less than the total funds in /// the account. - NoFunds, + BalanceLow, /// The withdraw would mean the account dying when it needs to exist (usually because it is a /// provider and there are consumer references on it). WouldDie, @@ -53,15 +106,16 @@ pub enum WithdrawConsequence { impl WithdrawConsequence { /// Convert the type into a `Result` with `DispatchError` as the error or the additional /// `Balance` by which the account will be reduced. - pub fn into_result(self) -> Result { + pub fn into_result(self, keep_nonzero: bool) -> Result { use WithdrawConsequence::*; match self { - NoFunds => Err(TokenError::NoFunds.into()), - WouldDie => Err(TokenError::WouldDie.into()), + BalanceLow => Err(TokenError::FundsUnavailable.into()), + WouldDie => Err(TokenError::OnlyProvider.into()), UnknownAsset => Err(TokenError::UnknownAsset.into()), Underflow => Err(ArithmeticError::Underflow.into()), Overflow => Err(ArithmeticError::Overflow.into()), Frozen => Err(TokenError::Frozen.into()), + ReducedToZero(_) if keep_nonzero => Err(TokenError::NotExpendable.into()), ReducedToZero(result) => Ok(result), Success => Ok(Zero::zero()), } @@ -190,9 +244,19 @@ impl< } /// Converts a balance value into an asset balance. -pub trait BalanceConversion { +pub trait ConversionToAssetBalance { + type Error; + fn to_asset_balance(balance: InBalance, asset_id: AssetId) + -> Result; +} + +/// Converts an asset balance value into balance. +pub trait ConversionFromAssetBalance { type Error; - fn to_asset_balance(balance: InBalance, asset_id: AssetId) -> Result; + fn from_asset_balance( + balance: AssetBalance, + asset_id: AssetId, + ) -> Result; } /// Trait to handle asset locking mechanism to ensure interactions with the asset can be implemented diff --git a/frame/support/src/traits/tokens/pay.rs b/frame/support/src/traits/tokens/pay.rs new file mode 100644 index 0000000000000..23bd113bfef56 --- /dev/null +++ b/frame/support/src/traits/tokens/pay.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Pay trait and associated types. + +use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_core::{RuntimeDebug, TypedGet}; +use sp_std::fmt::Debug; + +use super::{fungible, Balance, Preservation::Expendable}; + +/// Can be implemented by `PayFromAccount` using a `fungible` impl, but can also be implemented with +/// XCM/MultiAsset and made generic over assets. +pub trait Pay { + /// The type by which we measure units of the currency in which we make payments. + type Balance: Balance; + /// The type by which we identify the beneficiaries to whom a payment may be made. 
+ type Beneficiary; + /// The type for the kinds of asset that are going to be paid. + /// + /// The unit type can be used here to indicate there's only one kind of asset to do payments + /// with. When implementing, it should be clear from the context what that asset is. + type AssetKind; + /// An identifier given to an individual payment. + type Id: FullCodec + MaxEncodedLen + TypeInfo + Clone + Eq + PartialEq + Debug + Copy; + /// Make a payment and return an identifier for later evaluation of success in some off-chain + /// mechanism (likely an event, but possibly not on this chain). + fn pay( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) -> Result; + /// Check how a payment has proceeded. `id` must have been previously returned by `pay` for + /// the result of this call to be meaningful. Once this returns anything other than + /// `InProgress` for some `id` it must return `Unknown` rather than the actual result + /// value. + fn check_payment(id: Self::Id) -> PaymentStatus; + /// Ensure that a call to pay with the given parameters will be successful if done immediately + /// after this call. Used in benchmarking code. + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(who: &Self::Beneficiary, amount: Self::Balance); + /// Ensure that a call to `check_payment` with the given parameters will return either `Success` + /// or `Failure`. + #[cfg(feature = "runtime-benchmarks")] + fn ensure_concluded(id: Self::Id); +} + +/// Status for making a payment via the `Pay::pay` trait function. +#[derive(Encode, Decode, Eq, PartialEq, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] +pub enum PaymentStatus { + /// Payment is in progress. Nothing to report yet. + InProgress, + /// Payment status is unknowable. It may already have reported the result, or if not then + /// it will never be reported successful or failed. + Unknown, + /// Payment happened successfully. + Success, + /// Payment failed. It may safely be retried. + Failure, +} + +/// Simple implementation of `Pay` which makes a payment from a "pot" - i.e. a single account. 
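A rough wiring sketch follows; it is not introduced by this change, `Balances`, `AccountId`, `Balance` and `PotAccount` are placeholder names, and the import assumes the new items are re-exported under `frame_support::traits::tokens`. The pot-based implementation is plugged in wherever a `Pay` bound is required, and callers drive it through `pay` and `check_payment`:

use frame_support::{
	parameter_types,
	traits::tokens::{Pay, PayFromAccount, PaymentStatus},
};

parameter_types! {
	// Hypothetical pot account; anything implementing `TypedGet<Type = AccountId>` works here.
	pub PotAccount: AccountId = AccountId::new([0u8; 32]);
}

// `Balances` stands for any `fungible::Mutate<AccountId>` implementation; payments are plain
// transfers out of the pot, so the pot account is treated as `Expendable`.
type Paymaster = PayFromAccount<Balances, PotAccount>;

fn pay_out(beneficiary: &AccountId, amount: Balance) -> Result<(), ()> {
	// `AssetKind` and `Id` are both `()` for this implementation: the transfer either happens
	// synchronously or errors out, so `check_payment` always reports `Success`.
	let id = <Paymaster as Pay>::pay(beneficiary, (), amount)?;
	debug_assert_eq!(<Paymaster as Pay>::check_payment(id), PaymentStatus::Success);
	Ok(())
}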
+pub struct PayFromAccount(sp_std::marker::PhantomData<(F, A)>); +impl> Pay for PayFromAccount { + type Balance = F::Balance; + type Beneficiary = A::Type; + type AssetKind = (); + type Id = (); + fn pay( + who: &Self::Beneficiary, + _: Self::AssetKind, + amount: Self::Balance, + ) -> Result { + >::transfer(&A::get(), who, amount, Expendable).map_err(|_| ())?; + Ok(()) + } + fn check_payment(_: ()) -> PaymentStatus { + PaymentStatus::Success + } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(_: &Self::Beneficiary, amount: Self::Balance) { + >::mint_into(&A::get(), amount).unwrap(); + } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_concluded(_: Self::Id) {} +} diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs index 6103f07a73c78..bebc248721c99 100644 --- a/frame/support/src/traits/try_runtime.rs +++ b/frame/support/src/traits/try_runtime.rs @@ -168,11 +168,19 @@ impl TryState::name(), Tuple::try_state) ),* )]; let mut result = Ok(()); - for (name, try_state_fn) in try_state_fns { - if pallet_names.iter().any(|n| n == name.as_bytes()) { + pallet_names.iter().for_each(|pallet_name| { + if let Some((name, try_state_fn)) = + try_state_fns.iter().find(|(name, _)| name.as_bytes() == pallet_name) + { result = result.and(try_state_fn(n.clone(), targets.clone())); + } else { + crate::log::warn!( + "Pallet {:?} not found", + sp_std::str::from_utf8(pallet_name).unwrap_or_default() + ); } - } + }); + result }, } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index 2389c51d9de9c..b358aa473bfd7 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -16,7 +16,7 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25 (Y/M/D) +//! DATE: 2023-04-06 (Y/M/D) //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` @@ -44,17 +44,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 377_722, 414_752 - /// Average: 381_015 - /// Median: 379_751 - /// Std-Dev: 5462.64 + /// Min, Max: 386_820, 419_676 + /// Average: 392_184 + /// Median: 389_668 + /// Std-Dev: 5285.57 /// /// Percentiles nanoseconds: - /// 99th: 413_074 - /// 95th: 384_876 - /// 75th: 380_642 + /// 99th: 406_316 + /// 95th: 399_697 + /// 75th: 396_532 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(381_015), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(392_184), 0); } #[cfg(test)] diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index 78798d2e3a277..1a6facc3d42c3 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -16,7 +16,7 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25 (Y/M/D) +//! DATE: 2023-04-06 (Y/M/D) //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` @@ -44,17 +44,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
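For context on where the regenerated `BlockExecutionWeight` and `ExtrinsicBaseWeight` constants end up, the usual runtime wiring looks roughly like the sketch below; `RuntimeBlockWeights`, `MAXIMUM_BLOCK_WEIGHT` and `NORMAL_DISPATCH_RATIO` are conventional runtime-side names, not something introduced here:

use frame_support::{
	dispatch::DispatchClass,
	parameter_types,
	weights::{
		constants::{BlockExecutionWeight, ExtrinsicBaseWeight, WEIGHT_REF_TIME_PER_SECOND},
		Weight,
	},
};
use frame_system::limits::BlockWeights;
use sp_runtime::Perbill;

// Assumed, runtime-specific policy values.
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
const MAXIMUM_BLOCK_WEIGHT: Weight =
	Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX);

parameter_types! {
	pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder()
		// The benchmarked per-block overhead is charged once per block...
		.base_block(BlockExecutionWeight::get())
		// ...and the per-extrinsic overhead once for every dispatched extrinsic.
		.for_class(DispatchClass::all(), |weights| {
			weights.base_extrinsic = ExtrinsicBaseWeight::get();
		})
		.for_class(DispatchClass::Normal, |weights| {
			weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT);
		})
		.for_class(DispatchClass::Operational, |weights| {
			weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT);
		})
		.build_or_panic();
}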
/// /// Stats nanoseconds: - /// Min, Max: 99_481, 103_304 - /// Average: 99_840 - /// Median: 99_795 - /// Std-Dev: 376.17 + /// Min, Max: 113_246, 114_346 + /// Average: 113_638 + /// Median: 113_641 + /// Std-Dev: 188.44 /// /// Percentiles nanoseconds: - /// 99th: 100_078 - /// 95th: 100_051 - /// 75th: 99_916 + /// 99th: 114_181 + /// 95th: 113_961 + /// 75th: 113_703 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(99_840), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(113_638), 0); } #[cfg(test)] diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 90ef243eed6c6..9564b6a04d7e7 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -14,7 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.136", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../../primitives/arithmetic" } sp-io = { version = "7.0.0", path = "../../../primitives/io", default-features = false } sp-state-machine = { version = "0.13.0", optional = true, path = "../../../primitives/state-machine" } @@ -47,6 +48,7 @@ std = [ "sp-state-machine", "sp-arithmetic/std", "sp-version/std", + "sp-api/std", ] try-runtime = ["frame-support/try-runtime"] # WARNING: diff --git a/frame/support/test/compile_pass/Cargo.toml b/frame/support/test/compile_pass/Cargo.toml index dd5e1b996db9c..4466c24d64f15 100644 --- a/frame/support/test/compile_pass/Cargo.toml +++ b/frame/support/test/compile_pass/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../system" } sp-core = { version = "7.0.0", default-features = false, path = "../../../../primitives/core" } diff --git a/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr b/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr index 1f8c9f1e171eb..601bbd20fb73d 100644 --- a/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr +++ b/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr @@ -7,4 +7,4 @@ error[E0277]: the `?` operator can only be used in a function that returns `Resu 15 | something()?; | ^ cannot use the `?` operator in a function that returns `()` | - = help: the trait `FromResidual>` is not implemented for `()` + = help: the trait `FromResidual>` is not implemented for `()` diff --git a/frame/support/test/tests/benchmark_ui/invalid_origin.rs b/frame/support/test/tests/benchmark_ui/invalid_origin.rs new file mode 100644 index 0000000000000..81cfc39790969 --- /dev/null +++ 
b/frame/support/test/tests/benchmark_ui/invalid_origin.rs @@ -0,0 +1,17 @@ +use frame_benchmarking::v2::*; +#[allow(unused_imports)] +use frame_support_test::Config; +use frame_support_test::Call; + +#[benchmarks] +mod benches { + use super::*; + + #[benchmark] + fn bench() { + #[extrinsic_call] + thing(1); + } +} + +fn main() {} diff --git a/frame/support/test/tests/benchmark_ui/invalid_origin.stderr b/frame/support/test/tests/benchmark_ui/invalid_origin.stderr new file mode 100644 index 0000000000000..ab8499d995d47 --- /dev/null +++ b/frame/support/test/tests/benchmark_ui/invalid_origin.stderr @@ -0,0 +1,16 @@ +error[E0599]: no variant or associated item named `new_call_variant_thing` found for enum `Call` in the current scope + --> tests/benchmark_ui/invalid_origin.rs:6:1 + | +6 | #[benchmarks] + | ^^^^^^^^^^^^^ variant or associated item not found in `Call` + | + = note: this error originates in the attribute macro `benchmarks` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `::RuntimeOrigin: From<{integer}>` is not satisfied + --> tests/benchmark_ui/invalid_origin.rs:6:1 + | +6 | #[benchmarks] + | ^^^^^^^^^^^^^ the trait `From<{integer}>` is not implemented for `::RuntimeOrigin` + | + = note: required for `{integer}` to implement `Into<::RuntimeOrigin>` + = note: this error originates in the attribute macro `benchmarks` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/benchmark_ui/pass/valid_const_expr.rs b/frame/support/test/tests/benchmark_ui/pass/valid_const_expr.rs new file mode 100644 index 0000000000000..bead3bf277be2 --- /dev/null +++ b/frame/support/test/tests/benchmark_ui/pass/valid_const_expr.rs @@ -0,0 +1,28 @@ +use frame_benchmarking::v2::*; +use frame_support_test::Config; +use frame_support::parameter_types; + +#[benchmarks] +mod benches { + use super::*; + + const MY_CONST: u32 = 100; + + const fn my_fn() -> u32 { + 200 + } + + parameter_types! 
{ + const MyConst: u32 = MY_CONST; + } + + #[benchmark(skip_meta, extra)] + fn bench(a: Linear<{MY_CONST * 2}, {my_fn() + MyConst::get()}>) { + let a = 2 + 2; + #[block] + {} + assert_eq!(a, 4); + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr index 5f1fccd43c549..b1c1879aa56ad 100644 --- a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr @@ -8,9 +8,12 @@ error[E0412]: cannot find type `RuntimeCall` in this scope --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:18:64 | 18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | - ^^^^^^^^^^^ not found in this scope - | | - | help: you might be missing a type parameter: `` + | ^^^^^^^^^^^ not found in this scope + | +help: you might be missing a type parameter + | +18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | +++++++++++++ error[E0412]: cannot find type `Runtime` in this scope --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:20:25 diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr index c623ecfbf4cdf..66098898bb877 100644 --- a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr @@ -8,9 +8,12 @@ error[E0412]: cannot find type `RuntimeCall` in this scope --> tests/construct_runtime_ui/exclude_undefined_part.rs:23:64 | 23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | - ^^^^^^^^^^^ not found in this scope - | | - | help: you might be missing a type parameter: `` + | ^^^^^^^^^^^ not found in this scope + | +help: you might be missing a type parameter + | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | +++++++++++++ error[E0412]: cannot find type `Runtime` in this scope --> tests/construct_runtime_ui/exclude_undefined_part.rs:25:25 diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr index 29df6e4bd8cb5..42f79e96d4473 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` --> $DIR/invalid_module_details_keyword.rs:9:20 | 9 | system: System::{enum}, diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr index bd3e672dc8b40..6d535ca4335fc 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, 
`Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` --> $DIR/invalid_module_entry.rs:10:23 | 10 | Balance: balances::{Error}, diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 2cf451aa65135..cfc8c9264eec6 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -29,18 +29,3 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | - -error[E0283]: type annotations needed - --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 - | -40 | / construct_runtime! { -41 | | pub struct Runtime where -42 | | Block = Block, -43 | | NodeBlock = Block, -... | -48 | | } -49 | | } - | |_^ cannot infer type - | - = note: cannot satisfy `_: std::default::Default` - = note: this error originates in the derive macro `Default` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs new file mode 100644 index 0000000000000..5dfc67c83836a --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs @@ -0,0 +1,169 @@ +use frame_support::construct_runtime; +use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u32; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u32; + type RuntimeCall = RuntimeCall; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = frame_support::traits::ConstU32<250>; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +construct_runtime! 
{ + pub struct Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Pallet1: pallet::{Pallet}, + Pallet2: pallet::{Pallet}, + Pallet3: pallet::{Pallet}, + Pallet4: pallet::{Pallet}, + Pallet5: pallet::{Pallet}, + Pallet6: pallet::{Pallet}, + Pallet7: pallet::{Pallet}, + Pallet8: pallet::{Pallet}, + Pallet9: pallet::{Pallet}, + Pallet10: pallet::{Pallet}, + Pallet11: pallet::{Pallet}, + Pallet12: pallet::{Pallet}, + Pallet13: pallet::{Pallet}, + Pallet14: pallet::{Pallet}, + Pallet15: pallet::{Pallet}, + Pallet16: pallet::{Pallet}, + Pallet17: pallet::{Pallet}, + Pallet18: pallet::{Pallet}, + Pallet19: pallet::{Pallet}, + Pallet20: pallet::{Pallet}, + Pallet21: pallet::{Pallet}, + Pallet22: pallet::{Pallet}, + Pallet23: pallet::{Pallet}, + Pallet24: pallet::{Pallet}, + Pallet25: pallet::{Pallet}, + Pallet26: pallet::{Pallet}, + Pallet27: pallet::{Pallet}, + Pallet28: pallet::{Pallet}, + Pallet29: pallet::{Pallet}, + Pallet30: pallet::{Pallet}, + Pallet31: pallet::{Pallet}, + Pallet32: pallet::{Pallet}, + Pallet33: pallet::{Pallet}, + Pallet34: pallet::{Pallet}, + Pallet35: pallet::{Pallet}, + Pallet36: pallet::{Pallet}, + Pallet37: pallet::{Pallet}, + Pallet38: pallet::{Pallet}, + Pallet39: pallet::{Pallet}, + Pallet40: pallet::{Pallet}, + Pallet41: pallet::{Pallet}, + Pallet42: pallet::{Pallet}, + Pallet43: pallet::{Pallet}, + Pallet44: pallet::{Pallet}, + Pallet45: pallet::{Pallet}, + Pallet46: pallet::{Pallet}, + Pallet47: pallet::{Pallet}, + Pallet48: pallet::{Pallet}, + Pallet49: pallet::{Pallet}, + Pallet50: pallet::{Pallet}, + Pallet51: pallet::{Pallet}, + Pallet52: pallet::{Pallet}, + Pallet53: pallet::{Pallet}, + Pallet54: pallet::{Pallet}, + Pallet55: pallet::{Pallet}, + Pallet56: pallet::{Pallet}, + Pallet57: pallet::{Pallet}, + Pallet58: pallet::{Pallet}, + Pallet59: pallet::{Pallet}, + Pallet60: pallet::{Pallet}, + Pallet61: pallet::{Pallet}, + Pallet62: pallet::{Pallet}, + Pallet63: pallet::{Pallet}, + Pallet64: pallet::{Pallet}, + Pallet65: pallet::{Pallet}, + Pallet66: pallet::{Pallet}, + Pallet67: pallet::{Pallet}, + Pallet68: pallet::{Pallet}, + Pallet69: pallet::{Pallet}, + Pallet70: pallet::{Pallet}, + Pallet71: pallet::{Pallet}, + Pallet72: pallet::{Pallet}, + Pallet73: pallet::{Pallet}, + Pallet74: pallet::{Pallet}, + Pallet75: pallet::{Pallet}, + Pallet76: pallet::{Pallet}, + Pallet77: pallet::{Pallet}, + Pallet78: pallet::{Pallet}, + Pallet79: pallet::{Pallet}, + Pallet80: pallet::{Pallet}, + Pallet81: pallet::{Pallet}, + Pallet82: pallet::{Pallet}, + Pallet83: pallet::{Pallet}, + Pallet84: pallet::{Pallet}, + Pallet85: pallet::{Pallet}, + Pallet86: pallet::{Pallet}, + Pallet87: pallet::{Pallet}, + Pallet88: pallet::{Pallet}, + Pallet89: pallet::{Pallet}, + Pallet90: pallet::{Pallet}, + Pallet91: pallet::{Pallet}, + Pallet92: pallet::{Pallet}, + Pallet93: pallet::{Pallet}, + Pallet94: pallet::{Pallet}, + Pallet95: pallet::{Pallet}, + Pallet96: pallet::{Pallet}, + Pallet97: pallet::{Pallet}, + Pallet98: pallet::{Pallet}, + Pallet99: pallet::{Pallet}, + Pallet100: pallet::{Pallet}, + Pallet101: pallet::{Pallet}, + Pallet102: pallet::{Pallet}, + Pallet103: pallet::{Pallet}, + Pallet104: pallet::{Pallet}, + Pallet105: pallet::{Pallet}, + Pallet106: pallet::{Pallet}, + Pallet107: pallet::{Pallet}, + Pallet108: pallet::{Pallet}, + Pallet109: pallet::{Pallet}, + Pallet110: pallet::{Pallet}, + } +} + +fn main() {} diff --git 
a/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr new file mode 100644 index 0000000000000..dbd81ef367a9f --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr @@ -0,0 +1,61 @@ +error: The number of pallets exceeds the maximum number of tuple elements. To increase this limit, enable the tuples-96 feature of [frame_support]. + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:2 + | +50 | pub struct Runtime where + | ^^^ + +error[E0412]: cannot find type `RuntimeCall` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:18:64 + | +18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | ^^^^^^^^^^^ not found in this scope + | +help: you might be missing a type parameter + | +18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | +++++++++++++ + +error[E0412]: cannot find type `Runtime` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:20:25 + | +20 | impl pallet::Config for Runtime {} + | ^^^^^^^ not found in this scope + +error[E0412]: cannot find type `Runtime` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:22:31 + | +22 | impl frame_system::Config for Runtime { + | ^^^^^^^ not found in this scope + +error[E0412]: cannot find type `RuntimeOrigin` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:24:23 + | +24 | type RuntimeOrigin = RuntimeOrigin; + | ^^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeOrigin` + +error[E0412]: cannot find type `RuntimeCall` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:27:21 + | +27 | type RuntimeCall = RuntimeCall; + | ^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeCall` + +error[E0412]: cannot find type `RuntimeEvent` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:33:22 + | +33 | type RuntimeEvent = RuntimeEvent; + | ^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeEvent` + +error[E0412]: cannot find type `PalletInfo` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:20 + | +39 | type PalletInfo = PalletInfo; + | ^^^^^^^^^^ + | +help: you might have meant to use the associated type + | +39 | type PalletInfo = Self::PalletInfo; + | ~~~~~~~~~~~~~~~~ +help: consider importing this trait + | +1 | use frame_support::traits::PalletInfo; + | diff --git a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr index 6029e7e6f48bb..b1338e8920f41 100644 --- a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr +++ b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr @@ -18,7 +18,7 @@ error: cannot find macro `decl_storage` in this scope 6 | decl_storage! { | ^^^^^^^^^^^^ | - = note: consider importing this macro: + = help: consider importing this macro: frame_support::decl_storage error: cannot find macro `decl_module` in this scope @@ -27,5 +27,5 @@ error: cannot find macro `decl_module` in this scope 10 | decl_module! 
{ | ^^^^^^^^^^^ | - = note: consider importing this macro: + = help: consider importing this macro: frame_support::decl_module diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 72099c1b94c0d..f56780c0b0c0d 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -32,18 +32,3 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | - -error[E0283]: type annotations needed - --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 - | -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } - | |_^ cannot infer type - | - = note: cannot satisfy `_: std::default::Default` - = note: this error originates in the derive macro `Default` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index dab6514261420..1801379130913 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -15,7 +15,7 @@ error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should re | = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no function or associated item named `create_inherent` found for struct `pallet::Pallet` in the current scope +error[E0599]: no function or associated item named `create_inherent` found for struct `Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); @@ -28,14 +28,14 @@ error[E0599]: no function or associated item named `create_inherent` found for s ... | 57 | | } 58 | | } - | |_^ function or associated item not found in `pallet::Pallet` + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following trait defines an item `create_inherent`, perhaps you need to implement it: candidate #1: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no function or associated item named `is_inherent` found for struct `pallet::Pallet` in the current scope +error[E0599]: no function or associated item named `is_inherent` found for struct `Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); @@ -48,14 +48,14 @@ error[E0599]: no function or associated item named `is_inherent` found for struc ... 
| 57 | | } 58 | | } - | |_^ function or associated item not found in `pallet::Pallet` + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following trait defines an item `is_inherent`, perhaps you need to implement it: candidate #1: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope +error[E0599]: no function or associated item named `check_inherent` found for struct `Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); @@ -68,14 +68,14 @@ error[E0599]: no function or associated item named `check_inherent` found for st ... | 57 | | } 58 | | } - | |_^ function or associated item not found in `pallet::Pallet` + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following trait defines an item `check_inherent`, perhaps you need to implement it: candidate #1: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `pallet::Pallet` in the current scope +error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); @@ -88,14 +88,14 @@ error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `p ... | 57 | | } 58 | | } - | |_^ associated item not found in `pallet::Pallet` + | |_^ associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following trait defines an item `INHERENT_IDENTIFIER`, perhaps you need to implement it: candidate #1: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no function or associated item named `is_inherent_required` found for struct `pallet::Pallet` in the current scope +error[E0599]: no function or associated item named `is_inherent_required` found for struct `Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); @@ -108,7 +108,7 @@ error[E0599]: no function or associated item named `is_inherent_required` found ... 
| 57 | | } 58 | | } - | |_^ function or associated item not found in `pallet::Pallet` + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following trait defines an item `is_inherent_required`, perhaps you need to implement it: diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index dd1bdf76106e6..dbade9fed2cff 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -32,21 +32,3 @@ help: consider importing this type alias | 1 | use frame_system::Origin; | - -error[E0282]: type annotations needed - --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 - | -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } - | |_^ cannot infer type of the type parameter `AccountId` declared on the enum `RawOrigin` - | - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider specifying the generic argument - | -58 | }:: - | +++++++++++++ diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index 14106ddd09c17..fa52ce2276635 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -29,7 +29,7 @@ error[E0599]: no variant or associated item named `Pallet` found for enum `Runti 58 | | } | |_- variant or associated item `Pallet` not found for this enum -error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope +error[E0599]: no function or associated item named `pre_dispatch` found for struct `Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); @@ -42,7 +42,7 @@ error[E0599]: no function or associated item named `pre_dispatch` found for stru ... | 57 | | } 58 | | } - | |_^ function or associated item not found in `pallet::Pallet` + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `pre_dispatch`, perhaps you need to implement one of them: @@ -50,7 +50,7 @@ error[E0599]: no function or associated item named `pre_dispatch` found for stru candidate #2: `ValidateUnsigned` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no function or associated item named `validate_unsigned` found for struct `pallet::Pallet` in the current scope +error[E0599]: no function or associated item named `validate_unsigned` found for struct `Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); @@ -63,7 +63,7 @@ error[E0599]: no function or associated item named `validate_unsigned` found for ... 
| 57 | | } 58 | | } - | |_^ function or associated item not found in `pallet::Pallet` + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `validate_unsigned`, perhaps you need to implement one of them: diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr index e289c75fb008a..cb6b6a44d61da 100644 --- a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr @@ -8,9 +8,12 @@ error[E0412]: cannot find type `RuntimeCall` in this scope --> tests/construct_runtime_ui/use_undefined_part.rs:23:64 | 23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | - ^^^^^^^^^^^ not found in this scope - | | - | help: you might be missing a type parameter: `` + | ^^^^^^^^^^^ not found in this scope + | +help: you might be missing a type parameter + | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | +++++++++++++ error[E0412]: cannot find type `Runtime` in this scope --> tests/construct_runtime_ui/use_undefined_part.rs:25:25 diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index c2ee7742426bb..bee1dc83d5589 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -19,7 +19,7 @@ // Do not complain about unused `dispatch` and `dispatch_aux`. #[allow(dead_code)] mod tests { - use frame_support::metadata::*; + use frame_support::metadata_ir::*; use sp_io::TestExternalities; frame_support::decl_module! { @@ -104,195 +104,195 @@ mod tests { type Origin2 = u32; } - fn expected_metadata() -> PalletStorageMetadata { - PalletStorageMetadata { + fn expected_metadata() -> PalletStorageMetadataIR { + PalletStorageMetadataIR { prefix: "TestStorage", entries: vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "U32", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![" Hello, this is doc!"], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBU32", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "U32MYDEF", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBU32MYDEF", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GETU32", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: 
StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBGETU32", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GETU32WITHCONFIG", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBGETU32WITHCONFIG", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GETU32MYDEF", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBGETU32MYDEF", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![3, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GETU32WITHCONFIGMYDEF", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![2, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBGETU32WITHCONFIGMYDEF", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![1, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBGETU32WITHCONFIGMYDEFOPT", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GetU32WithBuilder", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GetOptU32WithBuilderSome", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GetOptU32WithBuilderNone", - modifier: StorageEntryModifier::Optional, - ty: 
StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "MAPU32", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::<[u8; 4]>(), }, default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBMAPU32", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::<[u8; 4]>(), }, default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GETMAPU32", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::<[u8; 4]>(), }, default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBGETMAPU32", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::<[u8; 4]>(), }, default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "GETMAPU32MYDEF", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::<[u8; 4]>(), }, default: vec![109, 97, 112, 100], // "map" docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "PUBGETMAPU32MYDEF", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Blake2_128Concat], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], key: scale_info::meta_type::(), value: scale_info::meta_type::<[u8; 4]>(), }, default: vec![112, 117, 98, 109], // "pubmap" docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "DOUBLEMAP", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, ], key: scale_info::meta_type::<(u32, u32)>(), value: scale_info::meta_type::<[u8; 4]>(), @@ -300,13 +300,13 @@ mod tests { default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "DOUBLEMAP2", - modifier: StorageEntryModifier::Optional, 
- ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, ], key: scale_info::meta_type::<(u32, u32)>(), value: scale_info::meta_type::<[u8; 4]>(), @@ -314,47 +314,50 @@ mod tests { default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "COMPLEXTYPE1", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::<(Option,)>()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::<(Option,)>()), default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "COMPLEXTYPE2", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::<( + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::<( [[(u16, Option<()>); 32]; 12], u32, )>()), default: [0u8; 1156].to_vec(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "COMPLEXTYPE3", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::<[u32; 25]>()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::<[u32; 25]>()), default: [0u8; 100].to_vec(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "NMAP", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { key: scale_info::meta_type::<(u32, u16)>(), - hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + hashers: vec![ + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat, + ], value: scale_info::meta_type::(), }, default: vec![0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "NMAP2", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { key: scale_info::meta_type::(), - hashers: vec![StorageHasher::Blake2_128Concat], + hashers: vec![StorageHasherIR::Blake2_128Concat], value: scale_info::meta_type::(), }, default: vec![0], diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index 97ac29f96902d..eb3345eede508 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr @@ -1,12 +1,12 @@ error[E0277]: can't compare `Foo` with `Foo` - --> tests/derive_no_bound_ui/eq.rs:6:8 - | -6 | struct Foo { - | ^^^ no implementation for `Foo == Foo` - | - = help: the trait `PartialEq` is not implemented for `Foo` + --> tests/derive_no_bound_ui/eq.rs:6:8 + | +6 | struct Foo { + | ^^^^^^^^^^^^^^ no implementation for `Foo == Foo` + | + = help: the trait `PartialEq` is not implemented for `Foo` note: required by a bound in `std::cmp::Eq` - --> $RUST/core/src/cmp.rs - | - | pub trait Eq: PartialEq { - | ^^^^^^^^^^^^^^^ required by this bound in `std::cmp::Eq` + --> $RUST/core/src/cmp.rs + | + | pub trait Eq: PartialEq { + | ^^^^^^^^^^^^^^^ required by this bound in `Eq` diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 8780e18405f27..9b08e175b1ca8 100644 --- a/frame/support/test/tests/instance.rs +++ 
b/frame/support/test/tests/instance.rs @@ -20,9 +20,9 @@ use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; use frame_support::{ inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, - metadata::{ - PalletStorageMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, - StorageHasher, + metadata_ir::{ + PalletStorageMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, + StorageEntryTypeIR, StorageHasherIR, }, traits::{ConstU32, Get}, Parameter, StorageDoubleMap, StorageMap, StorageValue, @@ -410,33 +410,33 @@ fn storage_with_instance_basic_operation() { }); } -fn expected_metadata() -> PalletStorageMetadata { - PalletStorageMetadata { +fn expected_metadata() -> PalletStorageMetadataIR { + PalletStorageMetadataIR { prefix: "Instance2Module2", entries: vec![ - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Value", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(scale_info::meta_type::()), + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), default: vec![0, 0, 0, 0], docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "Map", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Identity], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Identity], key: scale_info::meta_type::(), value: scale_info::meta_type::(), }, default: [0u8; 8].to_vec(), docs: vec![], }, - StorageEntryMetadata { + StorageEntryMetadataIR { name: "DoubleMap", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Identity, StorageHasher::Identity], + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Identity, StorageHasherIR::Identity], key: scale_info::meta_type::<(u64, u64)>(), value: scale_info::meta_type::(), }, diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index fbebbef455f7b..f3c4e07403af8 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -25,8 +25,8 @@ use frame_support::{ pallet_prelude::{StorageInfoTrait, ValueQuery}, storage::unhashed, traits::{ - ConstU32, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, - OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, + ConstU32, GetCallIndex, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, + OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, }, weights::{RuntimeDbWeight, Weight}, }; @@ -37,6 +37,9 @@ use sp_io::{ }; use sp_runtime::{DispatchError, ModuleError}; +/// Latest stable metadata version used for testing. 
+const LATEST_METADATA_VERSION: u32 = 14; + pub struct SomeType1; impl From for u64 { fn from(_t: SomeType1) -> Self { @@ -215,7 +218,7 @@ pub mod pallet { /// Doc comment put in metadata #[pallet::call_index(1)] - #[pallet::weight(1)] + #[pallet::weight({1})] pub fn foo_storage_layer( _origin: OriginFor, #[pallet::compact] foo: u32, @@ -228,15 +231,21 @@ pub mod pallet { Ok(().into()) } + #[pallet::call_index(4)] + #[pallet::weight({1})] + pub fn foo_index_out_of_order(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + // Test for DispatchResult return type #[pallet::call_index(2)] - #[pallet::weight(1)] + #[pallet::weight({1})] pub fn foo_no_post_info(_origin: OriginFor) -> DispatchResult { Ok(()) } #[pallet::call_index(3)] - #[pallet::weight(1)] + #[pallet::weight({1})] pub fn check_for_dispatch_context(_origin: OriginFor) -> DispatchResult { with_context::<(), _>(|_| ()).ok_or_else(|| DispatchError::Unavailable) } @@ -467,6 +476,11 @@ pub mod pallet { } } + #[pallet::composite_enum] + pub enum HoldReason { + Staking, + } + #[derive(codec::Encode, sp_runtime::RuntimeDebug)] #[cfg_attr(feature = "std", derive(codec::Decode))] pub enum InherentError { @@ -561,6 +575,16 @@ pub mod pallet2 { { fn build(&self) {} } + + #[pallet::composite_enum] + pub enum HoldReason { + Governance, + } + + #[pallet::composite_enum] + pub enum SlashReason { + Equivocation, + } } /// Test that the supertrait check works when we pass some parameter to the `frame_system::Config`. @@ -725,8 +749,25 @@ fn call_expand() { assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( pallet::Call::::get_call_names(), - &["foo", "foo_storage_layer", "foo_no_post_info", "check_for_dispatch_context"], + &[ + "foo", + "foo_storage_layer", + "foo_index_out_of_order", + "foo_no_post_info", + "check_for_dispatch_context" + ], ); + + assert_eq!(call_foo.get_call_index(), 0u8); + assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8]) +} + +#[test] +fn call_expand_index() { + let call_foo = pallet::Call::::foo_index_out_of_order {}; + + assert_eq!(call_foo.get_call_index(), 4u8); + assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8]) } #[test] @@ -948,6 +989,23 @@ fn validate_unsigned_expand() { assert_eq!(validity, ValidTransaction::default()); } +#[test] +fn composite_expand() { + use codec::Encode; + + let hold_reason: RuntimeHoldReason = pallet::HoldReason::Staking.into(); + let hold_reason2: RuntimeHoldReason = pallet2::HoldReason::Governance.into(); + let slash_reason: RuntimeSlashReason = pallet2::SlashReason::Equivocation.into(); + + assert_eq!(hold_reason, RuntimeHoldReason::Example(pallet::HoldReason::Staking)); + assert_eq!(hold_reason2, RuntimeHoldReason::Example2(pallet2::HoldReason::Governance)); + assert_eq!(slash_reason, RuntimeSlashReason::Example2(pallet2::SlashReason::Equivocation)); + + assert_eq!(hold_reason.encode(), [1, 0]); + assert_eq!(hold_reason2.encode(), [2, 0]); + assert_eq!(slash_reason.encode(), [2, 0]); +} + #[test] fn pallet_expand_deposit_event() { TestExternalities::default().execute_with(|| { @@ -1558,9 +1616,9 @@ fn metadata() { }, ]; - let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() && - pallets[0].error.as_ref().unwrap().ty.type_info().docs().is_empty() && - pallets[0].calls.as_ref().unwrap().ty.type_info().docs().is_empty(); + let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs.is_empty() && + pallets[0].error.as_ref().unwrap().ty.type_info().docs.is_empty() && + 
pallets[0].calls.as_ref().unwrap().ty.type_info().docs.is_empty(); if cfg!(feature = "no-metadata-docs") { assert!(empty_doc) @@ -1593,6 +1651,43 @@ fn metadata() { pretty_assertions::assert_eq!(actual_metadata.pallets, expected_metadata.pallets); } +#[test] +fn metadata_at_version() { + use frame_support::metadata::*; + use sp_core::Decode; + + let metadata = Runtime::metadata(); + let at_metadata = match Runtime::metadata_at_version(LATEST_METADATA_VERSION) { + Some(opaque) => { + let bytes = &*opaque; + let metadata: RuntimeMetadataPrefixed = Decode::decode(&mut &bytes[..]).unwrap(); + metadata + }, + _ => panic!("metadata has been bumped, test needs to be updated"), + }; + + assert_eq!(metadata, at_metadata); +} + +#[test] +fn metadata_versions() { + assert_eq!(vec![LATEST_METADATA_VERSION, u32::MAX], Runtime::metadata_versions()); +} + +#[test] +fn metadata_ir_pallet_runtime_docs() { + let ir = Runtime::metadata_ir(); + let pallet = ir + .pallets + .iter() + .find(|pallet| pallet.name == "Example") + .expect("Pallet should be present"); + + let readme = "Support code for the runtime.\n\nLicense: Apache-2.0"; + let expected = vec![" Pallet documentation", readme, readme]; + assert_eq!(pallet.docs, expected); +} + #[test] fn test_pallet_runtime_docs() { let docs = crate::pallet::Pallet::::pallet_documentation_metadata(); diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 077b3c96e992c..610316ecf1a6f 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -295,14 +295,14 @@ mod test { }; let assert_meta_types = |ty_id1, ty_id2| { - let ty1 = types.resolve(ty_id1).map(|ty| ty.type_def()); - let ty2 = types.resolve(ty_id2).map(|ty| ty.type_def()); + let ty1 = types.resolve(ty_id1).map(|ty| ty.type_def.clone()); + let ty2 = types.resolve(ty_id2).map(|ty| ty.type_def.clone()); pretty_assertions::assert_eq!(ty1, ty2); }; - let get_enum_variants = |ty_id| match types.resolve(ty_id).map(|ty| ty.type_def()) { + let get_enum_variants = |ty_id| match types.resolve(ty_id).map(|ty| ty.type_def.clone()) { Some(ty) => match ty { - scale_info::TypeDef::Variant(var) => var.variants(), + scale_info::TypeDef::Variant(var) => var.variants, _ => panic!("Expected variant type"), }, _ => panic!("No type found"), @@ -314,12 +314,12 @@ mod test { for i in 0..vs1.len() { let v1 = &vs2[i]; let v2 = &vs2[i]; - assert_eq!(v1.fields().len(), v2.fields().len()); - for f in 0..v1.fields().len() { - let f1 = &v1.fields()[f]; - let f2 = &v2.fields()[f]; - pretty_assertions::assert_eq!(f1.name(), f2.name()); - pretty_assertions::assert_eq!(f1.ty(), f2.ty()); + assert_eq!(v1.fields.len(), v2.fields.len()); + for f in 0..v1.fields.len() { + let f1 = &v1.fields[f]; + let f2 = &v2.fields[f]; + pretty_assertions::assert_eq!(f1.name, f2.name); + pretty_assertions::assert_eq!(f1.ty, f2.ty); } } }; @@ -328,21 +328,21 @@ mod test { let calls1 = pallets[1].calls.as_ref().unwrap(); let calls2 = pallets[2].calls.as_ref().unwrap(); - assert_meta_types(calls1.ty.id(), calls2.ty.id()); + assert_meta_types(calls1.ty.id, calls2.ty.id); // event: check variants and fields but ignore the type name which will be different - let event1_variants = get_enum_variants(pallets[1].event.as_ref().unwrap().ty.id()); - let event2_variants = get_enum_variants(pallets[2].event.as_ref().unwrap().ty.id()); - assert_enum_variants(event1_variants, event2_variants); + let event1_variants = 
get_enum_variants(pallets[1].event.as_ref().unwrap().ty.id); + let event2_variants = get_enum_variants(pallets[2].event.as_ref().unwrap().ty.id); + assert_enum_variants(&event1_variants, &event2_variants); - let err1 = get_enum_variants(pallets[1].error.as_ref().unwrap().ty.id()) + let err1 = get_enum_variants(pallets[1].error.as_ref().unwrap().ty.id) .iter() - .filter(|v| v.name() == "__Ignore") + .filter(|v| v.name == "__Ignore") .cloned() .collect::>(); - let err2 = get_enum_variants(pallets[2].error.as_ref().unwrap().ty.id()) + let err2 = get_enum_variants(pallets[2].error.as_ref().unwrap().ty.id) .iter() - .filter(|v| v.name() == "__Ignore") + .filter(|v| v.name == "__Ignore") .cloned() .collect::>(); assert_enum_variants(&err1, &err2); diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index c641556beda20..73014d99657c9 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -298,9 +298,9 @@ mod test { _ => unreachable!(), }; - let get_enum_variants = |ty_id| match types.resolve(ty_id).map(|ty| ty.type_def()) { + let get_enum_variants = |ty_id| match types.resolve(ty_id).map(|ty| ty.type_def.clone()) { Some(ty) => match ty { - scale_info::TypeDef::Variant(var) => var.variants(), + scale_info::TypeDef::Variant(var) => var.variants, _ => panic!("Expected variant type"), }, _ => panic!("No type found"), @@ -312,12 +312,12 @@ mod test { for i in 0..vs1.len() { let v1 = &vs2[i]; let v2 = &vs2[i]; - assert_eq!(v1.fields().len(), v2.fields().len()); - for f in 0..v1.fields().len() { - let f1 = &v1.fields()[f]; - let f2 = &v2.fields()[f]; - pretty_assertions::assert_eq!(f1.name(), f2.name()); - pretty_assertions::assert_eq!(f1.ty(), f2.ty()); + assert_eq!(v1.fields.len(), v2.fields.len()); + for f in 0..v1.fields.len() { + let f1 = &v1.fields[f]; + let f2 = &v2.fields[f]; + pretty_assertions::assert_eq!(f1.name, f2.name); + pretty_assertions::assert_eq!(f1.ty, f2.ty); } } }; @@ -325,23 +325,23 @@ mod test { for i in vec![1, 3, 5].into_iter() { pretty_assertions::assert_eq!(pallets[i].storage, pallets[i + 1].storage); - let call1_variants = get_enum_variants(pallets[i].calls.as_ref().unwrap().ty.id()); - let call2_variants = get_enum_variants(pallets[i + 1].calls.as_ref().unwrap().ty.id()); - assert_enum_variants(call1_variants, call2_variants); + let call1_variants = get_enum_variants(pallets[i].calls.as_ref().unwrap().ty.id); + let call2_variants = get_enum_variants(pallets[i + 1].calls.as_ref().unwrap().ty.id); + assert_enum_variants(&call1_variants, &call2_variants); // event: check variants and fields but ignore the type name which will be different - let event1_variants = get_enum_variants(pallets[i].event.as_ref().unwrap().ty.id()); - let event2_variants = get_enum_variants(pallets[i + 1].event.as_ref().unwrap().ty.id()); - assert_enum_variants(event1_variants, event2_variants); + let event1_variants = get_enum_variants(pallets[i].event.as_ref().unwrap().ty.id); + let event2_variants = get_enum_variants(pallets[i + 1].event.as_ref().unwrap().ty.id); + assert_enum_variants(&event1_variants, &event2_variants); - let err1 = get_enum_variants(pallets[i].error.as_ref().unwrap().ty.id()) + let err1 = get_enum_variants(pallets[i].error.as_ref().unwrap().ty.id) .iter() - .filter(|v| v.name() == "__Ignore") + .filter(|v| v.name == "__Ignore") .cloned() .collect::>(); - let err2 = get_enum_variants(pallets[i + 
1].error.as_ref().unwrap().ty.id()) + let err2 = get_enum_variants(pallets[i + 1].error.as_ref().unwrap().ty.id) .iter() - .filter(|v| v.name() == "__Ignore") + .filter(|v| v.name == "__Ignore") .cloned() .collect::>(); assert_enum_variants(&err1, &err2); diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 517f920c5ce07..241492a596993 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -27,7 +27,7 @@ use sp_io::{ }; use sp_runtime::{DispatchError, ModuleError}; -#[frame_support::pallet] +#[frame_support::pallet(dev_mode)] pub mod pallet { use codec::MaxEncodedLen; use frame_support::{pallet_prelude::*, parameter_types, scale_info}; diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index aed53b07b5e63..d10bf1359019a 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -1,3 +1,16 @@ +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/call_argument_invalid_bound.rs:19:20 + | +19 | #[pallet::weight(0)] + | ^ + | + = note: `-D deprecated` implied by `-D warnings` + error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/call_argument_invalid_bound.rs:21:36 | diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 3e2e70432a9c9..7173cdcd47361 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -1,3 +1,16 @@ +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/call_argument_invalid_bound_2.rs:19:20 + | +19 | #[pallet::weight(0)] + | ^ + | + = note: `-D deprecated` implied by `-D warnings` + error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/call_argument_invalid_bound_2.rs:21:36 | @@ -23,17 +36,11 @@ error[E0369]: binary operation `==` cannot be applied to type `&::Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:21:36 | -1 | / #[frame_support::pallet] -2 | | mod pallet { -3 | | use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; -4 | | use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; -... | -16 | | -17 | | #[pallet::call] - | |__________________- required by a bound introduced by this call +1 | #[frame_support::pallet] + | ------------------------ required by a bound introduced by this call ... 
-21 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` +21 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | = note: required for `::Bar` to implement `Encode` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 395da09cb0d6d..1b084dd2f76bc 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -1,3 +1,16 @@ +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/call_argument_invalid_bound_3.rs:21:20 + | +21 | #[pallet::weight(0)] + | ^ + | + = note: `-D deprecated` implied by `-D warnings` + error[E0277]: `Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/call_argument_invalid_bound_3.rs:23:36 | diff --git a/frame/support/test/tests/pallet_ui/call_missing_index.rs b/frame/support/test/tests/pallet_ui/call_missing_index.rs index 7e305a573d622..98c49f493e50d 100644 --- a/frame/support/test/tests/pallet_ui/call_missing_index.rs +++ b/frame/support/test/tests/pallet_ui/call_missing_index.rs @@ -15,6 +15,11 @@ mod pallet { pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + + #[pallet::weight(0)] + pub fn bar(_: OriginFor) -> DispatchResult { + Ok(()) + } } } diff --git a/frame/support/test/tests/pallet_ui/call_missing_index.stderr b/frame/support/test/tests/pallet_ui/call_missing_index.stderr index 9d00204979211..82dbe1d24c28e 100644 --- a/frame/support/test/tests/pallet_ui/call_missing_index.stderr +++ b/frame/support/test/tests/pallet_ui/call_missing_index.stderr @@ -1,12 +1,47 @@ -error: use of deprecated struct `pallet::warnings::foo`: - Implicit call indices are deprecated in favour of explicit ones. - Please ensure that all calls have the `pallet::call_index` attribute or that the - `dev-mode` of the pallet is enabled. For more info see: - and - . +error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_0::_w`: + It is deprecated to use implicit call indices. + Please instead ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/call_missing_index.rs:15:10 | 15 | pub fn foo(_: OriginFor) -> DispatchResult { | ^^^ | = note: `-D deprecated` implied by `-D warnings` + +error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_1::_w`: + It is deprecated to use implicit call indices. + Please instead ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode. + + For more info see: + + + --> tests/pallet_ui/call_missing_index.rs:20:10 + | +20 | pub fn bar(_: OriginFor) -> DispatchResult { + | ^^^ + +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. 
+ + For more info see: + + --> tests/pallet_ui/call_missing_index.rs:14:20 + | +14 | #[pallet::weight(0)] + | ^ + +error: use of deprecated constant `pallet::warnings::ConstantWeight_1::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/call_missing_index.rs:19:20 + | +19 | #[pallet::weight(0)] + | ^ diff --git a/frame/support/test/tests/pallet_ui/call_missing_weight.stderr b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr index ec45d478870c1..0a6cf16571f95 100644 --- a/frame/support/test/tests/pallet_ui/call_missing_weight.stderr +++ b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr @@ -1,5 +1,7 @@ -error: Invalid pallet::call, requires weight attribute i.e. `#[pallet::weight($expr)]` - --> $DIR/call_missing_weight.rs:17:7 +error: A pallet::call requires either a concrete `#[pallet::weight($expr)]` or an + inherited weight from the `#[pallet:call(weight($type))]` attribute, but + none were given. + --> tests/pallet_ui/call_missing_weight.rs:17:7 | 17 | pub fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.rs b/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.rs new file mode 100644 index 0000000000000..c122877e8a075 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(10_000something)] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.stderr b/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.stderr new file mode 100644 index 0000000000000..0651f003b9e23 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.stderr @@ -0,0 +1,20 @@ +error: invalid suffix `something` for number literal + --> tests/pallet_ui/call_weight_argument_has_suffix.rs:15:26 + | +15 | #[pallet::weight(10_000something)] + | ^^^^^^^^^^^^^^^ invalid suffix `something` + | + = help: the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.) + +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. 
+ + For more info see: + + --> tests/pallet_ui/call_weight_argument_has_suffix.rs:15:26 + | +15 | #[pallet::weight(10_000something)] + | ^^^^^^^^^^^^^^^ + | + = note: `-D deprecated` implied by `-D warnings` diff --git a/frame/support/test/tests/pallet_ui/call_weight_const_warning.rs b/frame/support/test/tests/pallet_ui/call_weight_const_warning.rs new file mode 100644 index 0000000000000..2e5dc2a649e70 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_const_warning.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(123_u64)] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_const_warning.stderr b/frame/support/test/tests/pallet_ui/call_weight_const_warning.stderr new file mode 100644 index 0000000000000..b04c3ec395ce6 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_const_warning.stderr @@ -0,0 +1,12 @@ +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/call_weight_const_warning.rs:15:26 + | +15 | #[pallet::weight(123_u64)] + | ^^^^^^^ + | + = note: `-D deprecated` implied by `-D warnings` diff --git a/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.rs b/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.rs new file mode 100644 index 0000000000000..798911dba33d9 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(123)] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + + #[pallet::call_index(1)] + #[pallet::weight(123_custom_prefix)] + pub fn bar(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.stderr b/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.stderr new file mode 100644 index 0000000000000..c656790207504 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.stderr @@ -0,0 +1,31 @@ +error: invalid suffix `custom_prefix` for number literal + --> tests/pallet_ui/call_weight_const_warning_twice.rs:19:26 + | +19 | #[pallet::weight(123_custom_prefix)] + | ^^^^^^^^^^^^^^^^^ invalid suffix `custom_prefix` + | + = help: the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.) + +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. 
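For context, a minimal sketch of the call declaration these deprecation warnings steer pallet authors toward, assuming the pallet's `Config` exposes a benchmarked `WeightInfo` associated type with a `foo()` function (the names are illustrative and not part of this diff):

#[pallet::call]
impl<T: Config> Pallet<T> {
	// An explicit call index plus a benchmarked weight avoids both the
	// `ImplicitCallIndex` and the `ConstantWeight` deprecation warnings.
	#[pallet::call_index(0)]
	#[pallet::weight(T::WeightInfo::foo())]
	pub fn foo(_origin: OriginFor<T>) -> DispatchResult {
		Ok(())
	}
}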
+ + For more info see: + + --> tests/pallet_ui/call_weight_const_warning_twice.rs:15:26 + | +15 | #[pallet::weight(123)] + | ^^^ + | + = note: `-D deprecated` implied by `-D warnings` + +error: use of deprecated constant `pallet::warnings::ConstantWeight_1::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/call_weight_const_warning_twice.rs:19:26 + | +19 | #[pallet::weight(123_custom_prefix)] + | ^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid.rs b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid.rs new file mode 100644 index 0000000000000..ff235e986099f --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid.rs @@ -0,0 +1,50 @@ +use frame_support::pallet_prelude::*; + +pub trait WeightInfo { + fn foo() -> Weight; +} + +#[frame_support::pallet] +mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(invalid)] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call = invalid] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid.stderr b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid.stderr new file mode 100644 index 0000000000000..7eed646e7b190 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid.stderr @@ -0,0 +1,11 @@ +error: expected `weight` + --> tests/pallet_ui/call_weight_inherited_invalid.rs:19:17 + | +19 | #[pallet::call(invalid)] + | ^^^^^^^ + +error: expected parentheses + --> tests/pallet_ui/call_weight_inherited_invalid.rs:40:17 + | +40 | #[pallet::call = invalid] + | ^ diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid2.rs b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid2.rs new file mode 100644 index 0000000000000..76ccf5db22019 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid2.rs @@ -0,0 +1,53 @@ +// Weight is an ident instead of a type. 
+ +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +pub trait WeightInfo { + fn foo() -> Weight; +} + +#[frame_support::pallet] +mod parentheses { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight(prefix))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight = prefix)] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid2.stderr b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid2.stderr new file mode 100644 index 0000000000000..29f3b6bfd2b0d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid2.stderr @@ -0,0 +1,11 @@ +error[E0412]: cannot find type `prefix` in this scope + --> tests/pallet_ui/call_weight_inherited_invalid2.rs:22:24 + | +22 | #[pallet::call(weight(prefix))] + | ^^^^^^ not found in this scope + +error[E0412]: cannot find type `prefix` in this scope + --> tests/pallet_ui/call_weight_inherited_invalid2.rs:43:26 + | +43 | #[pallet::call(weight = prefix)] + | ^^^^^^ not found in this scope diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.rs b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.rs new file mode 100644 index 0000000000000..b31bc0ae234b2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.rs @@ -0,0 +1,53 @@ +// Call weight is an LitInt instead of a type. 
+ +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +pub trait WeightInfo { + fn foo() -> Weight; +} + +#[frame_support::pallet] +mod parentheses { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight(123))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight = 123)] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.stderr b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.stderr new file mode 100644 index 0000000000000..fab7acb90deaf --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.stderr @@ -0,0 +1,19 @@ +error: expected one of: `for`, parentheses, `fn`, `unsafe`, `extern`, identifier, `::`, `<`, `dyn`, square brackets, `*`, `&`, `!`, `impl`, `_`, lifetime + --> tests/pallet_ui/call_weight_inherited_invalid3.rs:22:24 + | +22 | #[pallet::call(weight(123))] + | ^^^ + +error: expected one of: `for`, parentheses, `fn`, `unsafe`, `extern`, identifier, `::`, `<`, `dyn`, square brackets, `*`, `&`, `!`, `impl`, `_`, lifetime + --> tests/pallet_ui/call_weight_inherited_invalid3.rs:43:26 + | +43 | #[pallet::call(weight = 123)] + | ^^^ + +error: unused import: `frame_system::pallet_prelude::*` + --> tests/pallet_ui/call_weight_inherited_invalid3.rs:4:5 + | +4 | use frame_system::pallet_prelude::*; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `-D unused-imports` implied by `-D warnings` diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid4.rs b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid4.rs new file mode 100644 index 0000000000000..39c0929d603ad --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid4.rs @@ -0,0 +1,52 @@ +// Function does not exist in the trait. 
+ +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +pub trait WeightInfo { +} + +#[frame_support::pallet] +mod parentheses { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight(::WeightInfo))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight = ::WeightInfo)] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid4.stderr b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid4.stderr new file mode 100644 index 0000000000000..46872fc7c3691 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid4.stderr @@ -0,0 +1,11 @@ +error[E0599]: no function or associated item named `foo` found for associated type `::WeightInfo` in the current scope + --> tests/pallet_ui/call_weight_inherited_invalid4.rs:24:10 + | +24 | pub fn foo(_: OriginFor) -> DispatchResult { + | ^^^ function or associated item not found in `::WeightInfo` + +error[E0599]: no function or associated item named `foo` found for associated type `::WeightInfo` in the current scope + --> tests/pallet_ui/call_weight_inherited_invalid4.rs:45:10 + | +45 | pub fn foo(_: OriginFor) -> DispatchResult { + | ^^^ function or associated item not found in `::WeightInfo` diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.rs b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.rs new file mode 100644 index 0000000000000..a5b2f5c7f6aa6 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.rs @@ -0,0 +1,44 @@ +// Stray tokens after good input. 
+ +#[frame_support::pallet] +mod parentheses { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight(::WeightInfo straycat))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight = ::WeightInfo straycat)] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.stderr b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.stderr new file mode 100644 index 0000000000000..c0e9ef2d9e9d8 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.stderr @@ -0,0 +1,11 @@ +error: unexpected token + --> tests/pallet_ui/call_weight_inherited_invalid5.rs:14:50 + | +14 | #[pallet::call(weight(::WeightInfo straycat))] + | ^^^^^^^^ + +error: unexpected token + --> tests/pallet_ui/call_weight_inherited_invalid5.rs:34:52 + | +34 | #[pallet::call(weight = ::WeightInfo straycat)] + | ^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.rs b/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.rs new file mode 100644 index 0000000000000..74692ee94efd2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.rs @@ -0,0 +1,13 @@ +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::composite_enum] + pub enum HoldReasons {} +} + +fn main() {} \ No newline at end of file diff --git a/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.stderr b/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.stderr new file mode 100644 index 0000000000000..902e8923759b2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.stderr @@ -0,0 +1,5 @@ +error: expected one of: `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` + --> tests/pallet_ui/composite_enum_unsupported_identifier.rs:10:11 + | +10 | pub enum HoldReasons {} + | ^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs index f044ae6d7878f..2a413eea9b4aa 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs @@ -1,7 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] -pub use pallet::*; - #[frame_support::pallet] pub mod pallet { use frame_support::pallet_prelude::*; diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr index fac7fd77df9ae..1e2011b2a30cf 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr @@ -1,11 +1,7 @@ -error: Invalid pallet::call, requires weight attribute i.e. 
`#[pallet::weight($expr)]` - --> tests/pallet_ui/dev_mode_without_arg.rs:24:7 +error: A pallet::call requires either a concrete `#[pallet::weight($expr)]` or an + inherited weight from the `#[pallet:call(weight($type))]` attribute, but + none were given. + --> tests/pallet_ui/dev_mode_without_arg.rs:22:7 | -24 | pub fn my_call(_origin: OriginFor) -> DispatchResult { +22 | pub fn my_call(_origin: OriginFor) -> DispatchResult { | ^^ - -error[E0432]: unresolved import `pallet` - --> tests/pallet_ui/dev_mode_without_arg.rs:3:9 - | -3 | pub use pallet::*; - | ^^^^^^ help: a similar path exists: `test_pallet::pallet` diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_default_hasher.rs b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_default_hasher.rs new file mode 100644 index 0000000000000..fb1139479566a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_default_hasher.rs @@ -0,0 +1,33 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + + // The struct on which we build all of our Pallet logic. + #[pallet::pallet] + pub struct Pallet(_); + + // Your Pallet's configuration trait, representing custom external types and interfaces. + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::storage] + type MyStorage = StorageValue<_, Vec>; + + #[pallet::storage] + type MyStorageMap = StorageMap<_, _, u32, u64>; + + #[pallet::storage] + type MyStorageDoubleMap = StorageDoubleMap<_, _, u32, _, u64, u64>; + + #[pallet::storage] + type MyCountedStorageMap = CountedStorageMap<_, _, u32, u64>; + + // Your Pallet's internal functions. + impl Pallet {} +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_default_hasher.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_default_hasher.stderr new file mode 100644 index 0000000000000..e0dbc8c953b4e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_default_hasher.stderr @@ -0,0 +1,11 @@ +error: `_` can only be used in dev_mode. Please specify an appropriate hasher. + --> tests/pallet_ui/dev_mode_without_arg_default_hasher.rs:21:47 + | +21 | type MyStorageMap = StorageMap<_, _, u32, u64>; + | ^ + +error[E0432]: unresolved import `pallet` + --> tests/pallet_ui/dev_mode_without_arg_default_hasher.rs:3:9 + | +3 | pub use pallet::*; + | ^^^^^^ help: a similar path exists: `test_pallet::pallet` diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index c9de991135082..cf502ac5be475 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -1,9 +1,10 @@ -error: use of deprecated struct `pallet::warnings::my_call`: - Implicit call indices are deprecated in favour of explicit ones. - Please ensure that all calls have the `pallet::call_index` attribute or that the - `dev-mode` of the pallet is enabled. For more info see: - and - . +error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_0::_w`: + It is deprecated to use implicit call indices. + Please instead ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode. 
+ + For more info see: + + --> tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs:25:10 | 25 | pub fn my_call(_origin: OriginFor) -> DispatchResult { @@ -11,6 +12,17 @@ error: use of deprecated struct `pallet::warnings::my_call`: | = note: `-D deprecated` implied by `-D warnings` +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. + + For more info see: + + --> tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs:24:20 + | +24 | #[pallet::weight(0)] + | ^ + error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied --> tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs:11:12 | diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item.stderr b/frame/support/test/tests/pallet_ui/event_wrong_item.stderr index 21eb0ed35e936..0ef150dfd62e1 100644 --- a/frame/support/test/tests/pallet_ui/event_wrong_item.stderr +++ b/frame/support/test/tests/pallet_ui/event_wrong_item.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::event, expected item enum +error: Invalid pallet::event, expected enum item --> $DIR/event_wrong_item.rs:19:2 | 19 | pub struct Foo; diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index 80903928585b4..22b5ce9412ce7 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -1,14 +1,14 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is not satisfied - --> tests/pallet_ui/genesis_default_not_satisfied.rs:22:18 - | -22 | impl GenesisBuild for GenesisConfig {} - | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` - | + --> tests/pallet_ui/genesis_default_not_satisfied.rs:22:38 + | +22 | impl GenesisBuild for GenesisConfig {} + | ^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` + | note: required by a bound in `GenesisBuild` - --> $WORKSPACE/frame/support/src/traits/hooks.rs - | - | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { - | ^^^^^^^ required by this bound in `GenesisBuild` + --> $WORKSPACE/frame/support/src/traits/hooks.rs + | + | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { + | ^^^^^^^ required by this bound in `GenesisBuild` help: consider annotating `pallet::GenesisConfig` with `#[derive(Default)]` | 19 | #[derive(Default)] diff --git a/frame/support/test/tests/pallet_ui/hold_reason_non_enum.rs b/frame/support/test/tests/pallet_ui/hold_reason_non_enum.rs new file mode 100644 index 0000000000000..8008c465e61ad --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hold_reason_non_enum.rs @@ -0,0 +1,14 @@ +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::composite_enum] + pub struct HoldReason; +} + +fn main() { +} \ No newline at end of file diff --git a/frame/support/test/tests/pallet_ui/hold_reason_non_enum.stderr b/frame/support/test/tests/pallet_ui/hold_reason_non_enum.stderr new file mode 100644 index 0000000000000..7d86b8d4f1bd5 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hold_reason_non_enum.stderr @@ -0,0 +1,5 @@ +error: Invalid 
pallet::composite_enum, expected enum item + --> tests/pallet_ui/hold_reason_non_enum.rs:10:2 + | +10 | pub struct HoldReason; + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/hold_reason_not_pub.rs b/frame/support/test/tests/pallet_ui/hold_reason_not_pub.rs new file mode 100644 index 0000000000000..626dad7411319 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hold_reason_not_pub.rs @@ -0,0 +1,14 @@ +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::composite_enum] + enum HoldReason {} +} + +fn main() { +} \ No newline at end of file diff --git a/frame/support/test/tests/pallet_ui/hold_reason_not_pub.stderr b/frame/support/test/tests/pallet_ui/hold_reason_not_pub.stderr new file mode 100644 index 0000000000000..e8b0c14e967dd --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hold_reason_not_pub.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::composite_enum, `HoldReason` must be public + --> tests/pallet_ui/hold_reason_not_pub.rs:10:5 + | +10 | enum HoldReason {} + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index ff52a094d6f8d..9c30179bc11ef 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -1,15 +1,15 @@ error[E0107]: missing generics for trait `Hooks` - --> tests/pallet_ui/hooks_invalid_item.rs:12:18 - | -12 | impl Hooks for Pallet {} - | ^^^^^ expected 1 generic argument - | + --> tests/pallet_ui/hooks_invalid_item.rs:12:18 + | +12 | impl Hooks for Pallet {} + | ^^^^^ expected 1 generic argument + | note: trait defined here, with 1 generic parameter: `BlockNumber` - --> $WORKSPACE/frame/support/src/traits/hooks.rs - | - | pub trait Hooks { - | ^^^^^ ----------- + --> $WORKSPACE/frame/support/src/traits/hooks.rs + | + | pub trait Hooks { + | ^^^^^ ----------- help: add missing generic argument | 12 | impl Hooks for Pallet {} - | ~~~~~~~~~~~~~~~~~~ + | +++++++++++++ diff --git a/frame/support/test/tests/pallet_ui/lock_id_duplicate.rs b/frame/support/test/tests/pallet_ui/lock_id_duplicate.rs new file mode 100644 index 0000000000000..70418efc41421 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/lock_id_duplicate.rs @@ -0,0 +1,17 @@ +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::composite_enum] + pub enum LockId {} + + #[pallet::composite_enum] + pub enum LockId {} +} + +fn main() { +} \ No newline at end of file diff --git a/frame/support/test/tests/pallet_ui/lock_id_duplicate.stderr b/frame/support/test/tests/pallet_ui/lock_id_duplicate.stderr new file mode 100644 index 0000000000000..1b7097d0a1095 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/lock_id_duplicate.stderr @@ -0,0 +1,5 @@ +error: Invalid duplicated `LockId` definition + --> tests/pallet_ui/lock_id_duplicate.rs:13:14 + | +13 | pub enum LockId {} + | ^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.stderr b/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.stderr index ba60479c07202..9a1249dd36f37 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.stderr +++ b/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.stderr @@ -1,4 +1,4 @@ -error: The `pallet_doc` received 
an unsupported argument. Supported format: `pallet_doc(PATH)` +error: The `pallet_doc` received an unsupported argument. Supported format: `pallet_doc("PATH")` --> tests/pallet_ui/pallet_doc_arg_non_path.rs:3:1 | 3 | #[pallet_doc(X)] diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_empty.stderr b/frame/support/test/tests/pallet_ui/pallet_doc_empty.stderr index d6a189d7918f5..a220cbe9e9990 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_empty.stderr +++ b/frame/support/test/tests/pallet_ui/pallet_doc_empty.stderr @@ -1,4 +1,4 @@ -error: The `pallet_doc` attribute must receive arguments as a list. Supported format: `pallet_doc(PATH)` +error: The `pallet_doc` received an unsupported argument. Supported format: `pallet_doc("PATH")` --> tests/pallet_ui/pallet_doc_empty.rs:3:1 | 3 | #[pallet_doc] diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.stderr b/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.stderr index 9dd03978934a4..bee7c708507d2 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.stderr +++ b/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.stderr @@ -1,4 +1,4 @@ -error: The `pallet_doc` attribute must receive arguments as a list. Supported format: `pallet_doc(PATH)` +error: The `pallet_doc` received an unsupported argument. Supported format: `pallet_doc("PATH")` --> tests/pallet_ui/pallet_doc_invalid_arg.rs:3:1 | 3 | #[pallet_doc = "invalid"] diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.stderr b/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.stderr index 58ad75a0a2aec..e769555438e13 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.stderr +++ b/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.stderr @@ -1,4 +1,4 @@ -error: The `pallet_doc` attribute must receive only one argument. Supported format: `pallet_doc(PATH)` +error: The `pallet_doc` received an unsupported argument. Supported format: `pallet_doc("PATH")` --> tests/pallet_ui/pallet_doc_multiple_args.rs:3:1 | 3 | #[pallet_doc("A", "B")] diff --git a/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs index 97e0d585d0ce9..483ed9579bd0a 100644 --- a/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs +++ b/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs @@ -1,5 +1,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +use frame_support::{ + traits::{ + ConstU32, + }, +}; + pub use pallet::*; #[frame_support::pallet(dev_mode)] @@ -19,6 +25,16 @@ pub mod pallet { #[pallet::storage] type MyStorage = StorageValue<_, Vec>; + // The Hasher requirement skipped by `dev_mode`. + #[pallet::storage] + pub type MyStorageMap = StorageMap<_, _, u32, u64>; + + #[pallet::storage] + type MyStorageDoubleMap = StorageDoubleMap<_, _, u32, _, u64, u64>; + + #[pallet::storage] + type MyCountedStorageMap = CountedStorageMap<_, _, u32, u64>; + // Your Pallet's callable functions. 
#[pallet::call] impl Pallet { @@ -32,4 +48,71 @@ pub mod pallet { impl Pallet {} } -fn main() {} +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u32; + type RuntimeCall = RuntimeCall; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU32<250>; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub struct Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + // Exclude part `Storage` in order not to check its metadata in tests. + System: frame_system exclude_parts { Pallet, Storage }, + Example: pallet, + } +); + +impl pallet::Config for Runtime { + +} + +fn main() { + use frame_support::{pallet_prelude::*}; + use storage::unhashed; + use sp_io::{ + hashing::{blake2_128, twox_128}, + TestExternalities, + }; + + fn blake2_128_concat(d: &[u8]) -> Vec { + let mut v = blake2_128(d).to_vec(); + v.extend_from_slice(d); + v + } + + TestExternalities::default().execute_with(|| { + pallet::MyStorageMap::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"MyStorageMap")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u64)); + }); +} diff --git a/frame/support/test/tests/pallet_ui/pass/inherited_call_weight.rs b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight.rs new file mode 100644 index 0000000000000..355a1c978df06 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight.rs @@ -0,0 +1,51 @@ +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +pub trait WeightInfo { + fn foo() -> Weight; +} + +#[frame_support::pallet] +mod parentheses { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight(::WeightInfo))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: crate::WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight = ::WeightInfo)] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/pass/inherited_call_weight2.rs b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight2.rs new file mode 100644 index 0000000000000..ae70c295d8db2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight2.rs @@ -0,0 +1,57 @@ +use frame_support::pallet_prelude::*; +use 
frame_system::pallet_prelude::*; + +pub trait WeightInfo { + fn foo() -> Weight; +} + +impl WeightInfo for () { + fn foo() -> Weight { + Weight::zero() + } +} + +#[frame_support::pallet] +mod parentheses { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + // Crazy man just uses `()`, but it still works ;) + #[pallet::call(weight(()))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + // Crazy man just uses `()`, but it still works ;) + #[pallet::call(weight = ())] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs new file mode 100644 index 0000000000000..567fd2e5fa032 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs @@ -0,0 +1,54 @@ +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +// If, for whatever reason, you dont to not use a `WeightInfo` trait - it will still work. +struct Impl; + +impl Impl { + fn foo() -> Weight { + Weight::zero() + } +} + +#[frame_support::pallet] +mod parentheses { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight(crate::Impl))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[frame_support::pallet] +mod assign { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight = crate::Impl)] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/pass/inherited_call_weight_dev_mode.rs b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight_dev_mode.rs new file mode 100644 index 0000000000000..04ce49ee71e99 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/inherited_call_weight_dev_mode.rs @@ -0,0 +1,30 @@ +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +pub trait WeightInfo { + fn foo() -> Weight; +} + +#[frame_support::pallet(dev_mode)] +mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call(weight(::WeightInfo))] + impl Pallet { + #[pallet::call_index(0)] + pub fn foo(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr index 98265462bbdfb..9e63fd03db52b 100644 --- a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr +++ b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr @@ -12,4 +12,4 @@ note: enum defined here, with 1 generic 
parameter: `T` help: add missing generic argument | 17 | type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue>>; - | ~~~~~~~~ + | +++ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr index caffd846f272a..89ddd1599ac97 100644 --- a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr +++ b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr @@ -1,5 +1,5 @@ -error: Invalid pallet::storage, unexpected generic args for ResultQuery, expected angle-bracketed arguments, found `(NonExistentValue)` +error: expected `,` --> tests/pallet_ui/storage_result_query_parenthesized_generics.rs:18:55 | 18 | type Foo = StorageValue<_, u8, ResultQuery(NonExistentValue)>; - | ^^^^^^^^^^^^^^^^^^ + | ^ diff --git a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr index 6288dcd534b60..4b4c78b148455 100644 --- a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr +++ b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr @@ -1,52 +1,52 @@ error[E0277]: the trait bound `::AccountId: From` is not satisfied - --> $DIR/type_value_forgotten_where_clause.rs:24:34 + --> tests/pallet_ui/type_value_forgotten_where_clause.rs:24:34 | 24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } | ^^^^^^ the trait `From` is not implemented for `::AccountId` | note: required by a bound in `pallet::Config` - --> $DIR/type_value_forgotten_where_clause.rs:8:51 + --> tests/pallet_ui/type_value_forgotten_where_clause.rs:8:51 | 7 | pub trait Config: frame_system::Config | ------ required by a bound in this 8 | where ::AccountId: From - | ^^^^^^^^^ required by this bound in `pallet::Config` + | ^^^^^^^^^ required by this bound in `Config` help: consider further restricting the associated type | 24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } | +++++++++++++++++++++++++++++++++++++++++++++++++++++++ error[E0277]: the trait bound `::AccountId: From` is not satisfied - --> $DIR/type_value_forgotten_where_clause.rs:24:12 + --> tests/pallet_ui/type_value_forgotten_where_clause.rs:24:12 | 24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` | note: required by a bound in `pallet::Config` - --> $DIR/type_value_forgotten_where_clause.rs:8:51 + --> tests/pallet_ui/type_value_forgotten_where_clause.rs:8:51 | 7 | pub trait Config: frame_system::Config | ------ required by a bound in this 8 | where ::AccountId: From - | ^^^^^^^^^ required by this bound in `pallet::Config` + | ^^^^^^^^^ required by this bound in `Config` help: consider further restricting the associated type | 24 | #[pallet::type_value where ::AccountId: From] fn Foo() -> u32 { 3u32 } | +++++++++++++++++++++++++++++++++++++++++++++++++++++++ error[E0277]: the trait bound `::AccountId: From` is not satisfied - --> $DIR/type_value_forgotten_where_clause.rs:24:12 + --> tests/pallet_ui/type_value_forgotten_where_clause.rs:24:12 | 24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` | note: required by a bound in `pallet::Config` - --> $DIR/type_value_forgotten_where_clause.rs:8:51 + --> tests/pallet_ui/type_value_forgotten_where_clause.rs:8:51 | 7 | pub trait Config: 
frame_system::Config | ------ required by a bound in this 8 | where ::AccountId: From - | ^^^^^^^^^ required by this bound in `pallet::Config` + | ^^^^^^^^^ required by this bound in `Config` help: consider further restricting the associated type | 24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } diff --git a/frame/support/test/tests/runtime_metadata.rs b/frame/support/test/tests/runtime_metadata.rs new file mode 100644 index 0000000000000..8c04c785a35af --- /dev/null +++ b/frame/support/test/tests/runtime_metadata.rs @@ -0,0 +1,222 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{ + metadata_ir::{ + RuntimeApiMetadataIR, RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, + }, + traits::ConstU32, +}; +use scale_info::{form::MetaForm, meta_type}; +use sp_runtime::traits::Block as BlockT; + +pub type BlockNumber = u64; +pub type Index = u64; +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +impl frame_system::Config for Runtime { + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u32; + type RuntimeCall = RuntimeCall; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU32<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system, + } +); + +sp_api::decl_runtime_apis! { + /// ApiWithCustomVersion trait documentation + /// + /// Documentation on multiline. + pub trait Api { + fn test(data: u64); + /// something_with_block. + fn something_with_block(block: Block) -> Block; + fn function_with_two_args(data: u64, block: Block); + fn same_name(); + fn wild_card(_: u32); + } +} + +sp_api::impl_runtime_apis! 
{ + impl self::Api for Runtime { + fn test(_data: u64) { + unimplemented!() + } + + fn something_with_block(_: Block) -> Block { + unimplemented!() + } + + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } + + fn same_name() {} + + fn wild_card(_: u32) {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +#[test] +fn runtime_metadata() { + fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } + } + + let expected_runtime_metadata = vec![ + RuntimeApiMetadataIR { + name: "Api", + methods: vec![ + RuntimeApiMethodMetadataIR { + name: "test", + inputs: vec![RuntimeApiMethodParamMetadataIR:: { + name: "data", + ty: meta_type::(), + }], + output: meta_type::<()>(), + docs: vec![], + }, + RuntimeApiMethodMetadataIR { + name: "something_with_block", + inputs: vec![RuntimeApiMethodParamMetadataIR:: { + name: "block", + ty: meta_type::(), + }], + output: meta_type::(), + docs: maybe_docs(vec![" something_with_block."]), + }, + RuntimeApiMethodMetadataIR { + name: "function_with_two_args", + inputs: vec![ + RuntimeApiMethodParamMetadataIR:: { + name: "data", + ty: meta_type::(), + }, + RuntimeApiMethodParamMetadataIR:: { + name: "block", + ty: meta_type::(), + }, + ], + output: meta_type::<()>(), + docs: vec![], + }, + RuntimeApiMethodMetadataIR { + name: "same_name", + inputs: vec![], + output: meta_type::<()>(), + docs: vec![], + }, + RuntimeApiMethodMetadataIR { + name: "wild_card", + inputs: vec![RuntimeApiMethodParamMetadataIR:: { + name: "_", + ty: meta_type::(), + }], + output: meta_type::<()>(), + docs: vec![], + }, + ], + docs: maybe_docs(vec![ + " ApiWithCustomVersion trait documentation", + "", + " Documentation on multiline.", + ]), + }, + RuntimeApiMetadataIR { + name: "Core", + methods: vec![ + RuntimeApiMethodMetadataIR { + name: "version", + inputs: vec![], + output: meta_type::(), + docs: maybe_docs(vec![" Returns the version of the runtime."]), + }, + RuntimeApiMethodMetadataIR { + name: "execute_block", + inputs: vec![RuntimeApiMethodParamMetadataIR:: { + name: "block", + ty: meta_type::(), + }], + output: meta_type::<()>(), + docs: maybe_docs(vec![" Execute the given block."]), + }, + RuntimeApiMethodMetadataIR { + name: "initialize_block", + inputs: vec![RuntimeApiMethodParamMetadataIR:: { + name: "header", + ty: meta_type::<&::Header>(), + }], + output: meta_type::<()>(), + docs: maybe_docs(vec![" Initialize a block with the given header."]), + }, + ], + docs: maybe_docs(vec![ + " The `Core` runtime api that every Substrate runtime needs to implement.", + ]), + }, + ]; + + let rt = Runtime; + let runtime_metadata = (&rt).runtime_metadata(); + assert_eq!(runtime_metadata, expected_runtime_metadata); +} diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index f124aed9aa561..82174bf9d7141 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -22,7 +22,7 @@ use frame_support::{ use pallet::*; use sp_io::TestExternalities; -#[frame_support::pallet] +#[frame_support::pallet(dev_mode)] pub mod pallet { use frame_support::{ensure, pallet_prelude::*}; use frame_system::pallet_prelude::*; diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 0888e26f6a754..ed95df6888ea6 100644 --- 
a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 8f00097254609..50009387a2088 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 32257642c8fa3..79f61ddfc10e7 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1311,10 +1311,20 @@ impl Pallet { a.consumers == 0 || a.providers > 1 } - /// True if the account has at least one provider reference. - pub fn can_inc_consumer(who: &T::AccountId) -> bool { + /// True if the account has at least one provider reference and adding `amount` consumer + /// references would not take it above the maximum. + pub fn can_accrue_consumers(who: &T::AccountId, amount: u32) -> bool { let a = Account::::get(who); - a.providers > 0 && a.consumers < T::MaxConsumers::max_consumers() + match a.consumers.checked_add(amount) { + Some(c) => a.providers > 0 && c <= T::MaxConsumers::max_consumers(), + None => false, + } + } + + /// True if the account has at least one provider reference and fewer consumer references than + /// the maximum. + pub fn can_inc_consumer(who: &T::AccountId) -> bool { + Self::can_accrue_consumers(who, 1) } /// Deposits an event into this block's event record. @@ -1869,10 +1879,6 @@ impl BlockNumberProvider for Pallet { } } -fn is_providing(d: &T) -> bool { - d != &T::default() -} - /// Implement StoredMap for a simple single-item, provide-when-not-default system. This works fine /// for storing a single item which allows the account to continue existing as long as it's not /// empty/default.
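For context on the frame/system change above: `can_accrue_consumers` now performs an overflow-checked, bounded accrual of consumer references, and `can_inc_consumer` becomes its single-reference case. The following is a minimal standalone sketch of that check; `AccountRefs` and `MAX_CONSUMERS` are hypothetical stand-ins for the pallet's `Account` storage item and `T::MaxConsumers::max_consumers()`:

struct AccountRefs {
    providers: u32,
    consumers: u32,
}

// Hypothetical stand-in for `T::MaxConsumers::max_consumers()`.
const MAX_CONSUMERS: u32 = 16;

fn can_accrue_consumers(account: &AccountRefs, amount: u32) -> bool {
    // `checked_add` rejects the request outright if the counter would overflow.
    match account.consumers.checked_add(amount) {
        Some(c) => account.providers > 0 && c <= MAX_CONSUMERS,
        None => false,
    }
}

fn can_inc_consumer(account: &AccountRefs) -> bool {
    // The single-increment helper is just the `amount == 1` case.
    can_accrue_consumers(account, 1)
}

fn main() {
    let a = AccountRefs { providers: 1, consumers: 15 };
    assert!(can_inc_consumer(&a));           // 16 consumers is still within the cap
    assert!(!can_accrue_consumers(&a, 2));   // 17 would exceed the cap of 16
}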
@@ -1888,23 +1894,14 @@ impl StoredMap for Pallet { f: impl FnOnce(&mut Option) -> Result, ) -> Result { let account = Account::::get(k); - let was_providing = is_providing(&account.data); - let mut some_data = if was_providing { Some(account.data) } else { None }; + let is_default = account.data == T::AccountData::default(); + let mut some_data = if is_default { None } else { Some(account.data) }; let result = f(&mut some_data)?; - let is_providing = some_data.is_some(); - if !was_providing && is_providing { - Self::inc_providers(k); - } else if was_providing && !is_providing { - match Self::dec_providers(k)? { - DecRefStatus::Reaped => return Ok(result), - DecRefStatus::Exists => { - // Update value as normal... - }, - } - } else if !was_providing && !is_providing { - return Ok(result) + if Self::providers(k) > 0 || Self::sufficients(k) > 0 { + Account::::mutate(k, |a| a.data = some_data.unwrap_or_default()); + } else { + Account::::remove(k) } - Account::::mutate(k, |a| a.data = some_data.unwrap_or_default()); Ok(result) } } diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 78baf0a4fefd2..a9bf800e4a476 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -37,6 +37,7 @@ fn origin_works() { #[test] fn stored_map_works() { new_test_ext().execute_with(|| { + assert_eq!(System::inc_providers(&0), IncRefStatus::Created); assert_ok!(System::insert(&0, 42)); assert!(!System::is_provider_required(&0)); @@ -56,6 +57,7 @@ fn stored_map_works() { assert!(Killed::get().is_empty()); assert_ok!(System::remove(&0)); + assert_ok!(System::dec_providers(&0)); assert_eq!(Killed::get(), vec![0u64]); }); } @@ -234,17 +236,23 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { .get(DispatchClass::Normal) .base_extrinsic; let pre_info = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; - System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); - System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); + System::note_applied_extrinsic(&Ok(from_actual_ref_time(Some(300))), pre_info); + System::note_applied_extrinsic(&Ok(from_actual_ref_time(Some(1000))), pre_info); System::note_applied_extrinsic( // values over the pre info should be capped at pre dispatch value - &Ok(Some(1200).into()), + &Ok(from_actual_ref_time(Some(1200))), + pre_info, + ); + System::note_applied_extrinsic( + &Ok(from_post_weight_info(Some(2_500_000), Pays::Yes)), pre_info, ); - System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::Yes).into()), pre_info); System::note_applied_extrinsic(&Ok(Pays::No.into()), pre_info); - System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::No).into()), pre_info); - System::note_applied_extrinsic(&Ok((Some(500), Pays::No).into()), pre_info); + System::note_applied_extrinsic( + &Ok(from_post_weight_info(Some(2_500_000), Pays::No)), + pre_info, + ); + System::note_applied_extrinsic(&Ok(from_post_weight_info(Some(500), Pays::No)), pre_info); System::note_applied_extrinsic( &Err(DispatchError::BadOrigin.with_weight(Weight::from_parts(999, 0))), pre_info, @@ -287,7 +295,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { class: DispatchClass::Operational, ..Default::default() }; - System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); + System::note_applied_extrinsic(&Ok(from_actual_ref_time(Some(300))), pre_info); let got = System::events(); let want = vec![ @@ -581,7 +589,7 @@ fn assert_runtime_updated_digest(num: usize) { #[test] fn set_code_with_real_wasm_blob() { - let executor = 
substrate_test_runtime_client::new_native_executor(); + let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { @@ -605,7 +613,7 @@ fn set_code_with_real_wasm_blob() { #[test] fn runtime_upgraded_with_set_storage() { - let executor = substrate_test_runtime_client::new_native_executor(); + let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { @@ -820,3 +828,13 @@ fn do_not_allow_for_storing_txs_when_queue_is_full() { System::enqueue_txs(RuntimeOrigin::none(), dummy_txs.clone()).unwrap(); }); } +pub fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { + PostDispatchInfo { + actual_weight: ref_time.map(|t| Weight::from_all(t)), + pays_fee: Default::default(), + } +} + +pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { + PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), pays_fee } +} diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 812f2d091fcfe..96fd7a1972eb2 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for frame_system //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -64,8 +64,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_018 nanoseconds. - Weight::from_parts(2_091_000, 0) + // Minimum execution time: 2_430_000 picoseconds. + Weight::from_parts(2_522_000, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(362, 0).saturating_mul(b.into())) } @@ -74,10 +74,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_449 nanoseconds. - Weight::from_parts(7_748_000, 0) + // Minimum execution time: 8_738_000 picoseconds. + Weight::from_parts(8_963_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_423, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(1_113, 0).saturating_mul(b.into())) } /// Storage: System Digest (r:1 w:1) /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) @@ -86,9 +86,9 @@ impl WeightInfo for SubstrateWeight { fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `495` - // Minimum execution time: 4_440 nanoseconds. - Weight::from_parts(4_605_000, 495) + // Estimated: `1485` + // Minimum execution time: 5_109_000 picoseconds. + Weight::from_parts(5_336_000, 1485) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -99,10 +99,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_981 nanoseconds. 
- Weight::from_parts(2_114_000, 0) - // Standard Error: 804 - .saturating_add(Weight::from_parts(631_438, 0).saturating_mul(i.into())) + // Minimum execution time: 2_531_000 picoseconds. + Weight::from_parts(2_617_000, 0) + // Standard Error: 861 + .saturating_add(Weight::from_parts(761_465, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -112,10 +112,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_061 nanoseconds. - Weight::from_parts(2_153_000, 0) - // Standard Error: 952 - .saturating_add(Weight::from_parts(502_629, 0).saturating_mul(i.into())) + // Minimum execution time: 2_631_000 picoseconds. + Weight::from_parts(2_717_000, 0) + // Standard Error: 1_097 + .saturating_add(Weight::from_parts(570_803, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -125,10 +125,11 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `116 + p * (69 ±0)` // Estimated: `121 + p * (70 ±0)` - // Minimum execution time: 4_026 nanoseconds. - Weight::from_parts(4_174_000, 121) - // Standard Error: 1_148 - .saturating_add(Weight::from_parts(1_093_099, 0).saturating_mul(p.into())) + // Minimum execution time: 4_692_000 picoseconds. + Weight::from_parts(4_789_000, 121) + // Standard Error: 1_013 + .saturating_add(Weight::from_parts(1_143_616, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } @@ -141,8 +142,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_018 nanoseconds. - Weight::from_parts(2_091_000, 0) + // Minimum execution time: 2_430_000 picoseconds. + Weight::from_parts(2_522_000, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(362, 0).saturating_mul(b.into())) } @@ -151,10 +152,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_449 nanoseconds. - Weight::from_parts(7_748_000, 0) + // Minimum execution time: 8_738_000 picoseconds. + Weight::from_parts(8_963_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_423, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(1_113, 0).saturating_mul(b.into())) } /// Storage: System Digest (r:1 w:1) /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) @@ -163,9 +164,9 @@ impl WeightInfo for () { fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `495` - // Minimum execution time: 4_440 nanoseconds. - Weight::from_parts(4_605_000, 495) + // Estimated: `1485` + // Minimum execution time: 5_109_000 picoseconds. + Weight::from_parts(5_336_000, 1485) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -176,10 +177,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_981 nanoseconds. 
- Weight::from_parts(2_114_000, 0) - // Standard Error: 804 - .saturating_add(Weight::from_parts(631_438, 0).saturating_mul(i.into())) + // Minimum execution time: 2_531_000 picoseconds. + Weight::from_parts(2_617_000, 0) + // Standard Error: 861 + .saturating_add(Weight::from_parts(761_465, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -189,10 +190,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_061 nanoseconds. - Weight::from_parts(2_153_000, 0) - // Standard Error: 952 - .saturating_add(Weight::from_parts(502_629, 0).saturating_mul(i.into())) + // Minimum execution time: 2_631_000 picoseconds. + Weight::from_parts(2_717_000, 0) + // Standard Error: 1_097 + .saturating_add(Weight::from_parts(570_803, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -202,10 +203,11 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `116 + p * (69 ±0)` // Estimated: `121 + p * (70 ±0)` - // Minimum execution time: 4_026 nanoseconds. - Weight::from_parts(4_174_000, 121) - // Standard Error: 1_148 - .saturating_add(Weight::from_parts(1_093_099, 0).saturating_mul(p.into())) + // Minimum execution time: 4_692_000 picoseconds. + Weight::from_parts(4_789_000, 121) + // Standard Error: 1_013 + .saturating_add(Weight::from_parts(1_143_616, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 84c56da24e9d4..0f28955259cce 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 943f941d55305..1c254bf220076 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_timestamp //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -62,9 +62,9 @@ impl WeightInfo for SubstrateWeight { fn set() -> Weight { // Proof Size summary in bytes: // Measured: `312` - // Estimated: `1006` - // Minimum execution time: 9_106 nanoseconds. - Weight::from_parts(9_258_000, 1006) + // Estimated: `1493` + // Minimum execution time: 10_834_000 picoseconds. + Weight::from_parts(11_099_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -72,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `0` - // Minimum execution time: 3_927 nanoseconds. - Weight::from_parts(4_078_000, 0) + // Minimum execution time: 4_472_000 picoseconds. + Weight::from_parts(4_645_000, 0) } } @@ -86,9 +86,9 @@ impl WeightInfo for () { fn set() -> Weight { // Proof Size summary in bytes: // Measured: `312` - // Estimated: `1006` - // Minimum execution time: 9_106 nanoseconds. - Weight::from_parts(9_258_000, 1006) + // Estimated: `1493` + // Minimum execution time: 10_834_000 picoseconds. + Weight::from_parts(11_099_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -96,7 +96,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `161` // Estimated: `0` - // Minimum execution time: 3_927 nanoseconds. - Weight::from_parts(4_078_000, 0) + // Minimum execution time: 4_472_000 picoseconds. + Weight::from_parts(4_645_000, 0) } } diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml index c7db61613c03a..caed4edec5f48 100644 --- a/frame/tips/Cargo.toml +++ b/frame/tips/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index fa4ec15014292..b2d97de18312f 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -97,6 +97,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { static TenToFourteenTestValue: Vec = vec![10,11,12,13,14]; diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index 2b265d7879712..ec5eef8c8bd81 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_tips //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -67,11 +67,11 @@ impl WeightInfo for SubstrateWeight { fn report_awesome(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` - // Estimated: `4958` - // Minimum execution time: 23_262 nanoseconds. - Weight::from_parts(24_104_224, 4958) - // Standard Error: 148 - .saturating_add(Weight::from_parts(1_963, 0).saturating_mul(r.into())) + // Estimated: `3469` + // Minimum execution time: 30_728_000 picoseconds. + Weight::from_parts(31_794_924, 3469) + // Standard Error: 171 + .saturating_add(Weight::from_parts(1_020, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -81,10 +81,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) fn retract_tip() -> Weight { // Proof Size summary in bytes: - // Measured: `253` - // Estimated: `2981` - // Minimum execution time: 22_282 nanoseconds. - Weight::from_parts(22_737_000, 2981) + // Measured: `221` + // Estimated: `3686` + // Minimum execution time: 29_183_000 picoseconds. + Weight::from_parts(30_017_000, 3686) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -98,17 +98,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `558 + t * (64 ±0)` - // Estimated: `4644 + t * (192 ±0)` - // Minimum execution time: 18_382 nanoseconds. - Weight::from_parts(18_195_288, 4644) - // Standard Error: 103 - .saturating_add(Weight::from_parts(2_096, 0).saturating_mul(r.into())) - // Standard Error: 2_469 - .saturating_add(Weight::from_parts(97_092, 0).saturating_mul(t.into())) + // Measured: `526 + t * (64 ±0)` + // Estimated: `3991 + t * (64 ±0)` + // Minimum execution time: 20_726_000 picoseconds. + Weight::from_parts(20_964_411, 3991) + // Standard Error: 119 + .saturating_add(Weight::from_parts(1_230, 0).saturating_mul(r.into())) + // Standard Error: 2_837 + .saturating_add(Weight::from_parts(81_831, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) } /// Storage: Elections Members (r:1 w:0) /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) @@ -117,15 +117,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `811 + t * (112 ±0)` - // Estimated: `4592 + t * (224 ±0)` - // Minimum execution time: 13_564 nanoseconds. - Weight::from_parts(13_867_280, 4592) - // Standard Error: 4_245 - .saturating_add(Weight::from_parts(206_587, 0).saturating_mul(t.into())) + // Measured: `747 + t * (112 ±0)` + // Estimated: `4212 + t * (112 ±0)` + // Minimum execution time: 16_048_000 picoseconds. 
+ Weight::from_parts(16_694_981, 4212) + // Standard Error: 4_480 + .saturating_add(Weight::from_parts(179_723, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 224).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } /// Storage: Tips Tips (r:1 w:1) /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) @@ -138,15 +138,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `850 + t * (112 ±0)` - // Estimated: `8096 + t * (336 ±0)` - // Minimum execution time: 39_902 nanoseconds. - Weight::from_parts(40_747_650, 8096) - // Standard Error: 5_322 - .saturating_add(Weight::from_parts(144_298, 0).saturating_mul(t.into())) + // Measured: `786 + t * (112 ±0)` + // Estimated: `4242 + t * (112 ±0)` + // Minimum execution time: 61_319_000 picoseconds. + Weight::from_parts(62_217_195, 4242) + // Standard Error: 6_721 + .saturating_add(Weight::from_parts(186_620, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 336).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } /// Storage: Tips Tips (r:1 w:1) /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) @@ -155,12 +155,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3077` - // Minimum execution time: 13_511 nanoseconds. - Weight::from_parts(14_114_101, 3077) - // Standard Error: 1_815 - .saturating_add(Weight::from_parts(7_825, 0).saturating_mul(t.into())) + // Measured: `269` + // Estimated: `3734` + // Minimum execution time: 15_397_000 picoseconds. + Weight::from_parts(15_942_494, 3734) + // Standard Error: 1_657 + .saturating_add(Weight::from_parts(4_128, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -176,11 +176,11 @@ impl WeightInfo for () { fn report_awesome(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` - // Estimated: `4958` - // Minimum execution time: 23_262 nanoseconds. - Weight::from_parts(24_104_224, 4958) - // Standard Error: 148 - .saturating_add(Weight::from_parts(1_963, 0).saturating_mul(r.into())) + // Estimated: `3469` + // Minimum execution time: 30_728_000 picoseconds. + Weight::from_parts(31_794_924, 3469) + // Standard Error: 171 + .saturating_add(Weight::from_parts(1_020, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -190,10 +190,10 @@ impl WeightInfo for () { /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) fn retract_tip() -> Weight { // Proof Size summary in bytes: - // Measured: `253` - // Estimated: `2981` - // Minimum execution time: 22_282 nanoseconds. - Weight::from_parts(22_737_000, 2981) + // Measured: `221` + // Estimated: `3686` + // Minimum execution time: 29_183_000 picoseconds. 
+ Weight::from_parts(30_017_000, 3686) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -207,17 +207,17 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `558 + t * (64 ±0)` - // Estimated: `4644 + t * (192 ±0)` - // Minimum execution time: 18_382 nanoseconds. - Weight::from_parts(18_195_288, 4644) - // Standard Error: 103 - .saturating_add(Weight::from_parts(2_096, 0).saturating_mul(r.into())) - // Standard Error: 2_469 - .saturating_add(Weight::from_parts(97_092, 0).saturating_mul(t.into())) + // Measured: `526 + t * (64 ±0)` + // Estimated: `3991 + t * (64 ±0)` + // Minimum execution time: 20_726_000 picoseconds. + Weight::from_parts(20_964_411, 3991) + // Standard Error: 119 + .saturating_add(Weight::from_parts(1_230, 0).saturating_mul(r.into())) + // Standard Error: 2_837 + .saturating_add(Weight::from_parts(81_831, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 192).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) } /// Storage: Elections Members (r:1 w:0) /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) @@ -226,15 +226,15 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `811 + t * (112 ±0)` - // Estimated: `4592 + t * (224 ±0)` - // Minimum execution time: 13_564 nanoseconds. - Weight::from_parts(13_867_280, 4592) - // Standard Error: 4_245 - .saturating_add(Weight::from_parts(206_587, 0).saturating_mul(t.into())) + // Measured: `747 + t * (112 ±0)` + // Estimated: `4212 + t * (112 ±0)` + // Minimum execution time: 16_048_000 picoseconds. + Weight::from_parts(16_694_981, 4212) + // Standard Error: 4_480 + .saturating_add(Weight::from_parts(179_723, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 224).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } /// Storage: Tips Tips (r:1 w:1) /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) @@ -247,15 +247,15 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `850 + t * (112 ±0)` - // Estimated: `8096 + t * (336 ±0)` - // Minimum execution time: 39_902 nanoseconds. - Weight::from_parts(40_747_650, 8096) - // Standard Error: 5_322 - .saturating_add(Weight::from_parts(144_298, 0).saturating_mul(t.into())) + // Measured: `786 + t * (112 ±0)` + // Estimated: `4242 + t * (112 ±0)` + // Minimum execution time: 61_319_000 picoseconds. 
+ Weight::from_parts(62_217_195, 4242) + // Standard Error: 6_721 + .saturating_add(Weight::from_parts(186_620, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 336).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } /// Storage: Tips Tips (r:1 w:1) /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) @@ -264,12 +264,12 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3077` - // Minimum execution time: 13_511 nanoseconds. - Weight::from_parts(14_114_101, 3077) - // Standard Error: 1_815 - .saturating_add(Weight::from_parts(7_825, 0).saturating_mul(t.into())) + // Measured: `269` + // Estimated: `3734` + // Minimum execution time: 15_397_000 picoseconds. + Weight::from_parts(15_942_494, 3734) + // Standard Error: 1_657 + .saturating_add(Weight::from_parts(4_128, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/transaction-payment-mangata/asset-tx-payment/src/lib.rs b/frame/transaction-payment-mangata/asset-tx-payment/src/lib.rs index b60841c678bca..c0c4605777e83 100644 --- a/frame/transaction-payment-mangata/asset-tx-payment/src/lib.rs +++ b/frame/transaction-payment-mangata/asset-tx-payment/src/lib.rs @@ -42,7 +42,7 @@ use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, traits::{ tokens::{ - fungibles::{Balanced, CreditOf, Inspect}, + fungibles::{Balanced, Credit, Inspect}, WithdrawConsequence, }, IsType, @@ -102,7 +102,7 @@ pub enum InitialPayment { /// The initial fee was payed in the native currency. Native(LiquidityInfoOf), /// The initial fee was payed in an asset. - Asset(CreditOf), + Asset(Credit), } pub use pallet::*; @@ -158,7 +158,7 @@ where AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, - CreditOf: IsType>, + Credit: IsType>, { /// Utility constructor. Used only in client/factory code. 
pub fn from(tip: BalanceOf, asset_id: Option>) -> Self { @@ -217,7 +217,7 @@ where AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + From + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, - CreditOf: IsType>, + Credit: IsType>, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; type AccountId = T::AccountId; diff --git a/frame/transaction-payment-mangata/asset-tx-payment/src/payment.rs b/frame/transaction-payment-mangata/asset-tx-payment/src/payment.rs index 80ff4e40dcffa..10222fe3699f6 100644 --- a/frame/transaction-payment-mangata/asset-tx-payment/src/payment.rs +++ b/frame/transaction-payment-mangata/asset-tx-payment/src/payment.rs @@ -20,12 +20,13 @@ use crate::Config; use codec::FullCodec; use frame_support::{ traits::{ - fungibles::{Balanced, CreditOf, Inspect}, - tokens::BalanceConversion, + fungibles::{Balanced, Credit, Inspect}, + tokens::ConversionToAssetBalance, }, unsigned::TransactionValidityError, }; use scale_info::TypeInfo; +use frame_support::traits::tokens::{Fortitude, Precision, Preservation}; use sp_runtime::{ traits::{ AtLeast32BitUnsigned, DispatchInfoOf, MaybeSerializeDeserialize, One, PostDispatchInfoOf, @@ -81,17 +82,17 @@ pub trait HandleCredit> { /// Implement to determine what to do with the withdrawn asset fees. /// Default for `CreditOf` from the assets pallet is to burn and /// decrease total issuance. - fn handle_credit(credit: CreditOf); + fn handle_credit(credit: Credit); } /// Default implementation that just drops the credit according to the `OnDrop` in the underlying /// imbalance type. impl> HandleCredit for () { - fn handle_credit(_credit: CreditOf) {} + fn handle_credit(_credit: Credit) {} } /// Implements the asset transaction for a balance to asset converter (implementing -/// [`BalanceConversion`]) and a credit handler (implementing [`HandleCredit`]). +/// [`ConversionToAssetBalance`]) and a credit handler (implementing [`HandleCredit`]). /// /// The credit handler is given the complete fee in terms of the asset used for the transaction. pub struct FungiblesAdapter(PhantomData<(CON, HC)>); @@ -101,13 +102,13 @@ pub struct FungiblesAdapter(PhantomData<(CON, HC)>); impl OnChargeAssetTransaction for FungiblesAdapter where T: Config, - CON: BalanceConversion, AssetIdOf, AssetBalanceOf>, + CON: ConversionToAssetBalance, AssetIdOf, AssetBalanceOf>, HC: HandleCredit, AssetIdOf: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo, { type Balance = BalanceOf; type AssetId = AssetIdOf; - type LiquidityInfo = CreditOf; + type LiquidityInfo = Credit; /// Withdraw the predicted fee from the transaction origin. /// @@ -132,7 +133,8 @@ where if !matches!(can_withdraw, WithdrawConsequence::Success) { return Err(InvalidTransaction::Payment.into()) } - >::withdraw(asset_id, who, converted_fee) + // TODO: FIX WE NEED CORRECT VALUES + >::withdraw(asset_id, who, converted_fee, Precision::Exact, Preservation::Preserve, Fortitude::Polite) .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) } diff --git a/frame/transaction-payment-mangata/rpc/runtime-api/src/lib.rs b/frame/transaction-payment-mangata/rpc/runtime-api/src/lib.rs index 1bb9c5ec9323a..855aa99147a3b 100644 --- a/frame/transaction-payment-mangata/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment-mangata/rpc/runtime-api/src/lib.rs @@ -30,7 +30,7 @@ sp_api::decl_runtime_apis! 
{ Balance: Codec + MaybeDisplay, { #[changed_in(2)] - fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; + fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } diff --git a/frame/transaction-payment-mangata/rpc/src/lib.rs b/frame/transaction-payment-mangata/rpc/src/lib.rs index 1d7d4550328ad..a7c8ca763a0ce 100644 --- a/frame/transaction-payment-mangata/rpc/src/lib.rs +++ b/frame/transaction-payment-mangata/rpc/src/lib.rs @@ -86,7 +86,7 @@ impl From for i32 { impl TransactionPaymentApiServer< ::Hash, - RuntimeDispatchInfo, + RuntimeDispatchInfo, > for TransactionPayment where Block: BlockT, @@ -98,7 +98,7 @@ where &self, encoded_xt: Bytes, at: Option, - ) -> RpcResult> { + ) -> RpcResult> { let api = self.client.runtime_api(); let at = self.client.info().best_hash; @@ -141,7 +141,7 @@ where .map_err(|e| map_err(e, "Unable to query dispatch info."))?; Ok(RuntimeDispatchInfo { - weight: sp_weights::OldWeight(res.weight.ref_time()), + weight: res.weight, class: res.class, partial_fee: res.partial_fee, }) diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 0c98796e4d1a7..b1e22d3e9145a 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index 324e17a0808a4..574eb9e23742b 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -26,7 +26,7 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " # Other dependencies codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } [dev-dependencies] diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs index b9cd6ef995578..4e83d8b489b70 100644 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -42,7 +42,7 @@ use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, traits::{ tokens::{ - fungibles::{Balanced, CreditOf, Inspect}, + fungibles::{Balanced, Credit, Inspect}, WithdrawConsequence, }, IsType, @@ -104,7 +104,7 @@ pub enum InitialPayment { /// The initial fee was payed in the native currency. Native(LiquidityInfoOf), /// The initial fee was payed in an asset. 
- Asset(CreditOf), + Asset(Credit), } pub use pallet::*; @@ -159,7 +159,7 @@ where AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, - CreditOf: IsType>, + Credit: IsType>, { /// Utility constructor. Used only in client/factory code. pub fn from(tip: BalanceOf, asset_id: Option>) -> Self { @@ -216,7 +216,7 @@ where AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + From + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, - CreditOf: IsType>, + Credit: IsType>, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; type AccountId = T::AccountId; diff --git a/frame/transaction-payment/asset-tx-payment/src/mock.rs b/frame/transaction-payment/asset-tx-payment/src/mock.rs index cd5147c3acd13..be7baaf2b370e 100644 --- a/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -120,6 +120,10 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl WeightToFeeT for WeightToFee { @@ -193,7 +197,7 @@ impl pallet_authorship::Config for Runtime { pub struct CreditToBlockAuthor; impl HandleCredit for CreditToBlockAuthor { - fn handle_credit(credit: CreditOf) { + fn handle_credit(credit: Credit) { if let Some(author) = pallet_authorship::Pallet::::author() { // What to do in case paying the author fails (e.g. because `fee < min_balance`) // default: drop the result which will trigger the `OnDrop` of the imbalance. diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs index e273ffcfc565c..49e78fb8bce01 100644 --- a/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -20,8 +20,11 @@ use crate::Config; use codec::FullCodec; use frame_support::{ traits::{ - fungibles::{Balanced, CreditOf, Inspect}, - tokens::{Balance, BalanceConversion}, + fungibles::{Balanced, Credit, Inspect}, + tokens::{ + Balance, ConversionToAssetBalance, Fortitude::Polite, Precision::Exact, + Preservation::Protect, + }, }, unsigned::TransactionValidityError, }; @@ -75,17 +78,17 @@ pub trait HandleCredit> { /// Implement to determine what to do with the withdrawn asset fees. /// Default for `CreditOf` from the assets pallet is to burn and /// decrease total issuance. - fn handle_credit(credit: CreditOf); + fn handle_credit(credit: Credit); } /// Default implementation that just drops the credit according to the `OnDrop` in the underlying /// imbalance type. impl> HandleCredit for () { - fn handle_credit(_credit: CreditOf) {} + fn handle_credit(_credit: Credit) {} } /// Implements the asset transaction for a balance to asset converter (implementing -/// [`BalanceConversion`]) and a credit handler (implementing [`HandleCredit`]). +/// [`ConversionToAssetBalance`]) and a credit handler (implementing [`HandleCredit`]). /// /// The credit handler is given the complete fee in terms of the asset used for the transaction. 
pub struct FungiblesAdapter(PhantomData<(CON, HC)>); @@ -95,13 +98,13 @@ pub struct FungiblesAdapter(PhantomData<(CON, HC)>); impl OnChargeAssetTransaction for FungiblesAdapter where T: Config, - CON: BalanceConversion, AssetIdOf, AssetBalanceOf>, + CON: ConversionToAssetBalance, AssetIdOf, AssetBalanceOf>, HC: HandleCredit, AssetIdOf: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo, { type Balance = BalanceOf; type AssetId = AssetIdOf; - type LiquidityInfo = CreditOf; + type LiquidityInfo = Credit; /// Withdraw the predicted fee from the transaction origin. /// @@ -126,8 +129,15 @@ where if !matches!(can_withdraw, WithdrawConsequence::Success) { return Err(InvalidTransaction::Payment.into()) } - >::withdraw(asset_id, who, converted_fee) - .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) + >::withdraw( + asset_id, + who, + converted_fee, + Exact, + Protect, + Polite, + ) + .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) } /// Hand the fee and the tip over to the `[HandleCredit]` implementation. diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index cd9891825db32..2fee9c849f4b4 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -28,7 +28,7 @@ use pallet_balances::Call as BalancesCall; use sp_runtime::traits::StaticLookup; const CALL: &::RuntimeCall = - &RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); pub struct ExtBuilder { balance_factor: u64, diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index ae4a25bf1711d..0d9c3338250e7 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -25,12 +25,10 @@ use sp_runtime::traits::MaybeDisplay; pub use pallet_transaction_payment::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; sp_api::decl_runtime_apis! 
{ - #[api_version(3)] + #[api_version(4)] pub trait TransactionPaymentApi where Balance: Codec + MaybeDisplay, { - #[changed_in(2)] - fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; fn query_weight_to_fee(weight: sp_weights::Weight) -> Balance; diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 1207a8cbf747a..7f8ed4b802675 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -26,7 +26,7 @@ use jsonrpsee::{ types::error::{CallError, ErrorCode, ErrorObject}, }; use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::Bytes; use sp_rpc::number::NumberOrHex; @@ -81,7 +81,7 @@ impl From for i32 { impl TransactionPaymentApiServer< ::Hash, - RuntimeDispatchInfo, + RuntimeDispatchInfo, > for TransactionPayment where Block: BlockT, @@ -93,7 +93,7 @@ where &self, encoded_xt: Bytes, at: Option, - ) -> RpcResult> { + ) -> RpcResult> { let api = self.client.runtime_api(); let at_hash = at.unwrap_or_else(|| self.client.info().best_hash); @@ -115,32 +115,15 @@ where )) } - let api_version = api - .api_version::>(at_hash) - .map_err(|e| map_err(e, "Failed to get transaction payment runtime api version"))? - .ok_or_else(|| { - CallError::Custom(ErrorObject::owned( - Error::RuntimeError.into(), - "Transaction payment runtime api wasn't found in the runtime", - None::, - )) - })?; - - if api_version < 2 { - #[allow(deprecated)] - api.query_info_before_version_2(at_hash, uxt, encoded_len) - .map_err(|e| map_err(e, "Unable to query dispatch info.").into()) - } else { - let res = api - .query_info(at_hash, uxt, encoded_len) - .map_err(|e| map_err(e, "Unable to query dispatch info."))?; - - Ok(RuntimeDispatchInfo { - weight: sp_weights::OldWeight(res.weight.ref_time()), - class: res.class, - partial_fee: res.partial_fee, - }) - } + let res = api + .query_info(at_hash, uxt, encoded_len) + .map_err(|e| map_err(e, "Unable to query dispatch info."))?; + + Ok(RuntimeDispatchInfo { + weight: res.weight, + class: res.class, + partial_fee: res.partial_fee, + }) } fn query_fee_details( diff --git a/frame/transaction-payment/src/mock.rs b/frame/transaction-payment/src/mock.rs index bfb9a194f8cb2..741f094481c38 100644 --- a/frame/transaction-payment/src/mock.rs +++ b/frame/transaction-payment/src/mock.rs @@ -49,7 +49,7 @@ frame_support::construct_runtime!( ); pub(crate) const CALL: &::RuntimeCall = - &RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); parameter_types! 
{ pub(crate) static ExtrinsicBaseWeight: Weight = Weight::zero(); @@ -113,6 +113,10 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl WeightToFeeT for WeightToFee { diff --git a/frame/transaction-payment/src/tests.rs b/frame/transaction-payment/src/tests.rs index 218f50e1cc95f..d5109609e2975 100644 --- a/frame/transaction-payment/src/tests.rs +++ b/frame/transaction-payment/src/tests.rs @@ -285,7 +285,7 @@ fn signed_ext_length_fee_is_also_updated_per_congestion() { #[test] fn query_info_and_fee_details_works() { - let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); let origin = 111111; let extra = (); let xt = TestXt::new(call.clone(), Some((origin, extra))); @@ -348,7 +348,7 @@ fn query_info_and_fee_details_works() { #[test] fn query_call_info_and_fee_details_works() { - let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); let info = call.get_dispatch_info(); let encoded_call = call.encode(); let len = encoded_call.len() as u32; @@ -530,7 +530,11 @@ fn refund_does_not_recreate_account() { assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); // kill the account between pre and post dispatch - assert_ok!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2))); + assert_ok!(Balances::transfer_allow_death( + Some(2).into(), + 3, + Balances::free_balance(2) + )); assert_eq!(Balances::free_balance(2), 0); assert_ok!(ChargeTransactionPayment::::post_dispatch( diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index 527ff4f240169..aa19ce3fe2840 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -3,7 +3,7 @@ name = "pallet-transaction-storage" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" -license = "Unlicense" +license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Storage chain pallet" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "4.1", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs index f54c134d74a0e..3a87d8eaea707 100644 --- a/frame/transaction-storage/src/mock.rs +++ b/frame/transaction-storage/src/mock.rs @@ -84,6 +84,10 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_transaction_storage::Config for Test { diff --git a/frame/transaction-storage/src/weights.rs 
b/frame/transaction-storage/src/weights.rs index 896e1ebab43aa..5103ac375e97a 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_transaction_storage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -66,11 +66,11 @@ impl WeightInfo for SubstrateWeight { fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `176` - // Estimated: `38383` - // Minimum execution time: 28_702 nanoseconds. - Weight::from_parts(29_164_000, 38383) + // Estimated: `38351` + // Minimum execution time: 36_983_000 picoseconds. + Weight::from_parts(37_296_000, 38351) // Standard Error: 2 - .saturating_add(Weight::from_parts(5_601, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(4_908, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -84,10 +84,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `358` - // Estimated: `77744` - // Minimum execution time: 36_219 nanoseconds. - Weight::from_parts(36_979_000, 77744) + // Measured: `326` + // Estimated: `40351` + // Minimum execution time: 44_637_000 picoseconds. + Weight::from_parts(45_464_000, 40351) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -103,10 +103,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) fn check_proof_max() -> Weight { // Proof Size summary in bytes: - // Measured: `37177` - // Estimated: `43382` - // Minimum execution time: 55_793 nanoseconds. - Weight::from_parts(57_128_000, 43382) + // Measured: `37145` + // Estimated: `40351` + // Minimum execution time: 59_653_000 picoseconds. + Weight::from_parts(61_068_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,11 +124,11 @@ impl WeightInfo for () { fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `176` - // Estimated: `38383` - // Minimum execution time: 28_702 nanoseconds. - Weight::from_parts(29_164_000, 38383) + // Estimated: `38351` + // Minimum execution time: 36_983_000 picoseconds. + Weight::from_parts(37_296_000, 38351) // Standard Error: 2 - .saturating_add(Weight::from_parts(5_601, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(4_908, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -142,10 +142,10 @@ impl WeightInfo for () { /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `358` - // Estimated: `77744` - // Minimum execution time: 36_219 nanoseconds. 
- Weight::from_parts(36_979_000, 77744) + // Measured: `326` + // Estimated: `40351` + // Minimum execution time: 44_637_000 picoseconds. + Weight::from_parts(45_464_000, 40351) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -161,10 +161,10 @@ impl WeightInfo for () { /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) fn check_proof_max() -> Weight { // Proof Size summary in bytes: - // Measured: `37177` - // Estimated: `43382` - // Minimum execution time: 55_793 nanoseconds. - Weight::from_parts(57_128_000, 43382) + // Measured: `37145` + // Estimated: `40351` + // Minimum execution time: 59_653_000 picoseconds. + Weight::from_parts(61_068_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 23fcb7944bfb7..f3c06735f8871 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "max-encoded-len", ] } impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 24d2d01f92f8a..67b21ff6252a8 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -90,6 +90,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_utility::Config for Test { diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index abf461c622a0f..edf1a674f73ff 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_treasury //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -68,9 +68,9 @@ impl WeightInfo for SubstrateWeight { fn spend() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `1396` - // Minimum execution time: 14_277 nanoseconds. - Weight::from_parts(14_749_000, 1396) + // Estimated: `1887` + // Minimum execution time: 16_592_000 picoseconds. 
+ Weight::from_parts(16_959_000, 1887) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -80,10 +80,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn propose_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `175` - // Estimated: `499` - // Minimum execution time: 23_297 nanoseconds. - Weight::from_parts(23_585_000, 499) + // Measured: `143` + // Estimated: `1489` + // Minimum execution time: 29_742_000 picoseconds. + Weight::from_parts(30_359_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -93,10 +93,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn reject_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `365` - // Estimated: `5186` - // Minimum execution time: 23_996 nanoseconds. - Weight::from_parts(24_548_000, 5186) + // Measured: `301` + // Estimated: `3593` + // Minimum execution time: 31_248_000 picoseconds. + Weight::from_parts(31_882_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -107,12 +107,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `501 + p * (8 ±0)` - // Estimated: `3480` - // Minimum execution time: 9_366 nanoseconds. - Weight::from_parts(11_731_455, 3480) - // Standard Error: 761 - .saturating_add(Weight::from_parts(21_665, 0).saturating_mul(p.into())) + // Measured: `470 + p * (8 ±0)` + // Estimated: `3573` + // Minimum execution time: 10_441_000 picoseconds. + Weight::from_parts(13_061_079, 3573) + // Standard Error: 877 + .saturating_add(Weight::from_parts(26_940, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -121,9 +121,9 @@ impl WeightInfo for SubstrateWeight { fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `127` - // Estimated: `897` - // Minimum execution time: 7_012 nanoseconds. - Weight::from_parts(7_270_000, 897) + // Estimated: `1887` + // Minimum execution time: 7_935_000 picoseconds. + Weight::from_parts(8_153_000, 1887) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -140,17 +140,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 100]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `415 + p * (314 ±0)` - // Estimated: `2305 + p * (7789 ±0)` - // Minimum execution time: 37_834 nanoseconds. - Weight::from_parts(47_496_917, 2305) - // Standard Error: 12_505 - .saturating_add(Weight::from_parts(26_902_474, 0).saturating_mul(p.into())) + // Measured: `387 + p * (251 ±0)` + // Estimated: `1887 + p * (5206 ±0)` + // Minimum execution time: 45_306_000 picoseconds. 
+ Weight::from_parts(53_639_830, 1887) + // Standard Error: 32_330 + .saturating_add(Weight::from_parts(38_930_307, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 7789).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } } @@ -165,9 +165,9 @@ impl WeightInfo for () { fn spend() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `1396` - // Minimum execution time: 14_277 nanoseconds. - Weight::from_parts(14_749_000, 1396) + // Estimated: `1887` + // Minimum execution time: 16_592_000 picoseconds. + Weight::from_parts(16_959_000, 1887) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -177,10 +177,10 @@ impl WeightInfo for () { /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn propose_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `175` - // Estimated: `499` - // Minimum execution time: 23_297 nanoseconds. - Weight::from_parts(23_585_000, 499) + // Measured: `143` + // Estimated: `1489` + // Minimum execution time: 29_742_000 picoseconds. + Weight::from_parts(30_359_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -190,10 +190,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn reject_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `365` - // Estimated: `5186` - // Minimum execution time: 23_996 nanoseconds. - Weight::from_parts(24_548_000, 5186) + // Measured: `301` + // Estimated: `3593` + // Minimum execution time: 31_248_000 picoseconds. + Weight::from_parts(31_882_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -204,12 +204,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `501 + p * (8 ±0)` - // Estimated: `3480` - // Minimum execution time: 9_366 nanoseconds. - Weight::from_parts(11_731_455, 3480) - // Standard Error: 761 - .saturating_add(Weight::from_parts(21_665, 0).saturating_mul(p.into())) + // Measured: `470 + p * (8 ±0)` + // Estimated: `3573` + // Minimum execution time: 10_441_000 picoseconds. + Weight::from_parts(13_061_079, 3573) + // Standard Error: 877 + .saturating_add(Weight::from_parts(26_940, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -218,9 +218,9 @@ impl WeightInfo for () { fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `127` - // Estimated: `897` - // Minimum execution time: 7_012 nanoseconds. - Weight::from_parts(7_270_000, 897) + // Estimated: `1887` + // Minimum execution time: 7_935_000 picoseconds. + Weight::from_parts(8_153_000, 1887) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -237,16 +237,16 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 100]`. 
fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `415 + p * (314 ±0)` - // Estimated: `2305 + p * (7789 ±0)` - // Minimum execution time: 37_834 nanoseconds. - Weight::from_parts(47_496_917, 2305) - // Standard Error: 12_505 - .saturating_add(Weight::from_parts(26_902_474, 0).saturating_mul(p.into())) + // Measured: `387 + p * (251 ±0)` + // Estimated: `1887 + p * (5206 ±0)` + // Minimum execution time: 45_306_000 picoseconds. + Weight::from_parts(53_639_830, 1887) + // Standard Error: 32_330 + .saturating_add(Weight::from_parts(38_930_307, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 7789).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } } diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml index a01ea60d8c3e5..f88a862daba48 100644 --- a/frame/uniques/Cargo.toml +++ b/frame/uniques/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/uniques/README.md b/frame/uniques/README.md index 8a91a558b5b5f..6cdbcf79f1c95 100644 --- a/frame/uniques/README.md +++ b/frame/uniques/README.md @@ -4,67 +4,71 @@ A simple, secure module for dealing with non-fungible assets. ## Overview -The Uniques module provides functionality for asset management of non-fungible asset classes, including: +The Uniques module provides functionality for non-fungible tokens' management, including: -* Asset Issuance -* Asset Transfer -* Asset Destruction +* Collection Creation +* Item Minting +* Item Transfers +* Item Trading methods +* Attributes Management +* Item Burning -To use it in your runtime, you need to implement the assets [`uniques::Config`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/trait.Config.html). +To use it in your runtime, you need to implement [`uniques::Config`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/trait.Config.html). The supported dispatchable functions are documented in the [`uniques::Call`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/enum.Call.html) enum. ### Terminology -* **Asset issuance:** The creation of a new asset instance. -* **Asset transfer:** The action of transferring an asset instance from one account to another. -* **Asset burning:** The destruction of an asset instance. -* **Non-fungible asset:** An asset for which each unit has unique characteristics. There is exactly - one instance of such an asset in existence and there is exactly one owning account. +* **Collection creation:** The creation of a new collection. 
+* **Item minting:** The action of creating a new item within a collection. +* **Item transfer:** The action of sending an item from one account to another. +* **Item burning:** The destruction of an item. +* **Non-fungible token (NFT):** An item for which each unit has unique characteristics. There is exactly + one instance of such an item in existence and there is exactly one owning account. ### Goals The Uniques pallet in Substrate is designed to make the following possible: -* Allow accounts to permissionlessly create asset classes (collections of asset instances). -* Allow a named (permissioned) account to mint and burn unique assets within a class. -* Move asset instances between accounts permissionlessly. -* Allow a named (permissioned) account to freeze and unfreeze unique assets within a - class or the entire class. -* Allow the owner of an asset instance to delegate the ability to transfer the asset to some +* Allow accounts to permissionlessly create NFT collections. +* Allow a named (permissioned) account to mint and burn unique items within a collection. +* Move items between accounts permissionlessly. +* Allow a named (permissioned) account to freeze and unfreeze unique items within a + collection or the entire collection. +* Allow the owner of an item to delegate the ability to transfer the item to some named third-party. ## Interface ### Permissionless dispatchables -* `create`: Create a new asset class by placing a deposit. -* `transfer`: Transfer an asset instance to a new owner. -* `redeposit`: Update the deposit amount of an asset instance, potentially freeing funds. +* `create`: Create a new collection by placing a deposit. +* `transfer`: Transfer an item to a new owner. +* `redeposit`: Update the deposit amount of an item, potentially freeing funds. * `approve_transfer`: Name a delegate who may authorise a transfer. * `cancel_approval`: Revert the effects of a previous `approve_transfer`. ### Permissioned dispatchables -* `destroy`: Destroy an asset class. -* `mint`: Mint a new asset instance within an asset class. -* `burn`: Burn an asset instance within an asset class. -* `freeze`: Prevent an individual asset from being transferred. +* `destroy`: Destroy a collection. +* `mint`: Mint a new item within a collection. +* `burn`: Burn an item within a collection. +* `freeze`: Prevent an individual item from being transferred. * `thaw`: Revert the effects of a previous `freeze`. -* `freeze_class`: Prevent all asset within a class from being transferred. -* `thaw_class`: Revert the effects of a previous `freeze_class`. -* `transfer_ownership`: Alter the owner of an asset class, moving all associated deposits. -* `set_team`: Alter the permissioned accounts of an asset class. +* `freeze_collection`: Prevent all items within a collection from being transferred. +* `thaw_collection`: Revert the effects of a previous `freeze_collection`. +* `transfer_ownership`: Alter the owner of a collection, moving all associated deposits. +* `set_team`: Alter the permissioned accounts of a collection. ### Metadata (permissioned) dispatchables -* `set_attribute`: Set a metadata attribute of an asset instance or class. -* `clear_attribute`: Remove a metadata attribute of an asset instance or class. -* `set_metadata`: Set general metadata of an asset instance. -* `clear_metadata`: Remove general metadata of an asset instance. -* `set_class_metadata`: Set general metadata of an asset class. -* `clear_class_metadata`: Remove general metadata of an asset class. 
+* `set_attribute`: Set an attribute of an item or collection. +* `clear_attribute`: Remove an attribute of an item or collection. +* `set_metadata`: Set general metadata of an item. +* `clear_metadata`: Remove general metadata of an item. +* `set_collection_metadata`: Set general metadata of a collection. +* `clear_collection_metadata`: Remove general metadata of a collection. ### Force (i.e. governance) dispatchables -* `force_create`: Create a new asset class. -* `force_asset_status`: Alter the underlying characteristics of an asset class. +* `force_create`: Create a new collection. +* `force_asset_status`: Alter the underlying characteristics of a collection. Please refer to the [`Call`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/enum.Call.html) enum and its associated variants for documentation on each function. diff --git a/frame/uniques/src/mock.rs b/frame/uniques/src/mock.rs index 67d994d7933c2..bad393a489582 100644 --- a/frame/uniques/src/mock.rs +++ b/frame/uniques/src/mock.rs @@ -82,6 +82,10 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl Config for Test { diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index edf3bd35dcef0..14a55d163e0ff 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -85,10 +85,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `281` - // Estimated: `2653` - // Minimum execution time: 24_242 nanoseconds. - Weight::from_parts(24_682_000, 2653) + // Measured: `249` + // Estimated: `3643` + // Minimum execution time: 32_067_000 picoseconds. + Weight::from_parts(32_817_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -99,9 +99,9 @@ impl WeightInfo for SubstrateWeight { fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `2653` - // Minimum execution time: 14_145 nanoseconds. - Weight::from_parts(14_598_000, 2653) + // Estimated: `3643` + // Minimum execution time: 16_365_000 picoseconds. 
+ Weight::from_parts(16_707_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -109,14 +109,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) /// Storage: Uniques Asset (r:1001 w:1000) /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: Uniques InstanceMetadataOf (r:1000 w:1000) + /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: Uniques Attribute (r:1000 w:1000) + /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) /// Storage: Uniques ClassAccount (r:0 w:1) /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:0 w:1000) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) /// Storage: Uniques ClassMetadataOf (r:0 w:1) /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:0 w:1000) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) /// Storage: Uniques Account (r:0 w:1000) /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) /// Storage: Uniques CollectionMaxSupply (r:0 w:1) @@ -126,22 +126,26 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `450 + n * (108 ±0) + m * (56 ±0) + a * (107 ±0)` - // Estimated: `5250 + n * (2597 ±0)` - // Minimum execution time: 2_404_081 nanoseconds. - Weight::from_parts(2_419_004_000, 5250) - // Standard Error: 27_175 - .saturating_add(Weight::from_parts(8_616_904, 0).saturating_mul(n.into())) - // Standard Error: 27_175 - .saturating_add(Weight::from_parts(334_249, 0).saturating_mul(m.into())) - // Standard Error: 27_175 - .saturating_add(Weight::from_parts(213_038, 0).saturating_mul(a.into())) + // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Estimated: `3643 + a * (2839 ±0) + m * (2583 ±0) + n * (2597 ±0)` + // Minimum execution time: 2_498_918_000 picoseconds. 
+ Weight::from_parts(2_516_809_000, 3643) + // Standard Error: 26_297 + .saturating_add(Weight::from_parts(6_648_035, 0).saturating_mul(n.into())) + // Standard Error: 26_297 + .saturating_add(Weight::from_parts(354_268, 0).saturating_mul(m.into())) + // Standard Error: 26_297 + .saturating_add(Weight::from_parts(223_770, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2839).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } /// Storage: Uniques Asset (r:1 w:1) @@ -154,10 +158,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `7749` - // Minimum execution time: 29_326 nanoseconds. - Weight::from_parts(29_671_000, 7749) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 38_157_000 picoseconds. + Weight::from_parts(38_677_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -171,10 +175,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 30_497 nanoseconds. - Weight::from_parts(30_714_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 39_069_000 picoseconds. + Weight::from_parts(40_442_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -188,10 +192,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 24_212 nanoseconds. - Weight::from_parts(24_681_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 28_085_000 picoseconds. + Weight::from_parts(28_403_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -202,12 +206,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `837 + i * (108 ±0)` - // Estimated: `2653 + i * (2597 ±0)` - // Minimum execution time: 13_633 nanoseconds. - Weight::from_parts(13_797_000, 2653) - // Standard Error: 9_293 - .saturating_add(Weight::from_parts(11_163_914, 0).saturating_mul(i.into())) + // Measured: `805 + i * (76 ±0)` + // Estimated: `3643 + i * (2597 ±0)` + // Minimum execution time: 16_202_000 picoseconds. 
+ Weight::from_parts(16_380_000, 3643) + // Standard Error: 18_639 + .saturating_add(Weight::from_parts(16_047_161, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -220,10 +224,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 17_126 nanoseconds. - Weight::from_parts(17_572_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 20_131_000 picoseconds. + Weight::from_parts(20_535_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -233,10 +237,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 17_209 nanoseconds. - Weight::from_parts(17_411_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 19_895_000 picoseconds. + Weight::from_parts(20_198_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -244,10 +248,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn freeze_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 13_048 nanoseconds. - Weight::from_parts(13_589_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 15_312_000 picoseconds. + Weight::from_parts(15_555_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -255,10 +259,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn thaw_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 12_908 nanoseconds. - Weight::from_parts(13_098_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 15_145_000 picoseconds. + Weight::from_parts(15_371_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -270,10 +274,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `455` - // Estimated: `5180` - // Minimum execution time: 20_629 nanoseconds. - Weight::from_parts(21_448_000, 5180) + // Measured: `423` + // Estimated: `3643` + // Minimum execution time: 23_800_000 picoseconds. 
+ Weight::from_parts(23_991_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -281,10 +285,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 13_740 nanoseconds. - Weight::from_parts(14_020_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 15_929_000 picoseconds. + Weight::from_parts(16_219_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,10 +298,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_item_status() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 16_293 nanoseconds. - Weight::from_parts(16_509_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 18_617_000 picoseconds. + Weight::from_parts(19_016_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -309,10 +313,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `8075` - // Minimum execution time: 33_560 nanoseconds. - Weight::from_parts(34_263_000, 8075) + // Measured: `547` + // Estimated: `3829` + // Minimum execution time: 41_982_000 picoseconds. + Weight::from_parts(42_329_000, 3829) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -324,10 +328,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `1031` - // Estimated: `8075` - // Minimum execution time: 31_955 nanoseconds. - Weight::from_parts(32_447_000, 8075) + // Measured: `936` + // Estimated: `3829` + // Minimum execution time: 39_921_000 picoseconds. + Weight::from_parts(40_499_000, 3829) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -337,10 +341,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `5236` - // Minimum execution time: 25_520 nanoseconds. - Weight::from_parts(25_843_000, 5236) + // Measured: `415` + // Estimated: `3643` + // Minimum execution time: 31_774_000 picoseconds. + Weight::from_parts(32_327_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -350,10 +354,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `5236` - // Minimum execution time: 25_812 nanoseconds. 
- Weight::from_parts(26_141_000, 5236) + // Measured: `547` + // Estimated: `3643` + // Minimum execution time: 32_551_000 picoseconds. + Weight::from_parts(32_891_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -363,10 +367,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `5216` - // Minimum execution time: 25_055 nanoseconds. - Weight::from_parts(25_244_000, 5216) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 33_490_000 picoseconds. + Weight::from_parts(34_617_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -376,10 +380,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `525` - // Estimated: `5216` - // Minimum execution time: 23_311 nanoseconds. - Weight::from_parts(23_682_000, 5216) + // Measured: `461` + // Estimated: `3643` + // Minimum execution time: 31_691_000 picoseconds. + Weight::from_parts(32_042_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -389,10 +393,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 17_709 nanoseconds. - Weight::from_parts(18_308_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 20_738_000 picoseconds. + Weight::from_parts(21_067_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -402,10 +406,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `592` - // Estimated: `5250` - // Minimum execution time: 17_656 nanoseconds. - Weight::from_parts(18_039_000, 5250) + // Measured: `528` + // Estimated: `3643` + // Minimum execution time: 20_404_000 picoseconds. + Weight::from_parts(20_999_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -414,9 +418,9 @@ impl WeightInfo for SubstrateWeight { fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `2527` - // Minimum execution time: 14_615 nanoseconds. - Weight::from_parts(14_931_000, 2527) + // Estimated: `3517` + // Minimum execution time: 17_047_000 picoseconds. + Weight::from_parts(17_307_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -426,10 +430,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `5152` - // Minimum execution time: 14_974 nanoseconds. 
- Weight::from_parts(15_314_000, 5152) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 17_829_000 picoseconds. + Weight::from_parts(18_194_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -439,10 +443,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `358` - // Estimated: `2597` - // Minimum execution time: 15_444 nanoseconds. - Weight::from_parts(15_886_000, 2597) + // Measured: `326` + // Estimated: `3587` + // Minimum execution time: 17_620_000 picoseconds. + Weight::from_parts(17_931_000, 3587) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -456,10 +460,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `703` - // Estimated: `7814` - // Minimum execution time: 35_628 nanoseconds. - Weight::from_parts(35_886_000, 7814) + // Measured: `607` + // Estimated: `3643` + // Minimum execution time: 39_550_000 picoseconds. + Weight::from_parts(40_052_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -473,10 +477,10 @@ impl WeightInfo for () { /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `281` - // Estimated: `2653` - // Minimum execution time: 24_242 nanoseconds. - Weight::from_parts(24_682_000, 2653) + // Measured: `249` + // Estimated: `3643` + // Minimum execution time: 32_067_000 picoseconds. + Weight::from_parts(32_817_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -487,9 +491,9 @@ impl WeightInfo for () { fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `2653` - // Minimum execution time: 14_145 nanoseconds. - Weight::from_parts(14_598_000, 2653) + // Estimated: `3643` + // Minimum execution time: 16_365_000 picoseconds. 
+ Weight::from_parts(16_707_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -497,14 +501,14 @@ impl WeightInfo for () { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) /// Storage: Uniques Asset (r:1001 w:1000) /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: Uniques InstanceMetadataOf (r:1000 w:1000) + /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: Uniques Attribute (r:1000 w:1000) + /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) /// Storage: Uniques ClassAccount (r:0 w:1) /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:0 w:1000) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) /// Storage: Uniques ClassMetadataOf (r:0 w:1) /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:0 w:1000) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) /// Storage: Uniques Account (r:0 w:1000) /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) /// Storage: Uniques CollectionMaxSupply (r:0 w:1) @@ -514,22 +518,26 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `450 + n * (108 ±0) + m * (56 ±0) + a * (107 ±0)` - // Estimated: `5250 + n * (2597 ±0)` - // Minimum execution time: 2_404_081 nanoseconds. - Weight::from_parts(2_419_004_000, 5250) - // Standard Error: 27_175 - .saturating_add(Weight::from_parts(8_616_904, 0).saturating_mul(n.into())) - // Standard Error: 27_175 - .saturating_add(Weight::from_parts(334_249, 0).saturating_mul(m.into())) - // Standard Error: 27_175 - .saturating_add(Weight::from_parts(213_038, 0).saturating_mul(a.into())) + // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Estimated: `3643 + a * (2839 ±0) + m * (2583 ±0) + n * (2597 ±0)` + // Minimum execution time: 2_498_918_000 picoseconds. 
+ Weight::from_parts(2_516_809_000, 3643) + // Standard Error: 26_297 + .saturating_add(Weight::from_parts(6_648_035, 0).saturating_mul(n.into())) + // Standard Error: 26_297 + .saturating_add(Weight::from_parts(354_268, 0).saturating_mul(m.into())) + // Standard Error: 26_297 + .saturating_add(Weight::from_parts(223_770, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2839).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } /// Storage: Uniques Asset (r:1 w:1) @@ -542,10 +550,10 @@ impl WeightInfo for () { /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `7749` - // Minimum execution time: 29_326 nanoseconds. - Weight::from_parts(29_671_000, 7749) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 38_157_000 picoseconds. + Weight::from_parts(38_677_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -559,10 +567,10 @@ impl WeightInfo for () { /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 30_497 nanoseconds. - Weight::from_parts(30_714_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 39_069_000 picoseconds. + Weight::from_parts(40_442_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -576,10 +584,10 @@ impl WeightInfo for () { /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 24_212 nanoseconds. - Weight::from_parts(24_681_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 28_085_000 picoseconds. + Weight::from_parts(28_403_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -590,12 +598,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `837 + i * (108 ±0)` - // Estimated: `2653 + i * (2597 ±0)` - // Minimum execution time: 13_633 nanoseconds. - Weight::from_parts(13_797_000, 2653) - // Standard Error: 9_293 - .saturating_add(Weight::from_parts(11_163_914, 0).saturating_mul(i.into())) + // Measured: `805 + i * (76 ±0)` + // Estimated: `3643 + i * (2597 ±0)` + // Minimum execution time: 16_202_000 picoseconds. 
+ Weight::from_parts(16_380_000, 3643) + // Standard Error: 18_639 + .saturating_add(Weight::from_parts(16_047_161, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -608,10 +616,10 @@ impl WeightInfo for () { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 17_126 nanoseconds. - Weight::from_parts(17_572_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 20_131_000 picoseconds. + Weight::from_parts(20_535_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -621,10 +629,10 @@ impl WeightInfo for () { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 17_209 nanoseconds. - Weight::from_parts(17_411_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 19_895_000 picoseconds. + Weight::from_parts(20_198_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -632,10 +640,10 @@ impl WeightInfo for () { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn freeze_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 13_048 nanoseconds. - Weight::from_parts(13_589_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 15_312_000 picoseconds. + Weight::from_parts(15_555_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -643,10 +651,10 @@ impl WeightInfo for () { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn thaw_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 12_908 nanoseconds. - Weight::from_parts(13_098_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 15_145_000 picoseconds. + Weight::from_parts(15_371_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -658,10 +666,10 @@ impl WeightInfo for () { /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `455` - // Estimated: `5180` - // Minimum execution time: 20_629 nanoseconds. - Weight::from_parts(21_448_000, 5180) + // Measured: `423` + // Estimated: `3643` + // Minimum execution time: 23_800_000 picoseconds. + Weight::from_parts(23_991_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -669,10 +677,10 @@ impl WeightInfo for () { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 13_740 nanoseconds. 
- Weight::from_parts(14_020_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 15_929_000 picoseconds. + Weight::from_parts(16_219_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -682,10 +690,10 @@ impl WeightInfo for () { /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_item_status() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `2653` - // Minimum execution time: 16_293 nanoseconds. - Weight::from_parts(16_509_000, 2653) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 18_617_000 picoseconds. + Weight::from_parts(19_016_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -697,10 +705,10 @@ impl WeightInfo for () { /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `8075` - // Minimum execution time: 33_560 nanoseconds. - Weight::from_parts(34_263_000, 8075) + // Measured: `547` + // Estimated: `3829` + // Minimum execution time: 41_982_000 picoseconds. + Weight::from_parts(42_329_000, 3829) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -712,10 +720,10 @@ impl WeightInfo for () { /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `1031` - // Estimated: `8075` - // Minimum execution time: 31_955 nanoseconds. - Weight::from_parts(32_447_000, 8075) + // Measured: `936` + // Estimated: `3829` + // Minimum execution time: 39_921_000 picoseconds. + Weight::from_parts(40_499_000, 3829) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -725,10 +733,10 @@ impl WeightInfo for () { /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `5236` - // Minimum execution time: 25_520 nanoseconds. - Weight::from_parts(25_843_000, 5236) + // Measured: `415` + // Estimated: `3643` + // Minimum execution time: 31_774_000 picoseconds. + Weight::from_parts(32_327_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -738,10 +746,10 @@ impl WeightInfo for () { /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `5236` - // Minimum execution time: 25_812 nanoseconds. - Weight::from_parts(26_141_000, 5236) + // Measured: `547` + // Estimated: `3643` + // Minimum execution time: 32_551_000 picoseconds. 
+ Weight::from_parts(32_891_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -751,10 +759,10 @@ impl WeightInfo for () { /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `5216` - // Minimum execution time: 25_055 nanoseconds. - Weight::from_parts(25_244_000, 5216) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 33_490_000 picoseconds. + Weight::from_parts(34_617_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -764,10 +772,10 @@ impl WeightInfo for () { /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `525` - // Estimated: `5216` - // Minimum execution time: 23_311 nanoseconds. - Weight::from_parts(23_682_000, 5216) + // Measured: `461` + // Estimated: `3643` + // Minimum execution time: 31_691_000 picoseconds. + Weight::from_parts(32_042_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -777,10 +785,10 @@ impl WeightInfo for () { /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `5250` - // Minimum execution time: 17_709 nanoseconds. - Weight::from_parts(18_308_000, 5250) + // Measured: `495` + // Estimated: `3643` + // Minimum execution time: 20_738_000 picoseconds. + Weight::from_parts(21_067_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -790,10 +798,10 @@ impl WeightInfo for () { /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `592` - // Estimated: `5250` - // Minimum execution time: 17_656 nanoseconds. - Weight::from_parts(18_039_000, 5250) + // Measured: `528` + // Estimated: `3643` + // Minimum execution time: 20_404_000 picoseconds. + Weight::from_parts(20_999_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -802,9 +810,9 @@ impl WeightInfo for () { fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `109` - // Estimated: `2527` - // Minimum execution time: 14_615 nanoseconds. - Weight::from_parts(14_931_000, 2527) + // Estimated: `3517` + // Minimum execution time: 17_047_000 picoseconds. + Weight::from_parts(17_307_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -814,10 +822,10 @@ impl WeightInfo for () { /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `5152` - // Minimum execution time: 14_974 nanoseconds. - Weight::from_parts(15_314_000, 5152) + // Measured: `349` + // Estimated: `3643` + // Minimum execution time: 17_829_000 picoseconds. 
+ Weight::from_parts(18_194_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -827,10 +835,10 @@ impl WeightInfo for () { /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `358` - // Estimated: `2597` - // Minimum execution time: 15_444 nanoseconds. - Weight::from_parts(15_886_000, 2597) + // Measured: `326` + // Estimated: `3587` + // Minimum execution time: 17_620_000 picoseconds. + Weight::from_parts(17_931_000, 3587) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -844,10 +852,10 @@ impl WeightInfo for () { /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `703` - // Estimated: `7814` - // Minimum execution time: 35_628 nanoseconds. - Weight::from_parts(35_886_000, 7814) + // Measured: `607` + // Estimated: `3643` + // Minimum execution time: 39_550_000 picoseconds. + Weight::from_parts(40_052_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 099526d3dcb9e..a30feec467804 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } @@ -47,5 +47,6 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index d9ac2ebbc15a9..6f156e318c8cf 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -35,12 +35,13 @@ use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, Hash, IdentityLookup}, + TokenError, }; type BlockNumber = u64; // example module to test behaviors. -#[frame_support::pallet] +#[frame_support::pallet(dev_mode)] pub mod example { use frame_support::{dispatch::WithPostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; @@ -90,7 +91,7 @@ pub mod example { mod mock_democracy { pub use pallet::*; - #[frame_support::pallet] + #[frame_support::pallet(dev_mode)] pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -185,6 +186,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_root_testing::Config for Test {} @@ -204,6 +209,7 @@ parameter_types! 
{ pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; pub const MaxProposals: u32 = 100; pub const MaxMembers: u32 = 100; + pub MaxProposalWeight: Weight = sp_runtime::Perbill::from_percent(50) * BlockWeights::get().max_block; } type CouncilCollective = pallet_collective::Instance1; @@ -217,6 +223,7 @@ impl pallet_collective::Config for Test { type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = (); type SetMembersOrigin = frame_system::EnsureRoot; + type MaxProposalWeight = MaxProposalWeight; } impl example::Config for Test {} @@ -226,7 +233,7 @@ impl Contains for TestBaseCallFilter { fn contains(c: &RuntimeCall) -> bool { match *c { // Transfer works. Use `transfer_keep_alive` for a call that doesn't pass the filter. - RuntimeCall::Balances(pallet_balances::Call::transfer { .. }) => true, + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) => true, RuntimeCall::Utility(_) => true, // For benchmarking, this acts as a noop call RuntimeCall::System(frame_system::Call::remark { .. }) => true, @@ -253,7 +260,7 @@ type ExampleCall = example::Call; type UtilityCall = crate::Call; use frame_system::Call as SystemCall; -use pallet_balances::{Call as BalancesCall, Error as BalancesError}; +use pallet_balances::Call as BalancesCall; use pallet_root_testing::Call as RootTestingCall; use pallet_timestamp::Call as TimestampCall; @@ -278,7 +285,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { } fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) } fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> RuntimeCall { @@ -289,10 +296,10 @@ fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> R fn as_derivative_works() { new_test_ext().execute_with(|| { let sub_1_0 = Utility::derivative_account_id(1, 0); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), sub_1_0, 5)); + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(1), sub_1_0, 5)); assert_err_ignore_postinfo!( Utility::as_derivative(RuntimeOrigin::signed(1), 1, Box::new(call_transfer(6, 3)),), - BalancesError::::InsufficientBalance + TokenError::FundsUnavailable, ); assert_ok!(Utility::as_derivative( RuntimeOrigin::signed(1), @@ -599,7 +606,7 @@ fn batch_all_revert() { ), pays_fee: Pays::Yes }, - error: pallet_balances::Error::::InsufficientBalance.into() + error: TokenError::FundsUnavailable.into(), } ); assert_eq!(Balances::free_balance(1), 10); diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index c680c9ff00f9f..0ff261a33f362 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_utility //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -63,44 +63,44 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_742 nanoseconds. 
- Weight::from_parts(16_087_635, 0) - // Standard Error: 2_411 - .saturating_add(Weight::from_parts(3_665_506, 0).saturating_mul(c.into())) + // Minimum execution time: 7_932_000 picoseconds. + Weight::from_parts(24_064_040, 0) + // Standard Error: 2_486 + .saturating_add(Weight::from_parts(4_238_449, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_802 nanoseconds. - Weight::from_parts(5_269_000, 0) + // Minimum execution time: 5_536_000 picoseconds. + Weight::from_parts(5_963_000, 0) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_100 nanoseconds. - Weight::from_parts(14_090_381, 0) - // Standard Error: 1_917 - .saturating_add(Weight::from_parts(3_744_891, 0).saturating_mul(c.into())) + // Minimum execution time: 7_820_000 picoseconds. + Weight::from_parts(18_969_535, 0) + // Standard Error: 2_228 + .saturating_add(Weight::from_parts(4_448_073, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_840 nanoseconds. - Weight::from_parts(9_280_000, 0) + // Minimum execution time: 9_811_000 picoseconds. + Weight::from_parts(10_162_000, 0) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_245 nanoseconds. - Weight::from_parts(14_292_923, 0) - // Standard Error: 1_803 - .saturating_add(Weight::from_parts(3_645_950, 0).saturating_mul(c.into())) + // Minimum execution time: 7_829_000 picoseconds. + Weight::from_parts(12_960_288, 0) + // Standard Error: 2_222 + .saturating_add(Weight::from_parts(4_272_019, 0).saturating_mul(c.into())) } } @@ -111,43 +111,43 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_742 nanoseconds. - Weight::from_parts(16_087_635, 0) - // Standard Error: 2_411 - .saturating_add(Weight::from_parts(3_665_506, 0).saturating_mul(c.into())) + // Minimum execution time: 7_932_000 picoseconds. + Weight::from_parts(24_064_040, 0) + // Standard Error: 2_486 + .saturating_add(Weight::from_parts(4_238_449, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_802 nanoseconds. - Weight::from_parts(5_269_000, 0) + // Minimum execution time: 5_536_000 picoseconds. + Weight::from_parts(5_963_000, 0) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_100 nanoseconds. - Weight::from_parts(14_090_381, 0) - // Standard Error: 1_917 - .saturating_add(Weight::from_parts(3_744_891, 0).saturating_mul(c.into())) + // Minimum execution time: 7_820_000 picoseconds. + Weight::from_parts(18_969_535, 0) + // Standard Error: 2_228 + .saturating_add(Weight::from_parts(4_448_073, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_840 nanoseconds. - Weight::from_parts(9_280_000, 0) + // Minimum execution time: 9_811_000 picoseconds. + Weight::from_parts(10_162_000, 0) } /// The range of component `c` is `[0, 1000]`. 
fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_245 nanoseconds. - Weight::from_parts(14_292_923, 0) - // Standard Error: 1_803 - .saturating_add(Weight::from_parts(3_645_950, 0).saturating_mul(c.into())) + // Minimum execution time: 7_829_000 picoseconds. + Weight::from_parts(12_960_288, 0) + // Standard Error: 2_222 + .saturating_add(Weight::from_parts(4_272_019, 0).saturating_mul(c.into())) } } diff --git a/frame/vesting-mangata/src/lib.rs b/frame/vesting-mangata/src/lib.rs index 9d73dd43f2b57..0ac79935739f1 100644 --- a/frame/vesting-mangata/src/lib.rs +++ b/frame/vesting-mangata/src/lib.rs @@ -417,7 +417,7 @@ pub mod pallet { // TODO // Needs to be benchmarked #[pallet::call_index(4)] - #[pallet::weight(400_000_000u64)] + #[pallet::weight({400_000_000u64})] pub fn sudo_unlock_all_vesting_tokens( origin: OriginFor, target: ::Source, diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index c18d1f45e038b..2aa095bd74bff 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = "derive", ] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 35a094a9f4f19..15be519842992 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -139,6 +139,7 @@ benchmarks! { let other: T::AccountId = account("other", 0, SEED); let other_lookup = T::Lookup::unlookup(other.clone()); + T::Currency::make_free_balance_be(&other, T::Currency::minimum_balance()); add_locks::(&other, l as u8); let expected_balance = add_vesting_schedules::(other_lookup.clone(), s)?; @@ -168,6 +169,7 @@ benchmarks! { let other: T::AccountId = account("other", 0, SEED); let other_lookup = T::Lookup::unlookup(other.clone()); + T::Currency::make_free_balance_be(&other, T::Currency::minimum_balance()); add_locks::(&other, l as u8); add_vesting_schedules::(other_lookup.clone(), s)?; // At block 21 everything is unlocked. @@ -200,8 +202,10 @@ benchmarks! { let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); // Give target existing locks + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); add_locks::(&target, l as u8); // Add one vesting schedules. + let orig_balance = T::Currency::free_balance(&target); let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; let transfer_amount = T::MinVestedTransfer::get(); @@ -216,7 +220,7 @@ benchmarks! { }: _(RawOrigin::Signed(caller), target_lookup, vesting_schedule) verify { assert_eq!( - expected_balance, + orig_balance + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); @@ -238,8 +242,10 @@ benchmarks! 
{ let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); // Give target existing locks + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); add_locks::(&target, l as u8); // Add one less than max vesting schedules + let orig_balance = T::Currency::free_balance(&target); let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; let transfer_amount = T::MinVestedTransfer::get(); @@ -254,7 +260,7 @@ benchmarks! { }: _(RawOrigin::Root, source_lookup, target_lookup, vesting_schedule) verify { assert_eq!( - expected_balance, + orig_balance + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); @@ -272,6 +278,7 @@ benchmarks! { let caller: T::AccountId = account("caller", 0, SEED); let caller_lookup = T::Lookup::unlookup(caller.clone()); // Give target existing locks. + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); // Add max vesting schedules. let expected_balance = add_vesting_schedules::(caller_lookup, s)?; @@ -322,6 +329,7 @@ benchmarks! { let caller: T::AccountId = account("caller", 0, SEED); let caller_lookup = T::Lookup::unlookup(caller.clone()); // Give target other locks. + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); // Add max vesting schedules. let total_transferred = add_vesting_schedules::(caller_lookup, s)?; diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index f2ad6a70025e5..1adb36b730b1a 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -80,12 +80,16 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); - pub static ExistentialDeposit: u64 = 0; + pub static ExistentialDeposit: u64 = 1; } impl Config for Test { type BlockNumberToBalance = Identity; diff --git a/frame/vesting/src/tests.rs b/frame/vesting/src/tests.rs index efc134cab7279..46afe895f6fcc 100644 --- a/frame/vesting/src/tests.rs +++ b/frame/vesting/src/tests.rs @@ -17,7 +17,10 @@ use frame_support::{assert_noop, assert_ok, assert_storage_noop, dispatch::EncodeLike}; use frame_system::RawOrigin; -use sp_runtime::traits::{BadOrigin, Identity}; +use sp_runtime::{ + traits::{BadOrigin, Identity}, + TokenError, +}; use super::{Vesting as VestingStorage, *}; use crate::mock::{Balances, ExtBuilder, System, Test, Vesting}; @@ -180,10 +183,8 @@ fn unvested_balance_should_not_transfer() { assert_eq!(user1_free_balance, 100); // Account 1 has free balance // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_noop!( - Balances::transfer(Some(1).into(), 2, 56), - pallet_balances::Error::::LiquidityRestrictions, - ); // Account 1 cannot send more than vested amount + // Account 1 cannot send more than vested amount... 
+ assert_noop!(Balances::transfer_allow_death(Some(1).into(), 2, 56), TokenError::Frozen); }); } @@ -195,7 +196,7 @@ fn vested_balance_should_transfer() { // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 55)); }); } @@ -213,7 +214,7 @@ fn vested_balance_should_transfer_with_multi_sched() { // Account 1 has only 256 units unlocking at block 1 (plus 1280 already fee). assert_eq!(Vesting::vesting_balance(&1), Some(2304)); assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 2, 1536)); + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 1536)); }); } @@ -233,7 +234,7 @@ fn vested_balance_should_transfer_using_vest_other() { // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest_other(Some(2).into(), 1)); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 55)); }); } @@ -251,7 +252,7 @@ fn vested_balance_should_transfer_using_vest_other_with_multi_sched() { // Account 1 has only 256 units unlocking at block 1 (plus 1280 already free). assert_eq!(Vesting::vesting_balance(&1), Some(2304)); assert_ok!(Vesting::vest_other(Some(2).into(), 1)); - assert_ok!(Balances::transfer(Some(1).into(), 2, 1536)); + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 1536)); }); } @@ -266,8 +267,8 @@ fn non_vested_cannot_vest_other() { #[test] fn extra_balance_should_transfer() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { - assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); - assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); + assert_ok!(Balances::transfer_allow_death(Some(3).into(), 1, 100)); + assert_ok!(Balances::transfer_allow_death(Some(3).into(), 2, 100)); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal @@ -278,12 +279,13 @@ fn extra_balance_should_transfer() { // Account 1 has only 5 units vested at block 1 (plus 150 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 3, 155)); // Account 1 can send extra units gained // Account 2 has no units vested at block 1, but gained 100 assert_eq!(Vesting::vesting_balance(&2), Some(200)); assert_ok!(Vesting::vest(Some(2).into())); - assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained + assert_ok!(Balances::transfer_allow_death(Some(2).into(), 3, 100)); // Account 2 can send extra + // units gained }); } @@ -305,7 +307,7 @@ fn liquid_funds_should_transfer_with_delayed_vesting() { assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 can still send liquid funds - assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); + assert_ok!(Balances::transfer_allow_death(Some(12).into(), 3, 256 * 5)); }); } @@ -1144,14 +1146,11 @@ fn vested_transfer_less_than_existential_deposit_fails() { ); // vested_transfer fails. 
- assert_noop!( - Vesting::vested_transfer(Some(3).into(), 99, sched), - pallet_balances::Error::::ExistentialDeposit, - ); + assert_noop!(Vesting::vested_transfer(Some(3).into(), 99, sched), TokenError::BelowMinimum,); // force_vested_transfer fails. assert_noop!( Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 99, sched), - pallet_balances::Error::::ExistentialDeposit, + TokenError::BelowMinimum, ); }); } diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index de3260fa1e6a6..4cf3b3dcfbfd0 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_vesting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -65,80 +65,88 @@ impl WeightInfo for SubstrateWeight { /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `444 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `7306` - // Minimum execution time: 28_062 nanoseconds. - Weight::from_parts(26_857_563, 7306) - // Standard Error: 623 - .saturating_add(Weight::from_parts(55_988, 0).saturating_mul(l.into())) - // Standard Error: 1_109 - .saturating_add(Weight::from_parts(59_714, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 36_182_000 picoseconds. + Weight::from_parts(35_159_830, 4764) + // Standard Error: 952 + .saturating_add(Weight::from_parts(63_309, 0).saturating_mul(l.into())) + // Standard Error: 1_694 + .saturating_add(Weight::from_parts(62_244, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `444 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `7306` - // Minimum execution time: 27_027 nanoseconds. 
- Weight::from_parts(26_509_364, 7306) - // Standard Error: 815 - .saturating_add(Weight::from_parts(54_711, 0).saturating_mul(l.into())) - // Standard Error: 1_451 - .saturating_add(Weight::from_parts(32_792, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 39_344_000 picoseconds. + Weight::from_parts(38_921_936, 4764) + // Standard Error: 1_283 + .saturating_add(Weight::from_parts(61_531, 0).saturating_mul(l.into())) + // Standard Error: 2_283 + .saturating_add(Weight::from_parts(36_175, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `579 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 29_554 nanoseconds. - Weight::from_parts(28_269_203, 9909) - // Standard Error: 623 - .saturating_add(Weight::from_parts(59_058, 0).saturating_mul(l.into())) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(63_429, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 39_461_000 picoseconds. + Weight::from_parts(38_206_465, 4764) + // Standard Error: 743 + .saturating_add(Weight::from_parts(56_973, 0).saturating_mul(l.into())) + // Standard Error: 1_322 + .saturating_add(Weight::from_parts(65_059, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `579 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 28_546 nanoseconds. 
- Weight::from_parts(28_299_251, 9909) - // Standard Error: 786 - .saturating_add(Weight::from_parts(53_401, 0).saturating_mul(l.into())) - // Standard Error: 1_399 - .saturating_add(Weight::from_parts(29_713, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 42_029_000 picoseconds. + Weight::from_parts(42_153_438, 4764) + // Standard Error: 1_108 + .saturating_add(Weight::from_parts(50_058, 0).saturating_mul(l.into())) + // Standard Error: 1_971 + .saturating_add(Weight::from_parts(32_391, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) @@ -147,19 +155,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `650 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 43_436 nanoseconds. - Weight::from_parts(44_885_707, 9909) - // Standard Error: 1_516 - .saturating_add(Weight::from_parts(59_066, 0).saturating_mul(l.into())) - // Standard Error: 2_698 - .saturating_add(Weight::from_parts(32_053, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 75_223_000 picoseconds. + Weight::from_parts(76_675_778, 4764) + // Standard Error: 2_534 + .saturating_add(Weight::from_parts(70_731, 0).saturating_mul(l.into())) + // Standard Error: 4_509 + .saturating_add(Weight::from_parts(108_866, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) @@ -168,61 +178,67 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `785 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `12512` - // Minimum execution time: 45_805 nanoseconds. - Weight::from_parts(46_869_490, 12512) - // Standard Error: 1_445 - .saturating_add(Weight::from_parts(52_654, 0).saturating_mul(l.into())) - // Standard Error: 2_571 - .saturating_add(Weight::from_parts(34_202, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `658 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `6196` + // Minimum execution time: 76_922_000 picoseconds. 
+ Weight::from_parts(78_634_098, 6196) + // Standard Error: 2_099 + .saturating_add(Weight::from_parts(68_218, 0).saturating_mul(l.into())) + // Standard Error: 3_736 + .saturating_add(Weight::from_parts(95_990, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `577 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 30_460 nanoseconds. - Weight::from_parts(29_407_637, 9909) - // Standard Error: 794 - .saturating_add(Weight::from_parts(63_757, 0).saturating_mul(l.into())) - // Standard Error: 1_466 - .saturating_add(Weight::from_parts(56_032, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 39_476_000 picoseconds. + Weight::from_parts(38_261_747, 4764) + // Standard Error: 1_794 + .saturating_add(Weight::from_parts(69_639, 0).saturating_mul(l.into())) + // Standard Error: 3_313 + .saturating_add(Weight::from_parts(73_202, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `577 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 30_413 nanoseconds. - Weight::from_parts(29_350_467, 9909) - // Standard Error: 724 - .saturating_add(Weight::from_parts(65_366, 0).saturating_mul(l.into())) - // Standard Error: 1_337 - .saturating_add(Weight::from_parts(53_799, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 43_764_000 picoseconds. 
+ Weight::from_parts(42_679_386, 4764) + // Standard Error: 1_224 + .saturating_add(Weight::from_parts(65_857, 0).saturating_mul(l.into())) + // Standard Error: 2_261 + .saturating_add(Weight::from_parts(70_861, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } } @@ -233,80 +249,88 @@ impl WeightInfo for () { /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `444 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `7306` - // Minimum execution time: 28_062 nanoseconds. - Weight::from_parts(26_857_563, 7306) - // Standard Error: 623 - .saturating_add(Weight::from_parts(55_988, 0).saturating_mul(l.into())) - // Standard Error: 1_109 - .saturating_add(Weight::from_parts(59_714, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 36_182_000 picoseconds. + Weight::from_parts(35_159_830, 4764) + // Standard Error: 952 + .saturating_add(Weight::from_parts(63_309, 0).saturating_mul(l.into())) + // Standard Error: 1_694 + .saturating_add(Weight::from_parts(62_244, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `444 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `7306` - // Minimum execution time: 27_027 nanoseconds. - Weight::from_parts(26_509_364, 7306) - // Standard Error: 815 - .saturating_add(Weight::from_parts(54_711, 0).saturating_mul(l.into())) - // Standard Error: 1_451 - .saturating_add(Weight::from_parts(32_792, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 39_344_000 picoseconds. 
+ Weight::from_parts(38_921_936, 4764) + // Standard Error: 1_283 + .saturating_add(Weight::from_parts(61_531, 0).saturating_mul(l.into())) + // Standard Error: 2_283 + .saturating_add(Weight::from_parts(36_175, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `579 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 29_554 nanoseconds. - Weight::from_parts(28_269_203, 9909) - // Standard Error: 623 - .saturating_add(Weight::from_parts(59_058, 0).saturating_mul(l.into())) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(63_429, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 39_461_000 picoseconds. + Weight::from_parts(38_206_465, 4764) + // Standard Error: 743 + .saturating_add(Weight::from_parts(56_973, 0).saturating_mul(l.into())) + // Standard Error: 1_322 + .saturating_add(Weight::from_parts(65_059, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `579 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 28_546 nanoseconds. - Weight::from_parts(28_299_251, 9909) - // Standard Error: 786 - .saturating_add(Weight::from_parts(53_401, 0).saturating_mul(l.into())) - // Standard Error: 1_399 - .saturating_add(Weight::from_parts(29_713, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 42_029_000 picoseconds. 
+ Weight::from_parts(42_153_438, 4764) + // Standard Error: 1_108 + .saturating_add(Weight::from_parts(50_058, 0).saturating_mul(l.into())) + // Standard Error: 1_971 + .saturating_add(Weight::from_parts(32_391, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) @@ -315,19 +339,21 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `650 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 43_436 nanoseconds. - Weight::from_parts(44_885_707, 9909) - // Standard Error: 1_516 - .saturating_add(Weight::from_parts(59_066, 0).saturating_mul(l.into())) - // Standard Error: 2_698 - .saturating_add(Weight::from_parts(32_053, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 75_223_000 picoseconds. + Weight::from_parts(76_675_778, 4764) + // Standard Error: 2_534 + .saturating_add(Weight::from_parts(70_731, 0).saturating_mul(l.into())) + // Standard Error: 4_509 + .saturating_add(Weight::from_parts(108_866, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) @@ -336,61 +362,67 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `785 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `12512` - // Minimum execution time: 45_805 nanoseconds. - Weight::from_parts(46_869_490, 12512) - // Standard Error: 1_445 - .saturating_add(Weight::from_parts(52_654, 0).saturating_mul(l.into())) - // Standard Error: 2_571 - .saturating_add(Weight::from_parts(34_202, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `658 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `6196` + // Minimum execution time: 76_922_000 picoseconds. 
+ Weight::from_parts(78_634_098, 6196) + // Standard Error: 2_099 + .saturating_add(Weight::from_parts(68_218, 0).saturating_mul(l.into())) + // Standard Error: 3_736 + .saturating_add(Weight::from_parts(95_990, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `577 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 30_460 nanoseconds. - Weight::from_parts(29_407_637, 9909) - // Standard Error: 794 - .saturating_add(Weight::from_parts(63_757, 0).saturating_mul(l.into())) - // Standard Error: 1_466 - .saturating_add(Weight::from_parts(56_032, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 39_476_000 picoseconds. + Weight::from_parts(38_261_747, 4764) + // Standard Error: 1_794 + .saturating_add(Weight::from_parts(69_639, 0).saturating_mul(l.into())) + // Standard Error: 3_313 + .saturating_add(Weight::from_parts(73_202, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Vesting Vesting (r:1 w:1) /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `577 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `9909` - // Minimum execution time: 30_413 nanoseconds. - Weight::from_parts(29_350_467, 9909) - // Standard Error: 724 - .saturating_add(Weight::from_parts(65_366, 0).saturating_mul(l.into())) - // Standard Error: 1_337 - .saturating_add(Weight::from_parts(53_799, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 43_764_000 picoseconds. 
+ Weight::from_parts(42_679_386, 4764) + // Standard Error: 1_224 + .saturating_add(Weight::from_parts(65_857, 0).saturating_mul(l.into())) + // Standard Error: 2_261 + .saturating_add(Weight::from_parts(70_861, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } } diff --git a/frame/whitelist/Cargo.toml b/frame/whitelist/Cargo.toml index 56ceaca9f81a4..21ecc4be374d3 100644 --- a/frame/whitelist/Cargo.toml +++ b/frame/whitelist/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index b16286863d71c..d644cd661ec9f 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -86,6 +86,10 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_preimage::Config for Test { diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs index 667d602a3c0ba..8636ea376e246 100644 --- a/frame/whitelist/src/weights.rs +++ b/frame/whitelist/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_whitelist //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -64,9 +64,9 @@ impl WeightInfo for SubstrateWeight { fn whitelist_call() -> Weight { // Proof Size summary in bytes: // Measured: `217` - // Estimated: `5081` - // Minimum execution time: 18_618 nanoseconds. - Weight::from_parts(19_133_000, 5081) + // Estimated: `3556` + // Minimum execution time: 21_370_000 picoseconds. + Weight::from_parts(21_834_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -77,9 +77,9 @@ impl WeightInfo for SubstrateWeight { fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: // Measured: `346` - // Estimated: `5081` - // Minimum execution time: 16_685 nanoseconds. - Weight::from_parts(17_325_000, 5081) + // Estimated: `3556` + // Minimum execution time: 19_222_000 picoseconds. + Weight::from_parts(19_582_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -92,12 +92,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 4194294]`. 
fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `454 + n * (1 ±0)` - // Estimated: `8007 + n * (1 ±0)` - // Minimum execution time: 27_539 nanoseconds. - Weight::from_parts(27_950_000, 8007) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_134, 0).saturating_mul(n.into())) + // Measured: `422 + n * (1 ±0)` + // Estimated: `3886 + n * (1 ±0)` + // Minimum execution time: 31_417_000 picoseconds. + Weight::from_parts(31_620_000, 3886) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -110,11 +110,11 @@ impl WeightInfo for SubstrateWeight { fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `346` - // Estimated: `5081` - // Minimum execution time: 20_581 nanoseconds. - Weight::from_parts(21_762_318, 5081) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_480, 0).saturating_mul(n.into())) + // Estimated: `3556` + // Minimum execution time: 23_092_000 picoseconds. + Weight::from_parts(24_043_432, 3556) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_227, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -129,9 +129,9 @@ impl WeightInfo for () { fn whitelist_call() -> Weight { // Proof Size summary in bytes: // Measured: `217` - // Estimated: `5081` - // Minimum execution time: 18_618 nanoseconds. - Weight::from_parts(19_133_000, 5081) + // Estimated: `3556` + // Minimum execution time: 21_370_000 picoseconds. + Weight::from_parts(21_834_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -142,9 +142,9 @@ impl WeightInfo for () { fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: // Measured: `346` - // Estimated: `5081` - // Minimum execution time: 16_685 nanoseconds. - Weight::from_parts(17_325_000, 5081) + // Estimated: `3556` + // Minimum execution time: 19_222_000 picoseconds. + Weight::from_parts(19_582_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -157,12 +157,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `454 + n * (1 ±0)` - // Estimated: `8007 + n * (1 ±0)` - // Minimum execution time: 27_539 nanoseconds. - Weight::from_parts(27_950_000, 8007) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_134, 0).saturating_mul(n.into())) + // Measured: `422 + n * (1 ±0)` + // Estimated: `3886 + n * (1 ±0)` + // Minimum execution time: 31_417_000 picoseconds. + Weight::from_parts(31_620_000, 3886) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -175,11 +175,11 @@ impl WeightInfo for () { fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `346` - // Estimated: `5081` - // Minimum execution time: 20_581 nanoseconds. 
- Weight::from_parts(21_762_318, 5081) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_480, 0).saturating_mul(n.into())) + // Estimated: `3556` + // Minimum execution time: 23_092_000 picoseconds. + Weight::from_parts(24_043_432, 3556) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_227, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index ae1b3294c281f..55e3993466a6d 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -23,7 +23,8 @@ sp-state-machine = { version = "0.13.0", default-features = false, optional = tr sp-trie = { version = "7.0.0", default-features = false, optional = true, path = "../trie" } hash-db = { version = "0.16.0", optional = true } thiserror = { version = "1.0.30", optional = true } - +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +sp-metadata-ir = { version = "0.1.0", default-features = false, path = "../metadata-ir" } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -42,6 +43,8 @@ std = [ "hash-db", "thiserror", "log/std", + "scale-info/std", + "sp-metadata-ir/std", ] # Special feature to disable logging completly. # @@ -51,3 +54,5 @@ std = [ # # This sets the max logging level to `off` for `log`. disable-logging = ["log/max_level_off"] +# Do not report the documentation in the metadata. +no-metadata-docs = [] diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index ba7c6312042c9..594c20e82c94e 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -16,14 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.10" -syn = { version = "1.0.98", features = ["full", "fold", "extra-traits", "visit"] } -proc-macro2 = "1.0.37" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full", "fold", "extra-traits", "visit"] } +proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "1.1.3" expander = "1.0.0" Inflector = "0.11.4" +[dev-dependencies] +assert_matches = "1.3.0" + # Required for the doc tests [features] default = ["std"] diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 3c3056d34487b..cde33c19016bd 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -15,16 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::utils::{ - extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, generate_crate_access, - generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, - replace_wild_card_parameter_names, return_type_extract_type, versioned_trait_name, - AllowSelfRefInParameters, -}; - -use crate::common::{ - API_VERSION_ATTRIBUTE, BLOCK_GENERIC_IDENT, CHANGED_IN_ATTRIBUTE, CORE_TRAIT_ATTRIBUTE, - RENAMED_ATTRIBUTE, SUPPORTED_ATTRIBUTE_NAMES, +use crate::{ + common::{ + API_VERSION_ATTRIBUTE, BLOCK_GENERIC_IDENT, CHANGED_IN_ATTRIBUTE, CORE_TRAIT_ATTRIBUTE, + RENAMED_ATTRIBUTE, SUPPORTED_ATTRIBUTE_NAMES, + }, + runtime_metadata::generate_decl_runtime_metadata, + utils::{ + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, + generate_crate_access, generate_runtime_mod_name_for_trait, parse_runtime_api_version, + prefix_function_with_trait, replace_wild_card_parameter_names, return_type_extract_type, + versioned_trait_name, AllowSelfRefInParameters, + }, }; use proc_macro2::{Span, TokenStream}; @@ -36,9 +38,10 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, + token::Comma, visit::{self, Visit}, - Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, TraitBound, - TraitItem, TraitItemMethod, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, LitInt, LitStr, TraitBound, + TraitItem, TraitItemFn, }; use std::collections::{BTreeMap, HashMap}; @@ -74,7 +77,7 @@ fn extend_generics_with_block(generics: &mut Generics) { /// attribute body as `TokenStream`. fn remove_supported_attributes(attrs: &mut Vec) -> HashMap<&'static str, Attribute> { let mut result = HashMap::new(); - attrs.retain(|v| match SUPPORTED_ATTRIBUTE_NAMES.iter().find(|a| v.path.is_ident(a)) { + attrs.retain(|v| match SUPPORTED_ATTRIBUTE_NAMES.iter().find(|a| v.path().is_ident(a)) { Some(attribute) => { result.insert(*attribute, v.clone()); false @@ -157,7 +160,7 @@ impl Fold for ReplaceBlockWithNodeBlock { /// ``` fn generate_versioned_api_traits( api: ItemTrait, - methods: BTreeMap>, + methods: BTreeMap>, ) -> Vec { let mut result = Vec::::new(); for (version, _) in &methods { @@ -167,7 +170,7 @@ fn generate_versioned_api_traits( // Add the methods from the current version and all previous one. Versions are sorted so // it's safe to stop early. for (_, m) in methods.iter().take_while(|(v, _)| v <= &version) { - versioned_trait.items.extend(m.iter().cloned().map(|m| TraitItem::Method(m))); + versioned_trait.items.extend(m.iter().cloned().map(|m| TraitItem::Fn(m))); } result.push(versioned_trait); @@ -178,37 +181,29 @@ fn generate_versioned_api_traits( /// Try to parse the given `Attribute` as `renamed` attribute. fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { - let meta = renamed.parse_meta()?; - - let err = Err(Error::new( - meta.span(), + let err = || { + Error::new( + renamed.span(), &format!( - "Unexpected `{renamed}` attribute. The supported format is `{renamed}(\"old_name\", version_it_was_renamed)`", - renamed = RENAMED_ATTRIBUTE, - ) + "Unexpected `{RENAMED_ATTRIBUTE}` attribute. 
\ + The supported format is `{RENAMED_ATTRIBUTE}(\"old_name\", version_it_was_renamed)`", + ), ) - ); - - match meta { - Meta::List(list) => - if list.nested.len() > 2 && list.nested.is_empty() { - err - } else { - let mut itr = list.nested.iter(); - let old_name = match itr.next() { - Some(NestedMeta::Lit(Lit::Str(i))) => i.value(), - _ => return err, - }; - - let version = match itr.next() { - Some(NestedMeta::Lit(Lit::Int(i))) => i.base10_parse()?, - _ => return err, - }; - - Ok((old_name, version)) - }, - _ => err, - } + }; + + renamed + .parse_args_with(|input: ParseStream| { + let old_name: LitStr = input.parse()?; + let _comma: Comma = input.parse()?; + let version: LitInt = input.parse()?; + + if !input.is_empty() { + return Err(input.error("No more arguments expected")) + } + + Ok((old_name.value(), version.base10_parse()?)) + }) + .map_err(|_| err()) } /// Generate the declaration of the trait for the runtime. @@ -219,6 +214,7 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { let mut decl = decl.clone(); let decl_span = decl.span(); extend_generics_with_block(&mut decl.generics); + let metadata = generate_decl_runtime_metadata(&decl); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); let api_version = @@ -227,12 +223,12 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { let trait_api_version = get_api_version(&found_attributes)?; - let mut methods_by_version: BTreeMap> = BTreeMap::new(); + let mut methods_by_version: BTreeMap> = BTreeMap::new(); // Process the items in the declaration. The filter_map function below does a lot of stuff // because the method attributes are stripped at this point decl.items.iter_mut().for_each(|i| match i { - TraitItem::Method(ref mut method) => { + TraitItem::Fn(ref mut method) => { let method_attrs = remove_supported_attributes(&mut method.attrs); let mut method_version = trait_api_version; // validate the api version for the method (if any) and generate default @@ -304,6 +300,8 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { pub use #versioned_ident as #main_api_ident; + #metadata + pub #api_version pub #id @@ -359,9 +357,8 @@ impl<'a> ToClientSideDecl<'a> { let mut result = Vec::new(); items.into_iter().for_each(|i| match i { - TraitItem::Method(method) => { - let (fn_decl, fn_decl_ctx) = - self.fold_trait_item_method(method, trait_generics_num); + TraitItem::Fn(method) => { + let (fn_decl, fn_decl_ctx) = self.fold_trait_item_fn(method, trait_generics_num); result.push(fn_decl.into()); result.push(fn_decl_ctx.into()); }, @@ -371,11 +368,11 @@ impl<'a> ToClientSideDecl<'a> { result } - fn fold_trait_item_method( + fn fold_trait_item_fn( &mut self, - method: TraitItemMethod, + method: TraitItemFn, trait_generics_num: usize, - ) -> (TraitItemMethod, TraitItemMethod) { + ) -> (TraitItemFn, TraitItemFn) { let crate_ = self.crate_; let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); let fn_decl = self.create_method_decl(method.clone(), context, trait_generics_num); @@ -386,9 +383,9 @@ impl<'a> ToClientSideDecl<'a> { fn create_method_decl_with_context( &mut self, - method: TraitItemMethod, + method: TraitItemFn, trait_generics_num: usize, - ) -> TraitItemMethod { + ) -> TraitItemFn { let crate_ = self.crate_; let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); let mut fn_decl_ctx = self.create_method_decl(method, quote!(context), trait_generics_num); @@ -404,10 +401,10 @@ impl<'a> 
ToClientSideDecl<'a> { /// the actual call into the runtime. fn create_method_decl( &mut self, - mut method: TraitItemMethod, + mut method: TraitItemFn, context: TokenStream, trait_generics_num: usize, - ) -> TraitItemMethod { + ) -> TraitItemFn { let params = match extract_parameter_names_types_and_borrows( &method.sig, AllowSelfRefInParameters::No, @@ -656,7 +653,7 @@ impl CheckTraitDecl { /// All errors will be collected in `self.errors`. fn check(&mut self, trait_: &ItemTrait) { self.check_method_declarations(trait_.items.iter().filter_map(|i| match i { - TraitItem::Method(method) => Some(method), + TraitItem::Fn(method) => Some(method), _ => None, })); @@ -666,10 +663,7 @@ impl CheckTraitDecl { /// Check that the given method declarations are correct. /// /// Any error is stored in `self.errors`. - fn check_method_declarations<'a>( - &mut self, - methods: impl Iterator, - ) { + fn check_method_declarations<'a>(&mut self, methods: impl Iterator) { let mut method_to_signature_changed = HashMap::>>::new(); methods.into_iter().for_each(|method| { diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 5ac07975df0f7..7e46f0a5a65eb 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -15,15 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::utils::{ - extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - extract_parameter_names_types_and_borrows, generate_crate_access, - generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, - versioned_trait_name, AllowSelfRefInParameters, RequireQualifiedTraitPath, +use crate::{ + common::API_VERSION_ATTRIBUTE, + runtime_metadata::generate_impl_runtime_metadata, + utils::{ + extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, + extract_parameter_names_types_and_borrows, generate_crate_access, + generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, + versioned_trait_name, AllowSelfRefInParameters, RequireQualifiedTraitPath, + }, }; -use crate::common::API_VERSION_ATTRIBUTE; - use proc_macro2::{Span, TokenStream}; use quote::quote; @@ -79,7 +81,14 @@ fn generate_impl_call( let pborrow = params.iter().map(|v| &v.2); let decode_params = if params.is_empty() { - quote!() + quote!( + if !#input.is_empty() { + panic!( + "Bad input data provided to {}: expected no parameters, but input buffer is not empty.", + #fn_name_str + ); + } + ) } else { let let_binding = if params.len() == 1 { quote! 
{ @@ -130,7 +139,7 @@ fn generate_impl_calls( .ident; for item in &impl_.items { - if let ImplItem::Method(method) = item { + if let ImplItem::Fn(method) = item { let impl_call = generate_impl_call(&method.sig, &impl_.self_ty, input, &impl_trait)?; @@ -218,7 +227,7 @@ fn generate_runtime_api_base_structures() -> Result { #[cfg(any(feature = "std", test))] pub struct RuntimeApiImpl + 'static> { call: &'static C, - commit_on_success: std::cell::RefCell, + in_transaction: std::cell::RefCell, changes: std::cell::RefCell<#crate_::OverlayedChanges>, storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache @@ -236,10 +245,11 @@ fn generate_runtime_api_base_structures() -> Result { &self, call: F, ) -> R where Self: Sized { - #crate_::OverlayedChanges::start_transaction(&mut std::cell::RefCell::borrow_mut(&self.changes)); - *std::cell::RefCell::borrow_mut(&self.commit_on_success) = false; + self.start_transaction(); + + let old_value = std::cell::RefCell::replace(&self.in_transaction, true); let res = call(self); - *std::cell::RefCell::borrow_mut(&self.commit_on_success) = true; + *std::cell::RefCell::borrow_mut(&self.in_transaction) = old_value; self.commit_or_rollback(std::matches!(res, #crate_::TransactionOutcome::Commit(_))); @@ -322,7 +332,7 @@ fn generate_runtime_api_base_structures() -> Result { ) -> #crate_::ApiRef<'a, Self::RuntimeApi> { RuntimeApiImpl { call: unsafe { std::mem::transmute(call) }, - commit_on_success: true.into(), + in_transaction: false.into(), changes: std::default::Default::default(), recorder: std::default::Default::default(), storage_transaction_cache: std::default::Default::default(), @@ -338,18 +348,40 @@ fn generate_runtime_api_base_structures() -> Result { Other parts of the runtime that make use of transactions (state-machine) also balance their transactions. The runtime cannot close client initiated transactions; qed"; - if *std::cell::RefCell::borrow(&self.commit_on_success) { - let res = if commit { - #crate_::OverlayedChanges::commit_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ) + let res = if commit { + let res = if let Some(recorder) = &self.recorder { + #crate_::ProofRecorder::::commit_transaction(&recorder) + } else { + Ok(()) + }; + let res2 = #crate_::OverlayedChanges::commit_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); + // Will panic on an `Err` below, however we should call commit + // on the recorder and the changes together. + std::result::Result::and(res, std::result::Result::map_err(res2, drop)) + } else { + let res = if let Some(recorder) = &self.recorder { + #crate_::ProofRecorder::::rollback_transaction(&recorder) } else { - #crate_::OverlayedChanges::rollback_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ) + Ok(()) }; + let res2 = #crate_::OverlayedChanges::rollback_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); + // Will panic on an `Err` below, however we should call commit + // on the recorder and the changes together. 
+ std::result::Result::and(res, std::result::Result::map_err(res2, drop)) + }; + std::result::Result::expect(res, proof); + } - std::result::Result::expect(res, proof); + fn start_transaction(&self) { + #crate_::OverlayedChanges::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); + if let Some(recorder) = &self.recorder { + #crate_::ProofRecorder::::start_transaction(&recorder); } } } @@ -443,10 +475,12 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { params: std::vec::Vec, fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, ) -> std::result::Result, #crate_::ApiError> { - if *std::cell::RefCell::borrow(&self.commit_on_success) { - #crate_::OverlayedChanges::start_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ); + // If we are not already in a transaction, we should create a new transaction + // and then commit/roll it back at the end! + let in_transaction = *std::cell::RefCell::borrow(&self.in_transaction); + + if !in_transaction { + self.start_transaction(); } let res = (|| { @@ -471,7 +505,9 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { ) })(); - self.commit_or_rollback(std::result::Result::is_ok(&res)); + if !in_transaction { + self.commit_or_rollback(std::result::Result::is_ok(&res)); + } res } @@ -648,6 +684,7 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { let runtime_api_versions = generate_runtime_api_versions(api_impls)?; let wasm_interface = generate_wasm_interface(api_impls)?; let api_impls_for_runtime_api = generate_api_impl_for_runtime_api(api_impls)?; + let runtime_metadata = generate_impl_runtime_metadata(api_impls)?; let impl_ = quote!( #base_runtime_api @@ -658,6 +695,8 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { #runtime_api_versions + #runtime_metadata + pub mod api { use super::*; @@ -678,7 +717,7 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { // Filters all attributes except the cfg ones. fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec { - attrs.iter().filter(|a| a.path.is_ident("cfg")).cloned().collect() + attrs.iter().filter(|a| a.path().is_ident("cfg")).cloned().collect() } // Extracts the value of `API_VERSION_ATTRIBUTE` and handles errors. 
@@ -690,7 +729,7 @@ fn extract_api_version(attrs: &Vec, span: Span) -> Result // First fetch all `API_VERSION_ATTRIBUTE` values (should be only one) let api_ver = attrs .iter() - .filter(|a| a.path.is_ident(API_VERSION_ATTRIBUTE)) + .filter(|a| a.path().is_ident(API_VERSION_ATTRIBUTE)) .collect::>(); if api_ver.len() > 1 { diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index cea958426879e..d34f4b7f9cf6a 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -25,6 +25,7 @@ mod common; mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; +mod runtime_metadata; mod utils; #[proc_macro] diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index fc0a754e26730..be8c8ca0f8527 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -192,7 +192,7 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result) -> bool { let mut found = false; attributes.retain(|attr| { - if attr.path.is_ident(ADVANCED_ATTRIBUTE) { + if attr.path().is_ident(ADVANCED_ATTRIBUTE) { found = true; false } else { @@ -258,7 +258,7 @@ impl<'a> FoldRuntimeApiImpl<'a> { // We also need to overwrite all the `_with_context` methods. To do this, // we clone all methods and add them again with the new name plus one more argument. impl_item.items.extend(impl_item.items.clone().into_iter().filter_map(|i| { - if let syn::ImplItem::Method(mut m) = i { + if let syn::ImplItem::Fn(mut m) = i { m.sig.ident = quote::format_ident!("{}_with_context", m.sig.ident); m.sig.inputs.insert(2, parse_quote!( _: #crate_::ExecutionContext )); @@ -290,7 +290,7 @@ impl<'a> FoldRuntimeApiImpl<'a> { } impl<'a> Fold for FoldRuntimeApiImpl<'a> { - fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { + fn fold_impl_item_fn(&mut self, mut input: syn::ImplItemFn) -> syn::ImplItemFn { let block = { let crate_ = generate_crate_access(); let is_advanced = has_advanced_attribute(&mut input.attrs); @@ -377,7 +377,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { ) }; - let mut input = fold::fold_impl_item_method(self, input); + let mut input = fold::fold_impl_item_fn(self, input); // We need to set the block, after we modified the rest of the ast, otherwise we would // modify our generated block as well. input.block = block; diff --git a/primitives/api/proc-macro/src/runtime_metadata.rs b/primitives/api/proc-macro/src/runtime_metadata.rs new file mode 100644 index 0000000000000..ae78fb52dbd5a --- /dev/null +++ b/primitives/api/proc-macro/src/runtime_metadata.rs @@ -0,0 +1,271 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; +use syn::{parse_quote, ItemImpl, ItemTrait, Result}; + +use crate::{ + common::CHANGED_IN_ATTRIBUTE, + utils::{ + extract_impl_trait, filter_cfg_attributes, generate_crate_access, + generate_runtime_mod_name_for_trait, get_doc_literals, RequireQualifiedTraitPath, + }, +}; + +/// Get the type parameter argument without lifetime or mutability +/// of a runtime metadata function. +/// +/// In the following example, both the `AccountId` and `Index` generic +/// type parameters must implement `scale_info::TypeInfo` because they +/// are added into the metadata using `scale_info::meta_type`. +/// +/// ```ignore +/// trait ExampleAccountNonceApi { +/// fn account_nonce<'a>(account: &'a AccountId) -> Index; +/// } +/// ``` +/// +/// Instead of returning `&'a AccountId` for the first parameter, this function +/// returns `AccountId` to place bounds around it. +fn get_type_param(ty: &syn::Type) -> syn::Type { + // Remove the lifetime and mutability of the type T to + // place bounds around it. + let ty_elem = match &ty { + syn::Type::Reference(reference) => &reference.elem, + syn::Type::Ptr(ptr) => &ptr.elem, + syn::Type::Slice(slice) => &slice.elem, + syn::Type::Array(arr) => &arr.elem, + _ => ty, + }; + + ty_elem.clone() +} + +/// Extract the documentation from the provided attributes. +/// +/// It takes into account the `no-metadata-docs` feature. +fn collect_docs(attrs: &[syn::Attribute], crate_: &TokenStream2) -> TokenStream2 { + if cfg!(feature = "no-metadata-docs") { + quote!(#crate_::vec![]) + } else { + let docs = get_doc_literals(&attrs); + quote!(#crate_::vec![ #( #docs, )* ]) + } +} + +/// Generate the runtime metadata of the provided trait. +/// +/// The metadata is exposed as a generic function on the hidden module +/// of the trait generated by the `decl_runtime_apis`. +pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { + let crate_ = generate_crate_access(); + let mut methods = Vec::new(); + + // Ensure that any function parameter that relies on the `BlockT` bounds + // also has `TypeInfo + 'static` bounds (required by `scale_info::meta_type`). + // + // For example, if a runtime API defines a method that has an input: + // `fn func(input: ::Header)` + // then the runtime metadata will imply `::Header: TypeInfo + 'static`. + // + // This restricts the bounds at the metadata level, without needing to modify the `BlockT` + // itself, since the concrete implementations are already satisfying `TypeInfo`. + let mut where_clause = Vec::new(); + for item in &decl.items { + // Collect metadata for methods only. + let syn::TraitItem::Fn(method) = item else { + continue + }; + + // Collect metadata only for the latest methods. + let is_changed_in = + method.attrs.iter().any(|attr| attr.path().is_ident(CHANGED_IN_ATTRIBUTE)); + if is_changed_in { + continue + } + + let mut inputs = Vec::new(); + let signature = &method.sig; + for input in &signature.inputs { + // Exclude `self` from metadata collection. 
+ let syn::FnArg::Typed(typed) = input else { + continue + }; + + let pat = &typed.pat; + let name = quote!(#pat).to_string(); + let ty = &typed.ty; + + where_clause.push(get_type_param(ty)); + + inputs.push(quote!( + #crate_::metadata_ir::RuntimeApiMethodParamMetadataIR { + name: #name, + ty: #crate_::scale_info::meta_type::<#ty>(), + } + )); + } + + let output = match &signature.output { + syn::ReturnType::Default => quote!(#crate_::scale_info::meta_type::<()>()), + syn::ReturnType::Type(_, ty) => { + where_clause.push(get_type_param(ty)); + quote!(#crate_::scale_info::meta_type::<#ty>()) + }, + }; + + // String method name including quotes for constructing `v15::RuntimeApiMethodMetadata`. + let method_name = signature.ident.to_string(); + let docs = collect_docs(&method.attrs, &crate_); + + // Include the method metadata only if its `cfg` features are enabled. + let attrs = filter_cfg_attributes(&method.attrs); + methods.push(quote!( + #( #attrs )* + #crate_::metadata_ir::RuntimeApiMethodMetadataIR { + name: #method_name, + inputs: #crate_::vec![ #( #inputs, )* ], + output: #output, + docs: #docs, + } + )); + } + + let trait_name_ident = &decl.ident; + let trait_name = trait_name_ident.to_string(); + let docs = collect_docs(&decl.attrs, &crate_); + let attrs = filter_cfg_attributes(&decl.attrs); + // The trait generics were already extended with `Block: BlockT`. + let mut generics = decl.generics.clone(); + for generic_param in generics.params.iter_mut() { + let syn::GenericParam::Type(ty) = generic_param else { + continue + }; + + // Default type parameters are not allowed in functions. + ty.eq_token = None; + ty.default = None; + } + + let where_clause = where_clause + .iter() + .map(|ty| quote!(#ty: #crate_::scale_info::TypeInfo + 'static)); + + quote!( + #( #attrs )* + #[inline(always)] + pub fn runtime_metadata #generics () -> #crate_::metadata_ir::RuntimeApiMetadataIR + where #( #where_clause, )* + { + #crate_::metadata_ir::RuntimeApiMetadataIR { + name: #trait_name, + methods: #crate_::vec![ #( #methods, )* ], + docs: #docs, + } + } + ) +} + +/// Implement the `runtime_metadata` function on the runtime that +/// generates the metadata for the given traits. +/// +/// The metadata of each trait is extracted from the generic function +/// exposed by `generate_decl_runtime_metadata`. +pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result { + if impls.is_empty() { + return Ok(quote!()) + } + + let crate_ = generate_crate_access(); + + // Get the name of the runtime for which the traits are implemented. + let runtime_name = &impls + .get(0) + .expect("Traits should contain at least one implementation; qed") + .self_ty; + + let mut metadata = Vec::new(); + + for impl_ in impls { + let mut trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); + + // Implementation traits are always references with a path `impl client::Core ...` + // The trait name is the last segment of this path. + let trait_name_ident = &trait_ + .segments + .last() + .as_ref() + .expect("Trait path should always contain at least one item; qed") + .ident; + + // Extract the generics from the trait to pass to the `runtime_metadata` + // function on the hidden module.
+ let generics = trait_ + .segments + .iter() + .find_map(|segment| { + if let syn::PathArguments::AngleBracketed(generics) = &segment.arguments { + Some(generics.clone()) + } else { + None + } + }) + .expect("Trait path should always contain at least one generic parameter; qed"); + + let mod_name = generate_runtime_mod_name_for_trait(&trait_name_ident); + // Get absolute path to the `runtime_decl_for_` module by replacing the last segment. + if let Some(segment) = trait_.segments.last_mut() { + *segment = parse_quote!(#mod_name); + } + + let attrs = filter_cfg_attributes(&impl_.attrs); + metadata.push(quote!( + #( #attrs )* + #trait_::runtime_metadata::#generics() + )); + } + + // Each runtime must expose the `runtime_metadata()` to fetch the runtime API metadata. + // The function is implemented by calling `impl_runtime_apis!`. + // + // However, the `construct_runtime!` may be called without calling `impl_runtime_apis!`. + // Rely on the `Deref` trait to differentiate between a runtime that implements + // APIs (by macro impl_runtime_apis!) and a runtime that is simply created (by macro + // construct_runtime!). + // + // Both `InternalConstructRuntime` and `InternalImplRuntimeApis` expose a `runtime_metadata()` + // function. `InternalConstructRuntime` is implemented by the `construct_runtime!` for Runtime + // references (`& Runtime`), while `InternalImplRuntimeApis` is implemented by the + // `impl_runtime_apis!` for Runtime (`Runtime`). + // + // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` + // when both macros are called; and will resolve an empty `runtime_metadata` when only the + // `construct_runtime!` is called. + + Ok(quote!( + #[doc(hidden)] + trait InternalImplRuntimeApis { + #[inline(always)] + fn runtime_metadata(&self) -> #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { + #crate_::vec![ #( #metadata, )* ] + } + } + #[doc(hidden)] + impl InternalImplRuntimeApis for #runtime_name {} + )) +} diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 4444a2624b669..6716be142febc 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -22,7 +22,7 @@ use syn::{ ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; -use quote::{format_ident, quote}; +use quote::{format_ident, quote, ToTokens}; use proc_macro_crate::{crate_name, FoundCrate}; @@ -158,7 +158,7 @@ pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { items .iter() .filter_map(|i| match i { - ImplItem::Method(method) => Some(&method.sig), + ImplItem::Fn(method) => Some(&method.sig), _ => None, }) .flat_map(|sig| { @@ -253,7 +253,76 @@ pub fn parse_runtime_api_version(version: &Attribute) -> Result { version.base10_parse() } -// Each versioned trait is named 'ApiNameVN' where N is the specific version. E.g. ParachainHostV2 +/// Each versioned trait is named 'ApiNameVN' where N is the specific version. E.g. ParachainHostV2 pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { format_ident!("{}V{}", trait_ident, version) } + +/// Extract the documentation from the provided attributes. 
+pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { + attrs + .iter() + .filter_map(|attr| { + let syn::Meta::NameValue(meta) = &attr.meta else { + return None + }; + let Ok(lit) = syn::parse2::(meta.value.to_token_stream()) else { + unreachable!("non-lit doc attribute values do not exist"); + }; + meta.path.get_ident().filter(|ident| *ident == "doc").map(|_| lit) + }) + .collect() +} + +/// Filters all attributes except the cfg ones. +pub fn filter_cfg_attributes(attrs: &[syn::Attribute]) -> Vec { + attrs.iter().filter(|a| a.path().is_ident("cfg")).cloned().collect() +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use super::*; + + #[test] + fn check_get_doc_literals() { + const FIRST: &'static str = "hello"; + const SECOND: &'static str = "WORLD"; + + let doc: Attribute = parse_quote!(#[doc = #FIRST]); + let doc_world: Attribute = parse_quote!(#[doc = #SECOND]); + + let attrs = vec![ + doc.clone(), + parse_quote!(#[derive(Debug)]), + parse_quote!(#[test]), + parse_quote!(#[allow(non_camel_case_types)]), + doc_world.clone(), + ]; + + let docs = get_doc_literals(&attrs); + assert_eq!(docs.len(), 2); + assert_matches!(&docs[0], syn::Lit::Str(val) if val.value() == FIRST); + assert_matches!(&docs[1], syn::Lit::Str(val) if val.value() == SECOND); + } + + #[test] + fn check_filter_cfg_attributes() { + let cfg_std: Attribute = parse_quote!(#[cfg(feature = "std")]); + let cfg_benchmarks: Attribute = parse_quote!(#[cfg(feature = "runtime-benchmarks")]); + + let attrs = vec![ + cfg_std.clone(), + parse_quote!(#[derive(Debug)]), + parse_quote!(#[test]), + cfg_benchmarks.clone(), + parse_quote!(#[allow(non_camel_case_types)]), + ]; + + let filtered = filter_cfg_attributes(&attrs); + assert_eq!(filtered.len(), 2); + assert_eq!(cfg_std, filtered[0]); + assert_eq!(cfg_benchmarks, filtered[1]); + } +} diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 7542ca3f20ccd..02770280f7b90 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -76,12 +76,16 @@ pub use codec::{self, Decode, DecodeLimit, Encode}; #[cfg(feature = "std")] pub use hash_db::Hasher; #[doc(hidden)] +pub use scale_info; +#[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; use sp_core::OpaqueMetadata; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; #[doc(hidden)] +pub use sp_metadata_ir::{self as metadata_ir, frame_metadata as metadata}; +#[doc(hidden)] #[cfg(feature = "std")] pub use sp_runtime::StateVersion; #[doc(hidden)] @@ -101,7 +105,7 @@ pub use sp_state_machine::{ StorageProof, TrieBackend, TrieBackendBuilder, }; #[doc(hidden)] -pub use sp_std::{mem, slice}; +pub use sp_std::{mem, slice, vec}; #[doc(hidden)] pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; #[cfg(feature = "std")] @@ -729,8 +733,20 @@ decl_runtime_apis! { } /// The `Metadata` api trait that returns metadata for the runtime. + #[api_version(2)] pub trait Metadata { /// Returns the metadata of a runtime. fn metadata() -> OpaqueMetadata; + + /// Returns the metadata at a given version. + /// + /// If the given `version` isn't supported, this will return `None`. + /// Use [`Self::metadata_versions`] to find out about supported metadata version of the runtime. + fn metadata_at_version(version: u32) -> Option; + + /// Returns the supported metadata versions. + /// + /// This can be used to call `metadata_at_version`. 
+ fn metadata_versions() -> sp_std::vec::Vec; } } diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 5b6c144ef3f6b..338644fc1dc21 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -29,6 +29,7 @@ criterion = "0.4.0" futures = "0.3.21" log = "0.4.17" sp-core = { version = "7.0.0", path = "../../core" } +static_assertions = "1.1.0" [[bench]] name = "bench" diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 69f9a88ffadb3..b4d5002d7c194 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -15,20 +15,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_api::{Core, ProvideRuntimeApi}; -use sp_runtime::traits::{HashFor, Header as HeaderT}; +use std::panic::UnwindSafe; + +use sp_api::{ApiExt, Core, ProvideRuntimeApi}; +use sp_runtime::{ + traits::{HashFor, Header as HeaderT}, + TransactionOutcome, +}; use sp_state_machine::{ create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionStrategy, }; use substrate_test_runtime_client::{ prelude::*, runtime::{Block, Header, TestAPI, Transfer}, - DefaultTestClientBuilderExt, TestClientBuilder, + DefaultTestClientBuilderExt, TestClient, TestClientBuilder, }; use codec::Encode; use sc_block_builder::BlockBuilderProvider; use sp_consensus::SelectChain; +use substrate_test_runtime_client::sc_executor::WasmExecutor; fn calling_function_with_strat(strat: ExecutionStrategy) { let client = TestClientBuilder::new().set_execution_strategy(strat).build(); @@ -178,17 +184,13 @@ fn record_proof_works() { // Use the proof backend to execute `execute_block`. let mut overlay = Default::default(); - let executor = NativeElseWasmExecutor::::new( - WasmExecutionMethod::Interpreted, - None, - 8, - 2, + let executor = NativeElseWasmExecutor::::new_with_wasm_executor( + WasmExecutor::builder().build(), ); execution_proof_check_on_trie_backend( &backend, &mut overlay, &executor, - sp_core::testing::TaskExecutor::new(), "Core_execute_block", &block.encode(), &runtime_code, @@ -208,6 +210,49 @@ fn call_runtime_api_with_multiple_arguments() { .unwrap(); } +// Certain logic like the transaction handling is not unwind safe. +// +// Ensure that the type is not unwind safe! 
+static_assertions::assert_not_impl_any!(>::Api: UnwindSafe); + +#[test] +fn ensure_transactional_works() { + const KEY: &[u8] = b"test"; + + let client = TestClientBuilder::new().build(); + let best_hash = client.chain_info().best_hash; + + let runtime_api = client.runtime_api(); + runtime_api.execute_in_transaction(|api| { + api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], false).unwrap(); + + api.execute_in_transaction(|api| { + api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3, 4], false).unwrap(); + + TransactionOutcome::Commit(()) + }); + + TransactionOutcome::Commit(()) + }); + + let changes = runtime_api + .into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) + .unwrap(); + assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3, 4])); + + let runtime_api = client.runtime_api(); + runtime_api.execute_in_transaction(|api| { + assert!(api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], true).is_err()); + + TransactionOutcome::Commit(()) + }); + + let changes = runtime_api + .into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) + .unwrap(); + assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3])); +} + #[test] fn disable_logging_works() { if std::env::var("RUN_TEST").is_ok() { diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index f5b6ac1da4576..5b36248228bff 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -4,7 +4,7 @@ error[E0053]: method `test` has an incompatible type for trait 19 | fn test(data: String) {} | ^^^^^^ | | - | expected `u64`, found struct `std::string::String` + | expected `u64`, found struct `String` | help: change the parameter type to match the trait: `u64` | note: type in trait @@ -12,8 +12,8 @@ note: type in trait | 13 | fn test(data: u64); | ^^^ - = note: expected fn pointer `fn(u64)` - found fn pointer `fn(std::string::String)` + = note: expected signature `fn(u64)` + found signature `fn(std::string::String)` error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:19:11 @@ -21,7 +21,7 @@ error[E0308]: mismatched types 17 | / sp_api::impl_runtime_apis! { 18 | | impl self::Api for Runtime { 19 | | fn test(data: String) {} - | | ^^^^ expected `u64`, found struct `std::string::String` + | | ^^^^ expected `u64`, found struct `String` 20 | | } ... 
| 32 | | } diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 06f8226ec88bf..1515bd3a1208f 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -12,8 +12,8 @@ note: type in trait | 13 | fn test(data: u64); | ^^^ - = note: expected fn pointer `fn(u64)` - found fn pointer `fn(&u64)` + = note: expected signature `fn(u64)` + found signature `fn(&u64)` error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index e7aa118dfd6ac..37b58bc2bf649 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { version = "7.0.0", default-features = false, path = "../core" } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true, features = ["derive"] } sp-std = { version = "5.0.0", default-features = false, path = "../std" } sp-io = { version = "7.0.0", default-features = false, path = "../io" } diff --git a/primitives/application-crypto/src/ecdsa.rs b/primitives/application-crypto/src/ecdsa.rs index 011456df08fae..27ffe12579f55 100644 --- a/primitives/application-crypto/src/ecdsa.rs +++ b/primitives/application-crypto/src/ecdsa.rs @@ -24,13 +24,7 @@ use sp_std::vec::Vec; pub use sp_core::ecdsa::*; mod app { - use sp_core::testing::ECDSA; - - crate::app_crypto!(super, ECDSA); - - impl crate::traits::BoundToRuntimeAppPublic for Public { - type Public = Self; - } + crate::app_crypto!(super, sp_core::testing::ECDSA); } #[cfg(feature = "full_crypto")] diff --git a/primitives/application-crypto/src/ed25519.rs b/primitives/application-crypto/src/ed25519.rs index 822dd3fa5c422..bc05018370edb 100644 --- a/primitives/application-crypto/src/ed25519.rs +++ b/primitives/application-crypto/src/ed25519.rs @@ -24,13 +24,7 @@ use sp_std::vec::Vec; pub use sp_core::ed25519::*; mod app { - use sp_core::testing::ED25519; - - crate::app_crypto!(super, ED25519); - - impl crate::traits::BoundToRuntimeAppPublic for Public { - type Public = Self; - } + crate::app_crypto!(super, sp_core::testing::ED25519); } #[cfg(feature = "full_crypto")] diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 992ecd1d05621..5e77795199dd9 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -23,14 +23,11 @@ pub use sp_core::crypto::{key_types, CryptoTypeId, KeyTypeId}; #[doc(hidden)] #[cfg(feature = "full_crypto")] -pub use sp_core::crypto::{DeriveJunction, Pair, SecretStringError, Ss58Codec}; +pub use sp_core::crypto::{DeriveError, DeriveJunction, Pair, SecretStringError, Ss58Codec}; #[doc(hidden)] pub use sp_core::{ self, - crypto::{ - ByteArray, CryptoType, CryptoTypePublicPair, Derive, IsWrappedBy, Public, UncheckedFrom, - Wraps, - }, + crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, UncheckedFrom, Wraps}, RuntimeDebug, }; @@ -51,14 +48,15 @@ mod traits; pub use 
traits::*; -/// Declares Public, Pair, Signature types which are functionally equivalent to `$pair`, but are new -/// Application-specific types whose identifier is `$key_type`. +/// Declares `Public`, `Pair` and `Signature` types which are functionally equivalent +/// to the corresponding types defined by `$module` but are new application-specific +/// types whose identifier is `$key_type`. /// /// ```rust -/// # use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; -/// // Declare a new set of crypto types using Ed25519 logic that identifies as `KeyTypeId` +/// # use sp_application_crypto::{app_crypto, ed25519, KeyTypeId}; +/// // Declare a new set of crypto types using ed25519 logic that identifies as `KeyTypeId` /// // of value `b"fuba"`. -/// app_crypto!(ed25519, KeyTypeId(*b"_uba")); +/// app_crypto!(ed25519, KeyTypeId(*b"fuba")); /// ``` #[cfg(feature = "full_crypto")] #[macro_export] @@ -81,14 +79,15 @@ macro_rules! app_crypto { }; } -/// Declares Public, Pair, Signature types which are functionally equivalent to `$pair`, but are new -/// Application-specific types whose identifier is `$key_type`. +/// Declares `Public`, `Pair` and `Signature` types which are functionally equivalent +/// to the corresponding types defined by `$module` but that are new application-specific +/// types whose identifier is `$key_type`. /// /// ```rust -/// # use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; -/// // Declare a new set of crypto types using Ed25519 logic that identifies as `KeyTypeId` +/// # use sp_application_crypto::{app_crypto, ed25519, KeyTypeId}; +/// // Declare a new set of crypto types using ed25519 logic that identifies as `KeyTypeId` /// // of value `b"fuba"`. -/// app_crypto!(ed25519, KeyTypeId(*b"_uba")); +/// app_crypto!(ed25519, KeyTypeId(*b"fuba")); /// ``` #[cfg(not(feature = "full_crypto"))] #[macro_export] @@ -110,8 +109,8 @@ macro_rules! app_crypto { }; } -/// Declares Pair type which is functionally equivalent to `$pair`, but is new -/// Application-specific type whose identifier is `$key_type`. +/// Declares `Pair` type which is functionally equivalent to `$pair`, but is +/// new application-specific type whose identifier is `$key_type`. #[macro_export] macro_rules! app_crypto_pair { ($pair:ty, $key_type:expr, $crypto_type:expr) => { @@ -129,7 +128,6 @@ macro_rules! app_crypto_pair { type Public = Public; type Seed = <$pair as $crate::Pair>::Seed; type Signature = Signature; - type DeriveError = <$pair as $crate::Pair>::DeriveError; $crate::app_crypto_pair_functions_if_std!($pair); @@ -137,7 +135,7 @@ macro_rules! app_crypto_pair { &self, path: Iter, seed: Option, - ) -> Result<(Self, Option), Self::DeriveError> { + ) -> Result<(Self, Option), $crate::DeriveError> { self.0.derive(path, seed).map(|x| (Self(x.0), x.1)) } fn from_seed(seed: &Self::Seed) -> Self { @@ -156,13 +154,6 @@ macro_rules! app_crypto_pair { ) -> bool { <$pair>::verify(&sig.0, message, pubkey.as_ref()) } - fn verify_weak, M: AsRef<[u8]>>( - sig: &[u8], - message: M, - pubkey: P, - ) -> bool { - <$pair>::verify_weak(sig, message, pubkey) - } fn public(&self) -> Self::Public { Public(self.0.public()) } @@ -171,8 +162,7 @@ macro_rules! app_crypto_pair { } } - impl $crate::AppKey for Pair { - type UntypedGeneric = $pair; + impl $crate::AppCrypto for Pair { type Public = Public; type Pair = Pair; type Signature = Signature; @@ -213,10 +203,10 @@ macro_rules! 
app_crypto_pair_functions_if_std { ($pair:ty) => {}; } -/// Declares Public type which is functionally equivalent to `$public`, but is new -/// Application-specific type whose identifier is `$key_type`. -/// can only be used together with `full_crypto` feature -/// For full functionality, app_crypto_public_common! must be called too. +/// Declares `Public` type which is functionally equivalent to `$public` but is +/// new application-specific type whose identifier is `$key_type`. +/// For full functionality, `app_crypto_public_common!` must be called too. +/// Can only be used with `full_crypto` feature. #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_full_crypto { @@ -239,8 +229,7 @@ macro_rules! app_crypto_public_full_crypto { type Pair = Pair; } - impl $crate::AppKey for Public { - type UntypedGeneric = $public; + impl $crate::AppCrypto for Public { type Public = Public; type Pair = Pair; type Signature = Signature; @@ -250,10 +239,10 @@ macro_rules! app_crypto_public_full_crypto { }; } -/// Declares Public type which is functionally equivalent to `$public`, but is new -/// Application-specific type whose identifier is `$key_type`. -/// can only be used without `full_crypto` feature -/// For full functionality, app_crypto_public_common! must be called too. +/// Declares `Public` type which is functionally equivalent to `$public` but is +/// new application-specific type whose identifier is `$key_type`. +/// For full functionality, `app_crypto_public_common!` must be called too. +/// Can only be used without `full_crypto` feature. #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_not_full_crypto { @@ -273,8 +262,7 @@ macro_rules! app_crypto_public_not_full_crypto { impl $crate::CryptoType for Public {} - impl $crate::AppKey for Public { - type UntypedGeneric = $public; + impl $crate::AppCrypto for Public { type Public = Public; type Signature = Signature; const ID: $crate::KeyTypeId = $key_type; @@ -283,9 +271,9 @@ macro_rules! app_crypto_public_not_full_crypto { }; } -/// Declares Public type which is functionally equivalent to `$public`, but is new -/// Application-specific type whose identifier is `$key_type`. -/// For full functionality, app_crypto_public_(not)_full_crypto! must be called too. +/// Declares `Public` type which is functionally equivalent to `$public` but is +/// new application-specific type whose identifier is `$key_type`. +/// For full functionality, `app_crypto_public_(not)_full_crypto!` must be called too. #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_common { @@ -307,62 +295,13 @@ macro_rules! 
app_crypto_public_common { impl $crate::ByteArray for Public { const LEN: usize = <$public>::LEN; } - impl $crate::Public for Public { - fn to_public_crypto_pair(&self) -> $crate::CryptoTypePublicPair { - $crate::CryptoTypePublicPair($crypto_type, $crate::ByteArray::to_raw_vec(self)) - } - } + + impl $crate::Public for Public {} impl $crate::AppPublic for Public { type Generic = $public; } - impl $crate::RuntimeAppPublic for Public - where - $public: $crate::RuntimePublic, - { - const ID: $crate::KeyTypeId = $key_type; - const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; - - type Signature = Signature; - - fn all() -> $crate::Vec { - <$public as $crate::RuntimePublic>::all($key_type) - .into_iter() - .map(Self) - .collect() - } - - fn generate_pair(seed: Option<$crate::Vec>) -> Self { - Self(<$public as $crate::RuntimePublic>::generate_pair($key_type, seed)) - } - - fn sign>(&self, msg: &M) -> Option { - <$public as $crate::RuntimePublic>::sign(self.as_ref(), $key_type, msg) - .map(Signature) - } - - fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { - <$public as $crate::RuntimePublic>::verify(self.as_ref(), msg, &signature.as_ref()) - } - - fn to_raw_vec(&self) -> $crate::Vec { - <$public as $crate::RuntimePublic>::to_raw_vec(&self.0) - } - } - - impl From for $crate::CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } - } - - impl From<&Public> for $crate::CryptoTypePublicPair { - fn from(key: &Public) -> Self { - $crate::CryptoTypePublicPair($crypto_type, $crate::ByteArray::to_raw_vec(key)) - } - } - impl<'a> TryFrom<&'a [u8]> for Public { type Error = (); @@ -429,8 +368,8 @@ macro_rules! app_crypto_public_common_if_std { /// Declares Signature type which is functionally equivalent to `$sig`, but is new /// Application-specific type whose identifier is `$key_type`. -/// can only be used together with `full_crypto` feature /// For full functionality, app_crypto_public_common! must be called too. +/// Can only be used with `full_crypto` feature #[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_full_crypto { @@ -451,8 +390,7 @@ macro_rules! app_crypto_signature_full_crypto { type Pair = Pair; } - impl $crate::AppKey for Signature { - type UntypedGeneric = $sig; + impl $crate::AppCrypto for Signature { type Public = Public; type Pair = Pair; type Signature = Signature; @@ -462,10 +400,10 @@ macro_rules! app_crypto_signature_full_crypto { }; } -/// Declares Signature type which is functionally equivalent to `$sig`, but is new -/// Application-specific type whose identifier is `$key_type`. -/// can only be used without `full_crypto` feature -/// For full functionality, app_crypto_public_common! must be called too. +/// Declares `Signature` type which is functionally equivalent to `$sig`, but is new +/// application-specific type whose identifier is `$key_type`. +/// For full functionality, `app_crypto_signature_common` must be called too. +/// Can only be used without `full_crypto` feature. #[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_not_full_crypto { @@ -475,16 +413,15 @@ macro_rules! 
app_crypto_signature_not_full_crypto { #[derive(Clone, Eq, PartialEq, $crate::codec::Encode, $crate::codec::Decode, - $crate::scale_info::TypeInfo, $crate::RuntimeDebug, + $crate::scale_info::TypeInfo, )] pub struct Signature($sig); } impl $crate::CryptoType for Signature {} - impl $crate::AppKey for Signature { - type UntypedGeneric = $sig; + impl $crate::AppCrypto for Signature { type Public = Public; type Signature = Signature; const ID: $crate::KeyTypeId = $key_type; @@ -493,9 +430,9 @@ macro_rules! app_crypto_signature_not_full_crypto { }; } -/// Declares Signature type which is functionally equivalent to `$sig`, but is new -/// Application-specific type whose identifier is `$key_type`. -/// For full functionality, app_crypto_public_(not)_full_crypto! must be called too. +/// Declares `Signature` type which is functionally equivalent to `$sig`, but is new +/// application-specific type whose identifier is `$key_type`. +/// For full functionality, app_crypto_signature_(not)_full_crypto! must be called too. #[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_common { diff --git a/primitives/application-crypto/src/sr25519.rs b/primitives/application-crypto/src/sr25519.rs index a961f5cf3edf0..7c91bfa7bb5ff 100644 --- a/primitives/application-crypto/src/sr25519.rs +++ b/primitives/application-crypto/src/sr25519.rs @@ -24,13 +24,7 @@ use sp_std::vec::Vec; pub use sp_core::sr25519::*; mod app { - use sp_core::testing::SR25519; - - crate::app_crypto!(super, SR25519); - - impl crate::traits::BoundToRuntimeAppPublic for Public { - type Public = Self; - } + crate::app_crypto!(super, sp_core::testing::SR25519); } #[cfg(feature = "full_crypto")] diff --git a/primitives/application-crypto/src/traits.rs b/primitives/application-crypto/src/traits.rs index e3586ccecd0d8..88d4bf36915d0 100644 --- a/primitives/application-crypto/src/traits.rs +++ b/primitives/application-crypto/src/traits.rs @@ -15,32 +15,38 @@ // See the License for the specific language governing permissions and // limitations under the License. +use codec::Codec; +use scale_info::TypeInfo; + #[cfg(feature = "full_crypto")] use sp_core::crypto::Pair; - -use codec::Codec; use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Public}; use sp_std::{fmt::Debug, vec::Vec}; -/// An application-specific key. -pub trait AppKey: 'static + Send + Sync + Sized + CryptoType + Clone { - /// The corresponding type as a generic crypto type. - type UntypedGeneric: IsWrappedBy; +/// An application-specific cryptographic object. +/// +/// Combines all the core types and constants that are defined by a particular +/// cryptographic scheme when it is used in a specific application domain. +/// +/// Typically, the implementers of this trait are its associated types themselves. +/// This provides a convenient way to access generic information about the scheme +/// given any of the associated types. +pub trait AppCrypto: 'static + Send + Sync + Sized + CryptoType + Clone { + /// Identifier for application-specific key type. + const ID: KeyTypeId; + + /// Identifier of the crypto type of this application-specific key type. + const CRYPTO_ID: CryptoTypeId; /// The corresponding public key type in this application scheme. type Public: AppPublic; - /// The corresponding key pair type in this application scheme. - #[cfg(feature = "full_crypto")] - type Pair: AppPair; - /// The corresponding signature type in this application scheme. type Signature: AppSignature; - /// An identifier for this application-specific key type. 
- const ID: KeyTypeId; - /// The identifier of the crypto type of this application-specific key type. - const CRYPTO_ID: CryptoTypeId; + /// The corresponding key pair type in this application scheme. + #[cfg(feature = "full_crypto")] + type Pair: AppPair; } /// Type which implements Hash in std, not when no-std (std variant). @@ -55,15 +61,9 @@ pub trait MaybeHash {} #[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] impl MaybeHash for T {} -/// Type which implements Debug and Hash in std, not when no-std (no-std variant with crypto). -#[cfg(all(not(feature = "std"), feature = "full_crypto"))] -pub trait MaybeDebugHash: sp_std::hash::Hash {} -#[cfg(all(not(feature = "std"), feature = "full_crypto"))] -impl MaybeDebugHash for T {} - /// A application's public key. pub trait AppPublic: - AppKey + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec + AppCrypto + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + Codec { /// The wrapped type which is just a plain instance of `Public`. type Generic: IsWrappedBy @@ -74,27 +74,28 @@ pub trait AppPublic: + PartialEq + Debug + MaybeHash - + codec::Codec; + + Codec; } /// A application's key pair. #[cfg(feature = "full_crypto")] -pub trait AppPair: AppKey + Pair::Public> { +pub trait AppPair: AppCrypto + Pair::Public> { /// The wrapped type which is just a plain instance of `Pair`. type Generic: IsWrappedBy - + Pair::Public as AppPublic>::Generic>; + + Pair::Public as AppPublic>::Generic> + + Pair::Signature as AppSignature>::Generic>; } /// A application's signature. -pub trait AppSignature: AppKey + Eq + PartialEq + Debug + MaybeHash { +pub trait AppSignature: AppCrypto + Eq + PartialEq + Debug { /// The wrapped type which is just a plain instance of `Signature`. - type Generic: IsWrappedBy + Eq + PartialEq + Debug + MaybeHash; + type Generic: IsWrappedBy + Eq + PartialEq + Debug; } /// A runtime interface for a public key. pub trait RuntimePublic: Sized { /// The signature that will be generated when signing with the corresponding private key. - type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone; + type Signature: Debug + Eq + PartialEq + Clone; /// Returns all public keys for the given key type in the keystore. fn all(key_type: KeyTypeId) -> crate::Vec; @@ -126,11 +127,9 @@ pub trait RuntimePublic: Sized { pub trait RuntimeAppPublic: Sized { /// An identifier for this application-specific key type. const ID: KeyTypeId; - /// The identifier of the crypto type of this application-specific key type. - const CRYPTO_ID: CryptoTypeId; /// The signature that will be generated when signing with the corresponding private key. - type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone + scale_info::TypeInfo; + type Signature: Debug + Eq + PartialEq + Clone + TypeInfo + Codec; /// Returns all public keys for this application in the keystore. fn all() -> crate::Vec; @@ -157,8 +156,50 @@ pub trait RuntimeAppPublic: Sized { fn to_raw_vec(&self) -> Vec; } -/// Something that bound to a fixed [`RuntimeAppPublic`]. 
+impl RuntimeAppPublic for T +where + T: AppPublic + AsRef<::Generic>, + ::Generic: RuntimePublic, + ::Signature: TypeInfo + + Codec + + From<<::Generic as RuntimePublic>::Signature> + + AsRef<<::Generic as RuntimePublic>::Signature>, +{ + const ID: KeyTypeId = ::ID; + + type Signature = ::Signature; + + fn all() -> crate::Vec { + <::Generic as RuntimePublic>::all(Self::ID) + .into_iter() + .map(|p| p.into()) + .collect() + } + + fn generate_pair(seed: Option>) -> Self { + <::Generic as RuntimePublic>::generate_pair(Self::ID, seed).into() + } + + fn sign>(&self, msg: &M) -> Option { + <::Generic as RuntimePublic>::sign(self.as_ref(), Self::ID, msg) + .map(|s| s.into()) + } + + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { + <::Generic as RuntimePublic>::verify(self.as_ref(), msg, signature.as_ref()) + } + + fn to_raw_vec(&self) -> Vec { + <::Generic as RuntimePublic>::to_raw_vec(self.as_ref()) + } +} + +/// Something that is bound to a fixed [`RuntimeAppPublic`]. pub trait BoundToRuntimeAppPublic { /// The [`RuntimeAppPublic`] this type is bound to. type Public: RuntimeAppPublic; } + +impl BoundToRuntimeAppPublic for T { + type Public = Self; +} diff --git a/primitives/application-crypto/test/src/ecdsa.rs b/primitives/application-crypto/test/src/ecdsa.rs index df3be6f934304..99ca6f4c4adf2 100644 --- a/primitives/application-crypto/test/src/ecdsa.rs +++ b/primitives/application-crypto/test/src/ecdsa.rs @@ -17,9 +17,12 @@ //! Integration tests for ecdsa use sp_api::ProvideRuntimeApi; -use sp_application_crypto::ecdsa::{AppPair, AppPublic}; -use sp_core::{crypto::Pair, testing::ECDSA}; -use sp_keystore::{testing::KeyStore, SyncCryptoStore}; +use sp_application_crypto::ecdsa::AppPair; +use sp_core::{ + crypto::{ByteArray, Pair}, + testing::ECDSA, +}; +use sp_keystore::{testing::MemoryKeystore, Keystore}; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, @@ -27,14 +30,14 @@ use substrate_test_runtime_client::{ #[test] fn ecdsa_works_in_runtime() { - let keystore = Arc::new(KeyStore::new()); + let keystore = Arc::new(MemoryKeystore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); let (signature, public) = test_client .runtime_api() .test_ecdsa_crypto(test_client.chain_info().genesis_hash) .expect("Tests `ecdsa` crypto."); - let supported_keys = SyncCryptoStore::keys(&*keystore, ECDSA).unwrap(); - assert!(supported_keys.contains(&public.clone().into())); - assert!(AppPair::verify(&signature, "ecdsa", &AppPublic::from(public))); + let supported_keys = keystore.keys(ECDSA).unwrap(); + assert!(supported_keys.contains(&public.to_raw_vec())); + assert!(AppPair::verify(&signature, "ecdsa", &public)); } diff --git a/primitives/application-crypto/test/src/ed25519.rs b/primitives/application-crypto/test/src/ed25519.rs index 8ce08717a8947..f4553f95bf1f8 100644 --- a/primitives/application-crypto/test/src/ed25519.rs +++ b/primitives/application-crypto/test/src/ed25519.rs @@ -18,9 +18,12 @@ //! 
Integration tests for ed25519 use sp_api::ProvideRuntimeApi; -use sp_application_crypto::ed25519::{AppPair, AppPublic}; -use sp_core::{crypto::Pair, testing::ED25519}; -use sp_keystore::{testing::KeyStore, SyncCryptoStore}; +use sp_application_crypto::ed25519::AppPair; +use sp_core::{ + crypto::{ByteArray, Pair}, + testing::ED25519, +}; +use sp_keystore::{testing::MemoryKeystore, Keystore}; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, @@ -28,14 +31,14 @@ use substrate_test_runtime_client::{ #[test] fn ed25519_works_in_runtime() { - let keystore = Arc::new(KeyStore::new()); + let keystore = Arc::new(MemoryKeystore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); let (signature, public) = test_client .runtime_api() .test_ed25519_crypto(test_client.chain_info().genesis_hash) .expect("Tests `ed25519` crypto."); - let supported_keys = SyncCryptoStore::keys(&*keystore, ED25519).unwrap(); - assert!(supported_keys.contains(&public.clone().into())); - assert!(AppPair::verify(&signature, "ed25519", &AppPublic::from(public))); + let supported_keys = keystore.keys(ED25519).unwrap(); + assert!(supported_keys.contains(&public.to_raw_vec())); + assert!(AppPair::verify(&signature, "ed25519", &public)); } diff --git a/primitives/application-crypto/test/src/sr25519.rs b/primitives/application-crypto/test/src/sr25519.rs index 4c1d0f1ef7a97..736521d7d9f3a 100644 --- a/primitives/application-crypto/test/src/sr25519.rs +++ b/primitives/application-crypto/test/src/sr25519.rs @@ -18,9 +18,12 @@ //! Integration tests for sr25519 use sp_api::ProvideRuntimeApi; -use sp_application_crypto::sr25519::{AppPair, AppPublic}; -use sp_core::{crypto::Pair, testing::SR25519}; -use sp_keystore::{testing::KeyStore, SyncCryptoStore}; +use sp_application_crypto::sr25519::AppPair; +use sp_core::{ + crypto::{ByteArray, Pair}, + testing::SR25519, +}; +use sp_keystore::{testing::MemoryKeystore, Keystore}; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, @@ -28,14 +31,14 @@ use substrate_test_runtime_client::{ #[test] fn sr25519_works_in_runtime() { - let keystore = Arc::new(KeyStore::new()); + let keystore = Arc::new(MemoryKeystore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); let (signature, public) = test_client .runtime_api() .test_sr25519_crypto(test_client.chain_info().genesis_hash) .expect("Tests `sr25519` crypto."); - let supported_keys = SyncCryptoStore::keys(&*keystore, SR25519).unwrap(); - assert!(supported_keys.contains(&public.clone().into())); - assert!(AppPair::verify(&signature, "sr25519", &AppPublic::from(public))); + let supported_keys = keystore.keys(SR25519).unwrap(); + assert!(supported_keys.contains(&public.to_raw_vec())); + assert!(AppPair::verify(&signature, "sr25519", &public)); } diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index b3fc6abc33ffd..92eebd4a6d8d6 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = ] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } 
serde = { version = "1.0.136", features = ["derive"], optional = true } static_assertions = "1.1.0" sp-std = { version = "5.0.0", default-features = false, path = "../std" } diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 7be800a2e966c..99dbdf7487320 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -14,8 +14,11 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] +arbitrary = "1.3.0" +fraction = "0.13.1" honggfuzz = "0.5.49" num-bigint = "0.4.3" +num-traits = "0.2.15" primitive-types = "0.12.0" sp-arithmetic = { version = "6.0.0", path = ".." } @@ -28,8 +31,12 @@ name = "normalize" path = "src/normalize.rs" [[bin]] -name = "per_thing_rational" -path = "src/per_thing_rational.rs" +name = "per_thing_from_rational" +path = "src/per_thing_from_rational.rs" + +[[bin]] +name = "per_thing_mult_fraction" +path = "src/per_thing_mult_fraction.rs" [[bin]] name = "multiply_by_rational_with_rounding" diff --git a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs index 13c9d022be6bd..e9a3208a738ef 100644 --- a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs +++ b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs @@ -29,52 +29,79 @@ //! More information about `honggfuzz` can be found //! [here](https://docs.rs/honggfuzz/). +use fraction::prelude::BigFraction as Fraction; use honggfuzz::fuzz; -use sp_arithmetic::{helpers_128bit::multiply_by_rational_with_rounding, traits::Zero, Rounding}; +use sp_arithmetic::{MultiplyRational, Rounding, Rounding::*}; +/// Tries to demonstrate that `multiply_by_rational_with_rounding` is incorrect. fn main() { loop { - fuzz!(|data: ([u8; 16], [u8; 16], [u8; 16])| { - let (a_bytes, b_bytes, c_bytes) = data; - let (a, b, c) = ( - u128::from_be_bytes(a_bytes), - u128::from_be_bytes(b_bytes), - u128::from_be_bytes(c_bytes), - ); + fuzz!(|data: (u128, u128, u128, ArbitraryRounding)| { + let (f, n, d, r) = (data.0, data.1, data.2, data.3 .0); - println!("++ Equation: {} * {} / {}", a, b, c); - - // The point of this fuzzing is to make sure that `multiply_by_rational_with_rounding` - // is 100% accurate as long as the value fits in a u128. 
- if let Some(result) = multiply_by_rational_with_rounding(a, b, c, Rounding::Down) { - let truth = mul_div(a, b, c); - - if result != truth && result != truth + 1 { - println!("++ Expected {}", truth); - println!("+++++++ Got {}", result); - panic!(); - } - } + check::(f as u8, n as u8, d as u8, r); + check::(f as u16, n as u16, d as u16, r); + check::(f as u32, n as u32, d as u32, r); + check::(f as u64, n as u64, d as u64, r); + check::(f, n, d, r); }) } } -fn mul_div(a: u128, b: u128, c: u128) -> u128 { - use primitive_types::U256; - if a.is_zero() { - return Zero::zero() - } - let c = c.max(1); +fn check(f: N, n: N, d: N, r: Rounding) +where + N: MultiplyRational + Into + Copy + core::fmt::Debug, +{ + let Some(got) = f.multiply_rational(n, d, r) else { + return; + }; + + let (ae, be, ce) = + (Fraction::from(f.into()), Fraction::from(n.into()), Fraction::from(d.into())); + let want = round(ae * be / ce, r); - // e for extended - let ae: U256 = a.into(); - let be: U256 = b.into(); - let ce: U256 = c.into(); + assert_eq!( + Fraction::from(got.into()), + want, + "{:?} * {:?} / {:?} = {:?} != {:?}", + f, + n, + d, + got, + want + ); +} + +/// Round a `Fraction` according to the given mode. +fn round(f: Fraction, r: Rounding) -> Fraction { + match r { + Up => f.ceil(), + NearestPrefUp => + if f.fract() < Fraction::from(0.5) { + f.floor() + } else { + f.ceil() + }, + Down => f.floor(), + NearestPrefDown => + if f.fract() > Fraction::from(0.5) { + f.ceil() + } else { + f.floor() + }, + } +} - let r = ae * be / ce; - if r > u128::MAX.into() { - a - } else { - r.as_u128() +/// An [`arbitrary::Arbitrary`] [`Rounding`] mode. +struct ArbitraryRounding(Rounding); +impl arbitrary::Arbitrary<'_> for ArbitraryRounding { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(Self(match u.int_in_range(0..=3).unwrap() { + 0 => Up, + 1 => NearestPrefUp, + 2 => Down, + 3 => NearestPrefDown, + _ => unreachable!(), + })) } } diff --git a/primitives/arithmetic/fuzzer/src/per_thing_from_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_from_rational.rs new file mode 100644 index 0000000000000..93af4df9e8e55 --- /dev/null +++ b/primitives/arithmetic/fuzzer/src/per_thing_from_rational.rs @@ -0,0 +1,105 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Running +//! Running this fuzzer can be done with `cargo hfuzz run per_thing_from_rational`. `honggfuzz` CLI +//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # Debugging a panic +//! Once a panic is found, it can be debugged with +//! `cargo hfuzz run-debug per_thing_from_rational hfuzz_workspace/per_thing_from_rational/*.fuzz`. 
+
+use fraction::prelude::BigFraction as Fraction;
+use honggfuzz::fuzz;
+use sp_arithmetic::{
+    traits::SaturatedConversion, PerThing, Perbill, Percent, Perquintill, Rounding::*, *,
+};
+
+/// Tries to demonstrate that `from_rational` is incorrect for any rounding mode.
+///
+/// NOTE: This `Fraction` library is really slow. Using f128/f256 does not work for the large
+/// numbers. But an optimization could be done to use either floats or Fraction depending on the
+/// size of the inputs.
+fn main() {
+    loop {
+        fuzz!(|data: (u128, u128, ArbitraryRounding)| {
+            let (n, d, r) = (data.0.min(data.1), data.0.max(data.1).max(1), data.2);
+
+            check::<PerU16>(n, d, r.0);
+            check::<Percent>(n, d, r.0);
+            check::<Permill>(n, d, r.0);
+            check::<Perbill>(n, d, r.0);
+            check::<Perquintill>(n, d, r.0);
+        })
+    }
+}
+
+/// Assert that the parts of `from_rational` are correct for the given rounding mode.
+fn check<Per: PerThing>(a: u128, b: u128, r: Rounding)
+where
+    Per::Inner: Into<u128>,
+{
+    let approx_ratio = Per::from_rational_with_rounding(a, b, r).unwrap();
+    let approx_parts = Fraction::from(approx_ratio.deconstruct().saturated_into::<u128>());
+
+    let perfect_ratio = if a == 0 && b == 0 {
+        Fraction::from(1)
+    } else {
+        Fraction::from(a) / Fraction::from(b.max(1))
+    };
+    let perfect_parts = round(perfect_ratio * Fraction::from(Per::ACCURACY.into()), r);
+
+    assert_eq!(
+        approx_parts, perfect_parts,
+        "approx_parts: {}, perfect_parts: {}, a: {}, b: {}",
+        approx_parts, perfect_parts, a, b
+    );
+}
+
+/// Round a `Fraction` according to the given mode.
+fn round(f: Fraction, r: Rounding) -> Fraction {
+    match r {
+        Up => f.ceil(),
+        NearestPrefUp =>
+            if f.fract() < Fraction::from(0.5) {
+                f.floor()
+            } else {
+                f.ceil()
+            },
+        Down => f.floor(),
+        NearestPrefDown =>
+            if f.fract() > Fraction::from(0.5) {
+                f.ceil()
+            } else {
+                f.floor()
+            },
+    }
+}
+
+/// An [`arbitrary::Arbitrary`] [`Rounding`] mode.
+struct ArbitraryRounding(Rounding);
+impl arbitrary::Arbitrary<'_> for ArbitraryRounding {
+    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+        Ok(Self(match u.int_in_range(0..=3).unwrap() {
+            0 => Up,
+            1 => NearestPrefUp,
+            2 => Down,
+            3 => NearestPrefDown,
+            _ => unreachable!(),
+        }))
+    }
+}
diff --git a/primitives/arithmetic/fuzzer/src/per_thing_mult_fraction.rs b/primitives/arithmetic/fuzzer/src/per_thing_mult_fraction.rs
new file mode 100644
index 0000000000000..9cfe28a7800b0
--- /dev/null
+++ b/primitives/arithmetic/fuzzer/src/per_thing_mult_fraction.rs
@@ -0,0 +1,69 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Running
+//! Running this fuzzer can be done with `cargo hfuzz run per_thing_mult_fraction`. `honggfuzz` CLI
+//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads.
+//!
+//! # Debugging a panic
+//! Once a panic is found, it can be debugged with
+//! `cargo hfuzz run-debug per_thing_mult_fraction hfuzz_workspace/per_thing_mult_fraction/*.fuzz`.
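The `per_thing_mult_fraction` target whose code follows checks two monotonicity properties of rounded-down rationals: multiplying the quotient back by the denominator can never overshoot the numerator, and the floor reciprocal multiplication can never undershoot the denominator. A fixed-input sketch of the same properties (illustrative only, using the public `sp_arithmetic` API):

    use sp_arithmetic::{PerThing, Perbill, Rounding};

    fn main() {
        let (n, d) = (2u128, 3u128);
        // `q` is 2/3 rounded down to Perbill precision (0.666666666).
        let q = Perbill::from_rational_with_rounding(n, d, Rounding::Down).unwrap();
        // (n / d) * d <= n: rounding down can only lose precision.
        assert!(q * d <= n);
        // n / (n / d) >= d: the floor reciprocal multiplication cannot undershoot.
        assert!(q.saturating_reciprocal_mul_floor(n) >= d);
    }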
+
+use honggfuzz::fuzz;
+use sp_arithmetic::{PerThing, Perbill, Percent, Perquintill, *};
+
+/// Tries to disprove `(n / d) * d <= n` for any `PerThing`s.
+fn main() {
+    loop {
+        fuzz!(|data: (u128, u128)| {
+            let (n, d) = (data.0.min(data.1), data.0.max(data.1).max(1));
+
+            check_mul::<PerU16>(n, d);
+            check_mul::<Percent>(n, d);
+            check_mul::<Perbill>(n, d);
+            check_mul::<Perquintill>(n, d);
+
+            check_reciprocal_mul::<PerU16>(n, d);
+            check_reciprocal_mul::<Percent>(n, d);
+            check_reciprocal_mul::<Perbill>(n, d);
+            check_reciprocal_mul::<Perquintill>(n, d);
+        })
+    }
+}
+
+/// Checks that `(n / d) * d <= n`.
+fn check_mul<P>(n: u128, d: u128)
+where
+    P: PerThing + core::ops::Mul<u128, Output = u128>,
+{
+    let q = P::from_rational_with_rounding(n, d, Rounding::Down).unwrap();
+    assert!(q * d <= n, "{:?} * {:?} <= {:?}", q, d, n);
+}
+
+/// Checks that `n / (n / d) >= d`.
+fn check_reciprocal_mul<P>(n: u128, d: u128)
+where
+    P: PerThing + core::ops::Mul<u128, Output = u128>,
+{
+    let q = P::from_rational_with_rounding(n, d, Rounding::Down).unwrap();
+    if q.is_zero() {
+        return
+    }
+
+    let r = q.saturating_reciprocal_mul_floor(n);
+    assert!(r >= d, "{} / ({} / {}) != {} but {}", n, n, d, d, r);
+}
diff --git a/primitives/arithmetic/src/helpers_128bit.rs b/primitives/arithmetic/src/helpers_128bit.rs
index 56432d15a7260..9b9c74ba55774 100644
--- a/primitives/arithmetic/src/helpers_128bit.rs
+++ b/primitives/arithmetic/src/helpers_128bit.rs
@@ -182,8 +182,8 @@ mod double128 {
 	}
 }
 
-/// Returns `a * b / c` and `(a * b) % c` (wrapping to 128 bits) or `None` in the case of
-/// overflow and c = 0.
+/// Returns `a * b / c` (wrapping to 128 bits) or `None` in the case of
+/// overflow.
 pub const fn multiply_by_rational_with_rounding(
 	a: u128,
 	b: u128,
diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs
index 581206b6d4460..e7ba08a532909 100644
--- a/primitives/arithmetic/src/lib.rs
+++ b/primitives/arithmetic/src/lib.rs
@@ -45,7 +45,7 @@ pub use per_things::{
 	InnerOf, MultiplyArg, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, RationalArg,
 	ReciprocalArg, Rounding, SignedRounding, UpperOf,
 };
-pub use rational::{Rational128, RationalInfinite};
+pub use rational::{MultiplyRational, Rational128, RationalInfinite};
 
 use sp_std::{cmp::Ordering, fmt::Debug, prelude::*};
 use traits::{BaseArithmetic, One, SaturatedConversion, Unsigned, Zero};
diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs
index 068bdb4e17b0d..611a4e05e1027 100644
--- a/primitives/arithmetic/src/per_things.rs
+++ b/primitives/arithmetic/src/per_things.rs
@@ -26,7 +26,7 @@ use codec::{CompactAs, Encode};
 use num_traits::{Pow, SaturatingAdd, SaturatingSub};
 use sp_std::{
 	fmt, ops,
-	ops::{Add, AddAssign, Div, Rem, Sub},
+	ops::{Add, Sub},
 	prelude::*,
 };
 
@@ -46,6 +46,7 @@ pub trait RationalArg:
 	+ Unsigned
 	+ Zero
 	+ One
+	+ crate::MultiplyRational
 {
 }
 
@@ -58,7 +59,8 @@ impl<
 		+ ops::AddAssign
 		+ Unsigned
 		+ Zero
-		+ One,
+		+ One
+		+ crate::MultiplyRational,
 	> RationalArg for T
 {
 }
 
@@ -105,7 +107,7 @@ pub trait PerThing:
 	+ Pow<usize, Output = Self>
 {
 	/// The data type used to build this per-thingy.
-	type Inner: BaseArithmetic + Unsigned + Copy + Into<u128> + fmt::Debug;
+	type Inner: BaseArithmetic + Unsigned + Copy + Into<u128> + fmt::Debug + crate::MultiplyRational;
 
 	/// A data type larger than `Self::Inner`, used to avoid overflow in some computations.
 	/// It must be able to compute `ACCURACY^2`.
@@ -115,7 +117,8 @@ pub trait PerThing:
 		+ TryInto<Self::Inner>
 		+ UniqueSaturatedInto<Self::Inner>
 		+ Unsigned
-		+ fmt::Debug;
+		+ fmt::Debug
+		+ crate::MultiplyRational;
 
 	/// The accuracy of this type.
const ACCURACY: Self::Inner; @@ -524,51 +527,20 @@ where rem_mul_div_inner += 1.into(); } }, - Rounding::NearestPrefDown => + Rounding::NearestPrefDown => { if rem_mul_upper % denom_upper > denom_upper / 2.into() { // `rem * numer / denom` is less than `numer`, so this will not overflow. rem_mul_div_inner += 1.into(); - }, - Rounding::NearestPrefUp => + } + }, + Rounding::NearestPrefUp => { if rem_mul_upper % denom_upper >= denom_upper / 2.into() + denom_upper % 2.into() { // `rem * numer / denom` is less than `numer`, so this will not overflow. rem_mul_div_inner += 1.into(); - }, - } - rem_mul_div_inner.into() -} - -/// Just a simple generic integer divide with custom rounding. -fn div_rounded(n: N, d: N, r: Rounding) -> N -where - N: Clone - + Eq - + Ord - + Zero - + One - + AddAssign - + Add - + Rem - + Div, -{ - let mut o = n.clone() / d.clone(); - use Rounding::*; - let two = || N::one() + N::one(); - if match r { - Up => !((n % d).is_zero()), - NearestPrefDown => { - let rem = n % d.clone(); - rem > d / two() - }, - NearestPrefUp => { - let rem = n % d.clone(); - rem >= d.clone() / two() + d % two() + } }, - Down => false, - } { - o += N::one() } - o + rem_mul_div_inner.into() } macro_rules! implement_per_thing { @@ -687,7 +659,8 @@ macro_rules! implement_per_thing { + ops::AddAssign + Unsigned + Zero - + One, + + One + + $crate::MultiplyRational, Self::Inner: Into { // q cannot be zero. @@ -695,30 +668,8 @@ macro_rules! implement_per_thing { // p should not be bigger than q. if p > q { return Err(()) } - let factor = div_rounded::(q.clone(), $max.into(), Rounding::Up).max(One::one()); - - // q cannot overflow: (q / (q/$max)) < $max. p < q hence p also cannot overflow. - let q_reduce: $type = div_rounded(q, factor.clone(), r) - .try_into() - .map_err(|_| "Failed to convert") - .expect( - "`q / ceil(q/$max) < $max`; macro prevents any type being created that \ - does not satisfy this; qed" - ); - let p_reduce: $type = div_rounded(p, factor, r) - .try_into() - .map_err(|_| "Failed to convert") - .expect( - "`p / ceil(p/$max) < $max`; macro prevents any type being created that \ - does not satisfy this; qed" - ); - - // `p_reduced` and `q_reduced` are within `Self::Inner`. Multiplication by another - // `$max` will always fit in `$upper_type`. This is guaranteed by the macro tests. 
- let n = p_reduce as $upper_type * <$upper_type>::from($max); - let d = q_reduce as $upper_type; - let part = div_rounded(n, d, r); - Ok($name(part as Self::Inner)) + let max: N = $max.into(); + max.multiply_rational(p, q, r).ok_or(())?.try_into().map(|x| $name(x)).map_err(|_| ()) } } @@ -1862,6 +1813,22 @@ fn from_rational_with_rounding_works_in_extreme_case() { } } +#[test] +fn from_rational_with_rounding_breakage() { + let n = 372633774963620730670986667244911905u128; + let d = 512593663333074177468745541591173060u128; + let q = Perquintill::from_rational_with_rounding(n, d, Rounding::Down).unwrap(); + assert!(q * d <= n); +} + +#[test] +fn from_rational_with_rounding_breakage_2() { + let n = 36893488147419103230u128; + let d = 36893488147419103630u128; + let q = Perquintill::from_rational_with_rounding(n, d, Rounding::Up).unwrap(); + assert!(q * d >= n); +} + implement_per_thing!(Percent, test_per_cent, [u32, u64, u128], 100u8, u8, u16, "_Percent_",); implement_per_thing_with_perthousand!( PerU16, diff --git a/primitives/arithmetic/src/rational.rs b/primitives/arithmetic/src/rational.rs index 0fcb7d4d9462a..ebd89c615a38b 100644 --- a/primitives/arithmetic/src/rational.rs +++ b/primitives/arithmetic/src/rational.rs @@ -284,6 +284,54 @@ impl PartialEq for Rational128 { } } +pub trait MultiplyRational: Sized { + fn multiply_rational(self, n: Self, d: Self, r: Rounding) -> Option; +} + +macro_rules! impl_rrm { + ($ulow:ty, $uhi:ty) => { + impl MultiplyRational for $ulow { + fn multiply_rational(self, n: Self, d: Self, r: Rounding) -> Option { + if d.is_zero() { + return None + } + + let sn = (self as $uhi) * (n as $uhi); + let mut result = sn / (d as $uhi); + let remainder = (sn % (d as $uhi)) as $ulow; + if match r { + Rounding::Up => remainder > 0, + // cannot be `(d + 1) / 2` since `d` might be `max_value` and overflow. + Rounding::NearestPrefUp => remainder >= d / 2 + d % 2, + Rounding::NearestPrefDown => remainder > d / 2, + Rounding::Down => false, + } { + result = match result.checked_add(1) { + Some(v) => v, + None => return None, + }; + } + if result > (<$ulow>::max_value() as $uhi) { + None + } else { + Some(result as $ulow) + } + } + } + }; +} + +impl_rrm!(u8, u16); +impl_rrm!(u16, u32); +impl_rrm!(u32, u64); +impl_rrm!(u64, u128); + +impl MultiplyRational for u128 { + fn multiply_rational(self, n: Self, d: Self, r: Rounding) -> Option { + crate::helpers_128bit::multiply_by_rational_with_rounding(self, n, d, r) + } +} + #[cfg(test)] mod tests { use super::{helpers_128bit::*, *}; diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index 33b4e376aaf9d..061b11b3e9c72 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -32,6 +32,8 @@ use sp_std::ops::{ Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, RemAssign, Shl, Shr, Sub, SubAssign, }; +use crate::MultiplyRational; + /// A meta trait for arithmetic type operations, regardless of any limitation on size. pub trait BaseArithmetic: From @@ -186,9 +188,9 @@ pub trait AtLeast32Bit: BaseArithmetic + From + From {} impl + From> AtLeast32Bit for T {} /// A meta trait for arithmetic. Same as [`AtLeast32Bit `], but also bounded to be unsigned. 
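The two `from_rational_with_rounding_breakage` regression tests above pin the rounding guarantees down: with `Rounding::Down` the reconstructed product `q * d` never exceeds `n`, and with `Rounding::Up` it never falls below it. A compact sketch of the same guarantee on a small `Percent` case, together with the `None`-on-overflow behaviour of the widening `MultiplyRational` impls (illustrative, not part of the diff):

    use sp_arithmetic::{MultiplyRational, PerThing, Percent, Rounding};

    fn main() {
        // 1/3 as a Percent: rounding down yields 33 parts, rounding up yields 34.
        let down = Percent::from_rational_with_rounding(1u64, 3u64, Rounding::Down).unwrap();
        let up = Percent::from_rational_with_rounding(1u64, 3u64, Rounding::Up).unwrap();
        assert_eq!((down.deconstruct(), up.deconstruct()), (33, 34));

        // The `impl_rrm!` impls widen (here u8 -> u16), so the intermediate product
        // cannot overflow, but a quotient that does not fit back into u8 is `None`.
        assert_eq!(200u8.multiply_rational(3, 2, Rounding::Down), None); // 300 > u8::MAX
        assert_eq!(200u8.multiply_rational(3, 4, Rounding::Down), Some(150));
    }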
-pub trait AtLeast32BitUnsigned: AtLeast32Bit + Unsigned {} +pub trait AtLeast32BitUnsigned: AtLeast32Bit + Unsigned + MultiplyRational {} -impl AtLeast32BitUnsigned for T {} +impl AtLeast32BitUnsigned for T {} /// Just like `From` except that if the source value is too big to fit into the destination type /// then it'll saturate the destination. diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index ff36cf58d366e..e3a82b5fda514 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml deleted file mode 100644 index 49107ebed1db0..0000000000000 --- a/primitives/authorship/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "sp-authorship" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -description = "Authorship primitives" -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -async-trait = { version = "0.1.57", optional = true } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } - -[features] -default = [ "std" ] -std = [ - "async-trait", - "codec/std", - "sp-inherents/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/authorship/src/lib.rs b/primitives/authorship/src/lib.rs deleted file mode 100644 index 6ff6607a896cc..0000000000000 --- a/primitives/authorship/src/lib.rs +++ /dev/null @@ -1,101 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Authorship Primitives - -#![cfg_attr(not(feature = "std"), no_std)] - -use sp_std::{prelude::*, result::Result}; - -#[cfg(feature = "std")] -use codec::Decode; -use codec::Encode; -use sp_inherents::{Error, InherentData, InherentIdentifier, IsFatalError}; -use sp_runtime::{traits::Header as HeaderT, RuntimeString}; - -/// The identifier for the `uncles` inherent. -pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"uncles00"; - -/// Errors that can occur while checking the authorship inherent. -#[derive(Encode, sp_runtime::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode))] -pub enum InherentError { - Uncles(RuntimeString), -} - -impl IsFatalError for InherentError { - fn is_fatal_error(&self) -> bool { - match self { - InherentError::Uncles(_) => true, - } - } -} - -/// Auxiliary trait to extract uncles inherent data. -pub trait UnclesInherentData { - /// Get uncles. - fn uncles(&self) -> Result, Error>; -} - -impl UnclesInherentData for InherentData { - fn uncles(&self) -> Result, Error> { - Ok(self.get_data(&INHERENT_IDENTIFIER)?.unwrap_or_default()) - } -} - -/// Provider for inherent data. -#[cfg(feature = "std")] -pub struct InherentDataProvider { - uncles: Vec, -} - -#[cfg(feature = "std")] -impl InherentDataProvider { - /// Create a new inherent data provider with the given `uncles`. - pub fn new(uncles: Vec) -> Self { - InherentDataProvider { uncles } - } - - /// Create a new instance that is usable for checking inherents. - /// - /// This will always return an empty vec of uncles. - pub fn check_inherents() -> Self { - Self { uncles: Vec::new() } - } -} - -#[cfg(feature = "std")] -#[async_trait::async_trait] -impl sp_inherents::InherentDataProvider for InherentDataProvider { - async fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - inherent_data.put_data(INHERENT_IDENTIFIER, &self.uncles) - } - - async fn try_handle_error( - &self, - identifier: &InherentIdentifier, - mut error: &[u8], - ) -> Option> { - if *identifier != INHERENT_IDENTIFIER { - return None - } - - let error = InherentError::decode(&mut error).ok()?; - - Some(Err(Error::Application(Box::from(format!("{:?}", error))))) - } -} diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 001dc4a69aca4..94a031a547368 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } sp-consensus = { version = "0.10.0-dev", optional = true, path = "../common" } diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 334fff2811941..7e171811a7fe2 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -15,14 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -merlin = { version = "2.0", default-features = false } 
-scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } sp-consensus = { version = "0.10.0-dev", optional = true, path = "../common" } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } -sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../vrf" } sp-core = { version = "7.0.0", default-features = false, path = "../../core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } sp-keystore = { version = "0.13.0", default-features = false, optional = true, path = "../../keystore" } @@ -35,14 +33,12 @@ default = ["std"] std = [ "async-trait", "codec/std", - "merlin/std", "scale-info/std", "serde", "sp-api/std", "sp-application-crypto/std", "sp-consensus", "sp-consensus-slots/std", - "sp-consensus-vrf/std", "sp-core/std", "sp-inherents/std", "sp-keystore", diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 4364057a4b478..afc967e3af391 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -19,14 +19,15 @@ use super::{ AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, - BabeEpochConfiguration, Slot, BABE_ENGINE_ID, + BabeEpochConfiguration, Randomness, Slot, BABE_ENGINE_ID, }; -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; + +use sp_core::sr25519::vrf::VrfSignature; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; -use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; /// Raw BABE primary slot assignment pre-digest. #[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -35,10 +36,8 @@ pub struct PrimaryPreDigest { pub authority_index: super::AuthorityIndex, /// Slot pub slot: Slot, - /// VRF output - pub vrf_output: VRFOutput, - /// VRF proof - pub vrf_proof: VRFProof, + /// VRF signature + pub vrf_signature: VrfSignature, } /// BABE secondary slot assignment pre-digest. @@ -62,10 +61,8 @@ pub struct SecondaryVRFPreDigest { pub authority_index: super::AuthorityIndex, /// Slot pub slot: Slot, - /// VRF output - pub vrf_output: VRFOutput, - /// VRF proof - pub vrf_proof: VRFProof, + /// VRF signature + pub vrf_signature: VrfSignature, } /// A BABE pre-runtime digest. This contains all data required to validate a @@ -118,11 +115,10 @@ impl PreDigest { } /// Returns the VRF output and proof, if they exist. 
- pub fn vrf(&self) -> Option<(&VRFOutput, &VRFProof)> { + pub fn vrf_signature(&self) -> Option<&VrfSignature> { match self { - PreDigest::Primary(primary) => Some((&primary.vrf_output, &primary.vrf_proof)), - PreDigest::SecondaryVRF(secondary) => - Some((&secondary.vrf_output, &secondary.vrf_proof)), + PreDigest::Primary(primary) => Some(&primary.vrf_signature), + PreDigest::SecondaryVRF(secondary) => Some(&secondary.vrf_signature), PreDigest::SecondaryPlain(_) => None, } } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index e7747ac4c204a..37cf0c3f0b677 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -23,22 +23,17 @@ pub mod digests; pub mod inherents; -pub use merlin::Transcript; -pub use sp_consensus_vrf::schnorrkel::{ - Randomness, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, -}; - use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; use sp_runtime::{traits::Header, ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; use crate::digests::{NextConfigDescriptor, NextEpochDescriptor}; +pub use sp_core::sr25519::vrf::{VrfOutput, VrfProof, VrfSignature, VrfTranscript}; + /// Key type for BABE module. pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::BABE; @@ -47,11 +42,14 @@ mod app { app_crypto!(sr25519, BABE); } -/// The prefix used by BABE for its VRF keys. -pub const BABE_VRF_PREFIX: &[u8] = b"substrate-babe-vrf"; +/// VRF context used for per-slot randomness generation. +pub const RANDOMNESS_VRF_CONTEXT: &[u8] = b"BabeVRFInOutContext"; -/// BABE VRFInOut context. -pub static BABE_VRF_INOUT_CONTEXT: &[u8] = b"BabeVRFInOutContext"; +/// VRF output length for per-slot randomness. +pub const RANDOMNESS_LENGTH: usize = 32; + +/// Randomness type required by BABE operations. +pub type Randomness = [u8; RANDOMNESS_LENGTH]; /// A Babe authority keypair. Necessarily equivalent to the schnorrkel public key used in /// the main Babe module. If that ever changes, then this must, too. @@ -96,26 +94,16 @@ pub type BabeAuthorityWeight = u64; /// of 0 (regardless of whether they are plain or vrf secondary blocks). pub type BabeBlockWeight = u32; -/// Make a VRF transcript from given randomness, slot number and epoch. -pub fn make_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&BABE_ENGINE_ID); - transcript.append_u64(b"slot number", *slot); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", &randomness[..]); - transcript -} - /// Make a VRF transcript data container -#[cfg(feature = "std")] -pub fn make_transcript_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VRFTranscriptData { - VRFTranscriptData { - label: &BABE_ENGINE_ID, - items: vec![ - ("slot number", VRFTranscriptValue::U64(*slot)), - ("current epoch", VRFTranscriptValue::U64(epoch)), - ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), +pub fn make_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfTranscript { + VrfTranscript::new( + &BABE_ENGINE_ID, + &[ + (b"slot number", &slot.to_le_bytes()), + (b"current epoch", &epoch.to_le_bytes()), + (b"chain randomness", randomness), ], - } + ) } /// An consensus log item for BABE. 
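With `sp-consensus-vrf` gone, the BABE transcript is now built directly from the `sp_core::sr25519::vrf` types, binding the engine id, slot, epoch and chain randomness as labelled byte strings. A minimal sketch of building that transcript and, assuming the sr25519 `Pair`/`Public` types implement the `VrfSigner`/`VrfVerifier` traits added later in this diff (an assumption of this sketch; those impls are not shown here), signing and verifying it:

    use sp_consensus_babe::{make_transcript, Randomness, Slot};
    use sp_core::{crypto::{VrfSigner, VrfVerifier}, sr25519, Pair};

    fn main() {
        let randomness: Randomness = [0u8; 32];
        let transcript = make_transcript(&randomness, Slot::from(42u64), 1);

        // Assumption: sr25519::Pair is a `VrfSigner` and sr25519::Public a
        // `VrfVerifier` over `VrfTranscript`.
        let pair = sr25519::Pair::from_seed(&[7u8; 32]);
        let signature = pair.vrf_sign(&transcript);
        assert!(pair.public().vrf_verify(&transcript, &signature));
    }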
@@ -355,7 +343,7 @@ pub struct Epoch { /// The authorities and their weights. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], + pub randomness: Randomness, /// Configuration of the epoch. pub config: BabeEpochConfiguration, } diff --git a/primitives/consensus/beefy/Cargo.toml b/primitives/consensus/beefy/Cargo.toml index 657569d122b05..144c8452d71b0 100644 --- a/primitives/consensus/beefy/Cargo.toml +++ b/primitives/consensus/beefy/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } sp-core = { version = "7.0.0", default-features = false, path = "../../core" } diff --git a/primitives/consensus/beefy/src/commitment.rs b/primitives/consensus/beefy/src/commitment.rs index f824c90e27c46..8ad3cc3721a00 100644 --- a/primitives/consensus/beefy/src/commitment.rs +++ b/primitives/consensus/beefy/src/commitment.rs @@ -253,7 +253,7 @@ mod tests { use crate::{crypto, known_payloads, KEY_TYPE}; use codec::Decode; use sp_core::{keccak_256, Pair}; - use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; + use sp_keystore::{testing::MemoryKeystore, KeystorePtr}; type TestCommitment = Commitment; type TestSignedCommitment = SignedCommitment; @@ -263,22 +263,16 @@ mod tests { // The mock signatures are equivalent to the ones produced by the BEEFY keystore fn mock_signatures() -> (crypto::Signature, crypto::Signature) { - let store: SyncCryptoStorePtr = KeyStore::new().into(); + let store: KeystorePtr = MemoryKeystore::new().into(); let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); - let _ = - SyncCryptoStore::insert_unknown(&*store, KEY_TYPE, "//Alice", alice.public().as_ref()) - .unwrap(); + store.insert(KEY_TYPE, "//Alice", alice.public().as_ref()).unwrap(); let msg = keccak_256(b"This is the first message"); - let sig1 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) - .unwrap() - .unwrap(); + let sig1 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); let msg = keccak_256(b"This is the second message"); - let sig2 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) - .unwrap() - .unwrap(); + let sig2 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); (sig1.into(), sig2.into()) } diff --git a/primitives/consensus/beefy/src/lib.rs b/primitives/consensus/beefy/src/lib.rs index cc5d1e8cb9a3b..268e1925b4449 100644 --- a/primitives/consensus/beefy/src/lib.rs +++ b/primitives/consensus/beefy/src/lib.rs @@ -284,7 +284,7 @@ impl OnNewValidatorSet for () { /// the runtime API boundary this type is unknown and as such we keep this /// opaque representation, implementors of the runtime API will have to make /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. 
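The BEEFY tests above switch from the old `SyncCryptoStore` free-function style to the new `KeystorePtr`/`MemoryKeystore` API, where signing is called directly on the keystore handle. A minimal sketch of the same pattern for sr25519; the `sr25519_generate_new`/`sr25519_sign` helper names are assumed to exist on the new `Keystore` trait, by analogy with the `ecdsa_sign_prehashed` call shown above:

    use sp_core::{crypto::KeyTypeId, sr25519, Pair};
    use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr};

    fn main() {
        let store: KeystorePtr = MemoryKeystore::new().into();
        let key_type = KeyTypeId(*b"test");
        // Keys are generated inside the keystore and referenced by their public part.
        let public = store.sr25519_generate_new(key_type, None).unwrap();
        // Signing happens through the trait object, no `SyncCryptoStore::sign_with` indirection.
        let sig = store.sr25519_sign(key_type, &public, b"msg").unwrap().unwrap();
        assert!(sr25519::Pair::verify(&sig, b"msg", &public));
    }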
-#[derive(Decode, Encode, PartialEq)] +#[derive(Decode, Encode, PartialEq, TypeInfo)] pub struct OpaqueKeyOwnershipProof(Vec); impl OpaqueKeyOwnershipProof { /// Create a new `OpaqueKeyOwnershipProof` using the given encoded diff --git a/primitives/consensus/beefy/src/witness.rs b/primitives/consensus/beefy/src/witness.rs index 9cc31d54b4c1d..ff9a0401dc632 100644 --- a/primitives/consensus/beefy/src/witness.rs +++ b/primitives/consensus/beefy/src/witness.rs @@ -74,9 +74,8 @@ impl SignedCommitmentWitness (crypto::Signature, crypto::Signature) { - let store: SyncCryptoStorePtr = KeyStore::new().into(); + let store: KeystorePtr = MemoryKeystore::new().into(); let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); - let _ = - SyncCryptoStore::insert_unknown(&*store, KEY_TYPE, "//Alice", alice.public().as_ref()) - .unwrap(); + store.insert(KEY_TYPE, "//Alice", alice.public().as_ref()).unwrap(); let msg = keccak_256(b"This is the first message"); - let sig1 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) - .unwrap() - .unwrap(); + let sig1 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); let msg = keccak_256(b"This is the second message"); - let sig2 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) - .unwrap() - .unwrap(); + let sig2 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); (sig1.into(), sig2.into()) } diff --git a/primitives/consensus/common/src/error.rs b/primitives/consensus/common/src/error.rs index e881259da11e4..fb8d0447fe3d6 100644 --- a/primitives/consensus/common/src/error.rs +++ b/primitives/consensus/common/src/error.rs @@ -48,8 +48,8 @@ pub enum Error { #[error("Chain lookup failed: {0}")] ChainLookup(String), /// Signing failed. - #[error("Failed to sign using key: {0:?}. Reason: {1}")] - CannotSign(Vec, String), + #[error("Failed to sign: {0}")] + CannotSign(String), /// Some other error. 
#[error(transparent)] Other(#[from] Box), diff --git a/primitives/consensus/grandpa/Cargo.toml b/primitives/consensus/grandpa/Cargo.toml index 7146873e0a751..446471da8361a 100644 --- a/primitives/consensus/grandpa/Cargo.toml +++ b/primitives/consensus/grandpa/Cargo.toml @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -grandpa = { package = "finality-grandpa", version = "0.16.1", default-features = false, features = ["derive-codec"] } +grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } diff --git a/primitives/consensus/grandpa/src/lib.rs b/primitives/consensus/grandpa/src/lib.rs index 26cee07b80be8..728ce3092888b 100644 --- a/primitives/consensus/grandpa/src/lib.rs +++ b/primitives/consensus/grandpa/src/lib.rs @@ -28,7 +28,7 @@ use serde::Serialize; use codec::{Codec, Decode, Encode, Input}; use scale_info::TypeInfo; #[cfg(feature = "std")] -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; use sp_runtime::{ traits::{Header as HeaderT, NumberFor}, ConsensusEngineId, RuntimeDebug, @@ -234,15 +234,12 @@ impl ConsensusLog { /// GRANDPA happens when a voter votes on the same round (either at prevote or /// precommit stage) for different blocks. Proving is achieved by collecting the /// signed messages of conflicting votes. -#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +#[derive(Clone, Debug, Decode, Encode, PartialEq, Eq, TypeInfo)] pub struct EquivocationProof { set_id: SetId, equivocation: Equivocation, } -// Don't bother the grandpa crate... -impl Eq for EquivocationProof {} - impl EquivocationProof { /// Create a new `EquivocationProof` for the given set id and using the /// given equivocation as proof. @@ -271,7 +268,7 @@ impl EquivocationProof { /// Wrapper object for GRANDPA equivocation proofs, useful for unifying prevote /// and precommit equivocations under a common type. -#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +#[derive(Clone, Debug, Decode, Encode, PartialEq, Eq, TypeInfo)] pub enum Equivocation { /// Proof of equivocation at prevote stage. Prevote(grandpa::Equivocation, AuthoritySignature>), @@ -444,7 +441,7 @@ where /// Localizes the message to the given set and round and signs the payload. #[cfg(feature = "std")] pub fn sign_message( - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, message: grandpa::Message, public: AuthorityId, round: RoundNumber, @@ -454,20 +451,15 @@ where H: Encode, N: Encode, { - use sp_application_crypto::AppKey; - use sp_core::crypto::Public; + use sp_application_crypto::AppCrypto; let encoded = localized_payload(round, set_id, &message); - let signature = SyncCryptoStore::sign_with( - &*keystore, - AuthorityId::ID, - &public.to_public_crypto_pair(), - &encoded[..], - ) - .ok() - .flatten()? 
- .try_into() - .ok()?; + let signature = keystore + .ed25519_sign(AuthorityId::ID, public.as_ref(), &encoded[..]) + .ok() + .flatten()? + .try_into() + .ok()?; Some(grandpa::SignedMessage { message, signature, id: public }) } diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index d450f9341e33e..a4d85dff6ea85 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -16,8 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0", features = ["derive"], optional = true } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../arithmetic" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../std" } sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../timestamp" } diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml deleted file mode 100644 index a4f92c30cca03..0000000000000 --- a/primitives/consensus/vrf/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "sp-consensus-vrf" -version = "0.10.0-dev" -authors = ["Parity Technologies "] -description = "Primitives for VRF based consensus" -edition = "2021" -license = "Apache-2.0" -repository = "https://github.com/paritytech/substrate/" -homepage = "https://substrate.io" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false } -schnorrkel = { version = "0.9.1", default-features = false, features = ["preaudit_deprecated", "u64_backend"] } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } - -[features] -default = ["std"] -std = [ - "codec/std", - "scale-info/std", - "schnorrkel/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/consensus/vrf/README.md b/primitives/consensus/vrf/README.md deleted file mode 100644 index d66490e023b38..0000000000000 --- a/primitives/consensus/vrf/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Primitives for VRF-based consensus engines. - -License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/vrf/src/lib.rs b/primitives/consensus/vrf/src/lib.rs deleted file mode 100644 index 84040c18590e0..0000000000000 --- a/primitives/consensus/vrf/src/lib.rs +++ /dev/null @@ -1,21 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Primitives for VRF-based consensus engines. -#![cfg_attr(not(feature = "std"), no_std)] - -pub mod schnorrkel; diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs deleted file mode 100644 index be145c5b12096..0000000000000 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ /dev/null @@ -1,188 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Schnorrkel-based VRF. - -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use scale_info::TypeInfo; -use schnorrkel::errors::MultiSignatureStage; -use sp_core::U512; -use sp_std::{ - ops::{Deref, DerefMut}, - prelude::*, -}; - -pub use schnorrkel::{ - vrf::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, - PublicKey, SignatureError, -}; - -/// The length of the Randomness. -pub const RANDOMNESS_LENGTH: usize = VRF_OUTPUT_LENGTH; - -/// VRF output type available for `std` environment, suitable for schnorrkel operations. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); - -impl Deref for VRFOutput { - type Target = schnorrkel::vrf::VRFOutput; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for VRFOutput { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl Encode for VRFOutput { - fn encode(&self) -> Vec { - self.0.as_bytes().encode() - } -} - -impl EncodeLike for VRFOutput {} - -impl Decode for VRFOutput { - fn decode(i: &mut R) -> Result { - let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; - Ok(Self(schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?)) - } -} - -impl MaxEncodedLen for VRFOutput { - fn max_encoded_len() -> usize { - <[u8; VRF_OUTPUT_LENGTH]>::max_encoded_len() - } -} - -impl TypeInfo for VRFOutput { - type Identity = [u8; VRF_OUTPUT_LENGTH]; - - fn type_info() -> scale_info::Type { - Self::Identity::type_info() - } -} - -impl TryFrom<[u8; VRF_OUTPUT_LENGTH]> for VRFOutput { - type Error = SignatureError; - - fn try_from(raw: [u8; VRF_OUTPUT_LENGTH]) -> Result { - schnorrkel::vrf::VRFOutput::from_bytes(&raw).map(VRFOutput) - } -} - -/// VRF proof type available for `std` environment, suitable for schnorrkel operations. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct VRFProof(pub schnorrkel::vrf::VRFProof); - -impl PartialOrd for VRFProof { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for VRFProof { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - U512::from(self.0.to_bytes()).cmp(&U512::from(other.0.to_bytes())) - } -} - -impl Deref for VRFProof { - type Target = schnorrkel::vrf::VRFProof; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for VRFProof { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl Encode for VRFProof { - fn encode(&self) -> Vec { - self.0.to_bytes().encode() - } -} - -impl EncodeLike for VRFProof {} - -impl Decode for VRFProof { - fn decode(i: &mut R) -> Result { - let decoded = <[u8; VRF_PROOF_LENGTH]>::decode(i)?; - Ok(Self(schnorrkel::vrf::VRFProof::from_bytes(&decoded).map_err(convert_error)?)) - } -} - -impl MaxEncodedLen for VRFProof { - fn max_encoded_len() -> usize { - <[u8; VRF_PROOF_LENGTH]>::max_encoded_len() - } -} - -impl TypeInfo for VRFProof { - type Identity = [u8; VRF_PROOF_LENGTH]; - - fn type_info() -> scale_info::Type { - Self::Identity::type_info() - } -} - -impl TryFrom<[u8; VRF_PROOF_LENGTH]> for VRFProof { - type Error = SignatureError; - - fn try_from(raw: [u8; VRF_PROOF_LENGTH]) -> Result { - schnorrkel::vrf::VRFProof::from_bytes(&raw).map(VRFProof) - } -} - -fn convert_error(e: SignatureError) -> codec::Error { - use MultiSignatureStage::*; - use SignatureError::*; - match e { - EquationFalse => "Signature error: `EquationFalse`".into(), - PointDecompressionError => "Signature error: `PointDecompressionError`".into(), - ScalarFormatError => "Signature error: `ScalarFormatError`".into(), - NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), - BytesLengthError { .. } => "Signature error: `BytesLengthError`".into(), - MuSigAbsent { musig_stage: Commitment } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigAbsent { musig_stage: Reveal } => - "Signature error: `MuSigAbsent` at stage `Reveal`".into(), - MuSigAbsent { musig_stage: Cosignature } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), - } -} - -/// Schnorrkel randomness value. Same size as `VRFOutput`. 
-pub type Randomness = [u8; RANDOMNESS_LENGTH]; diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 10508d239457d..6beb126ef36f1 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -13,11 +13,8 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ - "derive", - "max-encoded-len", -] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive","max-encoded-len"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } serde = { version = "1.0.136", optional = true, features = ["derive"] } bounded-collections = { version = "0.1.4", default-features = false } @@ -25,7 +22,7 @@ primitive-types = { version = "0.12.0", default-features = false, features = ["c impl-serde = { version = "0.4.0", optional = true } hash-db = { version = "0.16.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } -base58 = { version = "0.2.0", optional = true } +bs58 = { version = "0.4.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } substrate-bip39 = { version = "0.4.4", optional = true } tiny-bip39 = { version = "1.0.0", optional = true } @@ -43,17 +40,15 @@ dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.30", optional = true } parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } bitflags = "1.3" +paste = "1.0.7" # full crypto array-bytes = { version = "4.1", optional = true } ed25519-zebra = { version = "3.1.0", default-features = false, optional = true } blake2 = { version = "0.10.4", default-features = false, optional = true } -schnorrkel = { version = "0.9.1", features = [ - "preaudit_deprecated", - "u64_backend", -], default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } -merlin = { version = "2.0", default-features = false, optional = true } +schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } +merlin = { version = "2.0", default-features = false } secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } ss58-registry = { version = "1.34.0", default-features = false } sp-core-hashing = { version = "5.0.0", path = "./hashing", default-features = false, optional = true } @@ -76,7 +71,7 @@ bench = false [features] default = ["std"] std = [ - "merlin?/std", + "merlin/std", "full_crypto", "log/std", "parity-util-mem/std", @@ -98,7 +93,7 @@ std = [ "blake2/std", "array-bytes", "ed25519-zebra/std", - "base58", + "bs58/std", "substrate-bip39", "tiny-bip39", "rand", @@ -127,10 +122,8 @@ full_crypto = [ "array-bytes", "ed25519-zebra", "blake2", - "schnorrkel", "libsecp256k1", "secp256k1", "sp-core-hashing", "sp-runtime-interface/disable_target_static_assertions", - "merlin", ] diff --git a/primitives/core/hashing/proc-macro/Cargo.toml b/primitives/core/hashing/proc-macro/Cargo.toml index 6d9747de871c7..66e9667c8468c 100644 --- a/primitives/core/hashing/proc-macro/Cargo.toml +++ 
b/primitives/core/hashing/proc-macro/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = "1.0.37" -quote = "1.0.6" -syn = { version = "1.0.98", features = ["full", "parsing"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full", "parsing"] } sp-core-hashing = { version = "5.0.0", default-features = false, path = "../" } diff --git a/primitives/core/src/bounded.rs b/primitives/core/src/bounded.rs deleted file mode 100644 index c78f1f85a4c23..0000000000000 --- a/primitives/core/src/bounded.rs +++ /dev/null @@ -1,28 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Bounded collection types. - -pub mod bounded_btree_map; -pub mod bounded_btree_set; -pub mod bounded_vec; -pub mod weak_bounded_vec; - -pub use bounded_btree_map::BoundedBTreeMap; -pub use bounded_btree_set::BoundedBTreeSet; -pub use bounded_vec::{BoundedSlice, BoundedVec}; -pub use weak_bounded_vec::WeakBoundedVec; diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 16af3d06963ab..dd0d9e60529f2 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -19,22 +19,17 @@ //! Cryptographic utilities. // end::description[] -#[cfg(feature = "std")] -use crate::hexdisplay::HexDisplay; use crate::{ed25519, sr25519}; #[cfg(feature = "std")] -use base58::{FromBase58, ToBase58}; +use bip39::{Language, Mnemonic, MnemonicType}; use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "std")] use rand::{rngs::OsRng, RngCore}; #[cfg(feature = "std")] use regex::Regex; use scale_info::TypeInfo; -/// Trait for accessing reference to `SecretString`. -pub use secrecy::ExposeSecret; -/// A store for sensitive data. #[cfg(feature = "std")] -pub use secrecy::SecretString; +pub use secrecy::{ExposeSecret, SecretString}; use sp_runtime_interface::pass_by::PassByInner; #[doc(hidden)] pub use sp_std::ops::Deref; @@ -52,10 +47,6 @@ pub const DEV_PHRASE: &str = /// The address of the associated root phrase for our publicly known keys. pub const DEV_ADDRESS: &str = "5DfhGyQdFobKM8NsWvEeAKk5EQQgYe9AydgJ7rMB6E1EqRzV"; -/// The infallible type. -#[derive(crate::RuntimeDebug)] -pub enum Infallible {} - /// The length of the junction identifier. Note that this is also referred to as the /// `CHAIN_CODE_LENGTH` in the context of Schnorrkel. #[cfg(feature = "full_crypto")] @@ -108,6 +99,16 @@ pub enum SecretStringError { InvalidPath, } +/// An error when deriving a key. +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg(feature = "full_crypto")] +pub enum DeriveError { + /// A soft key was found in the path (and is unsupported). + #[cfg_attr(feature = "std", error("Soft key in path"))] + SoftKeyInPath, +} + /// A since derivation junction description. 
It is the single parameter used when creating /// a new secret key from an existing secret key and, in the case of `SoftRaw` and `SoftIndex` /// a new public key from an existing public key. @@ -270,7 +271,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { const CHECKSUM_LEN: usize = 2; let body_len = Self::LEN; - let data = s.from_base58().map_err(|_| PublicError::BadBase58)?; + let data = bs58::decode(s).into_vec().map_err(|_| PublicError::BadBase58)?; if data.len() < 2 { return Err(PublicError::BadLength) } @@ -339,7 +340,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { v.extend(self.as_ref()); let r = ss58hash(&v); v.extend(&r[0..2]); - v.to_base58() + bs58::encode(v).into_string() } /// Return the ss58-check string for this key. @@ -478,11 +479,8 @@ pub trait ByteArray: AsRef<[u8]> + AsMut<[u8]> + for<'a> TryFrom<&'a [u8], Error } } -/// Trait suitable for typical cryptographic PKI key public type. -pub trait Public: ByteArray + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync { - /// Return `CryptoTypePublicPair` from public key. - fn to_public_crypto_pair(&self) -> CryptoTypePublicPair; -} +/// Trait suitable for typical cryptographic key public type. +pub trait Public: ByteArray + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync {} /// An opaque 32-byte cryptographic identifier. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -681,50 +679,47 @@ mod dummy { b"" } } - impl Public for Dummy { - fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair(CryptoTypeId(*b"dumm"), ::to_raw_vec(self)) - } - } + impl Public for Dummy {} impl Pair for Dummy { type Public = Dummy; type Seed = Dummy; type Signature = Dummy; - type DeriveError = (); + #[cfg(feature = "std")] fn generate_with_phrase(_: Option<&str>) -> (Self, String, Self::Seed) { Default::default() } + #[cfg(feature = "std")] fn from_phrase(_: &str, _: Option<&str>) -> Result<(Self, Self::Seed), SecretStringError> { Ok(Default::default()) } + fn derive>( &self, _: Iter, _: Option, - ) -> Result<(Self, Option), Self::DeriveError> { + ) -> Result<(Self, Option), DeriveError> { Ok((Self, None)) } - fn from_seed(_: &Self::Seed) -> Self { - Self - } + fn from_seed_slice(_: &[u8]) -> Result { Ok(Self) } + fn sign(&self, _: &[u8]) -> Self::Signature { Self } + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } - fn verify_weak, M: AsRef<[u8]>>(_: &[u8], _: M, _: P) -> bool { - true - } + fn public(&self) -> Self::Public { Self } + fn to_raw_vec(&self) -> Vec { vec![] } @@ -845,9 +840,6 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// and verified with the message and a public key. type Signature: AsRef<[u8]>; - /// Error returned from the `derive` function. - type DeriveError; - /// Generate new secure (random) key pair. /// /// This is only for ephemeral keys really, since you won't have access to the secret key @@ -866,27 +858,47 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// This is generally slower than `generate()`, so prefer that unless you need to persist /// the key from the current session. 
#[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed); + fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + let phrase = mnemonic.phrase(); + let (pair, seed) = Self::from_phrase(phrase, password) + .expect("All phrases generated by Mnemonic are valid; qed"); + (pair, phrase.to_owned(), seed) + } /// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. #[cfg(feature = "std")] fn from_phrase( phrase: &str, password: Option<&str>, - ) -> Result<(Self, Self::Seed), SecretStringError>; + ) -> Result<(Self, Self::Seed), SecretStringError> { + let mnemonic = Mnemonic::from_phrase(phrase, Language::English) + .map_err(|_| SecretStringError::InvalidPhrase)?; + let big_seed = + substrate_bip39::seed_from_entropy(mnemonic.entropy(), password.unwrap_or("")) + .map_err(|_| SecretStringError::InvalidSeed)?; + let mut seed = Self::Seed::default(); + let seed_slice = seed.as_mut(); + let seed_len = seed_slice.len(); + debug_assert!(seed_len <= big_seed.len()); + seed_slice[..seed_len].copy_from_slice(&big_seed[..seed_len]); + Self::from_seed_slice(seed_slice).map(|x| (x, seed)) + } /// Derive a child key from a series of given junctions. fn derive>( &self, path: Iter, seed: Option, - ) -> Result<(Self, Option), Self::DeriveError>; + ) -> Result<(Self, Option), DeriveError>; /// Generate new key pair from the provided `seed`. /// /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed /// by an attacker then they can also derive your key. - fn from_seed(seed: &Self::Seed) -> Self; + fn from_seed(seed: &Self::Seed) -> Self { + Self::from_seed_slice(seed.as_ref()).expect("seed has valid length; qed") + } /// Make a new key pair from secret seed material. The slice must be the correct size or /// it will return `None`. @@ -901,9 +913,6 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// Verify a signature on a message. Returns true if the signature is good. fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool; - /// Verify a signature on a message. Returns true if the signature is good. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool; - /// Get the public key. fn public(&self) -> Self::Public; @@ -1083,29 +1092,32 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { } } +/// Trait grouping types shared by a VRF signer and verifiers. +pub trait VrfCrypto { + /// Associated signature type. + type VrfSignature; + + /// Vrf input data. Generally some form of transcript. + type VrfInput; +} + +/// VRF Signer. +pub trait VrfSigner: VrfCrypto { + /// Sign input data. + fn vrf_sign(&self, data: &Self::VrfInput) -> Self::VrfSignature; +} + +/// VRF Verifier. +pub trait VrfVerifier: VrfCrypto { + /// Verify input data signature. 
+ fn vrf_verify(&self, data: &Self::VrfInput, signature: &Self::VrfSignature) -> bool; +} + /// An identifier for a specific cryptographic algorithm used by a key pair #[derive(Debug, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct CryptoTypeId(pub [u8; 4]); -/// A type alias of CryptoTypeId & a public key -#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -pub struct CryptoTypePublicPair(pub CryptoTypeId, pub Vec); - -#[cfg(feature = "std")] -impl sp_std::fmt::Display for CryptoTypePublicPair { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let id = match str::from_utf8(&(self.0).0[..]) { - Ok(id) => id.to_string(), - Err(_) => { - format!("{:#?}", self.0) - }, - }; - write!(f, "{}-{}", id, HexDisplay::from(&self.1)) - } -} - /// Known key types; this also functions as a global registry of key types for projects wishing to /// avoid collisions with each other. /// @@ -1193,23 +1205,20 @@ mod tests { vec![] } } - impl Public for TestPublic { - fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair(CryptoTypeId(*b"dumm"), self.to_raw_vec()) - } - } + impl Public for TestPublic {} impl Pair for TestPair { type Public = TestPublic; type Seed = [u8; 8]; type Signature = [u8; 0]; - type DeriveError = (); fn generate() -> (Self, ::Seed) { (TestPair::Generated, [0u8; 8]) } + fn generate_with_phrase(_password: Option<&str>) -> (Self, String, ::Seed) { (TestPair::GeneratedWithPhrase, "".into(), [0u8; 8]) } + fn from_phrase( phrase: &str, password: Option<&str>, @@ -1222,11 +1231,12 @@ mod tests { [0u8; 8], )) } + fn derive>( &self, path_iter: Iter, _: Option<[u8; 8]>, - ) -> Result<(Self, Option<[u8; 8]>), Self::DeriveError> { + ) -> Result<(Self, Option<[u8; 8]>), DeriveError> { Ok(( match self.clone() { TestPair::Standard { phrase, password, path } => TestPair::Standard { @@ -1240,34 +1250,29 @@ mod tests { if path_iter.count() == 0 { x } else { - return Err(()) + return Err(DeriveError::SoftKeyInPath) }, }, None, )) } - fn from_seed(_seed: &::Seed) -> Self { - TestPair::Seed(_seed.as_ref().to_owned()) - } + fn sign(&self, _message: &[u8]) -> Self::Signature { [] } + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } - fn verify_weak, M: AsRef<[u8]>>( - _sig: &[u8], - _message: M, - _pubkey: P, - ) -> bool { - true - } + fn public(&self) -> Self::Public { TestPublic } + fn from_seed_slice(seed: &[u8]) -> Result { Ok(TestPair::Seed(seed.to_owned())) } + fn to_raw_vec(&self) -> Vec { vec![] } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index d68ba39a0fb79..2474fa7736b78 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -24,16 +24,13 @@ use sp_runtime_interface::pass_by::PassByInner; #[cfg(feature = "std")] use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, - UncheckedFrom, + ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, }; #[cfg(feature = "full_crypto")] use crate::{ - crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}, + crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}, hashing::blake2_256, }; -#[cfg(feature = "std")] -use bip39::{Language, Mnemonic, MnemonicType}; 
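Illustrative sketch (not part of the diff) of the default `generate_with_phrase`/`from_phrase` implementations added to the `Pair` trait above, assuming the `std` feature; sr25519 is used here only as an arbitrary concrete pair type.

use sp_core::{crypto::Pair as _, sr25519};

fn phrase_round_trip() {
    // Generates a pair together with the 12-word English BIP39 phrase backing it.
    let (pair, phrase, seed) = sr25519::Pair::generate_with_phrase(None);
    // Recovering from the phrase with the same (empty) password yields the same key material.
    let (recovered, recovered_seed) =
        sr25519::Pair::from_phrase(&phrase, None).expect("generated phrase is valid");
    assert_eq!(pair.public(), recovered.public());
    assert_eq!(seed, recovered_seed);
}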
#[cfg(all(feature = "full_crypto", not(feature = "std")))] use secp256k1::Secp256k1; #[cfg(feature = "std")] @@ -105,23 +102,7 @@ impl ByteArray for Public { const LEN: usize = 33; } -impl TraitPublic for Public { - fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair(CRYPTO_ID, self.to_raw_vec()) - } -} - -impl From for CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } -} - -impl From<&Public> for CryptoTypePublicPair { - fn from(key: &Public) -> Self { - CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) - } -} +impl TraitPublic for Public {} impl Derive for Public {} @@ -367,13 +348,6 @@ fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { ("Secp256k1HDKD", secret_seed, cc).using_encoded(sp_core_hashing::blake2_256) } -/// An error when deriving a key. -#[cfg(feature = "full_crypto")] -pub enum DeriveError { - /// A soft key was found in the path (and is unsupported). - SoftKeyInPath, -} - /// A key pair. #[cfg(feature = "full_crypto")] #[derive(Clone)] @@ -387,44 +361,6 @@ impl TraitPair for Pair { type Public = Public; type Seed = Seed; type Signature = Signature; - type DeriveError = DeriveError; - - /// Generate new secure (random) key pair and provide the recovery phrase. - /// - /// You can recover the same key later with `from_phrase`. - #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) - .expect("All phrases generated by Mnemonic are valid; qed"); - (pair, phrase.to_owned(), seed) - } - - /// Generate key pair from given recovery phrase and password. - #[cfg(feature = "std")] - fn from_phrase( - phrase: &str, - password: Option<&str>, - ) -> Result<(Pair, Seed), SecretStringError> { - let big_seed = substrate_bip39::seed_from_entropy( - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)? - .entropy(), - password.unwrap_or(""), - ) - .map_err(|_| SecretStringError::InvalidSeed)?; - let mut seed = Seed::default(); - seed.copy_from_slice(&big_seed[0..32]); - Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) - } - - /// Make a new key pair from secret seed material. - /// - /// You should never need to use this; generate(), generate_with_phrase - fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]).expect("seed has valid length; qed") - } /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it /// will return `None`. @@ -471,22 +407,8 @@ impl TraitPair for Pair { } /// Verify a signature on a message. Returns true if the signature is good. - fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - match sig.recover(message) { - Some(actual) => actual == *pubkey, - None => false, - } - } - - /// Verify a signature on a message. Returns true if the signature is good. - /// - /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct - /// size. Use it only if you're coming from byte buffers and need the speed. 
- fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - match Signature::from_slice(sig).and_then(|sig| sig.recover(message)) { - Some(actual) => actual.as_ref() == pubkey.as_ref(), - None => false, - } + fn verify>(sig: &Self::Signature, message: M, public: &Self::Public) -> bool { + sig.recover(message).map(|actual| actual == *public).unwrap_or_default() } /// Return a vec filled with raw data. diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 76b3064ad5a44..d0e6bef97a7d9 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -31,13 +31,9 @@ use scale_info::TypeInfo; #[cfg(feature = "std")] use crate::crypto::Ss58Codec; -use crate::crypto::{ - CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, UncheckedFrom, -}; +use crate::crypto::{CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom}; #[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; -#[cfg(feature = "std")] -use bip39::{Language, Mnemonic, MnemonicType}; +use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "full_crypto")] use core::convert::TryFrom; #[cfg(feature = "full_crypto")] @@ -46,8 +42,6 @@ use ed25519_zebra::{SigningKey, VerificationKey}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; use sp_std::ops::Deref; -#[cfg(feature = "std")] -use substrate_bip39::seed_from_entropy; /// An identifier used to match public keys against ed25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); @@ -359,82 +353,21 @@ impl ByteArray for Public { const LEN: usize = 32; } -impl TraitPublic for Public { - fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair(CRYPTO_ID, self.to_raw_vec()) - } -} +impl TraitPublic for Public {} impl Derive for Public {} -impl From for CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } -} - -impl From<&Public> for CryptoTypePublicPair { - fn from(key: &Public) -> Self { - CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) - } -} - /// Derive a single hard junction. #[cfg(feature = "full_crypto")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { ("Ed25519HDKD", secret_seed, cc).using_encoded(sp_core_hashing::blake2_256) } -/// An error when deriving a key. -#[cfg(feature = "full_crypto")] -pub enum DeriveError { - /// A soft key was found in the path (and is unsupported). - SoftKeyInPath, -} - #[cfg(feature = "full_crypto")] impl TraitPair for Pair { type Public = Public; type Seed = Seed; type Signature = Signature; - type DeriveError = DeriveError; - - /// Generate new secure (random) key pair and provide the recovery phrase. - /// - /// You can recover the same key later with `from_phrase`. - #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) - .expect("All phrases generated by Mnemonic are valid; qed"); - (pair, phrase.to_owned(), seed) - } - - /// Generate key pair from given recovery phrase and password. 
- #[cfg(feature = "std")] - fn from_phrase( - phrase: &str, - password: Option<&str>, - ) -> Result<(Pair, Seed), SecretStringError> { - let big_seed = seed_from_entropy( - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)? - .entropy(), - password.unwrap_or(""), - ) - .map_err(|_| SecretStringError::InvalidSeed)?; - let mut seed = Seed::default(); - seed.copy_from_slice(&big_seed[0..32]); - Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) - } - - /// Make a new key pair from secret seed material. - /// - /// You should never need to use this; generate(), generate_with_phrase - fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]).expect("seed has valid length; qed") - } /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it /// will return `None`. @@ -473,27 +406,17 @@ impl TraitPair for Pair { Signature::from_raw(self.secret.sign(message).into()) } - /// Verify a signature on a message. Returns true if the signature is good. - fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - Self::verify_weak(&sig.0[..], message.as_ref(), pubkey) - } - - /// Verify a signature on a message. Returns true if the signature is good. + /// Verify a signature on a message. /// - /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct - /// size. Use it only if you're coming from byte buffers and need the speed. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let public_key = match VerificationKey::try_from(pubkey.as_ref()) { - Ok(pk) => pk, - Err(_) => return false, + /// Returns true if the signature is good. + fn verify>(sig: &Self::Signature, message: M, public: &Self::Public) -> bool { + let Ok(public) = VerificationKey::try_from(public.as_slice()) else { + return false }; - - let sig = match ed25519_zebra::Signature::try_from(sig) { - Ok(s) => s, - Err(_) => return false, + let Ok(signature) = ed25519_zebra::Signature::try_from(sig.as_ref()) else { + return false }; - - public_key.verify(&sig, message.as_ref()).is_ok() + public.verify(&signature, message.as_ref()).is_ok() } /// Return a vec filled with raw data. diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index ce82c5253902f..11e92150f82bb 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -53,6 +53,7 @@ pub mod hashing; pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64}; pub mod crypto; pub mod hexdisplay; +pub use paste; pub mod defer; pub mod ecdsa; @@ -405,38 +406,62 @@ pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB #[rustfmt::skip] macro_rules! generate_feature_enabled_macro { ( $macro_name:ident, $feature_name:meta, $d:tt ) => { - /// Enable/disable the given code depending on - #[doc = concat!("`", stringify!($feature_name), "`")] - /// being enabled for the crate or not. - /// - /// # Example - /// - /// ```nocompile - /// // Will add the code depending on the feature being enabled or not. - #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] - /// ``` - #[cfg($feature_name)] - #[macro_export] - macro_rules! $macro_name { - ( $d ( $d input:tt )* ) => { - $d ( $d input )* + $crate::paste::paste!{ + /// Enable/disable the given code depending on + #[doc = concat!("`", stringify!($feature_name), "`")] + /// being enabled for the crate or not. 
+ /// + /// # Example + /// + /// ```nocompile + /// // Will add the code depending on the feature being enabled or not. + #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] + /// ``` + #[cfg($feature_name)] + #[macro_export] + macro_rules! [<_ $macro_name>] { + ( $d ( $d input:tt )* ) => { + $d ( $d input )* + } + } + + /// Enable/disable the given code depending on + #[doc = concat!("`", stringify!($feature_name), "`")] + /// being enabled for the crate or not. + /// + /// # Example + /// + /// ```nocompile + /// // Will add the code depending on the feature being enabled or not. + #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] + /// ``` + #[cfg(not($feature_name))] + #[macro_export] + macro_rules! [<_ $macro_name>] { + ( $d ( $d input:tt )* ) => {}; } - } - /// Enable/disable the given code depending on - #[doc = concat!("`", stringify!($feature_name), "`")] - /// being enabled for the crate or not. - /// - /// # Example - /// - /// ```nocompile - /// // Will add the code depending on the feature being enabled or not. - #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] - /// ``` - #[cfg(not($feature_name))] - #[macro_export] - macro_rules! $macro_name { - ( $d ( $d input:tt )* ) => {}; + // Work around for: + #[doc(hidden)] + pub use [<_ $macro_name>] as $macro_name; } }; } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[should_panic] + fn generate_feature_enabled_macro_panics() { + generate_feature_enabled_macro!(if_test, test, $); + if_test!(panic!("This should panic")); + } + + #[test] + fn generate_feature_enabled_macro_works() { + generate_feature_enabled_macro!(if_not_test, not(test), $); + if_not_test!(panic!("This should not panic")); + } +} diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 809d83aaf08a8..9b1c82205de10 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -15,33 +15,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -// tag::description[] //! Simple sr25519 (Schnorr-Ristretto) API. //! //! Note: `CHAIN_CODE_LENGTH` must be equal to `crate::crypto::JUNCTION_ID_LEN` //! for this to work. -// end::description[] + #[cfg(feature = "std")] use crate::crypto::Ss58Codec; #[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveJunction, Infallible, Pair as TraitPair, SecretStringError}; -#[cfg(feature = "std")] -use bip39::{Language, Mnemonic, MnemonicType}; +use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "full_crypto")] use schnorrkel::{ derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH}, signing_context, ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey, }; -#[cfg(feature = "full_crypto")] use sp_std::vec::Vec; -#[cfg(feature = "std")] -use substrate_bip39::mini_secret_from_entropy; use crate::{ - crypto::{ - ByteArray, CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, - UncheckedFrom, - }, + crypto::{ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom}, hash::{H256, H512}, }; use codec::{Decode, Encode, MaxEncodedLen}; @@ -207,8 +198,6 @@ impl<'de> Deserialize<'de> for Public { } /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. 
-/// -/// Instead of importing it for the local module, alias it to be available as a public type #[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)] pub struct Signature(pub [u8; 64]); @@ -390,23 +379,7 @@ impl ByteArray for Public { const LEN: usize = 32; } -impl TraitPublic for Public { - fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair(CRYPTO_ID, self.to_raw_vec()) - } -} - -impl From for CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } -} - -impl From<&Public> for CryptoTypePublicPair { - fn from(key: &Public) -> Self { - CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) - } -} +impl TraitPublic for Public {} #[cfg(feature = "std")] impl From for Pair { @@ -458,16 +431,6 @@ impl TraitPair for Pair { type Public = Public; type Seed = Seed; type Signature = Signature; - type DeriveError = Infallible; - - /// Make a new key pair from raw secret seed material. - /// - /// This is generated using schnorrkel's Mini-Secret-Keys. - /// - /// A MiniSecretKey is literally what Ed25519 calls a SecretKey, which is just 32 random bytes. - fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]).expect("32 bytes can always build a key; qed") - } /// Get the public key. fn public(&self) -> Public { @@ -476,10 +439,12 @@ impl TraitPair for Pair { Public(pk) } - /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it - /// will return `None`. + /// Make a new key pair from raw secret seed material. + /// + /// This is generated using schnorrkel's Mini-Secret-Keys. /// - /// You should never need to use this; generate(), generate_with_phrase(), from_phrase() + /// A `MiniSecretKey` is literally what Ed25519 calls a `SecretKey`, which is just 32 random + /// bytes. 
fn from_seed_slice(seed: &[u8]) -> Result { match seed.len() { MINI_SECRET_KEY_LENGTH => Ok(Pair( @@ -495,42 +460,16 @@ impl TraitPair for Pair { _ => Err(SecretStringError::InvalidSeedLength), } } - #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) - .expect("All phrases generated by Mnemonic are valid; qed"); - (pair, phrase.to_owned(), seed) - } - #[cfg(feature = "std")] - fn from_phrase( - phrase: &str, - password: Option<&str>, - ) -> Result<(Pair, Seed), SecretStringError> { - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase) - .map(|m| Self::from_entropy(m.entropy(), password)) - } fn derive>( &self, path: Iter, seed: Option, - ) -> Result<(Pair, Option), Self::DeriveError> { - let seed = if let Some(s) = seed { - if let Ok(msk) = MiniSecretKey::from_bytes(&s) { - if msk.expand(ExpansionMode::Ed25519) == self.0.secret { - Some(msk) - } else { - None - } - } else { - None - } - } else { - None - }; + ) -> Result<(Pair, Option), DeriveError> { + let seed = seed + .and_then(|s| MiniSecretKey::from_bytes(&s).ok()) + .filter(|msk| msk.expand(ExpansionMode::Ed25519) == self.0.secret); + let init = self.0.secret.clone(); let (result, seed) = path.fold((init, seed), |(acc, acc_seed), j| match (j, acc_seed) { (DeriveJunction::Soft(cc), _) => (acc.derived_key_simple(ChainCode(cc), &[]).0, None), @@ -548,21 +487,13 @@ impl TraitPair for Pair { } fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - Self::verify_weak(&sig.0[..], message, pubkey) - } - - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let signature = match schnorrkel::Signature::from_bytes(sig) { - Ok(signature) => signature, - Err(_) => return false, + let Ok(signature) = schnorrkel::Signature::from_bytes(sig.as_ref()) else { + return false }; - - let pub_key = match PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pub_key) => pub_key, - Err(_) => return false, + let Ok(public) = PublicKey::from_bytes(pubkey.as_ref()) else { + return false }; - - pub_key.verify_simple(SIGNING_CTX, message.as_ref(), &signature).is_ok() + public.verify_simple(SIGNING_CTX, message.as_ref(), &signature).is_ok() } fn to_raw_vec(&self) -> Vec { @@ -572,18 +503,6 @@ impl TraitPair for Pair { #[cfg(feature = "std")] impl Pair { - /// Make a new key pair from binary data derived from a valid seed phrase. - /// - /// This uses a key derivation function to convert the entropy into a seed, then returns - /// the pair generated from it. - pub fn from_entropy(entropy: &[u8], password: Option<&str>) -> (Pair, Seed) { - let mini_key: MiniSecretKey = mini_secret_from_entropy(entropy, password.unwrap_or("")) - .expect("32 bytes can always build a key; qed"); - - let kp = mini_key.expand_to_keypair(ExpansionMode::Ed25519); - (Pair(kp), mini_key.to_bytes()) - } - /// Verify a signature on a message. Returns `true` if the signature is good. /// Supports old 0.1.1 deprecated signatures and should be used only for backward /// compatibility. @@ -614,48 +533,197 @@ impl CryptoType for Pair { type Pair = Pair; } -/// Batch verification. -/// -/// `messages`, `signatures` and `pub_keys` should all have equal length. -/// -/// Returns `true` if all signatures are correct, `false` otherwise. 
-#[cfg(feature = "std")] -pub fn verify_batch( - messages: Vec<&[u8]>, - signatures: Vec<&Signature>, - pub_keys: Vec<&Public>, -) -> bool { - let mut sr_pub_keys = Vec::with_capacity(pub_keys.len()); - for pub_key in pub_keys { - match schnorrkel::PublicKey::from_bytes(pub_key.as_ref()) { - Ok(pk) => sr_pub_keys.push(pk), - Err(_) => return false, - }; +/// Schnorrkel VRF related types and operations. +pub mod vrf { + use super::*; + #[cfg(feature = "full_crypto")] + use crate::crypto::VrfSigner; + use crate::crypto::{VrfCrypto, VrfVerifier}; + use schnorrkel::{ + errors::MultiSignatureStage, + vrf::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, + SignatureError, + }; + + /// VRF transcript ready to be used for VRF sign/verify operations. + pub struct VrfTranscript(pub merlin::Transcript); + + impl VrfTranscript { + /// Build a new transcript ready to be used by a VRF signer/verifier. + pub fn new(label: &'static [u8], data: &[(&'static [u8], &[u8])]) -> Self { + let mut transcript = merlin::Transcript::new(label); + data.iter().for_each(|(l, b)| transcript.append_message(l, b)); + VrfTranscript(transcript) + } } - let mut sr_signatures = Vec::with_capacity(signatures.len()); - for signature in signatures { - match schnorrkel::Signature::from_bytes(signature.as_ref()) { - Ok(s) => sr_signatures.push(s), - Err(_) => return false, - }; + /// VRF signature data + #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct VrfSignature { + /// The initial VRF configuration + pub output: VrfOutput, + /// The calculated VRF proof + pub proof: VrfProof, } - let mut messages: Vec = messages - .into_iter() - .map(|msg| signing_context(SIGNING_CTX).bytes(msg)) - .collect(); + /// VRF output type suitable for schnorrkel operations. + #[derive(Clone, Debug, PartialEq, Eq)] + pub struct VrfOutput(pub schnorrkel::vrf::VRFOutput); - schnorrkel::verify_batch(&mut messages, &sr_signatures, &sr_pub_keys, true).is_ok() + impl Encode for VrfOutput { + fn encode(&self) -> Vec { + self.0.as_bytes().encode() + } + } + + impl Decode for VrfOutput { + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; + Ok(Self(schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?)) + } + } + + impl MaxEncodedLen for VrfOutput { + fn max_encoded_len() -> usize { + <[u8; VRF_OUTPUT_LENGTH]>::max_encoded_len() + } + } + + impl TypeInfo for VrfOutput { + type Identity = [u8; VRF_OUTPUT_LENGTH]; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + /// VRF proof type suitable for schnorrkel operations. 
+ #[derive(Clone, Debug, PartialEq, Eq)] + pub struct VrfProof(pub schnorrkel::vrf::VRFProof); + + impl Encode for VrfProof { + fn encode(&self) -> Vec { + self.0.to_bytes().encode() + } + } + + impl Decode for VrfProof { + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_PROOF_LENGTH]>::decode(i)?; + Ok(Self(schnorrkel::vrf::VRFProof::from_bytes(&decoded).map_err(convert_error)?)) + } + } + + impl MaxEncodedLen for VrfProof { + fn max_encoded_len() -> usize { + <[u8; VRF_PROOF_LENGTH]>::max_encoded_len() + } + } + + impl TypeInfo for VrfProof { + type Identity = [u8; VRF_PROOF_LENGTH]; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + #[cfg(feature = "full_crypto")] + impl VrfCrypto for Pair { + type VrfSignature = VrfSignature; + type VrfInput = VrfTranscript; + } + + #[cfg(feature = "full_crypto")] + impl VrfSigner for Pair { + fn vrf_sign(&self, transcript: &Self::VrfInput) -> Self::VrfSignature { + let (inout, proof, _) = self.0.vrf_sign(transcript.0.clone()); + VrfSignature { output: VrfOutput(inout.to_output()), proof: VrfProof(proof) } + } + } + + impl VrfCrypto for Public { + type VrfSignature = VrfSignature; + type VrfInput = VrfTranscript; + } + + impl VrfVerifier for Public { + fn vrf_verify(&self, transcript: &Self::VrfInput, signature: &Self::VrfSignature) -> bool { + schnorrkel::PublicKey::from_bytes(self) + .and_then(|public| { + public.vrf_verify(transcript.0.clone(), &signature.output.0, &signature.proof.0) + }) + .is_ok() + } + } + + fn convert_error(e: SignatureError) -> codec::Error { + use MultiSignatureStage::*; + use SignatureError::*; + match e { + EquationFalse => "Signature error: `EquationFalse`".into(), + PointDecompressionError => "Signature error: `PointDecompressionError`".into(), + ScalarFormatError => "Signature error: `ScalarFormatError`".into(), + NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), + BytesLengthError { .. } => "Signature error: `BytesLengthError`".into(), + MuSigAbsent { musig_stage: Commitment } => + "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigAbsent { musig_stage: Reveal } => + "Signature error: `MuSigAbsent` at stage `Reveal`".into(), + MuSigAbsent { musig_stage: Cosignature } => + "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate" + .into(), + } + } + + #[cfg(feature = "full_crypto")] + impl Pair { + /// Generate bytes from the given VRF configuration. 
+ pub fn make_bytes>( + &self, + context: &[u8], + transcript: &VrfTranscript, + ) -> B { + let inout = self.0.vrf_create_hash(transcript.0.clone()); + inout.make_bytes::(context) + } + } + + impl Public { + /// Generate bytes from the given VRF configuration. + pub fn make_bytes>( + &self, + context: &[u8], + transcript: &VrfTranscript, + output: &VrfOutput, + ) -> Result { + let pubkey = schnorrkel::PublicKey::from_bytes(&self.0).map_err(convert_error)?; + let inout = output + .0 + .attach_input_hash(&pubkey, transcript.0.clone()) + .map_err(convert_error)?; + Ok(inout.make_bytes::(context)) + } + } } #[cfg(test)] -mod compatibility_test { +mod tests { use super::*; - use crate::crypto::DEV_PHRASE; - - // NOTE: tests to ensure addresses that are created with the `0.1.x` version (pre-audit) are - // still functional. + use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; + use serde_json; #[test] fn derive_soft_known_pair_should_work() { @@ -690,13 +758,6 @@ mod compatibility_test { assert!(Pair::verify_deprecated(&signature, &message[..], &public)); assert!(!Pair::verify(&signature, &message[..], &public)); } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; - use serde_json; #[test] fn default_phrase_should_be_used() { @@ -889,4 +950,20 @@ mod test { // Poorly-sized assert!(deserialize_signature("\"abc123\"").is_err()); } + + #[test] + fn vrf_make_bytes_matches() { + use super::vrf::*; + use crate::crypto::VrfSigner; + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let transcript = VrfTranscript::new(b"test", &[(b"foo", b"bar")]); + + let signature = pair.vrf_sign(&transcript); + + let ctx = b"randbytes"; + let b1 = pair.make_bytes::<[u8; 32]>(ctx, &transcript); + let b2 = public.make_bytes::<[u8; 32]>(ctx, &transcript, &signature.output).unwrap(); + assert_eq!(b1, b2); + } } diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 0e42ee3ad4b12..40137053ab752 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -157,6 +157,16 @@ pub trait ReadRuntimeVersion: Send + Sync { ) -> Result, String>; } +impl ReadRuntimeVersion for std::sync::Arc { + fn read_runtime_version( + &self, + wasm_code: &[u8], + ext: &mut dyn Externalities, + ) -> Result, String> { + (**self).read_runtime_version(wasm_code, ext) + } +} + sp_externalities::decl_extension! { /// An extension that provides functionality to read version information from a given wasm blob. pub struct ReadRuntimeVersionExt(Box); @@ -169,31 +179,6 @@ impl ReadRuntimeVersionExt { } } -sp_externalities::decl_extension! { - /// Task executor extension. - pub struct TaskExecutorExt(Box); -} - -impl TaskExecutorExt { - /// New instance of task executor extension. - pub fn new(spawn_handle: impl SpawnNamed + Send + 'static) -> Self { - Self(Box::new(spawn_handle)) - } -} - -/// Runtime spawn extension. -pub trait RuntimeSpawn: Send { - /// Create new runtime instance and use dynamic dispatch to invoke with specified payload. - /// - /// Returns handle of the spawned task. - /// - /// Function pointers (`dispatcher_ref`, `func`) are WASM pointer types. - fn spawn_call(&self, dispatcher_ref: u32, func: u32, payload: Vec) -> u64; - - /// Join the result of previously created runtime instance invocation. - fn join(&self, handle: u64) -> Vec; -} - /// Something that can spawn tasks (blocking and non-blocking) with an assigned name /// and optional group. 
#[dyn_clonable::clonable] diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index a903b704410c2..183d7bd5a7720 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.10" -syn = "1.0.98" -proc-macro2 = "1.0" +quote = "1.0.26" +syn = "2.0.14" +proc-macro2 = "1.0.56" [features] default = [ "std" ] diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index fd073db69a6af..2f5616770d681 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.30", optional = true } sp-core = { version = "7.0.0", default-features = false, path = "../core" } diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index e56bfcf56041a..c6e716396aea4 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -9,6 +9,7 @@ repository = "https://github.com/paritytech/substrate/" description = "I/O for Substrate runtimes" documentation = "https://docs.rs/sp-io" readme = "README.md" +build = "build.rs" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -37,6 +38,9 @@ ed25519-dalek = { version = "1.0.1", default-features = false, optional = true } # Force the usage of ed25519, this is being used in `ed25519-dalek`. ed25519 = { version = "1.5.2", optional = true } +[build-dependencies] +rustversion = "1.0.6" + [features] default = ["std"] std = [ diff --git a/primitives/io/build.rs b/primitives/io/build.rs new file mode 100644 index 0000000000000..8a9c0b6420b29 --- /dev/null +++ b/primitives/io/build.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
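Illustrative sketch (not part of the diff): the build script above emits `cargo:rustc-cfg=enable_alloc_error_handler` only on pre-1.68 compilers building without `std`; downstream code is gated on that cfg exactly like the `#![cfg_attr(...)]` switch in `primitives/io/src/lib.rs` further down.

// Compiled only when build.rs printed `cargo:rustc-cfg=enable_alloc_error_handler`,
// i.e. on rustc < 1.68 without the `std` feature.
#[cfg(enable_alloc_error_handler)]
mod needs_alloc_error_handler {
    // On rustc >= 1.68 the cfg is never emitted, so this module disappears and the
    // nightly `alloc_error_handler` feature is no longer required.
}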
+ +#[rustversion::before(1.68)] +fn main() { + if !cfg!(feature = "std") { + println!("cargo:rustc-cfg=enable_alloc_error_handler"); + } +} + +#[rustversion::since(1.68)] +fn main() {} diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 8c3cdc668cba3..750b5d5924637 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -19,7 +19,7 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(not(feature = "std"), feature(alloc_error_handler))] +#![cfg_attr(enable_alloc_error_handler, feature(alloc_error_handler))] #![cfg_attr( feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." @@ -40,10 +40,9 @@ use sp_core::{ hexdisplay::HexDisplay, offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, storage::ChildInfo, - traits::TaskExecutorExt, }; #[cfg(feature = "std")] -use sp_keystore::{KeystoreExt, SyncCryptoStore}; +use sp_keystore::KeystoreExt; use sp_core::{ crypto::KeyTypeId, @@ -75,12 +74,6 @@ use secp256k1::{ #[cfg(feature = "std")] use sp_externalities::{Externalities, ExternalitiesExt}; -#[cfg(feature = "std")] -mod batch_verifier; - -#[cfg(feature = "std")] -use batch_verifier::BatchVerifier; - pub use sp_externalities::MultiRemovalResults; #[cfg(feature = "std")] @@ -731,10 +724,9 @@ impl Default for UseDalekExt { pub trait Crypto { /// Returns all `ed25519` public keys for the given key id from the keystore. fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::ed25519_public_keys(keystore, id) + self.extension::() + .expect("No `keystore` associated for the current context!") + .ed25519_public_keys(id) } /// Generate an `ed22519` key for the given key type using an optional `seed` and @@ -745,10 +737,9 @@ pub trait Crypto { /// Returns the public key. fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> ed25519::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!")); - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::ed25519_generate_new(keystore, id, seed) + self.extension::() + .expect("No `keystore` associated for the current context!") + .ed25519_generate_new(id, seed) .expect("`ed25519_generate` failed") } @@ -762,13 +753,11 @@ pub trait Crypto { pub_key: &ed25519::Public, msg: &[u8], ) -> Option { - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) + self.extension::() + .expect("No `keystore` associated for the current context!") + .ed25519_sign(id, pub_key, msg) .ok() .flatten() - .and_then(|sig| ed25519::Signature::from_slice(&sig)) } /// Verify `ed25519` signature. @@ -805,15 +794,25 @@ pub trait Crypto { /// needs to be called. /// /// Returns `true` when the verification is either successful or batched. + /// + /// NOTE: Is tagged with `register_only` to keep the functions around for backwards + /// compatibility with old runtimes, but it should not be used anymore by new runtimes. + /// The implementation emulates the old behavior, but isn't doing any batch verification + /// anymore. 
+ #[version(1, register_only)] fn ed25519_batch_verify( &mut self, sig: &ed25519::Signature, msg: &[u8], pub_key: &ed25519::Public, ) -> bool { - self.extension::() - .map(|extension| extension.push_ed25519(sig.clone(), *pub_key, msg.to_vec())) - .unwrap_or_else(|| ed25519_verify(sig, msg, pub_key)) + let res = ed25519_verify(sig, msg, pub_key); + + if let Some(ext) = self.extension::() { + ext.0 &= res; + } + + res } /// Verify `sr25519` signature. @@ -832,25 +831,36 @@ pub trait Crypto { /// needs to be called. /// /// Returns `true` when the verification is either successful or batched. + /// + /// NOTE: Is tagged with `register_only` to keep the functions around for backwards + /// compatibility with old runtimes, but it should not be used anymore by new runtimes. + /// The implementation emulates the old behavior, but isn't doing any batch verification + /// anymore. + #[version(1, register_only)] fn sr25519_batch_verify( &mut self, sig: &sr25519::Signature, msg: &[u8], pub_key: &sr25519::Public, ) -> bool { - self.extension::() - .map(|extension| extension.push_sr25519(sig.clone(), *pub_key, msg.to_vec())) - .unwrap_or_else(|| sr25519_verify(sig, msg, pub_key)) + let res = sr25519_verify(sig, msg, pub_key); + + if let Some(ext) = self.extension::() { + ext.0 &= res; + } + + res } /// Start verification extension. + /// + /// NOTE: Is tagged with `register_only` to keep the functions around for backwards + /// compatibility with old runtimes, but it should not be used anymore by new runtimes. + /// The implementation emulates the old behavior, but isn't doing any batch verification + /// anymore. + #[version(1, register_only)] fn start_batch_verify(&mut self) { - let scheduler = self - .extension::() - .expect("No task executor associated with the current context!") - .clone(); - - self.register_extension(VerificationExt(BatchVerifier::new(scheduler))) + self.register_extension(VerificationExtDeprecated(true)) .expect("Failed to register required extension: `VerificationExt`"); } @@ -860,13 +870,19 @@ pub trait Crypto { /// deferred by `sr25519_verify`/`ed25519_verify`. /// /// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called). + /// + /// NOTE: Is tagged with `register_only` to keep the functions around for backwards + /// compatibility with old runtimes, but it should not be used anymore by new runtimes. + /// The implementation emulates the old behavior, but isn't doing any batch verification + /// anymore. + #[version(1, register_only)] fn finish_batch_verify(&mut self) -> bool { let result = self - .extension::() + .extension::() .expect("`finish_batch_verify` should only be called after `start_batch_verify`") - .verify_and_clear(); + .0; - self.deregister_extension::() + self.deregister_extension::() .expect("No verification extension in current context!"); result @@ -874,10 +890,9 @@ pub trait Crypto { /// Returns all `sr25519` public keys for the given key id from the keystore. fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::sr25519_public_keys(keystore, id) + self.extension::() + .expect("No `keystore` associated for the current context!") + .sr25519_public_keys(id) } /// Generate an `sr22519` key for the given key type using an optional seed and @@ -888,10 +903,9 @@ pub trait Crypto { /// Returns the public key. 
fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> sr25519::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!")); - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::sr25519_generate_new(keystore, id, seed) + self.extension::() + .expect("No `keystore` associated for the current context!") + .sr25519_generate_new(id, seed) .expect("`sr25519_generate` failed") } @@ -905,13 +919,11 @@ pub trait Crypto { pub_key: &sr25519::Public, msg: &[u8], ) -> Option { - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) + self.extension::() + .expect("No `keystore` associated for the current context!") + .sr25519_sign(id, pub_key, msg) .ok() .flatten() - .and_then(|sig| sr25519::Signature::from_slice(&sig)) } /// Verify an `sr25519` signature. @@ -924,10 +936,9 @@ pub trait Crypto { /// Returns all `ecdsa` public keys for the given key id from the keystore. fn ecdsa_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::ecdsa_public_keys(keystore, id) + self.extension::() + .expect("No `keystore` associated for the current context!") + .ecdsa_public_keys(id) } /// Generate an `ecdsa` key for the given key type using an optional `seed` and @@ -938,10 +949,10 @@ pub trait Crypto { /// Returns the public key. fn ecdsa_generate(&mut self, id: KeyTypeId, seed: Option>) -> ecdsa::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!")); - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::ecdsa_generate_new(keystore, id, seed).expect("`ecdsa_generate` failed") + self.extension::() + .expect("No `keystore` associated for the current context!") + .ecdsa_generate_new(id, seed) + .expect("`ecdsa_generate` failed") } /// Sign the given `msg` with the `ecdsa` key that corresponds to the given public key and @@ -954,13 +965,11 @@ pub trait Crypto { pub_key: &ecdsa::Public, msg: &[u8], ) -> Option { - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) + self.extension::() + .expect("No `keystore` associated for the current context!") + .ecdsa_sign(id, pub_key, msg) .ok() .flatten() - .and_then(|sig| ecdsa::Signature::from_slice(&sig)) } /// Sign the given a pre-hashed `msg` with the `ecdsa` key that corresponds to the given public @@ -973,10 +982,11 @@ pub trait Crypto { pub_key: &ecdsa::Public, msg: &[u8; 32], ) -> Option { - let keystore = &***self - .extension::() - .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::ecdsa_sign_prehashed(keystore, id, pub_key, msg).ok().flatten() + self.extension::() + .expect("No `keystore` associated for the current context!") + .ecdsa_sign_prehashed(id, pub_key, msg) + .ok() + .flatten() } /// Verify `ecdsa` signature. @@ -1015,15 +1025,25 @@ pub trait Crypto { /// needs to be called. /// /// Returns `true` when the verification is either successful or batched. + /// + /// NOTE: Is tagged with `register_only` to keep the functions around for backwards + /// compatibility with old runtimes, but it should not be used anymore by new runtimes. 
+ /// The implementation emulates the old behavior, but isn't doing any batch verification + /// anymore. + #[version(1, register_only)] fn ecdsa_batch_verify( &mut self, sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public, ) -> bool { - self.extension::() - .map(|extension| extension.push_ecdsa(sig.clone(), *pub_key, msg.to_vec())) - .unwrap_or_else(|| ecdsa_verify(sig, msg, pub_key)) + let res = ecdsa_verify(sig, msg, pub_key); + + if let Some(ext) = self.extension::() { + ext.0 &= res; + } + + res } /// Verify and recover a SECP256k1 ECDSA signature. @@ -1196,8 +1216,10 @@ pub trait OffchainIndex { #[cfg(feature = "std")] sp_externalities::decl_extension! { - /// Batch verification extension to register/retrieve from the externalities. - pub struct VerificationExt(BatchVerifier); + /// Deprecated verification context. + /// + /// Stores the combined result of all verifications that are done in the same context. + struct VerificationExtDeprecated(bool); } /// Interface that provides functions to access the offchain functionality. @@ -1386,7 +1408,7 @@ pub trait Offchain { /// Read all response headers. /// /// Returns a vector of pairs `(HeaderKey, HeaderValue)`. - /// NOTE response headers have to be read before response body. + /// NOTE: response headers have to be read before response body. fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { self.extension::() .expect("http_response_headers can be called only in the offchain worker context") @@ -1399,7 +1421,7 @@ pub trait Offchain { /// is reached or server closed the connection. /// If `0` is returned it means that the response has been fully consumed /// and the `request_id` is now invalid. - /// NOTE this implies that response headers must be read before draining the body. + /// NOTE: this implies that response headers must be read before draining the body. /// Passing `None` as a deadline blocks forever. fn http_response_read_body( &mut self, @@ -1653,7 +1675,7 @@ pub fn panic(info: &core::panic::PanicInfo) -> ! { } /// A default OOM handler for WASM environment. -#[cfg(all(not(feature = "disable_oom"), not(feature = "std")))] +#[cfg(all(not(feature = "disable_oom"), enable_alloc_error_handler))] #[alloc_error_handler] pub fn oom(_: core::alloc::Layout) -> ! 
{ #[cfg(feature = "improved_panic_error_reporting")] @@ -1694,12 +1716,8 @@ pub type SubstrateHostFunctions = ( #[cfg(test)] mod tests { use super::*; - use sp_core::{ - crypto::UncheckedInto, map, storage::Storage, testing::TaskExecutor, - traits::TaskExecutorExt, - }; + use sp_core::{crypto::UncheckedInto, map, storage::Storage}; use sp_state_machine::BasicExternalities; - use std::any::TypeId; #[test] fn storage_works() { @@ -1791,54 +1809,6 @@ mod tests { }); } - #[test] - fn batch_verify_start_finish_works() { - let mut ext = BasicExternalities::default(); - ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); - - ext.execute_with(|| { - crypto::start_batch_verify(); - }); - - assert!(ext.extensions().get_mut(TypeId::of::()).is_some()); - - ext.execute_with(|| { - assert!(crypto::finish_batch_verify()); - }); - - assert!(ext.extensions().get_mut(TypeId::of::()).is_none()); - } - - #[test] - fn long_sr25519_batching() { - let mut ext = BasicExternalities::default(); - ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); - ext.execute_with(|| { - let pair = sr25519::Pair::generate_with_phrase(None).0; - let pair_unused = sr25519::Pair::generate_with_phrase(None).0; - crypto::start_batch_verify(); - for it in 0..70 { - let msg = format!("Schnorrkel {}!", it); - let signature = pair.sign(msg.as_bytes()); - crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public()); - } - - // push invalid - let msg = b"asdf!"; - let signature = pair.sign(msg); - crypto::sr25519_batch_verify(&signature, msg, &pair_unused.public()); - assert!(!crypto::finish_batch_verify()); - - crypto::start_batch_verify(); - for it in 0..70 { - let msg = format!("Schnorrkel {}!", it); - let signature = pair.sign(msg.as_bytes()); - crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public()); - } - assert!(crypto::finish_batch_verify()); - }); - } - fn zero_ed_pub() -> ed25519::Public { [0u8; 32].unchecked_into() } @@ -1847,90 +1817,6 @@ mod tests { ed25519::Signature::from_raw([0u8; 64]) } - fn zero_sr_pub() -> sr25519::Public { - [0u8; 32].unchecked_into() - } - - fn zero_sr_sig() -> sr25519::Signature { - sr25519::Signature::from_raw([0u8; 64]) - } - - #[test] - fn batching_works() { - let mut ext = BasicExternalities::default(); - ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); - - ext.execute_with(|| { - // valid ed25519 signature - crypto::start_batch_verify(); - crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub()); - assert!(crypto::finish_batch_verify()); - - // 2 valid ed25519 signatures - crypto::start_batch_verify(); - - let pair = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Important message"; - let signature = pair.sign(msg); - crypto::ed25519_batch_verify(&signature, msg, &pair.public()); - - let pair = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Even more important message"; - let signature = pair.sign(msg); - crypto::ed25519_batch_verify(&signature, msg, &pair.public()); - - assert!(crypto::finish_batch_verify()); - - // 1 valid, 1 invalid ed25519 signature - crypto::start_batch_verify(); - - let pair1 = ed25519::Pair::generate_with_phrase(None).0; - let pair2 = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Important message"; - let signature = pair1.sign(msg); - - crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub()); - crypto::ed25519_batch_verify(&signature, msg, &pair1.public()); - crypto::ed25519_batch_verify(&signature, msg, &pair2.public()); - - 
assert!(!crypto::finish_batch_verify()); - - // 1 valid ed25519, 2 valid sr25519 - crypto::start_batch_verify(); - - let pair = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Ed25519 batching"; - let signature = pair.sign(msg); - crypto::ed25519_batch_verify(&signature, msg, &pair.public()); - - let pair = sr25519::Pair::generate_with_phrase(None).0; - let msg = b"Schnorrkel rules"; - let signature = pair.sign(msg); - crypto::sr25519_batch_verify(&signature, msg, &pair.public()); - - let pair = sr25519::Pair::generate_with_phrase(None).0; - let msg = b"Schnorrkel batches!"; - let signature = pair.sign(msg); - crypto::sr25519_batch_verify(&signature, msg, &pair.public()); - - assert!(crypto::finish_batch_verify()); - - // 1 valid sr25519, 1 invalid sr25519 - crypto::start_batch_verify(); - - let pair1 = sr25519::Pair::generate_with_phrase(None).0; - let pair2 = sr25519::Pair::generate_with_phrase(None).0; - let msg = b"Schnorrkcel!"; - let signature = pair1.sign(msg); - - crypto::sr25519_batch_verify(&signature, msg, &pair1.public()); - crypto::sr25519_batch_verify(&signature, msg, &pair2.public()); - crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); - - assert!(!crypto::finish_batch_verify()); - }); - } - #[test] fn use_dalek_ext_works() { let mut ext = BasicExternalities::default(); diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index 9386cb5d104d2..6b0194247219a 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -13,12 +13,9 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } futures = "0.3.21" -merlin = { version = "2.0", default-features = false } parking_lot = { version = "0.12.1", default-features = false } -schnorrkel = { version = "0.9.1", default-features = false, features = ["preaudit_deprecated", "u64_backend"] } serde = { version = "1.0", optional = true } thiserror = "1.0" sp-core = { version = "7.0.0", default-features = false, path = "../core" } @@ -32,8 +29,6 @@ rand_chacha = "0.2.2" default = ["std"] std = [ "codec/std", - "merlin/std", - "schnorrkel/std", "serde", "sp-core/std", "sp-externalities/std", diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 17c435483bae5..5b41f3b80043d 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -16,19 +16,16 @@ // limitations under the License. //! Keystore traits + pub mod testing; -pub mod vrf; -use crate::vrf::{VRFSignature, VRFTranscriptData}; -use async_trait::async_trait; -use futures::{executor::block_on, future::join_all}; use sp_core::{ - crypto::{CryptoTypePublicPair, KeyTypeId}, + crypto::{ByteArray, CryptoTypeId, KeyTypeId}, ecdsa, ed25519, sr25519, }; use std::sync::Arc; -/// CryptoStore error +/// Keystore error #[derive(Debug, thiserror::Error)] pub enum Error { /// Public key type is not supported @@ -45,349 +42,189 @@ pub enum Error { Other(String), } -/// Something that generates, stores and provides access to keys. -#[async_trait] -pub trait CryptoStore: Send + Sync { - /// Returns all sr25519 public keys for the given key type. - async fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec; +/// Something that generates, stores and provides access to secret keys. +pub trait Keystore: Send + Sync { + /// Returns all the sr25519 public keys for the given key type. 
+ fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec; + /// Generate a new sr25519 key pair for the given key type and an optional seed. /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. - /// - /// Returns the public key of the generated key pair. - async fn sr25519_generate_new( + /// Returns an `sr25519::Public` key of the generated key pair or an `Err` if + /// something failed during key generation. + fn sr25519_generate_new( &self, - id: KeyTypeId, + key_type: KeyTypeId, seed: Option<&str>, ) -> Result; - /// Returns all ed25519 public keys for the given key type. - async fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec; - /// Generate a new ed25519 key pair for the given key type and an optional seed. - /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. - /// - /// Returns the public key of the generated key pair. - async fn ed25519_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result; - /// Returns all ecdsa public keys for the given key type. - async fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec; - /// Generate a new ecdsa key pair for the given key type and an optional seed. + + /// Generate an sr25519 signature for a given message. /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. + /// Receives [`KeyTypeId`] and an [`sr25519::Public`] key to be able to map + /// them to a private key that exists in the keystore. /// - /// Returns the public key of the generated key pair. - async fn ecdsa_generate_new( + /// Returns an [`sr25519::Signature`] or `None` in case the given `key_type` + /// and `public` combination doesn't exist in the keystore. + /// An `Err` will be returned if generating the signature itself failed. + fn sr25519_sign( &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result; + key_type: KeyTypeId, + public: &sr25519::Public, + msg: &[u8], + ) -> Result, Error>; - /// Insert a new key. This doesn't require any known of the crypto; but a public key must be - /// manually provided. + /// Generate an sr25519 VRF signature for a given transcript data. /// - /// Places it into the file system store. + /// Receives [`KeyTypeId`] and an [`sr25519::Public`] key to be able to map + /// them to a private key that exists in the keystore. /// - /// `Err` if there's some sort of weird filesystem error, but should generally be `Ok`. - async fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()>; - - /// Find intersection between provided keys and supported keys - /// - /// Provided a list of (CryptoTypeId,[u8]) pairs, this would return - /// a filtered set of public keys which are supported by the keystore. - async fn supported_keys( + /// Returns a result containing the signature data. + /// Namely, VRFOutput and VRFProof which are returned inside the `VRFSignature` + /// container struct. + /// Returns `None` if the given `key_type` and `public` combination doesn't + /// exist in the keystore or an `Err` when something failed. + fn sr25519_vrf_sign( &self, - id: KeyTypeId, - keys: Vec, - ) -> Result, Error>; - /// List all supported keys - /// - /// Returns a set of public keys the signer supports. - async fn keys(&self, id: KeyTypeId) -> Result, Error>; + key_type: KeyTypeId, + public: &sr25519::Public, + transcript: &sr25519::vrf::VrfTranscript, + ) -> Result, Error>; - /// Checks if the private keys for the given public key and key type combinations exist. 
- /// - /// Returns `true` iff all private keys could be found. - async fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool; + /// Returns all ed25519 public keys for the given key type. + fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec; - /// Sign with key - /// - /// Signs a message with the private key that matches - /// the public key passed. + /// Generate a new ed25519 key pair for the given key type and an optional seed. /// - /// Returns the SCALE encoded signature if key is found and supported, `None` if the key doesn't - /// exist or an error when something failed. - async fn sign_with( + /// Returns an `ed25519::Public` key of the generated key pair or an `Err` if + /// something failed during key generation. + fn ed25519_generate_new( &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, - msg: &[u8], - ) -> Result>, Error>; + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result; - /// Sign with any key + /// Generate an ed25519 signature for a given message. /// - /// Given a list of public keys, find the first supported key and - /// sign the provided message with that key. + /// Receives [`KeyTypeId`] and an [`ed25519::Public`] key to be able to map + /// them to a private key that exists in the keystore. /// - /// Returns a tuple of the used key and the SCALE encoded signature or `None` if no key could - /// be found to sign. - async fn sign_with_any( + /// Returns an [`ed25519::Signature`] or `None` in case the given `key_type` + /// and `public` combination doesn't exist in the keystore. + /// An `Err` will be returned if generating the signature itself failed. + fn ed25519_sign( &self, - id: KeyTypeId, - keys: Vec, + key_type: KeyTypeId, + public: &ed25519::Public, msg: &[u8], - ) -> Result)>, Error> { - if keys.len() == 1 { - return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))) - } else { - for k in self.supported_keys(id, keys).await? { - if let Ok(Some(sign)) = self.sign_with(id, &k, msg).await { - return Ok(Some((k, sign))) - } - } - } + ) -> Result, Error>; - Ok(None) - } + /// Returns all ecdsa public keys for the given key type. + fn ecdsa_public_keys(&self, key_type: KeyTypeId) -> Vec; - /// Sign with all keys - /// - /// Provided a list of public keys, sign a message with - /// each key given that the key is supported. + /// Generate a new ecdsa key pair for the given key type and an optional seed. /// - /// Returns a list of `Result`s each representing the SCALE encoded - /// signature of each key, `None` if the key doesn't exist or a error when something failed. - async fn sign_with_all( + /// Returns an `ecdsa::Public` key of the generated key pair or an `Err` if + /// something failed during key generation. + fn ecdsa_generate_new( &self, - id: KeyTypeId, - keys: Vec, - msg: &[u8], - ) -> Result>, Error>>, ()> { - let futs = keys.iter().map(|k| self.sign_with(id, k, msg)); - - Ok(join_all(futs).await) - } + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result; - /// Generate VRF signature for given transcript data. - /// - /// Receives KeyTypeId and Public key to be able to map - /// them to a private key that exists in the keystore which - /// is, in turn, used for signing the provided transcript. + /// Generate an ecdsa signature for a given message. /// - /// Returns a result containing the signature data. - /// Namely, VRFOutput and VRFProof which are returned - /// inside the `VRFSignature` container struct. 
+ /// Receives [`KeyTypeId`] and an [`ecdsa::Public`] key to be able to map + /// them to a private key that exists in the keystore. /// - /// This function will return `None` if the given `key_type` and `public` combination - /// doesn't exist in the keystore or an `Err` when something failed. - async fn sr25519_vrf_sign( + /// Returns an [`ecdsa::Signature`] or `None` in case the given `key_type` + /// and `public` combination doesn't exist in the keystore. + /// An `Err` will be returned if generating the signature itself failed. + fn ecdsa_sign( &self, key_type: KeyTypeId, - public: &sr25519::Public, - transcript_data: VRFTranscriptData, - ) -> Result, Error>; + public: &ecdsa::Public, + msg: &[u8], + ) -> Result, Error>; - /// Generate an ECDSA signature for a given pre-hashed message. + /// Generate an ecdsa signature for a given pre-hashed message. /// /// Receives [`KeyTypeId`] and an [`ecdsa::Public`] key to be able to map - /// them to a private key that exists in the keystore. This private key is, - /// in turn, used for signing the provided pre-hashed message. + /// them to a private key that exists in the keystore. /// - /// The `msg` argument provided should be a hashed message for which an - /// ECDSA signature should be generated. - /// - /// Returns an [`ecdsa::Signature`] or `None` in case the given `id` and - /// `public` combination doesn't exist in the keystore. An `Err` will be - /// returned if generating the signature itself failed. - async fn ecdsa_sign_prehashed( + /// Returns an [`ecdsa::Signature`] or `None` in case the given `key_type` + /// and `public` combination doesn't exist in the keystore. + /// An `Err` will be returned if generating the signature itself failed. + fn ecdsa_sign_prehashed( &self, - id: KeyTypeId, + key_type: KeyTypeId, public: &ecdsa::Public, msg: &[u8; 32], ) -> Result, Error>; -} - -/// Sync version of the CryptoStore -/// -/// Some parts of Substrate still rely on a sync version of the `CryptoStore`. -/// To make the transition easier this auto trait wraps any async `CryptoStore` and -/// exposes a `sync` interface using `block_on`. Usage of this is deprecated and it -/// will be removed as soon as the internal usage has transitioned successfully. -/// If you are starting out building something new **do not use this**, -/// instead, use [`CryptoStore`]. -pub trait SyncCryptoStore: CryptoStore + Send + Sync { - /// Returns all sr25519 public keys for the given key type. - fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec; - /// Generate a new sr25519 key pair for the given key type and an optional seed. - /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. - /// - /// Returns the public key of the generated key pair. - fn sr25519_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result; - - /// Returns all ed25519 public keys for the given key type. - fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec; - - /// Generate a new ed25519 key pair for the given key type and an optional seed. - /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. - /// - /// Returns the public key of the generated key pair. - fn ed25519_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result; - - /// Returns all ecdsa public keys for the given key type. - fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec; - - /// Generate a new ecdsa key pair for the given key type and an optional seed. 
- /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. - /// - /// Returns the public key of the generated key pair. - fn ecdsa_generate_new(&self, id: KeyTypeId, seed: Option<&str>) - -> Result; - - /// Insert a new key. This doesn't require any known of the crypto; but a public key must be - /// manually provided. - /// - /// Places it into the file system store. - /// - /// `Err` if there's some sort of weird filesystem error, but should generally be `Ok`. - fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()>; + /// Insert a new secret key. + fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()>; - /// Find intersection between provided keys and supported keys + /// List all supported keys of a given type. /// - /// Provided a list of (CryptoTypeId,[u8]) pairs, this would return - /// a filtered set of public keys which are supported by the keystore. - fn supported_keys( - &self, - id: KeyTypeId, - keys: Vec, - ) -> Result, Error>; - - /// List all supported keys - /// - /// Returns a set of public keys the signer supports. - fn keys(&self, id: KeyTypeId) -> Result, Error> { - block_on(CryptoStore::keys(self, id)) - } + /// Returns a set of public keys the signer supports in raw format. + fn keys(&self, key_type: KeyTypeId) -> Result>, Error>; /// Checks if the private keys for the given public key and key type combinations exist. /// /// Returns `true` iff all private keys could be found. fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool; - /// Sign with key + /// Convenience method to sign a message using the given key type and a raw public key + /// for secret lookup. + /// + /// The message is signed using the cryptographic primitive specified by `crypto_id`. /// - /// Signs a message with the private key that matches - /// the public key passed. + /// Schemes supported by the default trait implementation: sr25519, ed25519 and ecdsa. + /// To support more schemes you can overwrite this method. /// /// Returns the SCALE encoded signature if key is found and supported, `None` if the key doesn't /// exist or an error when something failed. fn sign_with( &self, id: KeyTypeId, - key: &CryptoTypePublicPair, + crypto_id: CryptoTypeId, + public: &[u8], msg: &[u8], - ) -> Result>, Error>; - - /// Sign with any key - /// - /// Given a list of public keys, find the first supported key and - /// sign the provided message with that key. - /// - /// Returns a tuple of the used key and the SCALE encoded signature or `None` if no key could - /// be found to sign. - fn sign_with_any( - &self, - id: KeyTypeId, - keys: Vec, - msg: &[u8], - ) -> Result)>, Error> { - if keys.len() == 1 { - return Ok( - SyncCryptoStore::sign_with(self, id, &keys[0], msg)?.map(|s| (keys[0].clone(), s)) - ) - } else { - for k in SyncCryptoStore::supported_keys(self, id, keys)? { - if let Ok(Some(sign)) = SyncCryptoStore::sign_with(self, id, &k, msg) { - return Ok(Some((k, sign))) - } - } - } - - Ok(None) - } - - /// Sign with all keys - /// - /// Provided a list of public keys, sign a message with - /// each key given that the key is supported. - /// - /// Returns a list of `Result`s each representing the SCALE encoded - /// signature of each key, `None` if the key doesn't exist or an error when something failed. 
- fn sign_with_all( - &self, - id: KeyTypeId, - keys: Vec, - msg: &[u8], - ) -> Result>, Error>>, ()> { - Ok(keys.iter().map(|k| SyncCryptoStore::sign_with(self, id, k, msg)).collect()) + ) -> Result>, Error> { + use codec::Encode; + + let signature = match crypto_id { + sr25519::CRYPTO_ID => { + let public = sr25519::Public::from_slice(public) + .map_err(|_| Error::ValidationError("Invalid public key format".into()))?; + self.sr25519_sign(id, &public, msg)?.map(|s| s.encode()) + }, + ed25519::CRYPTO_ID => { + let public = ed25519::Public::from_slice(public) + .map_err(|_| Error::ValidationError("Invalid public key format".into()))?; + self.ed25519_sign(id, &public, msg)?.map(|s| s.encode()) + }, + ecdsa::CRYPTO_ID => { + let public = ecdsa::Public::from_slice(public) + .map_err(|_| Error::ValidationError("Invalid public key format".into()))?; + self.ecdsa_sign(id, &public, msg)?.map(|s| s.encode()) + }, + _ => return Err(Error::KeyNotSupported(id)), + }; + Ok(signature) } - - /// Generate VRF signature for given transcript data. - /// - /// Receives KeyTypeId and Public key to be able to map - /// them to a private key that exists in the keystore which - /// is, in turn, used for signing the provided transcript. - /// - /// Returns a result containing the signature data. - /// Namely, VRFOutput and VRFProof which are returned - /// inside the `VRFSignature` container struct. - /// - /// This function will return `None` if the given `key_type` and `public` combination - /// doesn't exist in the keystore or an `Err` when something failed. - fn sr25519_vrf_sign( - &self, - key_type: KeyTypeId, - public: &sr25519::Public, - transcript_data: VRFTranscriptData, - ) -> Result, Error>; - - /// Generate an ECDSA signature for a given pre-hashed message. - /// - /// Receives [`KeyTypeId`] and an [`ecdsa::Public`] key to be able to map - /// them to a private key that exists in the keystore. This private key is, - /// in turn, used for signing the provided pre-hashed message. - /// - /// The `msg` argument provided should be a hashed message for which an - /// ECDSA signature should be generated. - /// - /// Returns an [`ecdsa::Signature`] or `None` in case the given `id` and - /// `public` combination doesn't exist in the keystore. An `Err` will be - /// returned if generating the signature itself failed. - fn ecdsa_sign_prehashed( - &self, - id: KeyTypeId, - public: &ecdsa::Public, - msg: &[u8; 32], - ) -> Result, Error>; } -/// A pointer to a keystore. -pub type SyncCryptoStorePtr = Arc; +/// A shared pointer to a keystore implementation. +pub type KeystorePtr = Arc; sp_externalities::decl_extension! { /// The keystore extension to register/retrieve from the externalities. - pub struct KeystoreExt(SyncCryptoStorePtr); + pub struct KeystoreExt(KeystorePtr); +} + +impl KeystoreExt { + /// Create a new instance of `KeystoreExt` + pub fn new(keystore: T) -> Self { + Self(Arc::new(keystore)) + } } diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index 88bbe8d7b718e..f7a24f451f9e3 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -17,199 +17,71 @@ //! Types that should only be used for testing! 
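Taken together, the `lib.rs` changes above collapse the old asynchronous `CryptoStore` and its `SyncCryptoStore` shim into a single synchronous `Keystore` trait behind `KeystorePtr`. A minimal usage sketch, not part of this diff (the key type constant and the function name are hypothetical):

use sp_core::{crypto::KeyTypeId, sr25519};
use sp_keystore::{Keystore, KeystorePtr};

// Hypothetical key type used only for this example.
const EXAMPLE_KEY_TYPE: KeyTypeId = KeyTypeId(*b"exmp");

// Sign `msg` with the first sr25519 key of that type, if any.
fn sign_with_first_key(keystore: &KeystorePtr, msg: &[u8]) -> Option<sr25519::Signature> {
    // All `Keystore` methods are synchronous now, so no `block_on` is needed.
    let public = keystore.sr25519_public_keys(EXAMPLE_KEY_TYPE).into_iter().next()?;
    keystore.sr25519_sign(EXAMPLE_KEY_TYPE, &public, msg).ok().flatten()
}

Registering a store as an externalities extension likewise goes through the new `KeystoreExt::new`, which wraps the store in an `Arc` internally.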
+use crate::{Error, Keystore, KeystorePtr}; + use sp_core::{ - crypto::{ByteArray, CryptoTypePublicPair, KeyTypeId, Pair}, + crypto::{ByteArray, KeyTypeId, Pair, VrfSigner}, ecdsa, ed25519, sr25519, }; -use crate::{ - vrf::{make_transcript, VRFSignature, VRFTranscriptData}, - CryptoStore, Error, SyncCryptoStore, SyncCryptoStorePtr, -}; -use async_trait::async_trait; use parking_lot::RwLock; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; /// A keystore implementation usable in tests. #[derive(Default)] -pub struct KeyStore { +pub struct MemoryKeystore { /// `KeyTypeId` maps to public keys and public keys map to private keys. keys: Arc, String>>>>, } -impl KeyStore { +impl MemoryKeystore { /// Creates a new instance of `Self`. pub fn new() -> Self { Self::default() } - fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { - self.keys.read().get(&id).and_then(|inner| { - inner.get(pub_key.as_slice()).map(|s| { - sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") - }) - }) - } - - fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { - self.keys.read().get(&id).and_then(|inner| { - inner.get(pub_key.as_slice()).map(|s| { - ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") - }) - }) - } - - fn ecdsa_key_pair(&self, id: KeyTypeId, pub_key: &ecdsa::Public) -> Option { - self.keys.read().get(&id).and_then(|inner| { + fn pair(&self, key_type: KeyTypeId, public: &T::Public) -> Option { + self.keys.read().get(&key_type).and_then(|inner| { inner - .get(pub_key.as_slice()) - .map(|s| ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid")) + .get(public.as_slice()) + .map(|s| T::from_string(s, None).expect("seed slice is valid")) }) } -} - -#[async_trait] -impl CryptoStore for KeyStore { - async fn keys(&self, id: KeyTypeId) -> Result, Error> { - SyncCryptoStore::keys(self, id) - } - - async fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { - SyncCryptoStore::sr25519_public_keys(self, id) - } - - async fn sr25519_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result { - SyncCryptoStore::sr25519_generate_new(self, id, seed) - } - - async fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { - SyncCryptoStore::ed25519_public_keys(self, id) - } - - async fn ed25519_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result { - SyncCryptoStore::ed25519_generate_new(self, id, seed) - } - - async fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec { - SyncCryptoStore::ecdsa_public_keys(self, id) - } - - async fn ecdsa_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result { - SyncCryptoStore::ecdsa_generate_new(self, id, seed) - } - - async fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { - SyncCryptoStore::insert_unknown(self, id, suri, public) - } - - async fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - SyncCryptoStore::has_keys(self, public_keys) - } - - async fn supported_keys( - &self, - id: KeyTypeId, - keys: Vec, - ) -> std::result::Result, Error> { - SyncCryptoStore::supported_keys(self, id, keys) - } - - async fn sign_with( - &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, - msg: &[u8], - ) -> Result>, Error> { - SyncCryptoStore::sign_with(self, id, key, msg) - } - - async fn sr25519_vrf_sign( - &self, - key_type: KeyTypeId, - public: &sr25519::Public, - transcript_data: VRFTranscriptData, - ) -> 
Result, Error> { - SyncCryptoStore::sr25519_vrf_sign(self, key_type, public, transcript_data) - } - - async fn ecdsa_sign_prehashed( - &self, - id: KeyTypeId, - public: &ecdsa::Public, - msg: &[u8; 32], - ) -> Result, Error> { - SyncCryptoStore::ecdsa_sign_prehashed(self, id, public, msg) - } -} - -impl SyncCryptoStore for KeyStore { - fn keys(&self, id: KeyTypeId) -> Result, Error> { - self.keys - .read() - .get(&id) - .map(|map| { - Ok(map.keys().fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k.clone())); - v - })) - }) - .unwrap_or_else(|| Ok(vec![])) - } - fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { + fn public_keys(&self, key_type: KeyTypeId) -> Vec { self.keys .read() - .get(&id) + .get(&key_type) .map(|keys| { keys.values() - .map(|s| { - sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") - }) + .map(|s| T::from_string(s, None).expect("seed slice is valid")) .map(|p| p.public()) .collect() }) .unwrap_or_default() } - fn sr25519_generate_new( + fn generate_new( &self, - id: KeyTypeId, + key_type: KeyTypeId, seed: Option<&str>, - ) -> Result { + ) -> Result { match seed { Some(seed) => { - let pair = sr25519::Pair::from_string(seed, None).map_err(|_| { - Error::ValidationError("Generates an `sr25519` pair.".to_owned()) - })?; + let pair = T::from_string(seed, None) + .map_err(|_| Error::ValidationError("Generates a pair.".to_owned()))?; self.keys .write() - .entry(id) + .entry(key_type) .or_default() .insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, None => { - let (pair, phrase, _) = sr25519::Pair::generate_with_phrase(None); + let (pair, phrase, _) = T::generate_with_phrase(None); self.keys .write() - .entry(id) + .entry(key_type) .or_default() .insert(pair.public().to_raw_vec(), phrase); Ok(pair.public()) @@ -217,183 +89,138 @@ impl SyncCryptoStore for KeyStore { } } - fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys - .read() - .get(&id) - .map(|keys| { - keys.values() - .map(|s| { - ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") - }) - .map(|p| p.public()) - .collect() - }) - .unwrap_or_default() + fn sign( + &self, + key_type: KeyTypeId, + public: &T::Public, + msg: &[u8], + ) -> Result, Error> { + let sig = self.pair::(key_type, public).map(|pair| pair.sign(msg)); + Ok(sig) } - fn ed25519_generate_new( + fn vrf_sign( &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result { - match seed { - Some(seed) => { - let pair = ed25519::Pair::from_string(seed, None).map_err(|_| { - Error::ValidationError("Generates an `ed25519` pair.".to_owned()) - })?; - self.keys - .write() - .entry(id) - .or_default() - .insert(pair.public().to_raw_vec(), seed.into()); - Ok(pair.public()) - }, - None => { - let (pair, phrase, _) = ed25519::Pair::generate_with_phrase(None); - self.keys - .write() - .entry(id) - .or_default() - .insert(pair.public().to_raw_vec(), phrase); - Ok(pair.public()) - }, - } + key_type: KeyTypeId, + public: &T::Public, + transcript: &T::VrfInput, + ) -> Result, Error> { + let sig = self.pair::(key_type, public).map(|pair| pair.vrf_sign(transcript)); + Ok(sig) } +} - fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys - .read() - .get(&id) - .map(|keys| { - keys.values() - .map(|s| { - ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid") - }) - .map(|p| p.public()) - .collect() - }) - 
.unwrap_or_default() +impl Keystore for MemoryKeystore { + fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) } - fn ecdsa_generate_new( + fn sr25519_generate_new( &self, - id: KeyTypeId, + key_type: KeyTypeId, seed: Option<&str>, - ) -> Result { - match seed { - Some(seed) => { - let pair = ecdsa::Pair::from_string(seed, None) - .map_err(|_| Error::ValidationError("Generates an `ecdsa` pair.".to_owned()))?; - self.keys - .write() - .entry(id) - .or_default() - .insert(pair.public().to_raw_vec(), seed.into()); - Ok(pair.public()) - }, - None => { - let (pair, phrase, _) = ecdsa::Pair::generate_with_phrase(None); - self.keys - .write() - .entry(id) - .or_default() - .insert(pair.public().to_raw_vec(), phrase); - Ok(pair.public()) - }, - } + ) -> Result { + self.generate_new::(key_type, seed) } - fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { - self.keys - .write() - .entry(id) - .or_default() - .insert(public.to_owned(), suri.to_string()); - Ok(()) + fn sr25519_sign( + &self, + key_type: KeyTypeId, + public: &sr25519::Public, + msg: &[u8], + ) -> Result, Error> { + self.sign::(key_type, public, msg) } - fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys - .iter() - .all(|(k, t)| self.keys.read().get(t).and_then(|s| s.get(k)).is_some()) + fn sr25519_vrf_sign( + &self, + key_type: KeyTypeId, + public: &sr25519::Public, + transcript: &sr25519::vrf::VrfTranscript, + ) -> Result, Error> { + self.vrf_sign::(key_type, public, transcript) } - fn supported_keys( - &self, - id: KeyTypeId, - keys: Vec, - ) -> std::result::Result, Error> { - let provided_keys = keys.into_iter().collect::>(); - let all_keys = SyncCryptoStore::keys(self, id)?.into_iter().collect::>(); + fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } - Ok(provided_keys.intersection(&all_keys).cloned().collect()) + fn ed25519_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + self.generate_new::(key_type, seed) } - fn sign_with( + fn ed25519_sign( &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, + key_type: KeyTypeId, + public: &ed25519::Public, msg: &[u8], - ) -> Result>, Error> { - use codec::Encode; - - match key.0 { - ed25519::CRYPTO_ID => { - let key_pair = self - .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice()).unwrap()); - - key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - }, - sr25519::CRYPTO_ID => { - let key_pair = self - .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice()).unwrap()); + ) -> Result, Error> { + self.sign::(key_type, public, msg) + } - key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - }, - ecdsa::CRYPTO_ID => { - let key_pair = - self.ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice()).unwrap()); + fn ecdsa_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } - key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - }, - _ => Err(Error::KeyNotSupported(id)), - } + fn ecdsa_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + self.generate_new::(key_type, seed) } - fn sr25519_vrf_sign( + fn ecdsa_sign( &self, key_type: KeyTypeId, - public: &sr25519::Public, - transcript_data: VRFTranscriptData, - ) -> Result, Error> { - let transcript = make_transcript(transcript_data); - let pair = - if let Some(k) = self.sr25519_key_pair(key_type, public) { k } else { return Ok(None) }; - - let (inout, 
proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(Some(VRFSignature { output: inout.to_output(), proof })) + public: &ecdsa::Public, + msg: &[u8], + ) -> Result, Error> { + self.sign::(key_type, public, msg) } fn ecdsa_sign_prehashed( &self, - id: KeyTypeId, + key_type: KeyTypeId, public: &ecdsa::Public, msg: &[u8; 32], ) -> Result, Error> { - let pair = self.ecdsa_key_pair(id, public); - pair.map(|k| k.sign_prehashed(msg)).map(Ok).transpose() + let sig = self.pair::(key_type, public).map(|pair| pair.sign_prehashed(msg)); + Ok(sig) } -} -impl Into for KeyStore { - fn into(self) -> SyncCryptoStorePtr { - Arc::new(self) + fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { + self.keys + .write() + .entry(key_type) + .or_default() + .insert(public.to_owned(), suri.to_string()); + Ok(()) + } + + fn keys(&self, key_type: KeyTypeId) -> Result>, Error> { + let keys = self + .keys + .read() + .get(&key_type) + .map(|map| map.keys().cloned().collect()) + .unwrap_or_default(); + Ok(keys) + } + + fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { + public_keys + .iter() + .all(|(k, t)| self.keys.read().get(t).and_then(|s| s.get(k)).is_some()) } } -impl Into> for KeyStore { - fn into(self) -> Arc { +impl Into for MemoryKeystore { + fn into(self) -> KeystorePtr { Arc::new(self) } } @@ -401,7 +228,6 @@ impl Into> for KeyStore { #[cfg(test)] mod tests { use super::*; - use crate::{vrf::VRFTranscriptValue, SyncCryptoStore}; use sp_core::{ sr25519, testing::{ECDSA, ED25519, SR25519}, @@ -409,67 +235,62 @@ mod tests { #[test] fn store_key_and_extract() { - let store = KeyStore::new(); + let store = MemoryKeystore::new(); - let public = - SyncCryptoStore::ed25519_generate_new(&store, ED25519, None).expect("Generates key"); + let public = store.ed25519_generate_new(ED25519, None).expect("Generates key"); - let public_keys = SyncCryptoStore::keys(&store, ED25519).unwrap(); + let public_keys = store.ed25519_public_keys(ED25519); assert!(public_keys.contains(&public.into())); } #[test] fn store_unknown_and_extract_it() { - let store = KeyStore::new(); + let store = MemoryKeystore::new(); let secret_uri = "//Alice"; let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); - SyncCryptoStore::insert_unknown(&store, SR25519, secret_uri, key_pair.public().as_ref()) + store + .insert(SR25519, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); - let public_keys = SyncCryptoStore::keys(&store, SR25519).unwrap(); + let public_keys = store.sr25519_public_keys(SR25519); assert!(public_keys.contains(&key_pair.public().into())); } #[test] fn vrf_sign() { - let store = KeyStore::new(); + let store = MemoryKeystore::new(); let secret_uri = "//Alice"; let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); - let transcript_data = VRFTranscriptData { - label: b"Test", - items: vec![ - ("one", VRFTranscriptValue::U64(1)), - ("two", VRFTranscriptValue::U64(2)), - ("three", VRFTranscriptValue::Bytes("test".as_bytes().to_vec())), + let transcript = sr25519::vrf::VrfTranscript::new( + b"Test", + &[ + (b"one", &1_u64.to_le_bytes()), + (b"two", &2_u64.to_le_bytes()), + (b"three", "test".as_bytes()), ], - }; - - let result = SyncCryptoStore::sr25519_vrf_sign( - &store, - SR25519, - &key_pair.public(), - transcript_data.clone(), ); + + let result = store.sr25519_vrf_sign(SR25519, &key_pair.public(), &transcript); assert!(result.unwrap().is_none()); - SyncCryptoStore::insert_unknown(&store, SR25519, 
secret_uri, key_pair.public().as_ref()) + store + .insert(SR25519, secret_uri, key_pair.public().as_ref()) .expect("Inserts unknown key"); - let result = - SyncCryptoStore::sr25519_vrf_sign(&store, SR25519, &key_pair.public(), transcript_data); + let result = store.sr25519_vrf_sign(SR25519, &key_pair.public(), &transcript); assert!(result.unwrap().is_some()); } #[test] fn ecdsa_sign_prehashed_works() { - let store = KeyStore::new(); + let store = MemoryKeystore::new(); let suri = "//Alice"; let pair = ecdsa::Pair::from_string(suri, None).unwrap(); @@ -477,15 +298,13 @@ mod tests { let msg = sp_core::keccak_256(b"this should be a hashed message"); // no key in key store - let res = - SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); + let res = store.ecdsa_sign_prehashed(ECDSA, &pair.public(), &msg).unwrap(); assert!(res.is_none()); // insert key, sign again - SyncCryptoStore::insert_unknown(&store, ECDSA, suri, pair.public().as_ref()).unwrap(); + store.insert(ECDSA, suri, pair.public().as_ref()).unwrap(); - let res = - SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); + let res = store.ecdsa_sign_prehashed(ECDSA, &pair.public(), &msg).unwrap(); assert!(res.is_some()); } diff --git a/primitives/keystore/src/vrf.rs b/primitives/keystore/src/vrf.rs deleted file mode 100644 index e089336c1485b..0000000000000 --- a/primitives/keystore/src/vrf.rs +++ /dev/null @@ -1,94 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! VRF-specifc data types and helpers - -use codec::Encode; -use merlin::Transcript; -use schnorrkel::vrf::{VRFOutput, VRFProof}; - -/// An enum whose variants represent possible -/// accepted values to construct the VRF transcript -#[derive(Clone, Encode)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -pub enum VRFTranscriptValue { - /// Value is an array of bytes - Bytes(Vec), - /// Value is a u64 integer - U64(u64), -} -/// VRF Transcript data -#[derive(Clone, Encode)] -pub struct VRFTranscriptData { - /// The transcript's label - pub label: &'static [u8], - /// Additional data to be registered into the transcript - pub items: Vec<(&'static str, VRFTranscriptValue)>, -} -/// VRF signature data -pub struct VRFSignature { - /// The VRFOutput serialized - pub output: VRFOutput, - /// The calculated VRFProof - pub proof: VRFProof, -} - -/// Construct a `Transcript` object from data. 
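Because this module is removed, code that previously assembled a `VRFTranscriptData` now builds the transcript type provided by `sp-core` directly. A rough before/after based on the values used in the tests of this diff (illustrative only):

// Before, with the removed sp-keystore vrf helpers:
// let data = VRFTranscriptData {
//     label: b"My label",
//     items: vec![
//         ("one", VRFTranscriptValue::U64(1)),
//         ("two", VRFTranscriptValue::Bytes("test".as_bytes().to_vec())),
//     ],
// };

// After, using the transcript type from sp-core:
let transcript = sp_core::sr25519::vrf::VrfTranscript::new(
    b"My label",
    &[(b"one", &1_u64.to_le_bytes()), (b"two", "test".as_bytes())],
);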
-/// -/// Returns `merlin::Transcript` -pub fn make_transcript(data: VRFTranscriptData) -> Transcript { - let mut transcript = Transcript::new(data.label); - for (label, value) in data.items.into_iter() { - match value { - VRFTranscriptValue::Bytes(bytes) => { - transcript.append_message(label.as_bytes(), &bytes); - }, - VRFTranscriptValue::U64(val) => { - transcript.append_u64(label.as_bytes(), val); - }, - } - } - transcript -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::RngCore; - use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; - - #[test] - fn transcript_creation_matches() { - let mut orig_transcript = Transcript::new(b"My label"); - orig_transcript.append_u64(b"one", 1); - orig_transcript.append_message(b"two", "test".as_bytes()); - - let new_transcript = make_transcript(VRFTranscriptData { - label: b"My label", - items: vec![ - ("one", VRFTranscriptValue::U64(1)), - ("two", VRFTranscriptValue::Bytes("test".as_bytes().to_vec())), - ], - }); - let test = |t: Transcript| -> [u8; 16] { - let mut b = [0u8; 16]; - t.build_rng().finalize(&mut ChaChaRng::from_seed([0u8; 32])).fill_bytes(&mut b); - b - }; - debug_assert!(test(orig_transcript) == test(new_transcript)); - } -} diff --git a/primitives/maybe-compressed-blob/Cargo.toml b/primitives/maybe-compressed-blob/Cargo.toml index 7e2780aa23355..67f93c517274c 100644 --- a/primitives/maybe-compressed-blob/Cargo.toml +++ b/primitives/maybe-compressed-blob/Cargo.toml @@ -12,4 +12,4 @@ readme = "README.md" [dependencies] thiserror = "1.0" -zstd = { version = "0.11.2", default-features = false } +zstd = { version = "0.12.3", default-features = false } diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index 97add1ed1d37c..cb3d6ddd51ad2 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } serde = { version = "1.0.136", features = ["derive"], optional = true } diff --git a/primitives/metadata-ir/Cargo.toml b/primitives/metadata-ir/Cargo.toml new file mode 100644 index 0000000000000..27fada9c6f34e --- /dev/null +++ b/primitives/metadata-ir/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "sp-metadata-ir" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Intermediate representation of the runtime metadata." 
+documentation = "https://docs.rs/sp-metadata-ir" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +frame-metadata = { version = "15.1.0", default-features = false, features = ["v14", "v15-unstable"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +sp-std = { version = "5.0.0", default-features = false, path = "../std" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-metadata/std", + "scale-info/std", + "sp-std/std", +] diff --git a/primitives/metadata-ir/src/lib.rs b/primitives/metadata-ir/src/lib.rs new file mode 100644 index 0000000000000..3ddc2911d4c93 --- /dev/null +++ b/primitives/metadata-ir/src/lib.rs @@ -0,0 +1,110 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Intermediate representation of the runtime metadata. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +// Re-export. +#[doc(hidden)] +pub use frame_metadata; + +mod types; +use frame_metadata::{RuntimeMetadataPrefixed, RuntimeMetadataV14}; +pub use types::*; + +mod v14; +mod v15; + +/// Metadata V14. +const V14: u32 = 14; + +/// Metadata V15. +/// +/// Not yet stable, thus we set it to `u32::MAX`. +const V15: u32 = u32::MAX; + +/// Transform the IR to the specified version. +/// +/// Use [`supported_versions`] to find supported versions. +pub fn into_version(metadata: MetadataIR, version: u32) -> Option { + match version { + // Latest stable version. + V14 => { + let v14: frame_metadata::v14::RuntimeMetadataV14 = metadata.into(); + Some(v14.into()) + }, + // Unstable metadata. + V15 => { + let v15: frame_metadata::v15::RuntimeMetadataV15 = metadata.into(); + Some(v15.into()) + }, + _ => None, + } +} + +/// Returns the supported metadata versions. +pub fn supported_versions() -> sp_std::vec::Vec { + sp_std::vec![V14, V15] +} + +/// Transform the IR to the latest stable metadata version. 
+pub fn into_latest(metadata: MetadataIR) -> RuntimeMetadataPrefixed { + let latest: RuntimeMetadataV14 = metadata.into(); + latest.into() +} + +#[cfg(test)] +mod test { + use super::*; + use frame_metadata::{v14::META_RESERVED, RuntimeMetadata}; + use scale_info::meta_type; + + fn ir_metadata() -> MetadataIR { + MetadataIR { + pallets: vec![], + extrinsic: ExtrinsicMetadataIR { + ty: meta_type::<()>(), + version: 0, + signed_extensions: vec![], + }, + ty: meta_type::<()>(), + apis: vec![], + } + } + + #[test] + fn into_version_14() { + let ir = ir_metadata(); + let metadata = into_version(ir, V14).expect("Should return prefixed metadata"); + + assert_eq!(metadata.0, META_RESERVED); + + assert!(matches!(metadata.1, RuntimeMetadata::V14(_))); + } + + #[test] + fn into_version_15() { + let ir = ir_metadata(); + let metadata = into_version(ir, V15).expect("Should return prefixed metadata"); + + assert_eq!(metadata.0, META_RESERVED); + + assert!(matches!(metadata.1, RuntimeMetadata::V15(_))); + } +} diff --git a/primitives/metadata-ir/src/types.rs b/primitives/metadata-ir/src/types.rs new file mode 100644 index 0000000000000..93ee54891d89f --- /dev/null +++ b/primitives/metadata-ir/src/types.rs @@ -0,0 +1,400 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::Encode; +use scale_info::{ + form::{Form, MetaForm, PortableForm}, + prelude::vec::Vec, + IntoPortable, MetaType, Registry, +}; + +/// The intermediate representation for the runtime metadata. +/// Contains the needed context that allows conversion to multiple metadata versions. +/// +/// # Note +/// +/// Further fields could be added or removed to ensure proper conversion. +/// When the IR does not contain enough information to generate a specific version +/// of the runtime metadata an appropriate default value is used (ie, empty vector). +pub struct MetadataIR { + /// Pallet metadata. + pub pallets: Vec>, + /// Metadata of the extrinsic. + pub extrinsic: ExtrinsicMetadataIR, + /// The type of the `Runtime`. + pub ty: T::Type, + /// Metadata of the Runtime API. + pub apis: Vec>, +} + +/// Metadata of a runtime trait. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct RuntimeApiMetadataIR { + /// Trait name. + pub name: T::String, + /// Trait methods. + pub methods: Vec>, + /// Trait documentation. + pub docs: Vec, +} + +impl IntoPortable for RuntimeApiMetadataIR { + type Output = RuntimeApiMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + RuntimeApiMetadataIR { + name: self.name.into_portable(registry), + methods: registry.map_into_portable(self.methods), + docs: registry.map_into_portable(self.docs), + } + } +} + +/// Metadata of a runtime method. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct RuntimeApiMethodMetadataIR { + /// Method name. + pub name: T::String, + /// Method parameters. + pub inputs: Vec>, + /// Method output. 
+ pub output: T::Type, + /// Method documentation. + pub docs: Vec, +} + +impl IntoPortable for RuntimeApiMethodMetadataIR { + type Output = RuntimeApiMethodMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + RuntimeApiMethodMetadataIR { + name: self.name.into_portable(registry), + inputs: registry.map_into_portable(self.inputs), + output: registry.register_type(&self.output), + docs: registry.map_into_portable(self.docs), + } + } +} + +/// Metadata of a runtime method parameter. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct RuntimeApiMethodParamMetadataIR { + /// Parameter name. + pub name: T::String, + /// Parameter type. + pub ty: T::Type, +} + +impl IntoPortable for RuntimeApiMethodParamMetadataIR { + type Output = RuntimeApiMethodParamMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + RuntimeApiMethodParamMetadataIR { + name: self.name.into_portable(registry), + ty: registry.register_type(&self.ty), + } + } +} + +/// The intermediate representation for a pallet metadata. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct PalletMetadataIR { + /// Pallet name. + pub name: T::String, + /// Pallet storage metadata. + pub storage: Option>, + /// Pallet calls metadata. + pub calls: Option>, + /// Pallet event metadata. + pub event: Option>, + /// Pallet constants metadata. + pub constants: Vec>, + /// Pallet error metadata. + pub error: Option>, + /// Define the index of the pallet, this index will be used for the encoding of pallet event, + /// call and origin variants. + pub index: u8, + /// Pallet documentation. + pub docs: Vec, +} + +impl IntoPortable for PalletMetadataIR { + type Output = PalletMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + PalletMetadataIR { + name: self.name.into_portable(registry), + storage: self.storage.map(|storage| storage.into_portable(registry)), + calls: self.calls.map(|calls| calls.into_portable(registry)), + event: self.event.map(|event| event.into_portable(registry)), + constants: registry.map_into_portable(self.constants), + error: self.error.map(|error| error.into_portable(registry)), + index: self.index, + docs: registry.map_into_portable(self.docs), + } + } +} + +/// Metadata of the extrinsic used by the runtime. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct ExtrinsicMetadataIR { + /// The type of the extrinsic. + pub ty: T::Type, + /// Extrinsic version. + pub version: u8, + /// The signed extensions in the order they appear in the extrinsic. + pub signed_extensions: Vec>, +} + +impl IntoPortable for ExtrinsicMetadataIR { + type Output = ExtrinsicMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + ExtrinsicMetadataIR { + ty: registry.register_type(&self.ty), + version: self.version, + signed_extensions: registry.map_into_portable(self.signed_extensions), + } + } +} + +/// Metadata of an extrinsic's signed extension. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct SignedExtensionMetadataIR { + /// The unique signed extension identifier, which may be different from the type name. + pub identifier: T::String, + /// The type of the signed extension, with the data to be included in the extrinsic. 
+ pub ty: T::Type, + /// The type of the additional signed data, with the data to be included in the signed payload + pub additional_signed: T::Type, +} + +impl IntoPortable for SignedExtensionMetadataIR { + type Output = SignedExtensionMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + SignedExtensionMetadataIR { + identifier: self.identifier.into_portable(registry), + ty: registry.register_type(&self.ty), + additional_signed: registry.register_type(&self.additional_signed), + } + } +} + +/// All metadata of the pallet's storage. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +/// The common prefix used by all storage entries. +pub struct PalletStorageMetadataIR { + /// The common prefix used by all storage entries. + pub prefix: T::String, + /// Metadata for all storage entries. + pub entries: Vec>, +} + +impl IntoPortable for PalletStorageMetadataIR { + type Output = PalletStorageMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + PalletStorageMetadataIR { + prefix: self.prefix.into_portable(registry), + entries: registry.map_into_portable(self.entries), + } + } +} + +/// Metadata about one storage entry. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct StorageEntryMetadataIR { + /// Variable name of the storage entry. + pub name: T::String, + /// An `Option` modifier of that storage entry. + pub modifier: StorageEntryModifierIR, + /// Type of the value stored in the entry. + pub ty: StorageEntryTypeIR, + /// Default value (SCALE encoded). + pub default: Vec, + /// Storage entry documentation. + pub docs: Vec, +} + +impl IntoPortable for StorageEntryMetadataIR { + type Output = StorageEntryMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + StorageEntryMetadataIR { + name: self.name.into_portable(registry), + modifier: self.modifier, + ty: self.ty.into_portable(registry), + default: self.default, + docs: registry.map_into_portable(self.docs), + } + } +} + +/// A storage entry modifier indicates how a storage entry is returned when fetched and what the +/// value will be if the key is not present. Specifically this refers to the "return type" when +/// fetching a storage entry, and what the value will be if the key is not present. +/// +/// `Optional` means you should expect an `Option`, with `None` returned if the key is not +/// present. `Default` means you should expect a `T` with the default value of default if the key is +/// not present. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub enum StorageEntryModifierIR { + /// The storage entry returns an `Option`, with `None` if the key is not present. + Optional, + /// The storage entry returns `T::Default` if the key is not present. + Default, +} + +/// Hasher used by storage maps +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub enum StorageHasherIR { + /// 128-bit Blake2 hash. + Blake2_128, + /// 256-bit Blake2 hash. + Blake2_256, + /// Multiple 128-bit Blake2 hashes concatenated. + Blake2_128Concat, + /// 128-bit XX hash. + Twox128, + /// 256-bit XX hash. + Twox256, + /// Multiple 64-bit XX hashes concatenated. + Twox64Concat, + /// Identity hashing (no hashing). + Identity, +} + +/// A type of storage value. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub enum StorageEntryTypeIR { + /// Plain storage entry (just the value). + Plain(T::Type), + /// A storage map. + Map { + /// One or more hashers, should be one hasher per key element. 
+ hashers: Vec, + /// The type of the key, can be a tuple with elements for each of the hashers. + key: T::Type, + /// The type of the value. + value: T::Type, + }, +} + +impl IntoPortable for StorageEntryTypeIR { + type Output = StorageEntryTypeIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + match self { + Self::Plain(plain) => StorageEntryTypeIR::Plain(registry.register_type(&plain)), + Self::Map { hashers, key, value } => StorageEntryTypeIR::Map { + hashers, + key: registry.register_type(&key), + value: registry.register_type(&value), + }, + } + } +} + +/// Metadata for all calls in a pallet +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct PalletCallMetadataIR { + /// The corresponding enum type for the pallet call. + pub ty: T::Type, +} + +impl IntoPortable for PalletCallMetadataIR { + type Output = PalletCallMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + PalletCallMetadataIR { ty: registry.register_type(&self.ty) } + } +} + +impl From for PalletCallMetadataIR { + fn from(ty: MetaType) -> Self { + Self { ty } + } +} + +/// Metadata about the pallet Event type. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct PalletEventMetadataIR { + /// The Event type. + pub ty: T::Type, +} + +impl IntoPortable for PalletEventMetadataIR { + type Output = PalletEventMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + PalletEventMetadataIR { ty: registry.register_type(&self.ty) } + } +} + +impl From for PalletEventMetadataIR { + fn from(ty: MetaType) -> Self { + Self { ty } + } +} + +/// Metadata about one pallet constant. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct PalletConstantMetadataIR { + /// Name of the pallet constant. + pub name: T::String, + /// Type of the pallet constant. + pub ty: T::Type, + /// Value stored in the constant (SCALE encoded). + pub value: Vec, + /// Documentation of the constant. + pub docs: Vec, +} + +impl IntoPortable for PalletConstantMetadataIR { + type Output = PalletConstantMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + PalletConstantMetadataIR { + name: self.name.into_portable(registry), + ty: registry.register_type(&self.ty), + value: self.value, + docs: registry.map_into_portable(self.docs), + } + } +} + +/// Metadata about a pallet error. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct PalletErrorMetadataIR { + /// The error type information. + pub ty: T::Type, +} + +impl IntoPortable for PalletErrorMetadataIR { + type Output = PalletErrorMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + PalletErrorMetadataIR { ty: registry.register_type(&self.ty) } + } +} + +impl From for PalletErrorMetadataIR { + fn from(ty: MetaType) -> Self { + Self { ty } + } +} diff --git a/primitives/metadata-ir/src/v14.rs b/primitives/metadata-ir/src/v14.rs new file mode 100644 index 0000000000000..e1b7a24f76577 --- /dev/null +++ b/primitives/metadata-ir/src/v14.rs @@ -0,0 +1,158 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
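As a concrete illustration of how the storage IR pieces compose, here is a sketch of a single entry, assuming the default `MetaForm` type parameter; the entry name and key/value types are placeholders, not part of this diff:

use codec::Encode;
use scale_info::meta_type;
use sp_metadata_ir::{
    StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR,
};

// A hypothetical double map `(u32, u32) -> u128`, with one hasher per key element.
fn example_entry() -> StorageEntryMetadataIR {
    StorageEntryMetadataIR {
        name: "Balances",
        modifier: StorageEntryModifierIR::Default,
        ty: StorageEntryTypeIR::Map {
            hashers: vec![StorageHasherIR::Blake2_128Concat, StorageHasherIR::Twox64Concat],
            key: meta_type::<(u32, u32)>(),
            value: meta_type::<u128>(),
        },
        // SCALE-encoded default returned when a key is absent.
        default: 0u128.encode(),
        docs: vec![],
    }
}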
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Convert the IR to V14 metadata. + +use super::types::{ + ExtrinsicMetadataIR, MetadataIR, PalletCallMetadataIR, PalletConstantMetadataIR, + PalletErrorMetadataIR, PalletEventMetadataIR, PalletMetadataIR, PalletStorageMetadataIR, + SignedExtensionMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, + StorageHasherIR, +}; + +use frame_metadata::v14::{ + ExtrinsicMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, + PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeMetadataV14, + SignedExtensionMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + StorageHasher, +}; + +impl From for RuntimeMetadataV14 { + fn from(ir: MetadataIR) -> Self { + RuntimeMetadataV14::new( + ir.pallets.into_iter().map(Into::into).collect(), + ir.extrinsic.into(), + ir.ty, + ) + } +} + +impl From for PalletMetadata { + fn from(ir: PalletMetadataIR) -> Self { + PalletMetadata { + name: ir.name, + storage: ir.storage.map(Into::into), + calls: ir.calls.map(Into::into), + event: ir.event.map(Into::into), + constants: ir.constants.into_iter().map(Into::into).collect(), + error: ir.error.map(Into::into), + index: ir.index, + // Note: ir.docs not part of v14. + } + } +} + +impl From for StorageEntryModifier { + fn from(ir: StorageEntryModifierIR) -> Self { + match ir { + StorageEntryModifierIR::Optional => StorageEntryModifier::Optional, + StorageEntryModifierIR::Default => StorageEntryModifier::Default, + } + } +} + +impl From for StorageHasher { + fn from(ir: StorageHasherIR) -> Self { + match ir { + StorageHasherIR::Blake2_128 => StorageHasher::Blake2_128, + StorageHasherIR::Blake2_256 => StorageHasher::Blake2_256, + StorageHasherIR::Blake2_128Concat => StorageHasher::Blake2_128Concat, + StorageHasherIR::Twox128 => StorageHasher::Twox128, + StorageHasherIR::Twox256 => StorageHasher::Twox256, + StorageHasherIR::Twox64Concat => StorageHasher::Twox64Concat, + StorageHasherIR::Identity => StorageHasher::Identity, + } + } +} + +impl From for StorageEntryType { + fn from(ir: StorageEntryTypeIR) -> Self { + match ir { + StorageEntryTypeIR::Plain(ty) => StorageEntryType::Plain(ty), + StorageEntryTypeIR::Map { hashers, key, value } => StorageEntryType::Map { + hashers: hashers.into_iter().map(Into::into).collect(), + key, + value, + }, + } + } +} + +impl From for StorageEntryMetadata { + fn from(ir: StorageEntryMetadataIR) -> Self { + StorageEntryMetadata { + name: ir.name, + modifier: ir.modifier.into(), + ty: ir.ty.into(), + default: ir.default, + docs: ir.docs, + } + } +} + +impl From for PalletStorageMetadata { + fn from(ir: PalletStorageMetadataIR) -> Self { + PalletStorageMetadata { + prefix: ir.prefix, + entries: ir.entries.into_iter().map(Into::into).collect(), + } + } +} + +impl From for PalletCallMetadata { + fn from(ir: PalletCallMetadataIR) -> Self { + PalletCallMetadata { ty: ir.ty } + } +} + +impl From for PalletEventMetadata { + fn from(ir: PalletEventMetadataIR) -> Self { + PalletEventMetadata { ty: ir.ty } + } +} + +impl From for PalletConstantMetadata { + fn from(ir: PalletConstantMetadataIR) -> Self { + 
PalletConstantMetadata { name: ir.name, ty: ir.ty, value: ir.value, docs: ir.docs } + } +} + +impl From for PalletErrorMetadata { + fn from(ir: PalletErrorMetadataIR) -> Self { + PalletErrorMetadata { ty: ir.ty } + } +} + +impl From for SignedExtensionMetadata { + fn from(ir: SignedExtensionMetadataIR) -> Self { + SignedExtensionMetadata { + identifier: ir.identifier, + ty: ir.ty, + additional_signed: ir.additional_signed, + } + } +} + +impl From for ExtrinsicMetadata { + fn from(ir: ExtrinsicMetadataIR) -> Self { + ExtrinsicMetadata { + ty: ir.ty, + version: ir.version, + signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + } + } +} diff --git a/primitives/metadata-ir/src/v15.rs b/primitives/metadata-ir/src/v15.rs new file mode 100644 index 0000000000000..86441228d008e --- /dev/null +++ b/primitives/metadata-ir/src/v15.rs @@ -0,0 +1,188 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Convert the IR to V15 metadata. + +use super::types::{ + ExtrinsicMetadataIR, MetadataIR, PalletCallMetadataIR, PalletConstantMetadataIR, + PalletErrorMetadataIR, PalletEventMetadataIR, PalletMetadataIR, PalletStorageMetadataIR, + RuntimeApiMetadataIR, RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, + SignedExtensionMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, + StorageHasherIR, +}; + +use frame_metadata::v15::{ + ExtrinsicMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, + PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata, + RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV15, + SignedExtensionMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + StorageHasher, +}; + +impl From for RuntimeMetadataV15 { + fn from(ir: MetadataIR) -> Self { + RuntimeMetadataV15::new( + ir.pallets.into_iter().map(Into::into).collect(), + ir.extrinsic.into(), + ir.ty, + ir.apis.into_iter().map(Into::into).collect(), + ) + } +} + +impl From for RuntimeApiMetadata { + fn from(ir: RuntimeApiMetadataIR) -> Self { + RuntimeApiMetadata { + name: ir.name, + methods: ir.methods.into_iter().map(Into::into).collect(), + docs: ir.docs, + } + } +} + +impl From for RuntimeApiMethodMetadata { + fn from(ir: RuntimeApiMethodMetadataIR) -> Self { + RuntimeApiMethodMetadata { + name: ir.name, + inputs: ir.inputs.into_iter().map(Into::into).collect(), + output: ir.output, + docs: ir.docs, + } + } +} + +impl From for RuntimeApiMethodParamMetadata { + fn from(ir: RuntimeApiMethodParamMetadataIR) -> Self { + RuntimeApiMethodParamMetadata { name: ir.name, ty: ir.ty } + } +} + +impl From for PalletMetadata { + fn from(ir: PalletMetadataIR) -> Self { + PalletMetadata { + name: ir.name, + storage: ir.storage.map(Into::into), + calls: ir.calls.map(Into::into), + event: ir.event.map(Into::into), + constants: 
ir.constants.into_iter().map(Into::into).collect(), + error: ir.error.map(Into::into), + index: ir.index, + docs: ir.docs, + } + } +} + +impl From for StorageEntryModifier { + fn from(ir: StorageEntryModifierIR) -> Self { + match ir { + StorageEntryModifierIR::Optional => StorageEntryModifier::Optional, + StorageEntryModifierIR::Default => StorageEntryModifier::Default, + } + } +} + +impl From for StorageHasher { + fn from(ir: StorageHasherIR) -> Self { + match ir { + StorageHasherIR::Blake2_128 => StorageHasher::Blake2_128, + StorageHasherIR::Blake2_256 => StorageHasher::Blake2_256, + StorageHasherIR::Blake2_128Concat => StorageHasher::Blake2_128Concat, + StorageHasherIR::Twox128 => StorageHasher::Twox128, + StorageHasherIR::Twox256 => StorageHasher::Twox256, + StorageHasherIR::Twox64Concat => StorageHasher::Twox64Concat, + StorageHasherIR::Identity => StorageHasher::Identity, + } + } +} + +impl From for StorageEntryType { + fn from(ir: StorageEntryTypeIR) -> Self { + match ir { + StorageEntryTypeIR::Plain(ty) => StorageEntryType::Plain(ty), + StorageEntryTypeIR::Map { hashers, key, value } => StorageEntryType::Map { + hashers: hashers.into_iter().map(Into::into).collect(), + key, + value, + }, + } + } +} + +impl From for StorageEntryMetadata { + fn from(ir: StorageEntryMetadataIR) -> Self { + StorageEntryMetadata { + name: ir.name, + modifier: ir.modifier.into(), + ty: ir.ty.into(), + default: ir.default, + docs: ir.docs, + } + } +} + +impl From for PalletStorageMetadata { + fn from(ir: PalletStorageMetadataIR) -> Self { + PalletStorageMetadata { + prefix: ir.prefix, + entries: ir.entries.into_iter().map(Into::into).collect(), + } + } +} + +impl From for PalletCallMetadata { + fn from(ir: PalletCallMetadataIR) -> Self { + PalletCallMetadata { ty: ir.ty } + } +} + +impl From for PalletEventMetadata { + fn from(ir: PalletEventMetadataIR) -> Self { + PalletEventMetadata { ty: ir.ty } + } +} + +impl From for PalletConstantMetadata { + fn from(ir: PalletConstantMetadataIR) -> Self { + PalletConstantMetadata { name: ir.name, ty: ir.ty, value: ir.value, docs: ir.docs } + } +} + +impl From for PalletErrorMetadata { + fn from(ir: PalletErrorMetadataIR) -> Self { + PalletErrorMetadata { ty: ir.ty } + } +} + +impl From for SignedExtensionMetadata { + fn from(ir: SignedExtensionMetadataIR) -> Self { + SignedExtensionMetadata { + identifier: ir.identifier, + ty: ir.ty, + additional_signed: ir.additional_signed, + } + } +} + +impl From for ExtrinsicMetadata { + fn from(ir: ExtrinsicMetadataIR) -> Self { + ExtrinsicMetadata { + ty: ir.ty, + version: ir.version, + signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + } + } +} diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index 7a4bdbd679f0e..50a7a5bcd6088 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-arithmetic = { version = "6.0.0", default-features = false, path = "../arithmetic" } sp-core = { version = "7.0.0", default-features = false, path = "../core" } diff --git a/primitives/npos-elections/fuzzer/Cargo.toml 
b/primitives/npos-elections/fuzzer/Cargo.toml index 8a058cc92edcf..d7cc684546a5f 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -18,7 +18,7 @@ clap = { version = "4.0.9", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-npos-elections = { version = "4.0.0-dev", path = ".." } sp-runtime = { version = "7.0.0", path = "../../runtime" } diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 9bc7161012cb1..3a63c1ef55dab 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -18,6 +18,6 @@ proc-macro = true [dependencies] Inflector = "0.11.4" proc-macro-crate = "1.1.3" -proc-macro2 = "1.0.37" -quote = "1.0.10" -syn = { version = "1.0.98", features = ["full", "visit", "fold", "extra-traits"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full", "visit", "fold", "extra-traits"] } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index 16c1cffb78158..77a29bec3807f 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -36,8 +36,7 @@ use crate::utils::{ }; use syn::{ - parse_quote, spanned::Spanned, FnArg, Ident, ItemTrait, Result, Signature, Token, - TraitItemMethod, + parse_quote, spanned::Spanned, FnArg, Ident, ItemTrait, Result, Signature, Token, TraitItemFn, }; use proc_macro2::{Span, TokenStream}; @@ -116,7 +115,7 @@ fn function_no_std_impl( quote! {} }; - let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); + let attrs = method.attrs.iter().filter(|a| !a.path().is_ident("version")); let cfg_wasm_only = if is_wasm_only { quote! { #[cfg(target_arch = "wasm32")] } @@ -139,12 +138,12 @@ fn function_no_std_impl( /// Generate call to latest function version for `cfg((feature = "std")` /// /// This should generate simple `fn func(..) { func_version_(..) }`. -fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Result { +fn function_std_latest_impl(method: &TraitItemFn, latest_version: u32) -> Result { let function_name = &method.sig.ident; let args = get_function_arguments(&method.sig).map(FnArg::Typed); let arg_names = get_function_argument_names(&method.sig).collect::>(); let return_value = &method.sig.output; - let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); + let attrs = method.attrs.iter().filter(|a| !a.path().is_ident("version")); let latest_function_name = create_function_ident_with_version(&method.sig.ident, latest_version); @@ -162,7 +161,7 @@ fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Re /// Generates the bare function implementation for `cfg(feature = "std")`. 
fn function_std_impl( trait_name: &Ident, - method: &TraitItemMethod, + method: &TraitItemFn, version: u32, is_wasm_only: bool, tracing: bool, @@ -185,7 +184,7 @@ fn function_std_impl( .take(1), ); let return_value = &method.sig.output; - let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); + let attrs = method.attrs.iter().filter(|a| !a.path().is_ident("version")); // Don't make the function public accessible when this is a wasm only interface. let call_to_trait = generate_call_to_trait(trait_name, method, version, is_wasm_only); let call_to_trait = if !tracing { @@ -210,7 +209,7 @@ fn function_std_impl( /// Generate the call to the interface trait. fn generate_call_to_trait( trait_name: &Ident, - method: &TraitItemMethod, + method: &TraitItemFn, version: u32, is_wasm_only: bool, ) -> TokenStream { diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index fb751c69bc86d..f3cdcf7fd54a9 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -30,7 +30,7 @@ use crate::utils::{ }; use syn::{ - spanned::Spanned, Error, Ident, ItemTrait, Pat, Result, ReturnType, Signature, TraitItemMethod, + spanned::Spanned, Error, Ident, ItemTrait, Pat, Result, ReturnType, Signature, TraitItemFn, }; use proc_macro2::{Span, TokenStream}; @@ -78,7 +78,7 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result Result { @@ -136,7 +136,7 @@ fn generate_extern_host_function( } /// Generate the host exchangeable function for the given method. -fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result { +fn generate_exchangeable_host_function(method: &TraitItemFn) -> Result { let crate_ = generate_crate_access(); let arg_types = get_function_argument_types(&method.sig); let function = &method.sig.ident; diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index 4a3a688e5c3bd..540e930b0c14b 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -26,7 +26,7 @@ use crate::utils::{ use syn::{ fold::{self, Fold}, spanned::Spanned, - Error, Generics, ItemTrait, Receiver, Result, TraitItemMethod, Type, Visibility, + Error, Generics, ItemTrait, Receiver, Result, TraitItemFn, Type, Visibility, }; use proc_macro2::TokenStream; @@ -51,7 +51,7 @@ pub fn process(trait_def: &ItemTrait, is_wasm_only: bool) -> Result struct ToEssentialTraitDef { /// All errors found while doing the conversion. 
errors: Vec, - methods: Vec, + methods: Vec, } impl ToEssentialTraitDef { @@ -59,7 +59,7 @@ impl ToEssentialTraitDef { ToEssentialTraitDef { errors: vec![], methods: vec![] } } - fn into_methods(self) -> Result> { + fn into_methods(self) -> Result> { let mut errors = self.errors; let methods = self.methods; if let Some(first_error) = errors.pop() { @@ -72,8 +72,8 @@ impl ToEssentialTraitDef { } } - fn process(&mut self, method: &TraitItemMethod, version: u32) { - let mut folded = self.fold_trait_item_method(method.clone()); + fn process(&mut self, method: &TraitItemFn, version: u32) { + let mut folded = self.fold_trait_item_fn(method.clone()); folded.sig.ident = create_function_ident_with_version(&folded.sig.ident, version); self.methods.push(folded); } @@ -90,7 +90,7 @@ impl ToEssentialTraitDef { } impl Fold for ToEssentialTraitDef { - fn fold_trait_item_method(&mut self, mut method: TraitItemMethod) -> TraitItemMethod { + fn fold_trait_item_fn(&mut self, mut method: TraitItemFn) -> TraitItemFn { if method.default.take().is_none() { self.push_error(&method, "Methods need to have an implementation."); } @@ -105,9 +105,9 @@ impl Fold for ToEssentialTraitDef { self.error_on_generic_parameters(&method.sig.generics); - method.attrs.retain(|a| !a.path.is_ident("version")); + method.attrs.retain(|a| !a.path().is_ident("version")); - fold::fold_trait_item_method(self, method) + fold::fold_trait_item_fn(self, method) } fn fold_item_trait(&mut self, mut trait_def: ItemTrait) -> ItemTrait { @@ -154,7 +154,7 @@ fn impl_trait_for_externalities(trait_def: &ItemTrait, is_wasm_only: bool) -> Re let interface = get_runtime_interface(trait_def)?; let methods = interface.all_versions().map(|(version, method)| { let mut cloned = (*method).clone(); - cloned.attrs.retain(|a| !a.path.is_ident("version")); + cloned.attrs.retain(|a| !a.path().is_ident("version")); cloned.sig.ident = create_function_ident_with_version(&cloned.sig.ident, version); cloned }); diff --git a/primitives/runtime-interface/proc-macro/src/utils.rs b/primitives/runtime-interface/proc-macro/src/utils.rs index 07f9b5ce09802..9818fd6842a63 100644 --- a/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/primitives/runtime-interface/proc-macro/src/utils.rs @@ -21,7 +21,7 @@ use proc_macro2::{Span, TokenStream}; use syn::{ parse::Parse, parse_quote, spanned::Spanned, token, Error, FnArg, Ident, ItemTrait, LitInt, - Pat, PatType, Result, Signature, TraitItem, TraitItemMethod, Type, + Pat, PatType, Result, Signature, TraitItem, TraitItemFn, Type, }; use proc_macro_crate::{crate_name, FoundCrate}; @@ -41,23 +41,23 @@ mod attributes { /// A concrete, specific version of a runtime interface function. 
pub struct RuntimeInterfaceFunction { - item: TraitItemMethod, + item: TraitItemFn, should_trap_on_return: bool, } impl std::ops::Deref for RuntimeInterfaceFunction { - type Target = TraitItemMethod; + type Target = TraitItemFn; fn deref(&self) -> &Self::Target { &self.item } } impl RuntimeInterfaceFunction { - fn new(item: &TraitItemMethod) -> Result { + fn new(item: &TraitItemFn) -> Result { let mut item = item.clone(); let mut should_trap_on_return = false; item.attrs.retain(|attr| { - if attr.path.is_ident("trap_on_return") { + if attr.path().is_ident("trap_on_return") { should_trap_on_return = true; false } else { @@ -87,7 +87,7 @@ struct RuntimeInterfaceFunctionSet { } impl RuntimeInterfaceFunctionSet { - fn new(version: VersionAttribute, trait_item: &TraitItemMethod) -> Result { + fn new(version: VersionAttribute, trait_item: &TraitItemFn) -> Result { Ok(Self { latest_version_to_call: version.is_callable().then(|| version.version), versions: BTreeMap::from([( @@ -115,11 +115,7 @@ impl RuntimeInterfaceFunctionSet { } /// Add a different version of the function. - fn add_version( - &mut self, - version: VersionAttribute, - trait_item: &TraitItemMethod, - ) -> Result<()> { + fn add_version(&mut self, version: VersionAttribute, trait_item: &TraitItemFn) -> Result<()> { if let Some(existing_item) = self.versions.get(&version.version) { let mut err = Error::new(trait_item.span(), "Duplicated version attribute"); err.combine(Error::new( @@ -275,9 +271,9 @@ pub fn get_function_argument_types_ref_and_mut( } /// Returns an iterator over all trait methods for the given trait definition. -fn get_trait_methods(trait_def: &ItemTrait) -> impl Iterator { +fn get_trait_methods(trait_def: &ItemTrait) -> impl Iterator { trait_def.items.iter().filter_map(|i| match i { - TraitItem::Method(ref method) => Some(method), + TraitItem::Fn(ref method) => Some(method), _ => None, }) } @@ -326,10 +322,10 @@ impl Parse for VersionAttribute { } /// Return [`VersionAttribute`], if present. 
-fn get_item_version(item: &TraitItemMethod) -> Result> { +fn get_item_version(item: &TraitItemFn) -> Result> { item.attrs .iter() - .find(|attr| attr.path.is_ident("version")) + .find(|attr| attr.path().is_ident("version")) .map(|attr| attr.parse_args()) .transpose() } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index d691d4846c330..e1be3b5d99d9b 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -42,7 +42,8 @@ fn call_wasm_method_with_result( let executor = sc_executor::WasmExecutor::< ExtendedHostFunctions, - >::new(sc_executor::WasmExecutionMethod::Interpreted, Some(8), 8, None, 2); + >::builder() + .build(); let (result, allocation_stats) = executor.uncached_call_with_allocation_stats( RuntimeBlob::uncompress_if_needed(binary).expect("Failed to parse binary"), @@ -176,62 +177,85 @@ fn test_versionining_register_only() { call_wasm_method::(wasm_binary_unwrap(), "test_versionning_register_only_works"); } +fn run_test_in_another_process( + test_name: &str, + test_body: impl FnOnce(), +) -> Option { + if std::env::var("RUN_FORKED_TEST").is_ok() { + test_body(); + None + } else { + let output = std::process::Command::new(std::env::current_exe().unwrap()) + .arg(test_name) + .env("RUN_FORKED_TEST", "1") + .output() + .unwrap(); + + assert!(output.status.success()); + Some(output) + } +} + #[test] fn test_tracing() { - use std::fmt; - use tracing::span::Id as SpanId; - use tracing_core::field::{Field, Visit}; - - #[derive(Clone)] - struct TracingSubscriber(Arc>); - - struct FieldConsumer(&'static str, Option); - impl Visit for FieldConsumer { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - if field.name() == self.0 { - self.1 = Some(format!("{:?}", value)) + // Run in a different process to ensure that the `Span` is registered with our local + // `TracingSubscriber`. 
+ run_test_in_another_process("test_tracing", || { + use std::fmt; + use tracing::span::Id as SpanId; + use tracing_core::field::{Field, Visit}; + + #[derive(Clone)] + struct TracingSubscriber(Arc>); + + struct FieldConsumer(&'static str, Option); + impl Visit for FieldConsumer { + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + if field.name() == self.0 { + self.1 = Some(format!("{:?}", value)) + } } } - } - #[derive(Default)] - struct Inner { - spans: HashSet, - } - - impl tracing::subscriber::Subscriber for TracingSubscriber { - fn enabled(&self, _: &tracing::Metadata) -> bool { - true + #[derive(Default)] + struct Inner { + spans: HashSet, } - fn new_span(&self, span: &tracing::span::Attributes) -> tracing::Id { - let mut inner = self.0.lock().unwrap(); - let id = SpanId::from_u64((inner.spans.len() + 1) as _); - let mut f = FieldConsumer("name", None); - span.record(&mut f); - inner.spans.insert(f.1.unwrap_or_else(|| span.metadata().name().to_owned())); - id - } + impl tracing::subscriber::Subscriber for TracingSubscriber { + fn enabled(&self, _: &tracing::Metadata) -> bool { + true + } - fn record(&self, _: &SpanId, _: &tracing::span::Record) {} + fn new_span(&self, span: &tracing::span::Attributes) -> tracing::Id { + let mut inner = self.0.lock().unwrap(); + let id = SpanId::from_u64((inner.spans.len() + 1) as _); + let mut f = FieldConsumer("name", None); + span.record(&mut f); + inner.spans.insert(f.1.unwrap_or_else(|| span.metadata().name().to_owned())); + id + } - fn record_follows_from(&self, _: &SpanId, _: &SpanId) {} + fn record(&self, _: &SpanId, _: &tracing::span::Record) {} - fn event(&self, _: &tracing::Event) {} + fn record_follows_from(&self, _: &SpanId, _: &SpanId) {} - fn enter(&self, _: &SpanId) {} + fn event(&self, _: &tracing::Event) {} - fn exit(&self, _: &SpanId) {} - } + fn enter(&self, _: &SpanId) {} - let subscriber = TracingSubscriber(Default::default()); - let _guard = tracing::subscriber::set_default(subscriber.clone()); + fn exit(&self, _: &SpanId) {} + } - // Call some method to generate a trace - call_wasm_method::(wasm_binary_unwrap(), "test_return_data"); + let subscriber = TracingSubscriber(Default::default()); + let _guard = tracing::subscriber::set_default(subscriber.clone()); + + // Call some method to generate a trace + call_wasm_method::(wasm_binary_unwrap(), "test_return_data"); - let inner = subscriber.0.lock().unwrap(); - assert!(inner.spans.contains("return_input_version_1")); + let inner = subscriber.0.lock().unwrap(); + assert!(inner.spans.contains("return_input_version_1")); + }); } #[test] diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 369f01920f147..881069f5fd507 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -21,7 +21,7 @@ impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } paste = "1.0" rand = { version = "0.8.5", optional = true } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } sp-arithmetic = { version = "6.0.0", default-features = false, path = "../arithmetic" } @@ -34,7 +34,7 @@ parity-util-mem = { version = "0.12.0", default-features = false, features = ["p [dev-dependencies] rand = "0.8.5" serde_json = "1.0.85" 
-zstd = { version = "0.11.2", default-features = false } +zstd = { version = "0.12.3", default-features = false } sp-api = { version = "4.0.0-dev", path = "../api" } sp-state-machine = { version = "0.13.0", path = "../state-machine" } sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index 73741ba5d1dae..143fc2ce58d90 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -453,8 +453,8 @@ mod tests { #[test] fn digest_item_type_info() { let type_info = DigestItem::type_info(); - let variants = if let scale_info::TypeDef::Variant(variant) = type_info.type_def() { - variant.variants() + let variants = if let scale_info::TypeDef::Variant(variant) = type_info.type_def { + variant.variants } else { panic!("Should be a TypeDef::TypeDefVariant") }; @@ -475,10 +475,10 @@ mod tests { let encoded = digest_item.encode(); let variant = variants .iter() - .find(|v| v.name() == &variant_name) + .find(|v| v.name == variant_name) .expect(&format!("Variant {} not found", variant_name)); - assert_eq!(encoded[0], variant.index()) + assert_eq!(encoded[0], variant.index) }; check(DigestItemType::Other); diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index dc03e074f9e89..0994dc21b31dd 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -606,9 +606,10 @@ impl From for DispatchError { #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum TokenError { /// Funds are unavailable. - NoFunds, - /// Account that must exist would die. - WouldDie, + FundsUnavailable, + /// Some part of the balance gives the only provider reference to the account and thus cannot + /// be (re)moved. + OnlyProvider, /// Account cannot exist with the funds that would be given. BelowMinimum, /// Account cannot be created. @@ -619,18 +620,25 @@ pub enum TokenError { Frozen, /// Operation is not supported by the asset. Unsupported, + /// Account cannot be created for a held balance. + CannotCreateHold, + /// Withdrawal would cause unwanted loss of account. + NotExpendable, } impl From for &'static str { fn from(e: TokenError) -> &'static str { match e { - TokenError::NoFunds => "Funds are unavailable", - TokenError::WouldDie => "Account that must exist would die", + TokenError::FundsUnavailable => "Funds are unavailable", + TokenError::OnlyProvider => "Account that must exist would die", TokenError::BelowMinimum => "Account cannot exist with the funds that would be given", TokenError::CannotCreate => "Account cannot be created", TokenError::UnknownAsset => "The asset in question is unknown", TokenError::Frozen => "Funds exist but are frozen", TokenError::Unsupported => "Operation is not supported by the asset", + TokenError::CannotCreateHold => + "Account cannot be created for recording amount on hold", + TokenError::NotExpendable => "Account that is desired to remain would die", } } } @@ -896,40 +904,6 @@ pub fn print(print: impl traits::Printable) { print.print(); } -/// Batching session. -/// -/// To be used in runtime only. Outside of runtime, just construct -/// `BatchVerifier` directly. -#[must_use = "`verify()` needs to be called to finish batch signature verification!"] -pub struct SignatureBatching(bool); - -impl SignatureBatching { - /// Start new batching session. 
- pub fn start() -> Self { - sp_io::crypto::start_batch_verify(); - SignatureBatching(false) - } - - /// Verify all signatures submitted during the batching session. - #[must_use] - pub fn verify(mut self) -> bool { - self.0 = true; - sp_io::crypto::finish_batch_verify() - } -} - -impl Drop for SignatureBatching { - fn drop(&mut self) { - // Sanity check. If user forgets to actually call `verify()`. - // - // We should not panic if the current thread is already panicking, - // because Rust otherwise aborts the process. - if !self.0 && !sp_std::thread::panicking() { - panic!("Signature verification has not been called before `SignatureBatching::drop`") - } - } -} - /// Describes on what should happen with a storage transaction. pub enum TransactionOutcome { /// Commit the transaction. @@ -954,7 +928,7 @@ mod tests { use super::*; use codec::{Decode, Encode}; - use sp_core::crypto::{Pair, UncheckedFrom}; + use sp_core::crypto::Pair; use sp_io::TestExternalities; use sp_state_machine::create_proof_check_backend; @@ -994,8 +968,8 @@ mod tests { Module(ModuleError { index: 2, error: [1, 0, 0, 0], message: None }), ConsumerRemaining, NoProviders, - Token(TokenError::NoFunds), - Token(TokenError::WouldDie), + Token(TokenError::FundsUnavailable), + Token(TokenError::OnlyProvider), Token(TokenError::BelowMinimum), Token(TokenError::CannotCreate), Token(TokenError::UnknownAsset), @@ -1037,36 +1011,6 @@ mod tests { assert!(multi_sig.verify(msg, &multi_signer.into_account())); } - #[test] - #[should_panic(expected = "Signature verification has not been called")] - fn batching_still_finishes_when_not_called_directly() { - let mut ext = sp_state_machine::BasicExternalities::default(); - ext.register_extension(sp_core::traits::TaskExecutorExt::new( - sp_core::testing::TaskExecutor::new(), - )); - - ext.execute_with(|| { - let _batching = SignatureBatching::start(); - let dummy = UncheckedFrom::unchecked_from([1; 32]); - let dummy_sig = UncheckedFrom::unchecked_from([1; 64]); - sp_io::crypto::sr25519_verify(&dummy_sig, &Vec::new(), &dummy); - }); - } - - #[test] - #[should_panic(expected = "Hey, I'm an error")] - fn batching_does_not_panic_while_thread_is_already_panicking() { - let mut ext = sp_state_machine::BasicExternalities::default(); - ext.register_extension(sp_core::traits::TaskExecutorExt::new( - sp_core::testing::TaskExecutor::new(), - )); - - ext.execute_with(|| { - let _batching = SignatureBatching::start(); - panic!("Hey, I'm an error"); - }); - } - #[test] fn execute_and_generate_proof_works() { use codec::Encode; @@ -1110,3 +1054,23 @@ mod tests { }); } } + +// NOTE: we have to test the sp_core stuff also from a different crate to check that the macro +// can access the sp_core crate. +#[cfg(test)] +mod sp_core_tests { + use super::*; + + #[test] + #[should_panic] + fn generate_feature_enabled_macro_panics() { + sp_core::generate_feature_enabled_macro!(if_test, test, $); + if_test!(panic!("This should panic")); + } + + #[test] + fn generate_feature_enabled_macro_works() { + sp_core::generate_feature_enabled_macro!(if_not_test, not(test), $); + if_not_test!(panic!("This should not panic")); + } +} diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index 0229203c1de5f..25e5c1007d5da 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -450,7 +450,7 @@ impl Headers { /// and collect them on your own. 
pub fn find(&self, name: &str) -> Option<&str> { let raw = name.as_bytes(); - for &(ref key, ref val) in &self.raw { + for (key, val) in &self.raw { if &**key == raw { return str::from_utf8(val).ok() } diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 280f31e7f365e..e6f8d39840863 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -27,7 +27,7 @@ use crate::{ ValidateUnsigned, }, transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResultWithInfo, CryptoTypeId, KeyTypeId, + ApplyExtrinsicResultWithInfo, KeyTypeId, }; use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; use sp_core::{ @@ -116,7 +116,6 @@ impl UintAuthorityId { impl sp_application_crypto::RuntimeAppPublic for UintAuthorityId { const ID: KeyTypeId = key_types::DUMMY; - const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"dumm"); type Signature = TestSignature; @@ -158,10 +157,6 @@ impl OpaqueKeys for UintAuthorityId { } } -impl crate::BoundToRuntimeAppPublic for UintAuthorityId { - type Public = Self; -} - impl traits::IdentifyAccount for UintAuthorityId { type AccountId = u64; diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 90ac8c68fa581..65ad64065d1ad 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -30,7 +30,7 @@ use crate::{ use impl_trait_for_tuples::impl_for_tuples; #[cfg(feature = "std")] use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use sp_application_crypto::AppKey; +use sp_application_crypto::AppCrypto; pub use sp_arithmetic::traits::{ checked_pow, ensure_pow, AtLeast32Bit, AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedShl, CheckedShr, CheckedSub, Ensure, EnsureAdd, EnsureAddAssign, EnsureDiv, @@ -148,10 +148,10 @@ pub trait AppVerify { } impl< - S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + From, T: sp_application_crypto::Wraps - + sp_application_crypto::AppKey + + sp_application_crypto::AppCrypto + sp_application_crypto::AppSignature + AsRef + AsMut @@ -159,16 +159,18 @@ impl< > AppVerify for T where ::Signer: IdentifyAccount::Signer>, - <::Public as sp_application_crypto::AppPublic>::Generic: IdentifyAccount< - AccountId = <::Public as sp_application_crypto::AppPublic>::Generic, + <::Public as sp_application_crypto::AppPublic>::Generic: IdentifyAccount< + AccountId = <::Public as sp_application_crypto::AppPublic>::Generic, >, { - type AccountId = ::Public; - fn verify>(&self, msg: L, signer: &::Public) -> bool { + type AccountId = ::Public; + fn verify>(&self, msg: L, signer: &::Public) -> bool { use sp_application_crypto::IsWrappedBy; let inner: &S = self.as_ref(); let inner_pubkey = - <::Public as sp_application_crypto::AppPublic>::Generic::from_ref(signer); + <::Public as sp_application_crypto::AppPublic>::Generic::from_ref( + signer, + ); Verify::verify(inner, msg, inner_pubkey) } } diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 31ec009ab2c64..6f362974b91b4 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { 
version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-runtime = { version = "7.0.0", optional = true, path = "../runtime" } diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index a8e5a543dbf75..f383a5e88759f 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -13,8 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +serde = { version = "1.0.136", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../std" } @@ -22,6 +23,7 @@ sp-std = { version = "5.0.0", default-features = false, path = "../std" } [features] default = ["std"] std = [ + "serde", "codec/std", "scale-info/std", "sp-core/std", diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index a8d8e6a602c94..57128bd327d9e 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -20,6 +20,8 @@ //! A crate which contains primitives that are useful for implementation that uses staking //! approaches in general. Definitions related to sessions, slashing, etc go here. +use scale_info::TypeInfo; +use sp_core::RuntimeDebug; use sp_runtime::{DispatchError, DispatchResult}; use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; @@ -31,6 +33,18 @@ pub type SessionIndex = u32; /// Counter for the number of eras that have passed. pub type EraIndex = u32; +/// Representation of the status of a staker. +#[derive(RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize, PartialEq, Eq, Clone))] +pub enum StakerStatus { + /// Chilling. + Idle, + /// Declaring desire in validate, i.e author blocks. + Validator, + /// Declaring desire to nominate, delegate, or generally approve of the given set of others. + Nominator(Vec), +} + /// Trait describing something that implements a hook for any operations to perform when a staker is /// slashed. pub trait OnStakerSlash { @@ -57,9 +71,7 @@ impl OnStakerSlash for () { /// A struct that reflects stake that an account has in the staking system. Provides a set of /// methods to operate on it's properties. Aimed at making `StakingInterface` more concise. -pub struct Stake { - /// The stash account whose balance is actually locked and at stake. - pub stash: T::AccountId, +pub struct Stake { /// The total stake that `stash` has in the staking system. This includes the /// `active` stake, and any funds currently in the process of unbonding via /// [`StakingInterface::unbond`]. @@ -69,10 +81,10 @@ pub struct Stake { /// This is only guaranteed to reflect the amount locked by the staking system. If there are /// non-staking locks on the bonded pair's balance this amount is going to be larger in /// reality. - pub total: T::Balance, + pub total: Balance, /// The total amount of the stash's balance that will be at stake in any forthcoming /// rounds. 
- pub active: T::Balance, + pub active: Balance, } /// A generic representation of a staking implementation. @@ -111,21 +123,25 @@ pub trait StakingInterface { /// This should be the latest planned era that the staking system knows about. fn current_era() -> EraIndex; - /// Returns the stake of `who`. - fn stake(who: &Self::AccountId) -> Result, DispatchError>; + /// Returns the [`Stake`] of `who`. + fn stake(who: &Self::AccountId) -> Result, DispatchError>; + /// Total stake of a staker, `Err` if not a staker. fn total_stake(who: &Self::AccountId) -> Result { Self::stake(who).map(|s| s.total) } + /// Total active portion of a staker's [`Stake`], `Err` if not a staker. fn active_stake(who: &Self::AccountId) -> Result { Self::stake(who).map(|s| s.active) } + /// Returns whether a staker is unbonding, `Err` if not a staker at all. fn is_unbonding(who: &Self::AccountId) -> Result { Self::stake(who).map(|s| s.active != s.total) } + /// Returns whether a staker is FULLY unbonding, `Err` if not a staker at all. fn fully_unbond(who: &Self::AccountId) -> DispatchResult { Self::unbond(who, Self::stake(who)?.active) } @@ -176,9 +192,17 @@ pub trait StakingInterface { /// Checks whether an account `staker` has been exposed in an era. fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool; + /// Return the status of the given staker, `None` if not staked at all. + fn status(who: &Self::AccountId) -> Result, DispatchError>; + /// Get the nominations of a stash, if they are a nominator, `None` otherwise. #[cfg(feature = "runtime-benchmarks")] - fn nominations(who: Self::AccountId) -> Option>; + fn nominations(who: &Self::AccountId) -> Option> { + match Self::status(who) { + Ok(StakerStatus::Nominator(t)) => Some(t), + _ => None, + } + } #[cfg(feature = "runtime-benchmarks")] fn add_era_stakers( diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 9759547d7f384..cc780e51abcaf 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -33,7 +33,7 @@ array-bytes = "4.1" pretty_assertions = "1.2.1" rand = "0.8.5" sp-runtime = { version = "7.0.0", path = "../runtime" } -trie-db = "0.27.0" +trie-db = "0.27.1" assert_matches = "1.5" [features] diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 90ee962dafa9e..0001d0026c394 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -163,7 +163,7 @@ mod execution { use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, - traits::{CallContext, CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, + traits::{CallContext, CodeExecutor, RuntimeCode}, }; use sp_externalities::Extensions; use std::{ @@ -322,14 +322,10 @@ mod execution { exec: &'a Exec, method: &'a str, call_data: &'a [u8], - mut extensions: Extensions, + extensions: Extensions, runtime_code: &'a RuntimeCode, - spawn_handle: impl SpawnNamed + Send + 'static, context: CallContext, ) -> Self { - extensions.register(ReadRuntimeVersionExt::new(exec.clone())); - extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); - Self { backend, exec, @@ -511,11 +507,10 @@ mod execution { } /// Prove execution using the given state backend, overlayed changes, and call executor. 
- pub fn prove_execution( + pub fn prove_execution( backend: &mut B, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -525,14 +520,12 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, - Spawn: SpawnNamed + Send + 'static, { let trie_backend = backend.as_trie_backend(); - prove_execution_on_trie_backend::<_, _, _, _>( + prove_execution_on_trie_backend::<_, _, _>( trie_backend, overlay, exec, - spawn_handle, method, call_data, runtime_code, @@ -549,11 +542,10 @@ mod execution { /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. - pub fn prove_execution_on_trie_backend( + pub fn prove_execution_on_trie_backend( trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -564,7 +556,6 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, - Spawn: SpawnNamed + Send + 'static, { let proving_backend = TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); @@ -577,7 +568,6 @@ mod execution { call_data, extensions, runtime_code, - spawn_handle, CallContext::Offchain, ) .execute_using_consensus_failure_handler::<_>(always_wasm())?; @@ -590,12 +580,11 @@ mod execution { } /// Check execution proof, generated by `prove_execution` call. - pub fn execution_proof_check( + pub fn execution_proof_check( root: H::Out, proof: StorageProof, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -604,14 +593,12 @@ mod execution { H: Hasher + 'static, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, - Spawn: SpawnNamed + Send + 'static, { let trie_backend = create_proof_check_backend::(root, proof)?; - execution_proof_check_on_trie_backend::<_, _, _>( + execution_proof_check_on_trie_backend::<_, _>( &trie_backend, overlay, exec, - spawn_handle, method, call_data, runtime_code, @@ -619,11 +606,10 @@ mod execution { } /// Check execution proof on proving backend, generated by `prove_execution` call. 
- pub fn execution_proof_check_on_trie_backend( + pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -632,7 +618,6 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, - Spawn: SpawnNamed + Send + 'static, { StateMachine::<_, H, Exec>::new( trie_backend, @@ -642,7 +627,6 @@ mod execution { call_data, Extensions::default(), runtime_code, - spawn_handle, CallContext::Offchain, ) .execute_using_consensus_failure_handler(always_untrusted_wasm()) @@ -1309,7 +1293,6 @@ mod tests { use sp_core::{ map, storage::{ChildInfo, StateVersion}, - testing::TaskExecutor, traits::{CallContext, CodeExecutor, Externalities, RuntimeCode}, H256, }; @@ -1384,7 +1367,6 @@ mod tests { &[], Default::default(), &wasm_code, - TaskExecutor::new(), CallContext::Offchain, ); @@ -1413,7 +1395,6 @@ mod tests { &[], Default::default(), &wasm_code, - TaskExecutor::new(), CallContext::Offchain, ); @@ -1443,7 +1424,6 @@ mod tests { &[], Default::default(), &wasm_code, - TaskExecutor::new(), CallContext::Offchain, ); @@ -1475,7 +1455,6 @@ mod tests { &mut remote_backend, &mut Default::default(), &executor, - TaskExecutor::new(), "test", &[], &RuntimeCode::empty(), @@ -1483,12 +1462,11 @@ mod tests { .unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::( remote_root, remote_proof, &mut Default::default(), &executor, - TaskExecutor::new(), "test", &[], &RuntimeCode::empty(), diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index a9f6399a9a1b5..1921287a64745 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -34,8 +34,6 @@ use sp_core::{ well_known_keys::{is_child_storage_key, CODE}, StateVersion, Storage, }, - testing::TaskExecutor, - traits::TaskExecutorExt, }; use sp_externalities::{Extension, ExtensionStore, Extensions}; use sp_trie::StorageProof; @@ -105,9 +103,6 @@ where storage.top.insert(CODE.to_vec(), code.to_vec()); - let mut extensions = Extensions::default(); - extensions.register(TaskExecutorExt::new(TaskExecutor::new())); - let offchain_db = TestPersistentOffchainDB::new(); let backend = (storage, state_version).into(); @@ -115,7 +110,7 @@ where TestExternalities { overlay: OverlayedChanges::default(), offchain_db, - extensions, + extensions: Default::default(), backend, storage_transaction_cache: Default::default(), state_version, @@ -137,6 +132,17 @@ where self.offchain_db.clone() } + /// Batch insert key/values into backend + pub fn batch_insert(&mut self, kvs: I) + where + I: IntoIterator, + { + self.backend.insert( + Some((None, kvs.into_iter().map(|(k, v)| (k, Some(v))).collect())), + self.state_version, + ); + } + /// Insert key/value into backend pub fn insert(&mut self, k: StorageKey, v: StorageValue) { self.backend.insert(vec![(None, vec![(k, Some(v))])], self.state_version); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 10e2dfd16d66a..afbe6cbbea081 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -536,6 +536,29 @@ pub mod tests { (mdb, root) } + pub(crate) fn test_db_with_hex_keys( + state_version: StateVersion, + keys: &[&str], + ) -> (PrefixedMemoryDB, H256) { + let mut root = H256::default(); + let mut 
mdb = PrefixedMemoryDB::::default(); + match state_version { + StateVersion::V0 => { + let mut trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); + for (index, key) in keys.iter().enumerate() { + trie.insert(&array_bytes::hex2bytes(key).unwrap(), &[index as u8]).unwrap(); + } + }, + StateVersion::V1 => { + let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); + for (index, key) in keys.iter().enumerate() { + trie.insert(&array_bytes::hex2bytes(key).unwrap(), &[index as u8]).unwrap(); + } + }, + }; + (mdb, root) + } + pub(crate) fn test_trie( hashed_value: StateVersion, cache: Option, @@ -549,6 +572,20 @@ pub mod tests { .build() } + pub(crate) fn test_trie_with_hex_keys( + hashed_value: StateVersion, + cache: Option, + recorder: Option, + keys: &[&str], + ) -> TrieBackend, BlakeTwo256> { + let (mdb, root) = test_db_with_hex_keys(hashed_value, keys); + + TrieBackendBuilder::new(mdb, root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() + } + parameterized_test!(read_from_storage_returns_some, read_from_storage_returns_some_inner); fn read_from_storage_returns_some_inner( state_version: StateVersion, @@ -697,10 +734,25 @@ pub mod tests { ); // Fetch starting at a given key and with prefix which doesn't match that key. + // (Start *before* the prefix.) + assert_eq!( + trie.keys(IterArgs { + prefix: Some(b"value"), + start_at: Some(b"key"), + ..IterArgs::default() + }) + .unwrap() + .map(|result| result.unwrap()) + .collect::>(), + vec![b"value1".to_vec(), b"value2".to_vec(),] + ); + + // Fetch starting at a given key and with prefix which doesn't match that key. + // (Start *after* the prefix.) assert!(trie .keys(IterArgs { prefix: Some(b"value"), - start_at: Some(b"key"), + start_at: Some(b"vblue"), ..IterArgs::default() }) .unwrap() @@ -722,6 +774,71 @@ pub mod tests { ); } + // This test reproduces an actual real-world issue: https://github.com/polkadot-js/apps/issues/9103 + parameterized_test!( + storage_iter_does_not_return_out_of_prefix_keys, + storage_iter_does_not_return_out_of_prefix_keys_inner + ); + fn storage_iter_does_not_return_out_of_prefix_keys_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie = test_trie_with_hex_keys(state_version, cache, recorder, &[ + "6cf4040bbce30824850f1a4823d8c65faeefaa25a5bae16a431719647c1d99da", + "6cf4040bbce30824850f1a4823d8c65ff536928ca5ba50039bc2766a48ddbbab", + "70f943199f1a2dde80afdaf3f447db834e7b9012096b41c4eb3aaf947f6ea429", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d007fc7effcb0c044a0c41fd8a77eb55d2133058a86d1f4d6f8e45612cd271eefd77f91caeaacfe011b8f41540e0a793b0fd51b245dae19382b45386570f2b545fab75e3277910f7324b55f47c29f9965e8298371404e50ac", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d0179c23cd593c770fde9fc7aa8f84b3e401e654b8986c67728844da0080ec9ee222b41a85708a471a511548302870b53f40813d8354b6d2969e1b7ca9e083ecf96f9647e004ecb41c7f26f0110f778bdb3d9da31bef323d9", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d024de296f88310247001277477f4ace4d0aa5685ea2928d518a807956e4806a656520d6520b8ac259f684aa0d91961d76f697716f04e6c997338d03560ab7d703829fe7b9d0e6d7eff8d8412fc428364c2f474a67b36586d", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d13dc5d83f2361c14d05933eb3182a92ac14665718569703baf1da25c7d571843b6489f03d8549c87bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + 
"7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d1786d20bbb4b91eb1f5765432d750bd0111a0807c8d04f05110ffaf73f4fa7b360422c13bc97efc3a2324d9fa8f954b424c0bcfce7236a2e8107dd31c2042a9860a964f8472fda49749dec3f146e81470b55aa0f3930d854", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d18c246484ec5335a40903e7cd05771be7c0b8459333f1ae2925c3669fc3e5accd0f38c4711a15544bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d1aca749033252ce75245528397430d14cb8e8c09248d81ee5de00b6ae93ee880b6d19a595e6dc106bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d1d6bceb91bc07973e7b3296f83af9f1c4300ce9198cc3b44c54dafddb58f4a43aee44a9bef1a2e9dbfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d203383772f45721232139e1a8863b0f2f8d480bdc15bcc1f2033cf467e137059558da743838f6b58bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d2197cc5c3eb3a6a67538e0dc3eaaf8c820d71310d377499c4a5d276381789e0a234475e69cddf709d207458083d6146d3a36fce7f1fe05b232702bf154096e5e3a8c378bdc237d7a27909acd663563917f0f70bb0e8e61a3", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d4f19c117f2ea36100f753c4885aa8d63b4d65a0dc32106f829f89eeabd52c37105c9bdb75f752469729fa3f0e7d907c1d949192c8e264a1a510c32abe3a05ed50be2262d5bfb981673ec80a07fd2ce28c7f27cd0043a788c", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d547d5aaa651bafa63d077560dfe823ac75665ebf1dcfd96a06e45499f03dda31282977706918d4821b8f41540e0a793b0fd51b245dae19382b45386570f2b545fab75e3277910f7324b55f47c29f9965e8298371404e50ac", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d6037207d54d69a082ea225ab4a412e4b87d6f5612053b07c405cf05ea25e482a4908c0713be2998abfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d63d0920de0c7315ebaed1d639d926961d28af89461c31eca890441e449147d23bb7c9d4fc42d7c16bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d7912c66be82a5972e5bc11c8d10551a296ba9aaff8ca6ab22a8cd1987974b87a97121c871f786d2e17e0a629acf01c38947f170b7e02a9ebb4ee60f83779acb99b71114c01a4f0a60694611a1502c399c77214ffa26e955b", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d7aa00f217f3a374a2f1ca0f388719f84099e8157a8a83c5ccf54eae1617f93933fa976baa629e6febfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d9e1c3c8ab41943cf377b1aa724d7f518a3cfc96a732bdc4658155d09ed2bfc31b5ccbc6d8646b59f1b8f41540e0a793b0fd51b245dae19382b45386570f2b545fab75e3277910f7324b55f47c29f9965e8298371404e50ac", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d9fb8d6d95d5214a3305a4fa07e344eb99fad4be3565d646c8ac5af85514d9c96702c9c207be234958dbdb9185f467d2be3b84e8b2f529f7ec3844b378a889afd6bd31a9b5ed22ffee2019ad82c6692f1736dd41c8bb85726", + 
"7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8d9fb8d6d95d5214a3305a4fa07e344eb99fad4be3565d646c8ac5af85514d9c96702c9c207be23495ec1caa509591a36a8403684384ce40838c9bd7fc49d933a10d3b26e979273e2f17ebf0bf41cd90e4287e126a59d5a243", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8da7fc066aae2ffe03b36e9a72f9a39cb2befac7e47f320309f31f1c1676288d9596045807304b3d79bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8daf3c377b0fddf7c7ad6d390fab0ab45ac16c21645be880af5cab2fbbeb04820401a4c9f766c17bef9fc14a2e16ade86fe26ee81d4497dc6aab81cc5f5bb0458d6149a763ecb09aefec06950dd61db1ba025401d2a04e3b9d", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8daf3c377b0fddf7c7ad6d390fab0ab45ac16c21645be880af5cab2fbbeb04820401a4c9f766c17befbfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8db60505ba8b77ef03ed805436d3242f26dc828084b12aaf4bcb96af468816a182b5360149398aad6b1dafe949b0918138ceef924f6393d1818a04842301294604972da17b24b31b155e4409a01273733b8d21a156c2e7eb71", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8dbd27136a6e028656073cc840bfabb48fe935880c4c4c990ee98458b2fed308e9765f7f7f717dd3b2862fa5361d3b55afa6040e582687403c852b2d065b24f253276cc581226991f8e1818a78fc64c39da7f0b383c6726e0f", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8dca40d91320edd326500f9e8b5a0b23a8bdf21549f98f0e014f66b6a18bdd78e337a6c05d670c80c88a55d4c7bb6fbae546e2d03ac9ab16e85fe11dad6adfd6a20618905477b831d7d48ca32d0bfd2bdc8dbeba26ffe2c710", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8dd27478512243ed62c1c1f7066021798a464d4cf9099546d5d9907b3369f1b9d7a5aa5d60ca845619bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8de6da5659cbbe1489abbe99c4d3a474f4d1e78edb55a9be68d8f52c6fe730388a298e6f6325db3da7bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8de6da5659cbbe1489abbe99c4d3a474f4d1e78edb55a9be68d8f52c6fe730388a298e6f6325db3da7e94ca3e8c297d82f71e232a2892992d1f6480475fb797ce64e58f773d8fafd9fbcee4bdf4b14f2a71b6d3a428cf9f24b", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8decdd1760c61ff7234f2876dbe817af803170233320d778b92043b2359e3de6d16c9e5359f6302da31c84d6f551ad2a831263ef956f0cdb3b4810cefcb2d0b57bcce7b82007016ae4fe752c31d1a01b589a7966cea03ec65c", + "7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8df9981ee6b69eb7af2153af34f39ffc06e2daa5272c99798c8849091284dc8905f2a76b65754c2089bfa5709836ba729443c319659e83ad5ee133e6f11af51d883e56216e9e1bbb1e2920c7c6120cbb55cd469b1f95b61601", + "7474449cca95dc5d0c00e71735a6d17d4e7b9012096b41c4eb3aaf947f6ea429", + "89d139e01a5eb2256f222e5fc5dbe6b33c9c1284130706f5aea0c8b3d4c54d89", + "89d139e01a5eb2256f222e5fc5dbe6b36254e9d55588784fa2a62b726696e2b1" + ]); + + let key = array_bytes::hex2bytes("7474449cca95dc5d0c00e71735a6d17d3cd15a3fd6e04e47bee3922dbfa92c8da7dad55cf08ffe8194efa962146801b0503092b1ed6a3fa6aee9107334aefd7965bbe568c3d24c6d").unwrap(); + + assert_eq!( + trie.keys(IterArgs { + prefix: Some(&key), + start_at: Some(&key), + start_at_exclusive: true, + ..IterArgs::default() + }) + .unwrap() + .map(|result| result.unwrap()) + 
.collect::>(), + Vec::>::new() + ); + } + parameterized_test!(storage_root_is_non_default, storage_root_is_non_default_inner); fn storage_root_is_non_default_inner( state_version: StateVersion, diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index deb120104b717..f6d57ce70fc05 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -13,6 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "7.0.0", default-features = false, path = "../core" } @@ -28,4 +29,5 @@ std = [ "sp-application-crypto/std", "sp-core/std", "sp-runtime/std", + "scale-info/std", ] diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index 3a5f3dac40d68..035acc7a35ef6 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -28,7 +28,7 @@ pub use sp_core::{hash::H256, RuntimeDebug}; use sp_runtime::traits::{BlakeTwo256, Extrinsic as ExtrinsicT, Verify}; /// Extrinsic for test-runtime. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] pub enum Extrinsic { IncludeData(Vec), StorageChange(Vec, Option>), diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml index 565d3d8d29f1c..72d3175a577cf 100644 --- a/primitives/transaction-storage-proof/Cargo.toml +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } log = { version = "0.4.17", optional = true } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-core = { version = "7.0.0", optional = true, path = "../core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 21582296b67f8..be02303003fd0 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -20,13 +20,13 @@ harness = false [dependencies] ahash = { version = "0.8.2", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -hashbrown = { version = "0.12.3", optional = true } +hashbrown = { version = "0.13.2", optional = true } hash-db = { version = "0.16.0", default-features = false } lazy_static = { version = "1.4.0", optional = true } memory-db = { version = "0.32.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } thiserror = { version = "1.0.30", optional = 
true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.27.0", default-features = false } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 175fb32d4e851..94155458569bf 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -647,7 +647,7 @@ mod tests { count: 1000, }; let mut d = st.make(); - d.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); + d.sort_by(|(a, _), (b, _)| a.cmp(b)); let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); check_input(&dr); } @@ -716,10 +716,7 @@ mod tests { t } - fn unpopulate_trie<'db, T: TrieConfiguration>( - t: &mut TrieDBMut<'db, T>, - v: &[(Vec, Vec)], - ) { + fn unpopulate_trie(t: &mut TrieDBMut<'_, T>, v: &[(Vec, Vec)]) { for i in v { let key: &[u8] = &i.0; t.remove(key).unwrap(); diff --git a/primitives/trie/src/recorder.rs b/primitives/trie/src/recorder.rs index 3bdfda01532cc..728dc836205b5 100644 --- a/primitives/trie/src/recorder.rs +++ b/primitives/trie/src/recorder.rs @@ -25,7 +25,7 @@ use codec::Encode; use hash_db::Hasher; use parking_lot::Mutex; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, marker::PhantomData, mem, ops::DerefMut, @@ -38,17 +38,43 @@ use trie_db::{RecordedForKey, TrieAccess}; const LOG_TARGET: &str = "trie-recorder"; +/// Stores all the information per transaction. +#[derive(Default)] +struct Transaction { + /// Stores transaction information about [`RecorderInner::recorded_keys`]. + /// + /// For each transaction we only store the `storage_root` and the old states per key. `None` + /// state means that the key wasn't recorded before. + recorded_keys: HashMap, Option>>, + /// Stores transaction information about [`RecorderInner::accessed_nodes`]. + /// + /// For each transaction we only store the hashes of added nodes. + accessed_nodes: HashSet, +} + /// The internals of [`Recorder`]. struct RecorderInner { /// The keys for that we have recorded the trie nodes and if we have recorded up to the value. - recorded_keys: HashMap, RecordedForKey>>, + /// + /// Mapping: `StorageRoot -> (Key -> RecordedForKey)`. + recorded_keys: HashMap, RecordedForKey>>, + + /// Currently active transactions. + transactions: Vec>, + /// The encoded nodes we accessed while recording. + /// + /// Mapping: `Hash(Node) -> Node`. accessed_nodes: HashMap>, } impl Default for RecorderInner { fn default() -> Self { - Self { recorded_keys: Default::default(), accessed_nodes: Default::default() } + Self { + recorded_keys: Default::default(), + accessed_nodes: Default::default(), + transactions: Vec::new(), + } } } @@ -83,6 +109,8 @@ impl Recorder { /// /// - `storage_root`: The storage root of the trie for which accesses are recorded. This is /// important when recording access to different tries at once (like top and child tries). + /// + /// NOTE: This locks a mutex that stays locked until the return value is dropped. #[inline] pub fn as_trie_recorder( &self, @@ -135,6 +163,72 @@ impl Recorder { mem::take(&mut *self.inner.lock()); self.encoded_size_estimation.store(0, Ordering::Relaxed); } + + /// Start a new transaction. + pub fn start_transaction(&self) { + let mut inner = self.inner.lock(); + inner.transactions.push(Default::default()); + } + + /// Rollback the latest transaction. + /// + /// Returns an error if there wasn't any active transaction. + pub fn rollback_transaction(&self) -> Result<(), ()> { + let mut inner = self.inner.lock(); + + // We locked `inner` and can just update the encoded size locally and then store it back to + // the atomic. 
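+ // (Recording also goes through this same lock — see the NOTE on `as_trie_recorder` —
+ // so nothing can grow the estimate while the rollback below recomputes it.)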
+ let mut new_encoded_size_estimation = self.encoded_size_estimation.load(Ordering::Relaxed); + let transaction = inner.transactions.pop().ok_or(())?; + + transaction.accessed_nodes.into_iter().for_each(|n| { + if let Some(old) = inner.accessed_nodes.remove(&n) { + new_encoded_size_estimation = + new_encoded_size_estimation.saturating_sub(old.encoded_size()); + } + }); + + transaction.recorded_keys.into_iter().for_each(|(storage_root, keys)| { + keys.into_iter().for_each(|(k, old_state)| { + if let Some(state) = old_state { + inner.recorded_keys.entry(storage_root).or_default().insert(k, state); + } else { + inner.recorded_keys.entry(storage_root).or_default().remove(&k); + } + }); + }); + + self.encoded_size_estimation + .store(new_encoded_size_estimation, Ordering::Relaxed); + + Ok(()) + } + + /// Commit the latest transaction. + /// + /// Returns an error if there wasn't any active transaction. + pub fn commit_transaction(&self) -> Result<(), ()> { + let mut inner = self.inner.lock(); + + let transaction = inner.transactions.pop().ok_or(())?; + + if let Some(parent_transaction) = inner.transactions.last_mut() { + parent_transaction.accessed_nodes.extend(transaction.accessed_nodes); + + transaction.recorded_keys.into_iter().for_each(|(storage_root, keys)| { + keys.into_iter().for_each(|(k, old_state)| { + parent_transaction + .recorded_keys + .entry(storage_root) + .or_default() + .entry(k) + .or_insert(old_state); + }) + }); + } + + Ok(()) + } } /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. @@ -145,6 +239,50 @@ struct TrieRecorder { _phantom: PhantomData, } +impl>> TrieRecorder { + /// Update the recorded keys entry for the given `full_key`. + fn update_recorded_keys(&mut self, full_key: &[u8], access: RecordedForKey) { + let inner = self.inner.deref_mut(); + + let entry = + inner.recorded_keys.entry(self.storage_root).or_default().entry(full_key.into()); + + let key = entry.key().clone(); + + // We don't need to update the record if we only accessed the `Hash` for the given + // `full_key`. Only `Value` access can be an upgrade from `Hash`. + let entry = if matches!(access, RecordedForKey::Value) { + entry.and_modify(|e| { + if let Some(tx) = inner.transactions.last_mut() { + // Store the previous state only once per transaction. + tx.recorded_keys + .entry(self.storage_root) + .or_default() + .entry(key.clone()) + .or_insert(Some(*e)); + } + + *e = access; + }) + } else { + entry + }; + + entry.or_insert_with(|| { + if let Some(tx) = inner.transactions.last_mut() { + // The key wasn't yet recorded, so there isn't any old state. 
+ tx.recorded_keys + .entry(self.storage_root) + .or_default() + .entry(key) + .or_insert(None); + } + + access + }); + } +} + impl>> trie_db::TrieRecorder for TrieRecorder { @@ -159,11 +297,17 @@ impl>> trie_db::TrieRecord "Recording node", ); - self.inner.accessed_nodes.entry(hash).or_insert_with(|| { + let inner = self.inner.deref_mut(); + + inner.accessed_nodes.entry(hash).or_insert_with(|| { let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); + if let Some(tx) = inner.transactions.last_mut() { + tx.accessed_nodes.insert(hash); + } + node }); }, @@ -174,11 +318,17 @@ impl>> trie_db::TrieRecord "Recording node", ); - self.inner.accessed_nodes.entry(hash).or_insert_with(|| { + let inner = self.inner.deref_mut(); + + inner.accessed_nodes.entry(hash).or_insert_with(|| { let node = encoded_node.into_owned(); encoded_size_update += node.encoded_size(); + if let Some(tx) = inner.transactions.last_mut() { + tx.accessed_nodes.insert(hash); + } + node }); }, @@ -190,21 +340,21 @@ impl>> trie_db::TrieRecord "Recording value", ); - self.inner.accessed_nodes.entry(hash).or_insert_with(|| { + let inner = self.inner.deref_mut(); + + inner.accessed_nodes.entry(hash).or_insert_with(|| { let value = value.into_owned(); encoded_size_update += value.encoded_size(); + if let Some(tx) = inner.transactions.last_mut() { + tx.accessed_nodes.insert(hash); + } + value }); - self.inner - .recorded_keys - .entry(self.storage_root) - .or_default() - .entry(full_key.to_vec()) - .and_modify(|e| *e = RecordedForKey::Value) - .or_insert(RecordedForKey::Value); + self.update_recorded_keys(full_key, RecordedForKey::Value); }, TrieAccess::Hash { full_key } => { tracing::trace!( @@ -215,12 +365,7 @@ impl>> trie_db::TrieRecord // We don't need to update the `encoded_size_update` as the hash was already // accounted for by the recorded node that holds the hash. - self.inner - .recorded_keys - .entry(self.storage_root) - .or_default() - .entry(full_key.to_vec()) - .or_insert(RecordedForKey::Hash); + self.update_recorded_keys(full_key, RecordedForKey::Hash); }, TrieAccess::NonExisting { full_key } => { tracing::trace!( @@ -232,13 +377,7 @@ impl>> trie_db::TrieRecord // Non-existing access means we recorded all trie nodes up to the value. // Not the actual value, as it doesn't exist, but all trie nodes to know // that the value doesn't exist in the trie. 
- self.inner - .recorded_keys - .entry(self.storage_root) - .or_default() - .entry(full_key.to_vec()) - .and_modify(|e| *e = RecordedForKey::Value) - .or_insert(RecordedForKey::Value); + self.update_recorded_keys(full_key, RecordedForKey::Value); }, }; @@ -256,14 +395,15 @@ impl>> trie_db::TrieRecord #[cfg(test)] mod tests { - use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; + use super::*; + use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; type MemoryDB = crate::MemoryDB; type Layout = crate::LayoutV1; type Recorder = super::Recorder; const TEST_DATA: &[(&[u8], &[u8])] = - &[(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3"), (b"key4", b"val4")]; + &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])]; fn create_trie() -> (MemoryDB, TrieHash) { let mut db = MemoryDB::default(); @@ -300,4 +440,271 @@ mod tests { let trie = TrieDBBuilder::::new(&memory_db, &root).build(); assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); } + + #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] + struct RecorderStats { + accessed_nodes: usize, + recorded_keys: usize, + estimated_size: usize, + } + + impl RecorderStats { + fn extract(recorder: &Recorder) -> Self { + let inner = recorder.inner.lock(); + + let recorded_keys = + inner.recorded_keys.iter().flat_map(|(_, keys)| keys.keys()).count(); + + Self { + recorded_keys, + accessed_nodes: inner.accessed_nodes.len(), + estimated_size: recorder.estimate_encoded_size(), + } + } + } + + #[test] + fn recorder_transactions_rollback_work() { + let (db, root) = create_trie(); + + let recorder = Recorder::default(); + let mut stats = vec![RecorderStats::default()]; + + for i in 0..4 { + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap()); + } + stats.push(RecorderStats::extract(&recorder)); + } + + assert_eq!(4, recorder.inner.lock().transactions.len()); + + for i in 0..5 { + assert_eq!(stats[4 - i], RecorderStats::extract(&recorder)); + + let storage_proof = recorder.to_storage_proof(); + let memory_db: MemoryDB = storage_proof.into_memory_db(); + + // Check that we recorded the required data + let trie = TrieDBBuilder::::new(&memory_db, &root).build(); + + // Check that the required data is still present. + for a in 0..4 { + if a < 4 - i { + assert_eq!(TEST_DATA[a].1.to_vec(), trie.get(TEST_DATA[a].0).unwrap().unwrap()); + } else { + // All the data that we already rolled back, should be gone! 
+ assert!(trie.get(TEST_DATA[a].0).is_err()); + } + } + + if i < 4 { + recorder.rollback_transaction().unwrap(); + } + } + + assert_eq!(0, recorder.inner.lock().transactions.len()); + } + + #[test] + fn recorder_transactions_commit_work() { + let (db, root) = create_trie(); + + let recorder = Recorder::default(); + + for i in 0..4 { + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap()); + } + } + + let stats = RecorderStats::extract(&recorder); + assert_eq!(4, recorder.inner.lock().transactions.len()); + + for _ in 0..4 { + recorder.commit_transaction().unwrap(); + } + assert_eq!(0, recorder.inner.lock().transactions.len()); + assert_eq!(stats, RecorderStats::extract(&recorder)); + + let storage_proof = recorder.to_storage_proof(); + let memory_db: MemoryDB = storage_proof.into_memory_db(); + + // Check that we recorded the required data + let trie = TrieDBBuilder::::new(&memory_db, &root).build(); + + // Check that the required data is still present. + for i in 0..4 { + assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap()); + } + } + + #[test] + fn recorder_transactions_commit_and_rollback_work() { + let (db, root) = create_trie(); + + let recorder = Recorder::default(); + + for i in 0..2 { + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap()); + } + } + + recorder.rollback_transaction().unwrap(); + + for i in 2..4 { + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap()); + } + } + + recorder.rollback_transaction().unwrap(); + + assert_eq!(2, recorder.inner.lock().transactions.len()); + + for _ in 0..2 { + recorder.commit_transaction().unwrap(); + } + + assert_eq!(0, recorder.inner.lock().transactions.len()); + + let storage_proof = recorder.to_storage_proof(); + let memory_db: MemoryDB = storage_proof.into_memory_db(); + + // Check that we recorded the required data + let trie = TrieDBBuilder::::new(&memory_db, &root).build(); + + // Check that the required data is still present. 
+ for i in 0..4 { + if i % 2 == 0 { + assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap()); + } else { + assert!(trie.get(TEST_DATA[i].0).is_err()); + } + } + } + + #[test] + fn recorder_transaction_accessed_keys_works() { + let key = TEST_DATA[0].0; + let (db, root) = create_trie(); + + let recorder = Recorder::default(); + + { + let trie_recorder = recorder.as_trie_recorder(root); + assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::None)); + } + + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!( + sp_core::Blake2Hasher::hash(TEST_DATA[0].1), + trie.get_hash(TEST_DATA[0].0).unwrap().unwrap() + ); + assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::Hash)); + } + + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); + assert!(matches!( + trie_recorder.trie_nodes_recorded_for_key(key), + RecordedForKey::Value, + )); + } + + recorder.rollback_transaction().unwrap(); + { + let trie_recorder = recorder.as_trie_recorder(root); + assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::Hash)); + } + + recorder.rollback_transaction().unwrap(); + { + let trie_recorder = recorder.as_trie_recorder(root); + assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::None)); + } + + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); + assert!(matches!( + trie_recorder.trie_nodes_recorded_for_key(key), + RecordedForKey::Value, + )); + } + + recorder.start_transaction(); + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + assert_eq!( + sp_core::Blake2Hasher::hash(TEST_DATA[0].1), + trie.get_hash(TEST_DATA[0].0).unwrap().unwrap() + ); + assert!(matches!( + trie_recorder.trie_nodes_recorded_for_key(key), + RecordedForKey::Value + )); + } + + recorder.rollback_transaction().unwrap(); + { + let trie_recorder = recorder.as_trie_recorder(root); + assert!(matches!( + trie_recorder.trie_nodes_recorded_for_key(key), + RecordedForKey::Value + )); + } + + recorder.rollback_transaction().unwrap(); + { + let trie_recorder = recorder.as_trie_recorder(root); + assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::None)); + } + } } diff --git a/primitives/ver/src/lib.rs b/primitives/ver/src/lib.rs index 92b3c6db02174..790a573a22238 100644 --- a/primitives/ver/src/lib.rs +++ b/primitives/ver/src/lib.rs @@ -5,12 +5,14 @@ use codec::{Decode, Encode}; use sp_core::crypto::key_types::AURA; #[cfg(feature = "helpers")] use sp_core::sr25519; -use sp_core::ShufflingSeed; +use sp_core::{ShufflingSeed}; +use sp_core::hash::{H256, H512}; +use sp_core::hexdisplay::AsBytesRef; use sp_inherents::{InherentData, InherentIdentifier}; #[cfg(feature = "helpers")] -use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; +pub use sp_core::sr25519::vrf::{VrfOutput, VrfProof, 
VrfSignature, VrfTranscript}; #[cfg(feature = "helpers")] -use sp_keystore::SyncCryptoStore; +use sp_keystore::{Keystore}; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId, RuntimeString}; use sp_std::vec::Vec; @@ -18,6 +20,7 @@ use sp_std::vec::Vec; pub const RANDOM_SEED_INHERENT_IDENTIFIER: InherentIdentifier = *b"blckseed"; pub const VER_ENGINE_ID: ConsensusEngineId = *b"_VER"; + #[derive(Clone, Encode, Decode)] pub struct PreDigestVer { pub prev_extrisnics: Vec<::Extrinsic>, @@ -35,7 +38,7 @@ pub fn extract_inherent_data(data: &InherentData) -> Result( +pub fn calculate_next_seed( keystore: &T, public_key: &sr25519::Public, prev_seed: &ShufflingSeed, @@ -44,21 +47,20 @@ pub fn calculate_next_seed( } #[cfg(feature = "helpers")] -pub fn calculate_next_seed_from_bytes( +pub fn calculate_next_seed_from_bytes( keystore: &T, public_key: &sr25519::Public, prev_seed: Vec, ) -> Option { - let transcript = VRFTranscriptData { - label: b"shuffling_seed", - items: vec![("prev_seed", VRFTranscriptValue::Bytes(prev_seed))], - }; - SyncCryptoStore::sr25519_vrf_sign(keystore, AURA, public_key, transcript.clone()) + let transcript = VrfTranscript::new(b"shuffling_seed", &[(b"prev_seed",&prev_seed)]); + Keystore::sr25519_vrf_sign(keystore, AURA, public_key, &transcript) .ok() .flatten() - .map(|sig| ShufflingSeed { - seed: sig.output.to_bytes().into(), - proof: sig.proof.to_bytes().into(), + .map(|sig| { + ShufflingSeed { + seed: H256::from_slice(sig.output.encode().as_bytes_ref()), + proof: H512::from_slice(sig.proof.encode().as_bytes_ref()), + } }) } diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 7d32c540f9a10..6b425aa7b9d27 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true } parity-wasm = { version = "0.45", optional = true } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } thiserror = { version = "1.0.30", optional = true } sp-core-hashing-proc-macro = { version = "5.0.0", path = "../core/hashing/proc-macro" } diff --git a/primitives/version/proc-macro/Cargo.toml b/primitives/version/proc-macro/Cargo.toml index abe9e579a20da..aac41dd43dd08 100644 --- a/primitives/version/proc-macro/Cargo.toml +++ b/primitives/version/proc-macro/Cargo.toml @@ -17,9 +17,9 @@ proc-macro = true [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", features = [ "derive" ] } -proc-macro2 = "1.0.37" -quote = "1.0.10" -syn = { version = "1.0.98", features = ["full", "fold", "extra-traits", "visit"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full", "fold", "extra-traits", "visit"] } [dev-dependencies] sp-version = { version = "5.0.0", path = ".." 
} diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 441d66f0fb064..c45ea75830ef7 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } wasmi = { version = "0.13.2", optional = true } -wasmtime = { version = "6.0.1", default-features = false, optional = true } +wasmtime = { version = "6.0.2", default-features = false, optional = true } anyhow = { version = "1.0.68", optional = true } sp-std = { version = "5.0.0", default-features = false, path = "../std" } diff --git a/primitives/weights/Cargo.toml b/primitives/weights/Cargo.toml index 2368b913b3b2f..af1b5647e24d7 100644 --- a/primitives/weights/Cargo.toml +++ b/primitives/weights/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true, features = ["derive"] } smallvec = "1.8.0" sp-arithmetic = { version = "6.0.0", default-features = false, path = "../arithmetic" } diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index 8a842307355bf..55d9104b00b77 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -18,6 +18,9 @@ //! # Primitives for transaction weighting. #![cfg_attr(not(feature = "std"), no_std)] +// TODO remove once `OldWeight` is gone. I dont know why this is needed, maybe by one of the macros +// of `OldWeight`. +#![allow(deprecated)] extern crate self as sp_weights; @@ -30,7 +33,7 @@ use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use smallvec::SmallVec; use sp_arithmetic::{ - traits::{BaseArithmetic, SaturatedConversion, Saturating, Unsigned}, + traits::{BaseArithmetic, SaturatedConversion, Unsigned}, Perbill, }; use sp_core::Get; @@ -68,6 +71,7 @@ pub mod constants { )] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(transparent))] +#[deprecated(note = "Will be removed soon; use `Weight` instead.")] pub struct OldWeight(pub u64); /// The weight of database operations that the runtime can invoke. @@ -118,9 +122,77 @@ pub struct WeightToFeeCoefficient { pub degree: u8, } -/// A list of coefficients that represent one polynomial. +impl WeightToFeeCoefficient +where + Balance: BaseArithmetic + From + Copy + Unsigned, +{ + /// Evaluate the term at `x` and saturatingly amalgamate into `result`. + /// + /// The unsigned value for the term is calculated as: + /// ```ignore + /// (frac * x^(degree) + integer * x^(degree)) + /// ``` + /// Depending on the value of `negative`, it is added or subtracted from the `result`. + pub fn saturating_eval(&self, mut result: Balance, x: Balance) -> Balance { + let power = x.saturating_pow(self.degree.into()); + + let frac = self.coeff_frac * power; // Overflow safe. + let integer = self.coeff_integer.saturating_mul(power); + // Do not add them together here to avoid an underflow. 
+ + if self.negative { + result = result.saturating_sub(frac); + result = result.saturating_sub(integer); + } else { + result = result.saturating_add(frac); + result = result.saturating_add(integer); + } + + result + } +} + +/// A list of coefficients that represent a polynomial. pub type WeightToFeeCoefficients = SmallVec<[WeightToFeeCoefficient; 4]>; +/// A list of coefficients that represent a polynomial. +/// +/// Can be [eval](Self::eval)uated at a specific `u64` to get the fee. The evaluations happens by +/// summing up all term [results](`WeightToFeeCoefficient::saturating_eval`). The order of the +/// coefficients matters since it uses saturating arithmetic. This struct does therefore not model a +/// polynomial in the mathematical sense (polynomial ring). +/// +/// For visualization purposes, the formulas of the unsigned terms look like: +/// +/// ```ignore +/// (c[0].frac * x^(c[0].degree) + c[0].integer * x^(c[0].degree)) +/// (c[1].frac * x^(c[1].degree) + c[1].integer * x^(c[1].degree)) +/// ... +/// ``` +/// Depending on the value of `c[i].negative`, each term is added or subtracted from the result. +/// The result is initialized as zero. +pub struct FeePolynomial { + coefficients: SmallVec<[WeightToFeeCoefficient; 4]>, +} + +impl From> for FeePolynomial { + fn from(coefficients: WeightToFeeCoefficients) -> Self { + Self { coefficients } + } +} + +impl FeePolynomial +where + Balance: BaseArithmetic + From + Copy + Unsigned, +{ + /// Evaluate the polynomial at a specific `x`. + pub fn eval(&self, x: u64) -> Balance { + self.coefficients.iter().fold(Balance::zero(), |acc, term| { + term.saturating_eval(acc, Balance::saturated_from(x)) + }) + } +} + /// A trait that describes the weight to fee calculation. pub trait WeightToFee { /// The type that is returned as result from calculation. @@ -157,27 +229,8 @@ where /// This should not be overridden in most circumstances. Calculation is done in the /// `Balance` type and never overflows. All evaluation is saturating. fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::polynomial() - .iter() - .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { - let w = Self::Balance::saturated_from(weight.ref_time()) - .saturating_pow(args.degree.into()); - - // The sum could get negative. Therefore we only sum with the accumulator. - // The Perbill Mul implementation is non overflowing. - let frac = args.coeff_frac * w; - let integer = args.coeff_integer.saturating_mul(w); - - if args.negative { - acc = acc.saturating_sub(frac); - acc = acc.saturating_sub(integer); - } else { - acc = acc.saturating_add(frac); - acc = acc.saturating_add(integer); - } - - acc - }) + let poly: FeePolynomial = Self::polynomial().into(); + poly.eval(weight.ref_time()) } } diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index ca137145920bb..cb1cedf18ce87 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -35,12 +35,6 @@ pub struct Weight { proof_size: u64, } -impl From for Weight { - fn from(old: OldWeight) -> Self { - Weight::from_parts(old.0, 0) - } -} - impl Weight { /// Set the reference time part of the weight. pub const fn set_ref_time(mut self, c: u64) -> Self { @@ -74,6 +68,7 @@ impl Weight { &mut self.proof_size } + /// The maximal weight in all dimensions. pub const MAX: Self = Self { ref_time: u64::MAX, proof_size: u64::MAX }; /// Get the conservative min of `self` and `other` weight. 
diff --git a/scripts/ci/deny.toml b/scripts/ci/deny.toml index bc41f746cbb1e..f932875937606 100644 --- a/scripts/ci/deny.toml +++ b/scripts/ci/deny.toml @@ -1,87 +1,19 @@ -# This template contains all of the possible sections and their default values - -# Note that all fields that take a lint level have these possible values: -# * deny - An error will be produced and the check will fail -# * warn - A warning will be produced, but the check will not fail -# * allow - No warning or error will be produced, though in some cases a note -# will be - -# The values provided in this template are the default values that will be used -# when any section or field is not specified in your own configuration - -# If 1 or more target triples (and optionally, target_features) are specified, -# only the specified targets will be checked when running `cargo deny check`. -# This means, if a particular package is only ever used as a target specific -# dependency, such as, for example, the `nix` crate only being used via the -# `target_family = "unix"` configuration, that only having windows targets in -# this list would mean the nix crate, as well as any of its exclusive -# dependencies not shared by any other crates, would be ignored, as the target -# list here is effectively saying which targets you are building for. -targets = [ - # The triple can be any string, but only the target triples built in to - # rustc (as of 1.40) can be checked against actual config expressions - #{ triple = "x86_64-unknown-linux-musl" }, - # You can also specify which target_features you promise are enabled for a - # particular target. target_features are currently not validated against - # the actual valid features supported by the target architecture. - #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, -] - -# This section is considered when running `cargo deny check advisories` -# More documentation for the advisories section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html -[advisories] -# The path where the advisory database is cloned/fetched into -db-path = "~/.cargo/advisory-db" -# The url of the advisory database to use -db-url = "https://github.com/rustsec/advisory-db" -# The lint level for security vulnerabilities -vulnerability = "deny" -# The lint level for unmaintained crates -unmaintained = "warn" -# The lint level for crates that have been yanked from their source registry -yanked = "warn" -# The lint level for crates with security notices. Note that as of -# 2019-12-17 there are no security notice advisories in -# https://github.com/rustsec/advisory-db -notice = "warn" -# A list of advisory IDs to ignore. Note that ignored advisories will still -# output a note when they are encountered. -ignore = [ - #"RUSTSEC-0000-0000", -] -# Threshold for security vulnerabilities, any vulnerability with a CVSS score -# lower than the range specified will be ignored. Note that ignored advisories -# will still output a note when they are encountered. 
-# * None - CVSS Score 0.0 -# * Low - CVSS Score 0.1 - 3.9 -# * Medium - CVSS Score 4.0 - 6.9 -# * High - CVSS Score 7.0 - 8.9 -# * Critical - CVSS Score 9.0 - 10.0 -#severity-threshold = - -# This section is considered when running `cargo deny check licenses` -# More documentation for the licenses section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] # The lint level for crates which do not have a detectable license unlicensed = "deny" -# List of explictly allowed licenses +# List of explicitly allowed licenses # See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. +# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. allow = [ - #"MIT", - #"Apache-2.0", - #"Apache-2.0 WITH LLVM-exception", + "MPL-2.0", ] -# List of explictly disallowed licenses +# List of explicitly disallowed licenses # See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. +# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. deny = [ - #"Nokia", ] # Lint level for licenses considered copyleft -copyleft = "allow" +copyleft = "deny" # Blanket approval or denial for OSI-approved or FSF Free/Libre licenses # * both - The license will be approved if it is both OSI-approved *AND* FSF # * either - The license will be approved if it is either OSI-approved *OR* FSF @@ -98,13 +30,69 @@ default = "deny" # The higher the value, the more closely the license text must be to the # canonical license text of a valid SPDX license file. # [possible values: any between 0.0 and 1.0]. -confidence-threshold = 0.9 +confidence-threshold = 0.8 # Allow 1 or more licenses on a per-crate basis, so that particular licenses # aren't accepted for every possible crate as with the normal allow list exceptions = [ - # Each entry is the crate and version constraint, and its specific allow - # list - #{ allow = ["Zlib"], name = "adler32", version = "*" }, + # Each entry is the crate and version constraint, and its specific allow list + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-authority-discovery" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-basic-authorship" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-block-builder" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec-derive" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-db" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-aura" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe" }, + { allow 
= ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-epochs" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-manual-seal" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-pow" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-slots" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-common" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmi" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmtime" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-informant" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-bitswap" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-gossip" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-light" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-sync" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-transactions" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-offchain" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-peerset" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-proposer-metrics" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-server" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-spec-v2" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-runtime-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-state-db" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-storage-monitor" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-sysinfo" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-telemetry" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-tracing" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "subkey" }, ] # Some crates don't have (easily) machine readable licensing information, @@ -113,10 +101,8 @@ exceptions = [ [[licenses.clarify]] # The name of the crate the clarification applies 
to name = "ring" -# THe optional version constraint for the crate -#version = "*" # The SPDX expression for the license requirements of the crate -expression = "OpenSSL" +expression = "MIT AND ISC AND OpenSSL" # One or more files in the crate's source used as the "source of truth" for # the license expression. If the contents match, the clarification will be used # when running the license check, otherwise the clarification will be ignored @@ -126,67 +112,3 @@ license-files = [ # Each entry is a crate relative path, and the (opaque) hash of its contents { path = "LICENSE", hash = 0xbd0eed23 } ] -[[licenses.clarify]] -name = "webpki" -expression = "ISC" -license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] - -[licenses.private] -# If true, ignores workspace crates that aren't published, or are only -# published to private registries -ignore = false -# One or more private registries that you might publish crates to, if a crate -# is only published to private registries, and ignore is true, the crate will -# not have its license(s) checked -registries = [ - #"https://sekretz.com/registry -] - -# This section is considered when running `cargo deny check bans`. -# More documentation about the 'bans' section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html -[bans] -# Lint level for when multiple versions of the same crate are detected -multiple-versions = "warn" -# The graph highlighting used when creating dotgraphs for crates -# with multiple versions -# * lowest-version - The path to the lowest versioned duplicate is highlighted -# * simplest-path - The path to the version with the fewest edges is highlighted -# * all - Both lowest-version and simplest-path are used -highlight = "lowest-version" -# List of crates that are allowed. Use with care! -allow = [ - #{ name = "ansi_term", version = "=0.11.0" }, -] -# List of crates to deny -deny = [ - # Each entry the name of a crate and a version range. If version is - # not specified, all versions will be matched. -] -# Certain crates/versions that will be skipped when doing duplicate detection. -skip = [ - #{ name = "ansi_term", version = "=0.11.0" }, -] -# Similarly to `skip` allows you to skip certain crates during duplicate -# detection. Unlike skip, it also includes the entire tree of transitive -# dependencies starting at the specified crate, up to a certain depth, which is -# by default infinite -skip-tree = [ - #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, -] - -# This section is considered when running `cargo deny check sources`. -# More documentation about the 'sources' section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html -[sources] -# Lint level for what to happen when a crate from a crate registry that is not -# in the allow list is encountered -unknown-registry = "deny" -# Lint level for what to happen when a crate from a git repository that is not -# in the allow list is encountered -unknown-git = "warn" -# List of URLs for allowed crate registries. Defaults to the crates.io index -# if not specified. If it is specified but empty, no registries are allowed. 
-allow-registry = ["https://github.com/rust-lang/crates.io-index"] -# List of URLs for allowed Git repositories -allow-git = [] diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index 0a36599c70e24..0c04abfd5aa30 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -12,6 +12,8 @@ extends: - .docker-env - .test-refs-no-trigger-prs-only + variables: + RUSTFLAGS: "-D warnings" script: - git clone --depth=1 @@ -35,6 +37,10 @@ check-dependent-polkadot: COMPANION_OVERRIDES: | substrate: polkadot-v* polkadot: release-v* + # enable the same feature flags as polkadot's test-linux-stable + COMPANION_CHECK_COMMAND: > + cargo check --all-targets --workspace + --features=runtime-benchmarks,runtime-metrics,try-runtime rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ #PRs @@ -148,7 +154,6 @@ build-rustdoc: variables: SKIP_WASM_BUILD: 1 DOC_INDEX_PAGE: "sc_service/index.html" # default redirected page - RUSTY_CACHIER_TOOLCHAIN: nightly # this variable gets overriden by "rusty-cachier environment inject", use the value as default CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" artifacts: @@ -163,7 +168,7 @@ build-rustdoc: artifacts: false script: - rusty-cachier snapshot create - - time cargo +nightly doc --locked --workspace --all-features --verbose --no-deps + - time cargo doc --locked --workspace --all-features --verbose --no-deps - rm -f $CARGO_TARGET_DIR/doc/.lock - mv $CARGO_TARGET_DIR/doc ./crate-docs # FIXME: remove me after CI image gets nonroot diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml index 188a093864cc0..5a69e876ac9ff 100644 --- a/scripts/ci/gitlab/pipeline/publish.yml +++ b/scripts/ci/gitlab/pipeline/publish.yml @@ -19,7 +19,7 @@ script: - test "$DOCKER_USER" -a "$DOCKER_PASS" || ( echo "no docker credentials provided"; exit 1 ) - - buildah bud + - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" @@ -29,9 +29,9 @@ --file "$DOCKERFILE" . 
- echo "$DOCKER_PASS" | buildah login --username "$DOCKER_USER" --password-stdin docker.io - - buildah info - - buildah push --format=v2s2 "$IMAGE_NAME:$VERSION" - - buildah push --format=v2s2 "$IMAGE_NAME:latest" + - $BUILDAH_COMMAND info + - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$VERSION" + - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:latest" after_script: - buildah logout --all - echo "SUBSTRATE_IMAGE_NAME=${IMAGE_NAME}" | tee -a ./artifacts/$PRODUCT/build.env @@ -52,7 +52,6 @@ stage: publish extends: - .kubernetes-env - - .publish-refs variables: CI_IMAGE: paritytech/dockerhub-description DOCKERHUB_REPOSITORY: parity/$PRODUCT @@ -60,9 +59,11 @@ DOCKER_PASSWORD: $Docker_Hub_Pass_Parity README_FILEPATH: $CI_PROJECT_DIR/scripts/ci/docker/$PRODUCT.Dockerfile.README.md rules: - - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "push" changes: - scripts/ci/docker/$PRODUCT.Dockerfile.README.md + before_script: + - echo script: - cd / && sh entrypoint.sh diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index fd031d9aa56c3..c73268caa79af 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -21,27 +21,30 @@ find-fail-ci-phrase: exit 0; fi -cargo-deny: +cargo-deny-licenses: stage: test extends: - .docker-env - - .nightly-pipeline + - .test-refs + variables: + CARGO_DENY_CMD: "cargo deny --all-features check licenses -c ./scripts/ci/deny.toml" script: - rusty-cachier snapshot create - - cargo deny check --hide-inclusion-graph -c ./scripts/ci/deny.toml + - $CARGO_DENY_CMD --hide-inclusion-graph - rusty-cachier cache upload after_script: - !reference [.rusty-cachier, after_script] - echo "___The complete log is in the artifacts___" - - cargo deny check -c ./scripts/ci/deny.toml 2> deny.log + - $CARGO_DENY_CMD 2> deny.log + - if [ $CI_JOB_STATUS != 'success' ]; then + echo 'Please check license of your crate or add an exception to scripts/ci/deny.toml'; + fi artifacts: name: $CI_COMMIT_SHORT_SHA expire_in: 3 days when: always paths: - deny.log - # FIXME: Temporarily allow to fail. 
- allow_failure: true cargo-fmt: stage: test @@ -61,21 +64,17 @@ cargo-clippy: needs: - job: cargo-fmt artifacts: false - variables: - RUSTY_CACHIER_TOOLCHAIN: nightly extends: - .docker-env - .test-refs script: - rusty-cachier snapshot create - - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo +nightly clippy --all-targets + - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --locked --all-targets - rusty-cachier cache upload cargo-check-benches: stage: test variables: - # Override to use nightly toolchain - RUSTY_CACHIER_TOOLCHAIN: "nightly" CI_JOB_NAME: "cargo-check-benches" extends: - .docker-env @@ -90,15 +89,13 @@ cargo-check-benches: - !reference [.rusty-cachier, before_script] - !reference [.pipeline-stopper-vars, script] # merges in the master branch on PRs - - | - export BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech/substrate/pulls/${$CI_COMMIT_REF_NAME} | jq .base.ref) - - if [ $CI_COMMIT_REF_NAME != "master" ]; then - git fetch origin +${BASE}:${BASE}; - git fetch origin +$CI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME; - git checkout ${BASE}; + - 'if [ $CI_COMMIT_REF_NAME != "master" ]; then + BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech/substrate/pulls/${CI_COMMIT_REF_NAME} | jq -r .base.ref); + printf "Merging base branch %s\n" "${BASE:=master}"; git config user.email "ci@gitlab.parity.io"; - git merge $CI_COMMIT_REF_NAME --verbose --no-edit; - fi + git fetch origin "refs/heads/${BASE}"; + git merge --verbose --no-edit FETCH_HEAD; + fi' parallel: 2 script: - rusty-cachier snapshot create @@ -107,7 +104,7 @@ cargo-check-benches: - echo "___Running benchmarks___"; - case ${CI_NODE_INDEX} in 1) - SKIP_WASM_BUILD=1 time cargo +nightly check --locked --benches --all; + SKIP_WASM_BUILD=1 time cargo check --locked --benches --all; cargo run --locked --release -p node-bench -- ::trie::read::small --json | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; echo "___Uploading cache for rusty-cachier___"; @@ -204,10 +201,10 @@ test-linux-stable: variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" # Ensure we run the UI tests. RUN_UI_TESTS: 1 # needed for rusty-cachier to keep cache in test-linux-stable folder and not in test-linux-stable-1/3 @@ -238,17 +235,19 @@ test-frame-support: variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" # Ensure we run the UI tests. 
RUN_UI_TESTS: 1 script: - rusty-cachier snapshot create - - time cargo test --locked -p frame-support-test --features=frame-feature-testing,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec - - time cargo test --locked -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec + - cat /cargo_target_dir/debug/.fingerprint/memory_units-759eddf317490d2b/lib-memory_units.json || true + - time cargo test --verbose --locked -p frame-support-test --features=frame-feature-testing,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet + - time cargo test --verbose --locked -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout + - cat /cargo_target_dir/debug/.fingerprint/memory_units-759eddf317490d2b/lib-memory_units.json || true - rusty-cachier cache upload # This job runs tests that don't work with cargo-nextest in test-linux-stable @@ -260,10 +259,10 @@ test-linux-stable-extra: variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" # Ensure we run the UI tests. RUN_UI_TESTS: 1 script: @@ -285,10 +284,10 @@ quick-benchmarks: variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: "full" WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" script: - rusty-cachier snapshot create - time cargo run --locked --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 @@ -301,17 +300,16 @@ test-frame-examples-compile-to-wasm: - .docker-env - .test-refs variables: - RUSTY_CACHIER_TOOLCHAIN: nightly # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y" + RUSTFLAGS: "-C debug-assertions" RUST_BACKTRACE: 1 script: - rusty-cachier snapshot create - cd ./frame/examples/offchain-worker/ - - cargo +nightly build --locked --target=wasm32-unknown-unknown --no-default-features + - cargo build --locked --target=wasm32-unknown-unknown --no-default-features - cd ../basic - - cargo +nightly build --locked --target=wasm32-unknown-unknown --no-default-features + - cargo build --locked --target=wasm32-unknown-unknown --no-default-features - rusty-cachier cache upload test-linux-stable-int: @@ -323,10 +321,10 @@ test-linux-stable-int: variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. 
- RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" # Ensure we run the UI tests. RUN_UI_TESTS: 1 script: @@ -344,8 +342,6 @@ check-tracing: needs: - job: test-linux-stable-int artifacts: false - variables: - RUSTY_CACHIER_TOOLCHAIN: nightly extends: - .docker-env - .test-refs @@ -353,8 +349,8 @@ check-tracing: script: - rusty-cachier snapshot create # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - - time cargo +nightly test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features - - time cargo +nightly test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing + - time cargo test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features + - time cargo test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing - rusty-cachier cache upload # more information about this job can be found here: @@ -369,17 +365,16 @@ test-full-crypto-feature: - .docker-env - .test-refs variables: - RUSTY_CACHIER_TOOLCHAIN: nightly # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y" + RUSTFLAGS: "-C debug-assertions" RUST_BACKTRACE: 1 script: - rusty-cachier snapshot create - cd primitives/core/ - - time cargo +nightly build --locked --verbose --no-default-features --features full_crypto + - time cargo build --locked --verbose --no-default-features --features full_crypto - cd ../application-crypto - - time cargo +nightly build --locked --verbose --no-default-features --features full_crypto + - time cargo build --locked --verbose --no-default-features --features full_crypto - rusty-cachier cache upload check-rustdoc: @@ -388,12 +383,11 @@ check-rustdoc: - .docker-env - .test-refs variables: - RUSTY_CACHIER_TOOLCHAIN: nightly SKIP_WASM_BUILD: 1 RUSTDOCFLAGS: "-Dwarnings" script: - rusty-cachier snapshot create - - time cargo +nightly doc --locked --workspace --all-features --verbose --no-deps + - time cargo doc --locked --workspace --all-features --verbose --no-deps - rusty-cachier cache upload cargo-check-each-crate: @@ -434,3 +428,39 @@ cargo-check-each-crate-macos: - time cargo check --workspace --locked tags: - osx + +cargo-hfuzz: + stage: test + extends: + - .docker-env + - .test-refs + - .pipeline-stopper-artifacts + variables: + # max 10s per iteration, 60s per file + HFUZZ_RUN_ARGS: > + --exit_upon_crash + --exit_code_upon_crash 1 + --timeout 10 + --run_time 60 + artifacts: + name: "hfuzz-$CI_COMMIT_SHORT_SHA" + expire_in: 7 days + when: on_failure + paths: + - primitives/arithmetic/fuzzer/hfuzz_workspace/ + script: + # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: + # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian + # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr + # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling rusty-cachier's absolute CARGO_TARGET_DIR + - | + cat >>Cargo.toml <"] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + 
+[dependencies] +substrate-rpc-client = { path = "../../utils/frame/rpc/client" } +assert_cmd = "2.0.10" +nix = "0.26.2" +regex = "1.7.3" +tempfile = "3.5.0" +tokio = { version = "1.22.0", features = ["full"] } +node-primitives = { path = "../../bin/node/primitives" } +futures = "0.3.28" diff --git a/test-utils/cli/src/lib.rs b/test-utils/cli/src/lib.rs new file mode 100644 index 0000000000000..1846f19090b33 --- /dev/null +++ b/test-utils/cli/src/lib.rs @@ -0,0 +1,302 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#![cfg(unix)] + +use assert_cmd::cargo::cargo_bin; +use nix::{ + sys::signal::{kill, Signal, Signal::SIGINT}, + unistd::Pid, +}; +use node_primitives::{Hash, Header}; +use regex::Regex; +use std::{ + env, + io::{BufRead, BufReader, Read}, + ops::{Deref, DerefMut}, + path::{Path, PathBuf}, + process::{self, Child, Command}, + time::Duration, +}; +use tokio::io::{AsyncBufReadExt, AsyncRead}; + +/// Starts a new Substrate node in development mode with a temporary chain. +/// +/// This function creates a new Substrate node using the `substrate` binary. +/// It configures the node to run in development mode (`--dev`) with a temporary chain (`--tmp`), +/// sets the WebSocket port to 45789 (`--ws-port=45789`). +/// +/// # Returns +/// +/// A [`Child`] process representing the spawned Substrate node. +/// +/// # Panics +/// +/// This function will panic if the `substrate` binary is not found or if the node fails to start. +/// +/// # Examples +/// +/// ```ignore +/// use my_crate::start_node; +/// +/// let child = start_node(); +/// // Interact with the Substrate node using the WebSocket port 45789. +/// // When done, the node will be killed when the `child` is dropped. +/// ``` +/// +/// [`Child`]: std::process::Child +pub fn start_node() -> Child { + Command::new(cargo_bin("substrate")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(&["--dev", "--tmp", "--ws-port=45789", "--no-hardware-benchmarks"]) + .spawn() + .unwrap() +} + +/// Builds the Substrate project using the provided arguments. +/// +/// This function reads the CARGO_MANIFEST_DIR environment variable to find the root workspace +/// directory. It then runs the `cargo b` command in the root directory with the specified +/// arguments. +/// +/// This can be useful for building the Substrate binary with a desired set of features prior +/// to using the binary in a CLI test. +/// +/// # Arguments +/// +/// * `args: &[&str]` - A slice of string references representing the arguments to pass to the +/// `cargo b` command. +/// +/// # Panics +/// +/// This function will panic if: +/// +/// * The CARGO_MANIFEST_DIR environment variable is not set. +/// * The root workspace directory cannot be determined. +/// * The 'cargo b' command fails to execute. 
+/// * The 'cargo b' command returns a non-successful status. +/// +/// # Examples +/// +/// ```ignore +/// build_substrate(&["--features=try-runtime"]); +/// ``` +pub fn build_substrate(args: &[&str]) { + // Get the root workspace directory from the CARGO_MANIFEST_DIR environment variable + let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set"); + let root_dir = std::path::Path::new(&manifest_dir) + .parent() + .expect("Failed to find root workspace directory"); + let output = Command::new("cargo") + .arg("build") + .args(args) + .current_dir(root_dir) + .output() + .expect(format!("Failed to execute 'cargo b' with args {:?}'", args).as_str()); + + if !output.status.success() { + panic!( + "Failed to execute 'cargo b' with args {:?}': \n{}", + args, + String::from_utf8_lossy(&output.stderr) + ); + } +} + +/// Takes a readable tokio stream (e.g. from a child process `ChildStderr` or `ChildStdout`) and +/// a `Regex` pattern, and checks each line against the given pattern as it is produced. +/// The function returns OK(()) as soon as a line matching the pattern is found, or an Err if +/// the stream ends without any lines matching the pattern. +/// +/// # Arguments +/// +/// * `child_stream` - An async tokio stream, e.g. from a child process `ChildStderr` or +/// `ChildStdout`. +/// * `re` - A `Regex` pattern to search for in the stream. +/// +/// # Returns +/// +/// * `Ok(())` if a line matching the pattern is found. +/// * `Err(String)` if the stream ends without any lines matching the pattern. +/// +/// # Examples +/// +/// ```ignore +/// use regex::Regex; +/// use tokio::process::Command; +/// use tokio::io::AsyncRead; +/// +/// # async fn run() { +/// let child = Command::new("some-command").stderr(std::process::Stdio::piped()).spawn().unwrap(); +/// let stderr = child.stderr.unwrap(); +/// let re = Regex::new("error:").unwrap(); +/// +/// match wait_for_pattern_match_in_stream(stderr, re).await { +/// Ok(()) => println!("Error found in stderr"), +/// Err(e) => println!("Error: {}", e), +/// } +/// # } +/// ``` +pub async fn wait_for_stream_pattern_match(stream: R, re: Regex) -> Result<(), String> +where + R: AsyncRead + Unpin, +{ + let mut stdio_reader = tokio::io::BufReader::new(stream).lines(); + while let Ok(Some(line)) = stdio_reader.next_line().await { + match re.find(line.as_str()) { + Some(_) => return Ok(()), + None => (), + } + } + Err(String::from("Stream closed without any lines matching the regex.")) +} + +/// Run the given `future` and panic if the `timeout` is hit. 
+pub async fn run_with_timeout(timeout: Duration, future: impl futures::Future) { + tokio::time::timeout(timeout, future).await.expect("Hit timeout"); +} + +/// Wait for at least n blocks to be finalized from a specified node +pub async fn wait_n_finalized_blocks(n: usize, url: &str) { + use substrate_rpc_client::{ws_client, ChainApi}; + + let mut built_blocks = std::collections::HashSet::new(); + let mut interval = tokio::time::interval(Duration::from_secs(2)); + let rpc = ws_client(url).await.unwrap(); + + loop { + if let Ok(block) = ChainApi::<(), Hash, Header, ()>::finalized_head(&rpc).await { + built_blocks.insert(block); + if built_blocks.len() > n { + break + } + }; + interval.tick().await; + } +} + +/// Run the node for a while (3 blocks) +pub async fn run_node_for_a_while(base_path: &Path, args: &[&str]) { + run_with_timeout(Duration::from_secs(60 * 10), async move { + let mut cmd = Command::new(cargo_bin("substrate")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(args) + .arg("-d") + .arg(base_path) + .spawn() + .unwrap(); + + let stderr = cmd.stderr.take().unwrap(); + + let mut child = KillChildOnDrop(cmd); + + let ws_url = extract_info_from_output(stderr).0.ws_url; + + // Let it produce some blocks. + wait_n_finalized_blocks(3, &ws_url).await; + + child.assert_still_running(); + + // Stop the process + child.stop(); + }) + .await +} + +pub struct KillChildOnDrop(pub Child); + +impl KillChildOnDrop { + /// Stop the child and wait until it is finished. + /// + /// Asserts if the exit status isn't success. + pub fn stop(&mut self) { + self.stop_with_signal(SIGINT); + } + + /// Same as [`Self::stop`] but takes the `signal` that is sent to stop the child. + pub fn stop_with_signal(&mut self, signal: Signal) { + kill(Pid::from_raw(self.id().try_into().unwrap()), signal).unwrap(); + assert!(self.wait().unwrap().success()); + } + + /// Asserts that the child is still running. + pub fn assert_still_running(&mut self) { + assert!(self.try_wait().unwrap().is_none(), "the process should still be running"); + } +} + +impl Drop for KillChildOnDrop { + fn drop(&mut self) { + let _ = self.0.kill(); + } +} + +impl Deref for KillChildOnDrop { + type Target = Child; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for KillChildOnDrop { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +/// Information extracted from a running node. +pub struct NodeInfo { + pub ws_url: String, + pub db_path: PathBuf, +} + +/// Extract [`NodeInfo`] from a running node by parsing its output. +/// +/// Returns the [`NodeInfo`] and all the read data. +pub fn extract_info_from_output(read: impl Read + Send) -> (NodeInfo, String) { + let mut data = String::new(); + + let ws_url = BufReader::new(read) + .lines() + .find_map(|line| { + let line = line.expect("failed to obtain next line while extracting node info"); + data.push_str(&line); + data.push_str("\n"); + + // does the line contain our port (we expect this specific output from substrate). + let sock_addr = match line.split_once("Running JSON-RPC WS server: addr=") { + None => return None, + Some((_, after)) => after.split_once(",").unwrap().0, + }; + + Some(format!("ws://{}", sock_addr)) + }) + .unwrap_or_else(|| { + eprintln!("Observed node output:\n{}", data); + panic!("We should get a WebSocket address") + }); + + // Database path is printed before the ws url! 
+ let re = Regex::new(r"Database: .+ at (\S+)").unwrap(); + let db_path = PathBuf::from(re.captures(data.as_str()).unwrap().get(1).unwrap().as_str()); + + (NodeInfo { ws_url, db_path }, data) +} diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index a27178792579a..fc9ba1c9e0dd9 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -27,13 +27,13 @@ pub use sc_client_api::{ BadBlocks, ForkBlocks, }; pub use sc_client_db::{self, Backend, BlocksPruning}; -pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod}; +pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod, WasmExecutor}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; pub use sp_keyring::{ ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, }; -pub use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +pub use sp_keystore::{Keystore, KeystorePtr}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; @@ -70,7 +70,7 @@ pub struct TestClientBuilder, StorageChild>, backend: Arc, _executor: std::marker::PhantomData, - keystore: Option, + keystore: Option, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, enable_offchain_indexing_api: bool, @@ -128,7 +128,7 @@ impl } /// Set the keystore that should be used by the externalities. - pub fn set_keystore(mut self, keystore: SyncCryptoStorePtr) -> Self { + pub fn set_keystore(mut self, keystore: KeystorePtr) -> Self { self.keystore = Some(keystore); self } @@ -286,17 +286,17 @@ impl Backend: sc_client_api::backend::Backend + 'static, { let executor = executor.into().unwrap_or_else(|| { - NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8, 2) + NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) }); let executor = LocalCallExecutor::new( self.backend.clone(), - executor, - Box::new(sp_core::testing::TaskExecutor::new()), + executor.clone(), Default::default(), ExecutionExtensions::new( self.execution_strategies.clone(), self.keystore.clone(), sc_offchain::OffchainDb::factory_from_backend(&*self.backend), + Arc::new(executor), ), ) .expect("Creates LocalCallExecutor"); diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index b55ace822ceb3..f3e38ada2b30d 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -11,9 +11,9 @@ publish = false [dependencies] proc-macro-crate = "1.1.3" -proc-macro2 = "1.0.37" -quote = "1.0.10" -syn = { version = "1.0.98", features = ["full"] } +proc-macro2 = "1.0.56" +quote = "1.0.26" +syn = { version = "2.0.14", features = ["full"] } [lib] proc-macro = true diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index ab5a0d08b6844..2cb4715770f78 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -20,7 +20,7 @@ sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = " sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, 
path = "../../primitives/inherents" } sp-keyring = { version = "7.0.0", optional = true, path = "../../primitives/keyring" } memory-db = { version = "0.32.0", default-features = false } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 099aeab11f768..5e4b5d6a12dd9 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -38,6 +38,7 @@ use sp_core::{ Pair, }; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; +use substrate_test_client::sc_executor::WasmExecutor; use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; /// A prelude to import in tests. @@ -171,7 +172,7 @@ pub type Client = client::Client< client::LocalCallExecutor< substrate_test_runtime::Block, B, - sc_executor::NativeElseWasmExecutor, + NativeElseWasmExecutor, >, substrate_test_runtime::Block, substrate_test_runtime::RuntimeApi, @@ -290,6 +291,6 @@ pub fn new() -> Client { } /// Create a new native executor. -pub fn new_native_executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8, 2) +pub fn new_native_or_wasm_executor() -> NativeElseWasmExecutor { + NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index cedd88bc11c3d..57726316b7272 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -165,13 +165,21 @@ pub enum Extrinsic { OffchainIndexClear(Vec), Store(Vec), EnqueueTxs(u64), + /// Read X times from the state some data and then panic! + /// + /// Returns `Ok` if it didn't read anything. + ReadAndPanic(u32), + /// Read X times from the state some data. + /// + /// Panics if it can not read `X` times. + Read(u32), } #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { fn serialize(&self, seq: S) -> Result where - S: ::serde::Serializer, + S: serde::Serializer, { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } @@ -212,6 +220,8 @@ impl BlindCheckable for Extrinsic { Extrinsic::OffchainIndexClear(key) => Ok(Extrinsic::OffchainIndexClear(key)), Extrinsic::Store(data) => Ok(Extrinsic::Store(data)), Extrinsic::EnqueueTxs(data) => Ok(Extrinsic::EnqueueTxs(data)), + Extrinsic::ReadAndPanic(i) => Ok(Extrinsic::ReadAndPanic(i)), + Extrinsic::Read(i) => Ok(Extrinsic::Read(i)), } } } @@ -380,6 +390,8 @@ cfg_if! { fn do_trace_log(); /// Verify the given signature, public & message bundle. fn verify_ed25519(sig: ed25519::Signature, public: ed25519::Public, message: Vec) -> bool; + /// Write the given `value` under the given `key` into the storage and then optional panic. + fn write_key_value(key: Vec, value: Vec, panic: bool); } } } else { @@ -439,7 +451,6 @@ cfg_if! { #[derive(Clone, Eq, PartialEq, TypeInfo)] pub struct Runtime; - impl GetNodeBlockType for Runtime { type NodeBlock = Block; } @@ -759,6 +770,14 @@ cfg_if! { fn metadata() -> OpaqueMetadata { unimplemented!() } + + fn metadata_at_version(_version: u32) -> Option { + unimplemented!() + } + + fn metadata_versions() -> sp_std::vec::Vec { + unimplemented!() + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { @@ -888,6 +907,14 @@ cfg_if! 
{ fn verify_ed25519(sig: ed25519::Signature, public: ed25519::Public, message: Vec) -> bool { sp_io::crypto::ed25519_verify(&sig, &message, &public) } + + fn write_key_value(key: Vec, value: Vec, panic: bool) { + sp_io::storage::set(&key, &value); + + if panic { + panic!("I'm just following my master"); + } + } } impl sp_consensus_aura::AuraApi for Runtime { @@ -1051,6 +1078,14 @@ cfg_if! { fn metadata() -> OpaqueMetadata { unimplemented!() } + + fn metadata_at_version(_version: u32) -> Option { + unimplemented!() + } + + fn metadata_versions() -> sp_std::vec::Vec { + unimplemented!() + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index a7e407e611b5d..24a9e132b3b00 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -284,6 +284,32 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx }, Extrinsic::Store(data) => execute_store(data.clone()), Extrinsic::EnqueueTxs(_) => Ok(Ok(())), + Extrinsic::ReadAndPanic(i) => execute_read(*i, true), + Extrinsic::Read(i) => execute_read(*i, false), + } +} + +fn execute_read(read: u32, panic_at_end: bool) -> ApplyExtrinsicResult { + let mut next_key = vec![]; + for _ in 0..(read as usize) { + if let Some(next) = sp_io::storage::next_key(&next_key) { + // Read the value + sp_io::storage::get(&next); + + next_key = next; + } else { + if panic_at_end { + return Ok(Ok(())) + } else { + panic!("Could not read {read} times from the state"); + } + } + } + + if panic_at_end { + panic!("BYE") + } else { + Ok(Ok(())) } } @@ -359,7 +385,7 @@ mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; - use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; + use sc_executor::{NativeElseWasmExecutor, WasmExecutor}; use sp_core::{ map, traits::{CallContext, CodeExecutor, RuntimeCode}, @@ -383,7 +409,7 @@ mod tests { } fn executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8, 2) + NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) } fn new_test_ext() -> TestExternalities { diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 60f078142e17f..08bcf3ee5a6b5 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -27,7 +27,7 @@ use sc_cli::{ execution_method_from_cli, CliConfiguration, ExecutionStrategy, Result, SharedParams, }; use sc_client_db::BenchmarkingState; -use sc_executor::NativeElseWasmExecutor; +use sc_executor::{NativeElseWasmExecutor, WasmExecutor}; use sc_service::{Configuration, NativeExecutionDispatch}; use serde::Serialize; use sp_core::{ @@ -35,13 +35,13 @@ use sp_core::{ testing::{TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, - traits::CallContext, + traits::{CallContext, ReadRuntimeVersionExt}, }; use sp_externalities::Extensions; -use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr}; +use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_state_machine::StateMachine; -use std::{collections::HashMap, fmt::Debug, fs, str::FromStr, sync::Arc, time}; +use std::{collections::HashMap, fmt::Debug, fs, str::FromStr, time}; /// Logging target const LOG_TARGET: &'static str = 
"frame::benchmark::pallet"; @@ -209,21 +209,28 @@ impl PalletCmd { // Do not enable storage tracking false, )?; - let executor = NativeElseWasmExecutor::::new( - execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy), - self.heap_pages, - 2, // The runtime instances cache size. - 2, // The runtime cache size + + let method = + execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy); + + let executor = NativeElseWasmExecutor::::new_with_wasm_executor( + WasmExecutor::builder() + .with_execution_method(method) + .with_max_runtime_instances(2) + .with_runtime_cache_size(2) + .build(), ); let extensions = || -> Extensions { let mut extensions = Extensions::default(); - extensions.register(KeystoreExt(Arc::new(KeyStore::new()) as SyncCryptoStorePtr)); let (offchain, _) = TestOffchainExt::new(); let (pool, _) = TestTransactionPoolExt::new(); + let keystore = MemoryKeystore::new(); + extensions.register(KeystoreExt::new(keystore)); extensions.register(OffchainWorkerExt::new(offchain.clone())); extensions.register(OffchainDbExt::new(offchain)); extensions.register(TransactionPoolExt::new(pool)); + extensions.register(ReadRuntimeVersionExt::new(executor.clone())); extensions }; @@ -237,7 +244,6 @@ impl PalletCmd { &(self.extra).encode(), extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(state).runtime_code()?, - sp_core::testing::TaskExecutor::new(), CallContext::Offchain, ) .execute(strategy.into()) @@ -375,7 +381,6 @@ impl PalletCmd { extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(state) .runtime_code()?, - sp_core::testing::TaskExecutor::new(), CallContext::Offchain, ) .execute(strategy.into()) @@ -416,7 +421,6 @@ impl PalletCmd { extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(state) .runtime_code()?, - sp_core::testing::TaskExecutor::new(), CallContext::Offchain, ) .execute(strategy.into()) @@ -449,7 +453,6 @@ impl PalletCmd { extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(state) .runtime_code()?, - sp_core::testing::TaskExecutor::new(), CallContext::Offchain, ) .execute(strategy.into()) @@ -470,7 +473,7 @@ impl PalletCmd { log::info!( target: LOG_TARGET, - "Running Benchmark: {}.{}({} args) {}/{} {}/{}", + "Running benchmark: {}.{}({} args) {}/{} {}/{}", String::from_utf8(pallet.clone()) .expect("Encoded from String; qed"), String::from_utf8(extrinsic.clone()) diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index 88d6b69a6c339..85b0e86caad96 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -15,9 +15,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `{{pallet}}`. 
pub struct WeightInfo(PhantomData); diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index a609f9e38c0ce..89cce45627e68 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -278,6 +278,7 @@ fn get_benchmark_data( used_recorded_proof_size.push(ComponentSlope { name: name.clone(), slope, error }); } }); + used_recorded_proof_size.sort_by(|a, b| a.name.cmp(&b.name)); // We add additional comments showing which storage items were touched. // We find the worst case proof size, and use that as the final proof size result. @@ -315,12 +316,12 @@ fn get_benchmark_data( let mut base_calculated_proof_size = 0; // Sum up the proof sizes per component for (_, slope, base) in proof_size_per_components.iter() { - base_calculated_proof_size += base; + base_calculated_proof_size = base_calculated_proof_size.max(*base); for component in slope.iter() { let mut found = false; for used_component in used_calculated_proof_size.iter_mut() { if used_component.name == component.name { - used_component.slope += component.slope; + used_component.slope = used_component.slope.max(component.slope); found = true; break } @@ -337,6 +338,7 @@ fn get_benchmark_data( } } } + used_calculated_proof_size.sort_by(|a, b| a.name.cmp(&b.name)); // This puts a marker on any component which is entirely unused in the weight formula. let components = batch.time_results[0] @@ -626,7 +628,7 @@ pub(crate) fn process_storage_results( }, }; // Add the additional trie layer overhead for every new prefix. - if *reads > 0 { + if *reads > 0 && !is_all_ignored { prefix_result.proof_size += 15 * 33 * additional_trie_layers as u32; } storage_per_prefix.entry(prefix.clone()).or_default().push(prefix_result); diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 8611ae4980f12..d3909af34451b 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -12,6 +12,7 @@ description = "An externalities provided environment that can load itself from r targets = ["x86_64-unknown-linux-gnu"] [dependencies] +jsonrpsee = { version = "0.16.2", features = ["http-client"] } codec = { package = "parity-scale-codec", version = "3.2.2" } log = "0.4.17" serde = "1.0.136" @@ -22,6 +23,7 @@ sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } substrate-rpc-client = { path = "../rpc/client" } futures = "0.3" +async-recursion = "1.0.4" [dev-dependencies] frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index ee342408828de..283d2c280b15e 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -20,8 +20,13 @@ //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate //! based chain, or a local state snapshot file. 
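Before the changes to this crate below, a rough usage sketch for orientation (assuming the crate's `Builder`/`Mode`/`OnlineConfig` API as used elsewhere in Substrate, with `node_primitives::Block` and a tokio runtime standing in for a concrete setup):

```rust
use node_primitives::Block;
use remote_externalities::{Builder, Mode, OnlineConfig};

#[tokio::main]
async fn main() {
    // Scrape the top-level state of a remote chain at a recent block
    // (the crate picks the block when `at` is not set).
    let mut ext = Builder::<Block>::new()
        .mode(Mode::Online(OnlineConfig {
            transport: "https://rpc.polkadot.io:443".to_owned().into(),
            ..Default::default()
        }))
        .build()
        .await
        .expect("failed to scrape remote state");

    ext.execute_with(|| {
        // State reads in here behave as if the remote chain's storage were local.
    });
}
```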
+use async_recursion::async_recursion; use codec::{Decode, Encode}; use futures::{channel::mpsc, stream::StreamExt}; +use jsonrpsee::{ + core::params::ArrayParams, + http_client::{HttpClient, HttpClientBuilder}, +}; use log::*; use serde::de::DeserializeOwned; use sp_core::{ @@ -35,6 +40,7 @@ use sp_core::{ pub use sp_io::TestExternalities; use sp_runtime::{traits::Block as BlockT, StateVersion}; use std::{ + cmp::{max, min}, fs, num::NonZeroUsize, ops::{Deref, DerefMut}, @@ -42,19 +48,14 @@ use std::{ sync::Arc, thread, }; -use substrate_rpc_client::{ - rpc_params, ws_client, BatchRequestBuilder, ChainApi, ClientT, StateApi, WsClient, -}; +use substrate_rpc_client::{rpc_params, BatchRequestBuilder, ChainApi, ClientT, StateApi}; type KeyValue = (StorageKey, StorageData); type TopKeyValues = Vec; type ChildKeyValues = Vec<(ChildInfo, Vec)>; const LOG_TARGET: &str = "remote-ext"; -const DEFAULT_WS_ENDPOINT: &str = "wss://rpc.polkadot.io:443"; -const DEFAULT_VALUE_DOWNLOAD_BATCH: usize = 4096; -// NOTE: increasing this value does not seem to impact speed all that much. -const DEFAULT_KEY_DOWNLOAD_PAGE: u32 = 1000; +const DEFAULT_HTTP_ENDPOINT: &str = "https://rpc.polkadot.io:443"; /// The snapshot that we store on disk. #[derive(Decode, Encode)] struct Snapshot { @@ -117,36 +118,55 @@ pub struct OfflineConfig { pub enum Transport { /// Use the `URI` to open a new WebSocket connection. Uri(String), - /// Use existing WebSocket connection. - RemoteClient(Arc), + /// Use HTTP connection. + RemoteClient(Arc), } impl Transport { - fn as_client(&self) -> Option<&WsClient> { + fn as_client(&self) -> Option<&HttpClient> { match self { Self::RemoteClient(client) => Some(client), _ => None, } } - fn as_client_cloned(&self) -> Option> { + fn as_client_cloned(&self) -> Option> { match self { Self::RemoteClient(client) => Some(client.clone()), _ => None, } } - // Open a new WebSocket connection if it's not connected. - async fn map_uri(&mut self) -> Result<(), &'static str> { + // Build an HttpClient from a URI. + async fn init(&mut self) -> Result<(), &'static str> { if let Self::Uri(uri) = self { log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri); - let ws_client = ws_client(uri).await.map_err(|e| { - log::error!(target: LOG_TARGET, "error: {:?}", e); - "failed to build ws client" - })?; + // If we have a ws uri, try to convert it to an http uri. + // We use an HTTP client rather than WS because WS starts to choke with "accumulated + // message length exceeds maximum" errors after processing ~10k keys when fetching + // from a node running a default configuration. 
+ let uri = if uri.starts_with("ws://") { + let uri = uri.replace("ws://", "http://"); + log::info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri); + uri + } else if uri.starts_with("wss://") { + let uri = uri.replace("wss://", "https://"); + log::info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri); + uri + } else { + uri.clone() + }; + let http_client = HttpClientBuilder::default() + .max_request_body_size(u32::MAX) + .request_timeout(std::time::Duration::from_secs(60 * 5)) + .build(uri) + .map_err(|e| { + log::error!(target: LOG_TARGET, "error: {:?}", e); + "failed to build http client" + })?; - *self = Self::RemoteClient(Arc::new(ws_client)) + *self = Self::RemoteClient(Arc::new(http_client)) } Ok(()) @@ -159,8 +179,8 @@ impl From for Transport { } } -impl From> for Transport { - fn from(client: Arc) -> Self { +impl From> for Transport { + fn from(client: Arc) -> Self { Transport::RemoteClient(client) } } @@ -189,18 +209,18 @@ pub struct OnlineConfig { } impl OnlineConfig { - /// Return rpc (ws) client reference. - fn rpc_client(&self) -> &WsClient { + /// Return rpc (http) client reference. + fn rpc_client(&self) -> &HttpClient { self.transport .as_client() - .expect("ws client must have been initialized by now; qed.") + .expect("http client must have been initialized by now; qed.") } - /// Return a cloned rpc (ws) client, suitable for being moved to threads. - fn rpc_client_cloned(&self) -> Arc { + /// Return a cloned rpc (http) client, suitable for being moved to threads. + fn rpc_client_cloned(&self) -> Arc { self.transport .as_client_cloned() - .expect("ws client must have been initialized by now; qed.") + .expect("http client must have been initialized by now; qed.") } fn at_expected(&self) -> B::Hash { @@ -211,7 +231,7 @@ impl OnlineConfig { impl Default for OnlineConfig { fn default() -> Self { Self { - transport: Transport::from(DEFAULT_WS_ENDPOINT.to_owned()), + transport: Transport::from(DEFAULT_HTTP_ENDPOINT.to_owned()), child_trie: true, at: None, state_snapshot: None, @@ -307,10 +327,29 @@ where B::Hash: DeserializeOwned, B::Header: DeserializeOwned, { + const DEFAULT_PARALLELISM: usize = 4; + const BATCH_SIZE_INCREASE_FACTOR: f32 = 1.10; + const BATCH_SIZE_DECREASE_FACTOR: f32 = 0.50; + const INITIAL_BATCH_SIZE: usize = 5000; + // NOTE: increasing this value does not seem to impact speed all that much. + const DEFAULT_KEY_DOWNLOAD_PAGE: u32 = 1000; + /// Get the number of threads to use. + /// Cap the number of threads. Performance improvement beyond a small number of threads is + /// negligible, and too many threads can create issues with the HttpClient. 
fn threads() -> NonZeroUsize { - thread::available_parallelism() - .unwrap_or(NonZeroUsize::new(4usize).expect("4 is non-zero; qed")) + let avaliable = thread::available_parallelism() + .unwrap_or(NonZeroUsize::new(1usize).expect("1 is non-zero; qed")) + .get(); + assert!(avaliable > 0, "avaliable parallelism must be greater than 0"); + + let requested: usize = match std::env::var("TRY_RUNTIME_MAX_THREADS") { + Ok(n) => n.parse::().expect("TRY_RUNTIME_MAX_THREADS must be a number"), + Err(_) => Self::DEFAULT_PARALLELISM, + }; + assert!(requested > 0, "TRY_RUNTIME_MAX_THREADS must be greater than 0"); + return NonZeroUsize::new(min(requested, avaliable)) + .expect("requested and avaliable are non-zero; qed") } async fn rpc_get_storage( @@ -352,7 +391,7 @@ where .rpc_client() .storage_keys_paged( Some(prefix.clone()), - DEFAULT_KEY_DOWNLOAD_PAGE, + Self::DEFAULT_KEY_DOWNLOAD_PAGE, last_key.clone(), Some(at), ) @@ -365,7 +404,7 @@ where all_keys.extend(page); - if page_len < DEFAULT_KEY_DOWNLOAD_PAGE as usize { + if page_len < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize { log::debug!(target: LOG_TARGET, "last page received: {}", page_len); break all_keys } else { @@ -384,6 +423,123 @@ where Ok(keys) } + /// Fetches storage data from a node using a dynamic batch size. + /// + /// This function adjusts the batch size on the fly to help prevent overwhelming the node with + /// large batch requests, and stay within request size limits enforced by the node. + /// + /// # Arguments + /// + /// * `client` - An `Arc` wrapped `HttpClient` used for making the requests. + /// * `payloads` - A vector of tuples containing a JSONRPC method name and `ArrayParams` + /// * `batch_size` - The initial batch size to use for the request. The batch size will be + /// adjusted dynamically in case of failure. + /// + /// # Returns + /// + /// Returns a `Result` with a vector of `Option`, where each element corresponds to + /// the storage data for the given method and parameters. The result will be an `Err` with a + /// `String` error message if the request fails. + /// + /// # Errors + /// + /// This function will return an error if: + /// * The batch request fails and the batch size is less than 2. + /// * There are invalid batch params. + /// * There is an error in the batch response. 
+ /// + /// # Example + /// + /// ```ignore + /// use your_crate::{get_storage_data_dynamic_batch_size, HttpClient, ArrayParams}; + /// use std::sync::Arc; + /// + /// async fn example() { + /// let client = Arc::new(HttpClient::new()); + /// let payloads = vec![ + /// ("some_method".to_string(), ArrayParams::new(vec![])), + /// ("another_method".to_string(), ArrayParams::new(vec![])), + /// ]; + /// let initial_batch_size = 10; + /// + /// let storage_data = get_storage_data_dynamic_batch_size(client, payloads, batch_size).await; + /// match storage_data { + /// Ok(data) => println!("Storage data: {:?}", data), + /// Err(e) => eprintln!("Error fetching storage data: {}", e), + /// } + /// } + /// ``` + #[async_recursion] + async fn get_storage_data_dynamic_batch_size( + client: &Arc, + payloads: Vec<(String, ArrayParams)>, + batch_size: usize, + ) -> Result>, String> { + // All payloads have been processed + if payloads.is_empty() { + return Ok(vec![]) + }; + + log::debug!( + target: LOG_TARGET, + "Remaining payloads: {} Batch request size: {}", + payloads.len(), + batch_size, + ); + + // Payloads to attempt to process this batch + let page = payloads.iter().take(batch_size).cloned().collect::>(); + + // Build the batch request + let mut batch = BatchRequestBuilder::new(); + for (method, params) in page.iter() { + batch + .insert(method, params.clone()) + .map_err(|_| "Invalid batch method and/or params")? + } + let batch_response = match client.batch_request::>(batch).await { + Ok(batch_response) => batch_response, + Err(e) => { + if batch_size < 2 { + return Err(e.to_string()) + } + + log::debug!( + target: LOG_TARGET, + "Batch request failed, trying again with smaller batch size. {}", + e.to_string() + ); + + return Self::get_storage_data_dynamic_batch_size( + client, + payloads, + max(1, (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize), + ) + .await + }, + }; + + // Collect the data from this batch + let mut data: Vec> = vec![]; + for item in batch_response.into_iter() { + match item { + Ok(x) => data.push(x), + Err(e) => return Err(e.message().to_string()), + } + } + + // Return this data joined with the remaining keys + let payloads = payloads.iter().skip(batch_size).cloned().collect::>(); + let mut rest = Self::get_storage_data_dynamic_batch_size( + client, + payloads, + max(batch_size + 1, (batch_size as f32 * Self::BATCH_SIZE_INCREASE_FACTOR) as usize), + ) + .await?; + data.append(&mut rest); + Ok(data) + } + /// Synonym of `getPairs` that uses paged queries to first get the keys, and then /// map them to values one by one. /// @@ -428,81 +584,61 @@ where let (tx, mut rx) = mpsc::unbounded::(); for thread_keys in keys_chunked { - let thread_client = client.clone(); let thread_sender = tx.clone(); + let thread_client = client.clone(); let handle = std::thread::spawn(move || { - let rt = tokio::runtime::Runtime::new().unwrap(); - let mut thread_key_values = Vec::with_capacity(thread_keys.len()); - - for chunk_keys in thread_keys.chunks(DEFAULT_VALUE_DOWNLOAD_BATCH) { - let mut batch = BatchRequestBuilder::new(); - - for key in chunk_keys.iter() { - batch - .insert("state_getStorage", rpc_params![key, at]) - .map_err(|_| "Invalid batch params") - .unwrap(); - } - - let batch_response = rt - .block_on(thread_client.batch_request::>(batch)) - .map_err(|e| { - log::error!( - target: LOG_TARGET, - "failed to execute batch: {:?}. Error: {:?}", - chunk_keys.iter().map(HexDisplay::from).collect::>(), - e - ); - "batch failed." 
- }) - .unwrap(); + // Process the payloads in chunks so each thread can pass kvs back to the main + // thread to start inserting before all of the data has been queried from the node. + // Inserting data takes a very long time, so the earlier it can start the better. + let mut thread_key_values = vec![]; + let chunk_size = thread_keys.len() / 1; + for thread_keys_chunk in thread_keys.chunks(chunk_size) { + let mut thread_key_chunk_values = Vec::with_capacity(thread_keys_chunk.len()); + + let payloads = thread_keys_chunk + .iter() + .map(|key| ("state_getStorage".to_string(), rpc_params!(key, at))) + .collect::>(); + + let rt = tokio::runtime::Runtime::new().unwrap(); + let storage_data = match rt.block_on(Self::get_storage_data_dynamic_batch_size( + &thread_client, + payloads, + Self::INITIAL_BATCH_SIZE, + )) { + Ok(storage_data) => storage_data, + Err(e) => { + thread_sender.unbounded_send(Message::BatchFailed(e)).unwrap(); + return Default::default() + }, + }; // Check if we got responses for all submitted requests. - assert_eq!(chunk_keys.len(), batch_response.len()); + assert_eq!(thread_keys_chunk.len(), storage_data.len()); - let mut batch_kv = Vec::with_capacity(chunk_keys.len()); - for (key, maybe_value) in chunk_keys.into_iter().zip(batch_response) { + let mut batch_kv = Vec::with_capacity(thread_keys_chunk.len()); + for (key, maybe_value) in thread_keys_chunk.iter().zip(storage_data) { match maybe_value { - Ok(Some(data)) => { - thread_key_values.push((key.clone(), data.clone())); + Some(data) => { + thread_key_chunk_values.push((key.clone(), data.clone())); batch_kv.push((key.clone().0, data.0)); }, - Ok(None) => { + None => { log::warn!( target: LOG_TARGET, "key {:?} had none corresponding value.", &key ); let data = StorageData(vec![]); - thread_key_values.push((key.clone(), data.clone())); + thread_key_chunk_values.push((key.clone(), data.clone())); batch_kv.push((key.clone().0, data.0)); }, - Err(e) => { - let reason = format!("key {:?} failed: {:?}", &key, e); - log::error!(target: LOG_TARGET, "Reason: {}", reason); - // Signal failures to the main thread, stop aggregating (key, value) - // pairs and return immediately an error. - thread_sender.unbounded_send(Message::BatchFailed(reason)).unwrap(); - return Default::default() - }, }; - - if thread_key_values.len() % (thread_keys.len() / 10).max(1) == 0 { - let ratio: f64 = - thread_key_values.len() as f64 / thread_keys.len() as f64; - log::debug!( - target: LOG_TARGET, - "[thread = {:?}] progress = {:.2} [{} / {}]", - std::thread::current().id(), - ratio, - thread_key_values.len(), - thread_keys.len(), - ); - } } - // Send this batch to the main thread to start inserting. + // Send this chunk to the main thread to start inserting. thread_sender.unbounded_send(Message::Batch(batch_kv)).unwrap(); + thread_key_values.extend(thread_key_chunk_values); } thread_sender.unbounded_send(Message::Terminated).unwrap(); @@ -516,16 +652,23 @@ where // `pending_ext`. let mut terminated = 0usize; let mut batch_failed = false; + let mut processed = 0usize; loop { match rx.next().await.unwrap() { - Message::Batch(kv) => { - for (k, v) in kv { - // skip writing the child root data. 
- if is_default_child_storage_key(k.as_ref()) { - continue - } - pending_ext.insert(k, v); - } + Message::Batch(kvs) => { + let kvs = kvs + .into_iter() + .filter(|(k, _)| !is_default_child_storage_key(k)) + .collect::>(); + processed += kvs.len(); + pending_ext.batch_insert(kvs); + log::info!( + target: LOG_TARGET, + "inserting keys progress = {:.0}% [{} / {}]", + (processed as f32 / keys.len() as f32) * 100f32, + processed, + keys.len(), + ); }, Message::BatchFailed(error) => { log::error!(target: LOG_TARGET, "Batch processing failed: {:?}", error); @@ -554,73 +697,58 @@ where /// Get the values corresponding to `child_keys` at the given `prefixed_top_key`. pub(crate) async fn rpc_child_get_storage_paged( - client: &WsClient, + client: &Arc, prefixed_top_key: &StorageKey, child_keys: Vec, at: B::Hash, ) -> Result, &'static str> { - let mut child_kv_inner = vec![]; - let mut batch_success = true; - - for batch_child_key in child_keys.chunks(DEFAULT_VALUE_DOWNLOAD_BATCH) { - let mut batch_request = BatchRequestBuilder::new(); - - for key in batch_child_key { - batch_request - .insert( - "childstate_getStorage", - rpc_params![ - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - key, - at - ], - ) - .map_err(|_| "Invalid batch params")?; - } + let child_keys_len = child_keys.len(); - let batch_response = - client.batch_request::>(batch_request).await.map_err(|e| { - log::error!( - target: LOG_TARGET, - "failed to execute batch: {:?}. Error: {:?}", - batch_child_key, - e - ); - "batch failed." - })?; + let payloads = child_keys + .iter() + .map(|key| { + ( + "childstate_getStorage".to_string(), + rpc_params![ + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + key, + at + ], + ) + }) + .collect::>(); - assert_eq!(batch_child_key.len(), batch_response.len()); - - for (key, maybe_value) in batch_child_key.iter().zip(batch_response) { - match maybe_value { - Ok(Some(v)) => { - child_kv_inner.push((key.clone(), v)); - }, - Ok(None) => { - log::warn!( - target: LOG_TARGET, - "key {:?} had none corresponding value.", - &key - ); - child_kv_inner.push((key.clone(), StorageData(vec![]))); - }, - Err(e) => { - log::error!(target: LOG_TARGET, "key {:?} failed: {:?}", &key, e); - batch_success = false; - }, - }; - } - } + let storage_data = match Self::get_storage_data_dynamic_batch_size( + client, + payloads, + Self::INITIAL_BATCH_SIZE, + ) + .await + { + Ok(storage_data) => storage_data, + Err(e) => { + log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e); + return Err("batch processing failed") + }, + }; - if batch_success { - Ok(child_kv_inner) - } else { - Err("batch failed.") - } + assert_eq!(child_keys_len, storage_data.len()); + + Ok(child_keys + .iter() + .zip(storage_data) + .map(|(key, maybe_value)| match maybe_value { + Some(v) => (key.clone(), v), + None => { + log::warn!(target: LOG_TARGET, "key {:?} had no corresponding value.", &key); + (key.clone(), StorageData(vec![])) + }, + }) + .collect::>()) } pub(crate) async fn rpc_child_get_keys( - client: &WsClient, + client: &HttpClient, prefixed_top_key: &StorageKey, child_prefix: StorageKey, at: B::Hash, @@ -678,107 +806,47 @@ where return Ok(Default::default()) } - // div-ceil simulation. 
- let threads = Self::threads().get(); - let child_roots_per_thread = (child_roots.len() + threads - 1) / threads; - info!( target: LOG_TARGET, - "👩‍👦 scraping child-tree data from {} top keys, split among {} threads, {} top keys per thread", + "👩‍👦 scraping child-tree data from {} top keys", child_roots.len(), - threads, - child_roots_per_thread, ); - // NOTE: the threading done here is the simpler, yet slightly un-elegant because we are - // splitting child root among threads, and it is very common for these root to have vastly - // different child tries underneath them, causing some threads to finish way faster than - // others. Certainly still better than single thread though. - let mut handles = vec![]; - let client = self.as_online().rpc_client_cloned(); let at = self.as_online().at_expected(); - enum Message { - Terminated, - Batch((ChildInfo, Vec<(Vec, Vec)>)), - } - let (tx, mut rx) = mpsc::unbounded::(); - - for thread_child_roots in child_roots - .chunks(child_roots_per_thread) - .map(|x| x.into()) - .collect::>>() - { - let thread_client = client.clone(); - let thread_sender = tx.clone(); - let handle = thread::spawn(move || { - let rt = tokio::runtime::Runtime::new().unwrap(); - let mut thread_child_kv = vec![]; - for prefixed_top_key in thread_child_roots { - let child_keys = rt.block_on(Self::rpc_child_get_keys( - &thread_client, - &prefixed_top_key, - StorageKey(vec![]), - at, - ))?; - let child_kv_inner = rt.block_on(Self::rpc_child_get_storage_paged( - &thread_client, - &prefixed_top_key, - child_keys, - at, - ))?; - - let prefixed_top_key = PrefixedStorageKey::new(prefixed_top_key.clone().0); - let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) { - Some((ChildType::ParentKeyId, storage_key)) => storage_key, - None => { - log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key); - return Err("Invalid child key") - }, - }; - - thread_sender - .unbounded_send(Message::Batch(( - ChildInfo::new_default(un_prefixed), - child_kv_inner - .iter() - .cloned() - .map(|(k, v)| (k.0, v.0)) - .collect::>(), - ))) - .unwrap(); - thread_child_kv.push((ChildInfo::new_default(un_prefixed), child_kv_inner)); - } - - thread_sender.unbounded_send(Message::Terminated).unwrap(); - Ok(thread_child_kv) - }); - handles.push(handle); - } - - // first, wait until all threads send a `Terminated` message, in the meantime populate - // `pending_ext`. 
- let mut terminated = 0usize; - loop { - match rx.next().await.unwrap() { - Message::Batch((info, kvs)) => - for (k, v) in kvs { - pending_ext.insert_child(info.clone(), k, v); - }, - Message::Terminated => { - terminated += 1; - if terminated == handles.len() { - break - } + let arc_client = self.as_online().rpc_client_cloned(); + let mut child_kv = vec![]; + for prefixed_top_key in child_roots { + let child_keys = Self::rpc_child_get_keys( + arc_client.as_ref(), + &prefixed_top_key, + StorageKey(vec![]), + at, + ) + .await?; + + let child_kv_inner = + Self::rpc_child_get_storage_paged(&arc_client, &prefixed_top_key, child_keys, at) + .await?; + + let prefixed_top_key = PrefixedStorageKey::new(prefixed_top_key.clone().0); + let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) { + Some((ChildType::ParentKeyId, storage_key)) => storage_key, + None => { + log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key); + return Err("Invalid child key") }, + }; + + let info = ChildInfo::new_default(un_prefixed); + let key_values = + child_kv_inner.iter().cloned().map(|(k, v)| (k.0, v.0)).collect::>(); + child_kv.push((info.clone(), child_kv_inner)); + for (k, v) in key_values { + pending_ext.insert_child(info.clone(), k, v); } } - let child_kv = handles - .into_iter() - .flat_map(|h| h.join().unwrap()) - .flatten() - .collect::>(); Ok(child_kv) } @@ -841,8 +909,8 @@ where /// /// initializes the remote client in `transport`, and sets the `at` field, if not specified. async fn init_remote_client(&mut self) -> Result<(), &'static str> { - // First, initialize the ws client. - self.as_online_mut().transport.map_uri().await?; + // First, initialize the http client. + self.as_online_mut().transport.init().await?; // Then, if `at` is not set, set it. if self.as_online().at.is_none() { @@ -955,13 +1023,12 @@ where ); info!(target: LOG_TARGET, "injecting a total of {} top keys", top.len()); - for (k, v) in top { - // skip writing the child root data. - if is_default_child_storage_key(k.as_ref()) { - continue - } - inner_ext.insert(k.0, v.0); - } + let top = top + .into_iter() + .filter(|(k, _)| !is_default_child_storage_key(k.as_ref())) + .map(|(k, v)| (k.0, v.0)) + .collect::>(); + inner_ext.batch_insert(top); info!( target: LOG_TARGET, @@ -997,9 +1064,7 @@ where "extending externalities with {} manually injected key-values", self.hashed_key_values.len() ); - for (k, v) in self.hashed_key_values { - ext.insert(k.0, v.0); - } + ext.batch_insert(self.hashed_key_values.into_iter().map(|(k, v)| (k.0, v.0))); } // exclude manual key values. 
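The net effect of the hunks above: value download no longer uses a fixed-size batch, but a batch size that is halved when a request fails and grown by roughly 10% after each success, starting from 5000. A minimal, dependency-free sketch of that control flow (the constants mirror the diff; `send` is a stand-in for the real JSON-RPC `batch_request` call):

```rust
const INITIAL_BATCH_SIZE: usize = 5000;
const BATCH_SIZE_INCREASE_FACTOR: f32 = 1.10;
const BATCH_SIZE_DECREASE_FACTOR: f32 = 0.50;

/// Fetch `remaining` items through `send`, adapting the batch size as we go.
/// Returns the number of batch requests that were needed.
fn fetch_all(mut remaining: usize, send: impl Fn(usize) -> Result<(), ()>) -> Result<usize, String> {
    let mut batch_size = INITIAL_BATCH_SIZE;
    let mut requests = 0usize;
    while remaining > 0 {
        let take = batch_size.min(remaining);
        requests += 1;
        match send(take) {
            Ok(()) => {
                remaining -= take;
                // Grow the batch again, by at least one.
                batch_size = ((batch_size as f32 * BATCH_SIZE_INCREASE_FACTOR) as usize)
                    .max(batch_size + 1);
            },
            // Give up once we fail even at the minimum batch size.
            Err(()) if batch_size < 2 => return Err("batch failed at minimum size".into()),
            // Otherwise retry with a smaller batch.
            Err(()) =>
                batch_size = ((batch_size as f32 * BATCH_SIZE_DECREASE_FACTOR) as usize).max(1),
        }
    }
    Ok(requests)
}

fn main() {
    // Pretend the node rejects any batch larger than 1200 requests.
    let send = |n: usize| if n <= 1200 { Ok(()) } else { Err(()) };
    println!("finished in {} batch requests", fetch_all(10_000, send).unwrap());
}
```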
diff --git a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 3689a87da8ae5..91f50014667cc 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } serde = { version = "1", features = ["derive"] } log = { version = "0.4.17", default-features = false } diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index a220289542464..686d27eefbf13 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -42,13 +42,13 @@ log = "0.4.17" parity-scale-codec = "3.2.2" serde = "1.0.136" serde_json = "1.0.85" -zstd = { version = "0.11.2", default-features = false } +zstd = { version = "0.12.3", default-features = false } [dev-dependencies] -tokio = "1.22.0" +assert_cmd = "2.0.10" +regex = "1.7.3" +substrate-cli-test-utils = { path = "../../../../test-utils/cli" } +tokio = "1.27.0" [features] -try-runtime = [ - "sp-debug-derive/force-debug", - "frame-try-runtime/try-runtime", -] +try-runtime = ["sp-debug-derive/force-debug", "frame-try-runtime/try-runtime"] diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 561bc57c70c5a..48dab6b9bd1b3 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -133,7 +133,7 @@ where &executor, "TryRuntime_execute_block", &payload, - full_extensions(), + full_extensions(executor.clone()), shared.export_proof, )?; diff --git a/utils/frame/try-runtime/cli/src/commands/fast_forward.rs b/utils/frame/try-runtime/cli/src/commands/fast_forward.rs index 75c48c3c402f3..0c517c02fbe81 100644 --- a/utils/frame/try-runtime/cli/src/commands/fast_forward.rs +++ b/utils/frame/try-runtime/cli/src/commands/fast_forward.rs @@ -103,7 +103,7 @@ async fn dry_run( executor, method, data, - full_extensions(), + full_extensions(executor.clone()), )?; Ok(::decode(&mut &*result)?) @@ -121,7 +121,7 @@ async fn run( executor, method, data, - full_extensions(), + full_extensions(executor.clone()), )?; let storage_changes = changes.drain_storage_changes( diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 2a67d269c87f1..d5c68978eb61a 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -108,10 +108,12 @@ where .or_else(|e| { if matches!(e, substrate_rpc_client::Error::ParseError(_)) { log::error!( + target: LOG_TARGET, "failed to parse the block format of remote against the local \ - codebase. The block format has changed, and follow-chain cannot run in \ - this case. Try running this command in a branch of your codebase that has \ - the same block format as the remote chain. For now, we replace the block with an empty one" + codebase. The block format has changed, and follow-chain cannot run in \ + this case. Try running this command in a branch of your codebase that + has the same block format as the remote chain. 
For now, we replace the \ + block with an empty one." ); } Err(rpc_err_handler(e)) @@ -148,8 +150,10 @@ where state_ext, &executor, "TryRuntime_execute_block", - (block, command.state_root_check, command.try_state.clone()).encode().as_ref(), - full_extensions(), + (block, command.state_root_check, true, command.try_state.clone()) + .encode() + .as_ref(), + full_extensions(executor.clone()), shared .export_proof .as_ref() diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 352d552a3f358..4da6f07836cd1 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -97,7 +97,7 @@ where &executor, "OffchainWorkerApi_offchain_worker", &payload, - full_extensions(), + full_extensions(executor.clone()), )?; Ok(()) diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index c8582ea4d2a60..948ff9c030ea9 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -40,8 +40,8 @@ pub struct OnRuntimeUpgradeCmd { /// Performing any checks will potentially invalidate the measured PoV/Weight. // NOTE: The clap attributes make it backwards compatible with the previous `--checks` flag. #[clap(long, - default_value = "None", - default_missing_value = "All", + default_value = "pre-and-post", + default_missing_value = "all", num_args = 0..=1, require_equals = true, verbatim_doc_comment)] diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 6fb4b44392546..9268ef2edba8b 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -29,9 +29,9 @@ //! //! Some resources about the above: //! -//! 1. +//! 1. //! 2. -//! 3. +//! 3. //! //! --- //! @@ -97,18 +97,6 @@ //! > If anything, in most cases, we expect spec-versions to NOT match, because try-runtime is all //! > about testing unreleased runtimes. //! -//! ## Note on nodes that respond to `try-runtime` requests. -//! -//! There are a number of flags that need to be preferably set on a running node in order to work -//! well with try-runtime's expensive RPC queries: -//! -//! - set `--rpc-max-response-size 1000` and -//! - `--rpc-max-request-size 1000` to ensure connections are not dropped in case the state is -//! large. -//! - set `--rpc-cors all` to ensure ws connections can come through. -//! -//! Note that *none* of the try-runtime operations need unsafe RPCs. -//! //! ## Note on signature and state-root checks //! //! All of the commands calling into `TryRuntime_execute_block` ([`Command::ExecuteBlock`] and @@ -133,11 +121,10 @@ //! given the right flag: //! //! ```ignore -//! -//! #[cfg(feature = try-runtime)] +//! #[cfg(feature = "try-runtime")] //! fn pre_upgrade() -> Result, &'static str> {} //! -//! #[cfg(feature = try-runtime)] +//! #[cfg(feature = "try-runtime")] //! fn post_upgrade(state: Vec) -> Result<(), &'static str> {} //! ``` //! @@ -152,9 +139,9 @@ //! //! Similarly, each pallet can expose a function in `#[pallet::hooks]` section as follows: //! -//! ``` -//! #[cfg(feature = try-runtime)] -//! fn try_state(_) -> Result<(), &'static str> {} +//! ```ignore +//! #[cfg(feature = "try-runtime")] +//! fn try_state(_: BlockNumber) -> Result<(), &'static str> {} //! ``` //! //! 
which is called on numerous code paths in the try-runtime tool. These checks should ensure that @@ -365,10 +352,13 @@ use remote_externalities::{ TestExternalities, }; use sc_cli::{ - CliConfiguration, RuntimeVersion, WasmExecutionMethod, WasmtimeInstantiationStrategy, - DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD, + execution_method_from_cli, CliConfiguration, RuntimeVersion, WasmExecutionMethod, + WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, + DEFAULT_WASM_EXECUTION_METHOD, +}; +use sc_executor::{ + sp_wasm_interface::HostFunctions, HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, }; -use sc_executor::{sp_wasm_interface::HostFunctions, WasmExecutor}; use sp_api::HashT; use sp_core::{ hexdisplay::HexDisplay, @@ -377,13 +367,12 @@ use sp_core::{ OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, storage::well_known_keys, - testing::TaskExecutor, - traits::{CallContext, ReadRuntimeVersion, TaskExecutorExt}, + traits::{CallContext, ReadRuntimeVersion, ReadRuntimeVersionExt}, twox_128, H256, }; use sp_externalities::Extensions; use sp_inherents::InherentData; -use sp_keystore::{testing::KeyStore, KeystoreExt}; +use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, NumberFor}, DeserializeOwned, Digest, @@ -599,8 +588,6 @@ pub struct LiveState { #[derive(Debug, Clone, clap::Subcommand)] pub enum State { /// Use a state snapshot as the source of runtime state. - /// - /// This can be crated by passing a value to [`State::Live::snapshot_path`]. Snap { #[arg(short, long)] snapshot_path: PathBuf, @@ -813,31 +800,34 @@ where } /// Build all extensions that we typically use. -pub(crate) fn full_extensions() -> Extensions { +pub(crate) fn full_extensions(wasm_executor: WasmExecutor) -> Extensions { let mut extensions = Extensions::default(); - extensions.register(TaskExecutorExt::new(TaskExecutor::new())); let (offchain, _offchain_state) = TestOffchainExt::new(); let (pool, _pool_state) = TestTransactionPoolExt::new(); + let keystore = MemoryKeystore::new(); extensions.register(OffchainDbExt::new(offchain.clone())); extensions.register(OffchainWorkerExt::new(offchain)); - extensions.register(KeystoreExt(std::sync::Arc::new(KeyStore::new()))); + extensions.register(KeystoreExt::new(keystore)); extensions.register(TransactionPoolExt::new(pool)); + extensions.register(ReadRuntimeVersionExt::new(wasm_executor)); extensions } +/// Build wasm executor by default config. 
pub(crate) fn build_executor(shared: &SharedParams) -> WasmExecutor { - let heap_pages = shared.heap_pages.or(Some(2048)); - let max_runtime_instances = 8; - let runtime_cache_size = 2; - - WasmExecutor::new( - sc_executor::WasmExecutionMethod::Interpreted, - heap_pages, - max_runtime_instances, - None, - runtime_cache_size, - ) + let heap_pages = shared + .heap_pages + .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static { extra_pages: p as _ }); + + WasmExecutor::builder() + .with_execution_method(execution_method_from_cli( + shared.wasm_method, + shared.wasmtime_instantiation_strategy, + )) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build() } /// Ensure that the given `ext` is compiled with `try-runtime` @@ -876,7 +866,6 @@ pub(crate) fn state_machine_call( data, extensions, &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, - sp_core::testing::TaskExecutor::new(), CallContext::Offchain, ) .execute(sp_state_machine::ExecutionStrategy::AlwaysWasm) @@ -916,7 +905,6 @@ pub(crate) fn state_machine_call_with_proof. + +#![cfg(unix)] + +#[cfg(feature = "try-runtime")] +mod tests { + use assert_cmd::cargo::cargo_bin; + use regex::Regex; + use std::{ + process::{self}, + time::Duration, + }; + use substrate_cli_test_utils as common; + use tokio::process::{Child, Command}; + + #[tokio::test] + async fn follow_chain_works() { + // Build substrate so binaries used in the test use the latest code. + common::build_substrate(&["--features=try-runtime"]); + + common::run_with_timeout(Duration::from_secs(60), async move { + fn start_follow(ws_url: &str) -> Child { + Command::new(cargo_bin("substrate")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(&["try-runtime", "--runtime=existing"]) + .args(&["follow-chain", format!("--uri={}", ws_url).as_str()]) + .kill_on_drop(true) + .spawn() + .unwrap() + } + + // Start a node and wait for it to begin finalizing blocks + let mut node = common::KillChildOnDrop(common::start_node()); + let ws_url = common::extract_info_from_output(node.stderr.take().unwrap()).0.ws_url; + common::wait_n_finalized_blocks(1, &ws_url).await; + + // Kick off the follow-chain process and wait for it to process at least 3 blocks. + let mut follow = start_follow(&ws_url); + let re = Regex::new(r#".*executed block ([3-9]|[1-9]\d+).*"#).unwrap(); + let matched = + common::wait_for_stream_pattern_match(follow.stderr.take().unwrap(), re).await; + + // Assert that the follow-chain process has followed at least 3 blocks. 
+ assert!(matches!(matched, Ok(_))); + }) + .await; + } +} diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 5a442e026aa83..6755dbcce1824 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -15,10 +15,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" build-helper = "0.1.1" -cargo_metadata = "0.15.2" +cargo_metadata = "0.15.4" strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" -toml = "0.5.4" +toml = "0.7.3" walkdir = "2.3.2" sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" } filetime = "0.2.16" diff --git a/utils/wasm-builder/README.md b/utils/wasm-builder/README.md index a10611cc26a00..b1ccb1b35b10e 100644 --- a/utils/wasm-builder/README.md +++ b/utils/wasm-builder/README.md @@ -77,8 +77,13 @@ Wasm builder requires the following prerequisites for building the Wasm binary: - rust nightly + `wasm32-unknown-unknown` toolchain -If a specific rust nightly is installed with `rustup`, it is important that the wasm target is installed -as well. For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, -the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. +or + +- rust stable and version at least 1.68.0 + `wasm32-unknown-unknown` toolchain + +If a specific rust toolchain is installed with `rustup`, it is important that the wasm target is +installed as well. For example, if installing the rust toolchain from 20.02.2020 using `rustup +install nightly-2020-02-20`, the wasm target needs to be installed as well `rustup target add +wasm32-unknown-unknown --toolchain nightly-2020-02-20`. License: Apache-2.0 diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index 659b955256af7..8405b5a0bda9e 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -100,8 +100,12 @@ //! //! - rust nightly + `wasm32-unknown-unknown` toolchain //! -//! If a specific rust nightly is installed with `rustup`, it is important that the wasm target is -//! installed as well. For example if installing the rust nightly from 20.02.2020 using `rustup +//! or +//! +//! - rust stable and version at least 1.68.0 + `wasm32-unknown-unknown` toolchain +//! +//! If a specific rust toolchain is installed with `rustup`, it is important that the wasm target is +//! installed as well. For example, if installing the rust toolchain from 20.02.2020 using `rustup //! install nightly-2020-02-20`, the wasm target needs to be installed as well `rustup target add //! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. @@ -111,9 +115,11 @@ use std::{ path::{Path, PathBuf}, process::Command, }; +use version::Version; mod builder; mod prerequisites; +mod version; mod wasm_project; pub use builder::{WasmBuilder, WasmBuilderSelectProject}; @@ -172,53 +178,59 @@ fn copy_file_if_changed(src: PathBuf, dst: PathBuf) { } } -/// Get a cargo command that compiles with nightly -fn get_nightly_cargo() -> CargoCommand { +/// Get a cargo command that should be used to invoke the compilation.
+fn get_cargo_command() -> CargoCommand { let env_cargo = CargoCommand::new(&env::var("CARGO").expect("`CARGO` env variable is always set by cargo")); let default_cargo = CargoCommand::new("cargo"); - let rustup_run_nightly = CargoCommand::new_with_args("rustup", &["run", "nightly", "cargo"]); let wasm_toolchain = env::var(WASM_BUILD_TOOLCHAIN).ok(); // First check if the user requested a specific toolchain - if let Some(cmd) = wasm_toolchain.and_then(|t| get_rustup_nightly(Some(t))) { + if let Some(cmd) = + wasm_toolchain.map(|t| CargoCommand::new_with_args("rustup", &["run", &t, "cargo"])) + { cmd - } else if env_cargo.is_nightly() { + } else if env_cargo.supports_substrate_wasm_env() { env_cargo - } else if default_cargo.is_nightly() { + } else if default_cargo.supports_substrate_wasm_env() { default_cargo - } else if rustup_run_nightly.is_nightly() { - rustup_run_nightly } else { - // If no command before provided us with a nightly compiler, we try to search one - // with rustup. If that fails as well, we return the default cargo and let the prequisities - // check fail. - get_rustup_nightly(None).unwrap_or(default_cargo) + // If none of the commands above provided us with a cargo that supports our Substrate wasm env, we + // try to search one with rustup. If that fails as well, we return the default cargo and let + // the prerequisites check fail. + get_rustup_command().unwrap_or(default_cargo) } } -/// Get a nightly from rustup. If `selected` is `Some(_)`, a `CargoCommand` using the given -/// nightly is returned. -fn get_rustup_nightly(selected: Option) -> Option { +/// Get the newest rustup command that supports our Substrate wasm env. +/// +/// Stable versions are always favored over nightly versions even if the nightly versions are +/// newer. +fn get_rustup_command() -> Option { let host = format!("-{}", env::var("HOST").expect("`HOST` is always set by cargo")); - let version = match selected { - Some(selected) => selected, - None => { - let output = Command::new("rustup").args(&["toolchain", "list"]).output().ok()?.stdout; - let lines = output.as_slice().lines(); + let output = Command::new("rustup").args(&["toolchain", "list"]).output().ok()?.stdout; + let lines = output.as_slice().lines(); + + let mut versions = Vec::new(); + for line in lines.filter_map(|l| l.ok()) { + let rustup_version = line.trim_end_matches(&host); + + let cmd = CargoCommand::new_with_args("rustup", &["run", &rustup_version, "cargo"]); - let mut latest_nightly = None; - for line in lines.filter_map(|l| l.ok()) { - if line.starts_with("nightly-") && line.ends_with(&host) { - // Rustup prints them sorted - latest_nightly = Some(line.clone()); - } - } + if !cmd.supports_substrate_wasm_env() { + continue + } + + let Some(cargo_version) = cmd.version() else { continue; }; - latest_nightly?.trim_end_matches(&host).into() - }, - }; + versions.push((cargo_version, rustup_version.to_string())); + } + + // Sort by the parsed version to get the latest version (greatest version) at the end of the + // vec.
+ versions.sort_by_key(|v| v.0); + let version = &versions.last()?.1; Some(CargoCommand::new_with_args("rustup", &["run", &version, "cargo"])) } @@ -228,17 +240,23 @@ fn get_rustup_nightly(selected: Option) -> Option { struct CargoCommand { program: String, args: Vec, + version: Option, } impl CargoCommand { fn new(program: &str) -> Self { - CargoCommand { program: program.into(), args: Vec::new() } + let version = Self::extract_version(program, &[]); + + CargoCommand { program: program.into(), args: Vec::new(), version } } fn new_with_args(program: &str, args: &[&str]) -> Self { + let version = Self::extract_version(program, args); + CargoCommand { program: program.into(), args: args.iter().map(ToString::to_string).collect(), + version, } } @@ -248,20 +266,40 @@ impl CargoCommand { cmd } - /// Check if the supplied cargo command is a nightly version - fn is_nightly(&self) -> bool { + fn extract_version(program: &str, args: &[&str]) -> Option { + let version = Command::new(program) + .args(args) + .arg("--version") + .output() + .ok() + .and_then(|o| String::from_utf8(o.stdout).ok())?; + + Version::extract(&version) + } + + /// Returns the version of this cargo command or `None` if it failed to extract the version. + fn version(&self) -> Option { + self.version + } + + /// Check if the supplied cargo command supports our Substrate wasm environment. + /// + /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. + /// + /// Assumes that cargo version matches the rustc version. + fn supports_substrate_wasm_env(&self) -> bool { // `RUSTC_BOOTSTRAP` tells a stable compiler to behave like a nightly. So, when this env // variable is set, we can assume that whatever rust compiler we have, it is a nightly // compiler. For "more" information, see: // https://github.com/rust-lang/rust/blob/fa0f7d0080d8e7e9eb20aa9cbf8013f96c81287f/src/libsyntax/feature_gate/check.rs#L891 - env::var("RUSTC_BOOTSTRAP").is_ok() || - self.command() - .arg("--version") - .output() - .map_err(|_| ()) - .and_then(|o| String::from_utf8(o.stdout).map_err(|_| ())) - .unwrap_or_default() - .contains("-nightly") + if env::var("RUSTC_BOOTSTRAP").is_ok() { + return true + } + + let Some(version) = self.version() else { return false }; + + // Check if major and minor are greater or equal than 1.68 or this is a nightly. + version.major > 1 || (version.major == 1 && version.minor >= 68) || version.is_nightly } } diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index ca07a029281a8..f5a985ab92b5d 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -35,10 +35,13 @@ fn print_error_message(message: &str) -> String { /// /// Returns the versioned cargo command on success. 
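The version gate in `supports_substrate_wasm_env` above reduces to "nightly, or stable 1.68 or newer". A minimal standalone sketch of that rule follows; the `meets_wasm_env_cutoff` helper is hypothetical and only illustrates the comparison, and the `RUSTC_BOOTSTRAP` short-circuit is left out:

    fn meets_wasm_env_cutoff(major: u32, minor: u32, is_nightly: bool) -> bool {
        // Mirrors the version check used by wasm-builder: any nightly qualifies,
        // otherwise the stable release must be at least 1.68.0.
        is_nightly || major > 1 || (major == 1 && minor >= 68)
    }

    fn main() {
        assert!(meets_wasm_env_cutoff(1, 68, false)); // stable 1.68.0 qualifies
        assert!(meets_wasm_env_cutoff(2, 0, false)); // any later stable qualifies
        assert!(meets_wasm_env_cutoff(1, 67, true)); // nightlies always qualify
        assert!(!meets_wasm_env_cutoff(1, 67, false)); // stable 1.67.0 is rejected
    }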
pub(crate) fn check() -> Result { - let cargo_command = crate::get_nightly_cargo(); + let cargo_command = crate::get_cargo_command(); - if !cargo_command.is_nightly() { - return Err(print_error_message("Rust nightly not installed, please install it!")) + if !cargo_command.supports_substrate_wasm_env() { + return Err(print_error_message( + "Cannot compile the WASM runtime: no compatible Rust compiler found!\n\ + Install at least Rust 1.68.0 or a recent nightly version.", + )) } check_wasm_toolchain_installed(cargo_command) diff --git a/utils/wasm-builder/src/version.rs b/utils/wasm-builder/src/version.rs new file mode 100644 index 0000000000000..77e62b394bd55 --- /dev/null +++ b/utils/wasm-builder/src/version.rs @@ -0,0 +1,198 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::cmp::Ordering; + +/// The version of rustc/cargo. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct Version { + pub major: u32, + pub minor: u32, + pub patch: u32, + pub is_nightly: bool, + pub year: u32, + pub month: u32, + pub day: u32, +} + +impl Version { + /// Returns if `self` is a stable version. + pub fn is_stable(&self) -> bool { + !self.is_nightly + } + + /// Return if `self` is a nightly version. + pub fn is_nightly(&self) -> bool { + self.is_nightly + } + + /// Extract from the given `version` string. + pub fn extract(version: &str) -> Option { + let mut is_nightly = false; + let version_parts = version + .trim() + .split(" ") + .nth(1)? + .split(".") + .filter_map(|v| { + if let Some(rest) = v.strip_suffix("-nightly") { + is_nightly = true; + rest.parse().ok() + } else { + v.parse().ok() + } + }) + .collect::>(); + + if version_parts.len() != 3 { + return None + } + + let date = version.split(" ").nth(3)?; + + let date_parts = date + .split("-") + .filter_map(|v| v.trim().strip_suffix(")").unwrap_or(v).parse().ok()) + .collect::>(); + + if date_parts.len() != 3 { + return None + } + + Some(Version { + major: version_parts[0], + minor: version_parts[1], + patch: version_parts[2], + is_nightly, + year: date_parts[0], + month: date_parts[1], + day: date_parts[2], + }) + } +} + +/// Ordering is done in the following way: +/// +/// 1. `stable` > `nightly` +/// 2. Then compare major, minor and patch. +/// 3. Last compare the date. 
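The three rules above map directly onto Rust's lexicographic tuple ordering. The short standalone sketch below is illustrative only, with made-up version data, and is not part of the patch; the actual `Ord` implementation follows next:

    fn main() {
        // Encode a toolchain as (is_stable, major, minor, patch, (year, month, day));
        // comparing these tuples reproduces the documented rules.
        let stable_1_68_0 = (true, 1u32, 68u32, 0u32, (2023u32, 3u32, 9u32));
        let stable_1_69_0 = (true, 1, 69, 0, (2023, 4, 20));
        let nightly_1_70_0 = (false, 1, 70, 0, (2023, 3, 28));

        // Rule 1: a stable toolchain ranks above any nightly, even a newer one.
        assert!(stable_1_68_0 > nightly_1_70_0);
        // Rule 2: among stable toolchains, the higher version wins.
        assert!(stable_1_69_0 > stable_1_68_0);
        // Rule 3: the date only breaks ties between otherwise equal versions.
        assert!((true, 1u32, 68u32, 0u32, (2023u32, 3u32, 10u32)) > stable_1_68_0);
    }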
+impl Ord for Version { + fn cmp(&self, other: &Self) -> Ordering { + if self == other { + return Ordering::Equal + } + + // Ensure that `stable > nightly` + if self.is_stable() && other.is_nightly() { + return Ordering::Greater + } else if self.is_nightly() && other.is_stable() { + return Ordering::Less + } + + let to_compare = [ + (self.major, other.major), + (self.minor, other.minor), + (self.patch, other.patch), + (self.year, other.year), + (self.month, other.month), + (self.day, other.day), + ]; + + to_compare + .iter() + .find_map(|(l, r)| if l != r { l.partial_cmp(&r) } else { None }) + // We already checked this right at the beginning, so we should never return here + // `Equal`. + .unwrap_or(Ordering::Equal) + } +} + +impl PartialOrd for Version { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn version_compare_and_extract_works() { + let version_1_66_0 = Version::extract("cargo 1.66.0 (d65d197ad 2022-11-15)").unwrap(); + let version_1_66_1 = Version::extract("cargo 1.66.1 (d65d197ad 2022-11-15)").unwrap(); + let version_1_66_0_nightly = + Version::extract("cargo 1.66.0-nightly (d65d197ad 2022-10-15)").unwrap(); + let version_1_66_0_nightly_older_date = + Version::extract("cargo 1.66.0-nightly (d65d197ad 2022-10-14)").unwrap(); + let version_1_65_0 = Version::extract("cargo 1.65.0 (d65d197ad 2022-10-15)").unwrap(); + let version_1_65_0_older_date = + Version::extract("cargo 1.65.0 (d65d197ad 2022-10-14)").unwrap(); + + assert!(version_1_66_1 > version_1_66_0); + assert!(version_1_66_1 > version_1_65_0); + assert!(version_1_66_1 > version_1_66_0_nightly); + assert!(version_1_66_1 > version_1_66_0_nightly_older_date); + assert!(version_1_66_1 > version_1_65_0_older_date); + + assert!(version_1_66_0 > version_1_65_0); + assert!(version_1_66_0 > version_1_66_0_nightly); + assert!(version_1_66_0 > version_1_66_0_nightly_older_date); + assert!(version_1_66_0 > version_1_65_0_older_date); + + assert!(version_1_65_0 > version_1_66_0_nightly); + assert!(version_1_65_0 > version_1_66_0_nightly_older_date); + assert!(version_1_65_0 > version_1_65_0_older_date); + + let mut versions = vec![ + version_1_66_0, + version_1_66_0_nightly, + version_1_66_0_nightly_older_date, + version_1_65_0_older_date, + version_1_65_0, + version_1_66_1, + ]; + versions.sort_by(|a, b| b.cmp(a)); + + let expected_versions_order = vec![ + version_1_66_1, + version_1_66_0, + version_1_65_0, + version_1_65_0_older_date, + version_1_66_0_nightly, + version_1_66_0_nightly_older_date, + ]; + assert_eq!(expected_versions_order, versions); + } + + #[test] + fn parse_with_newline() { + let version_1_66_0 = Version::extract("cargo 1.66.0 (d65d197ad 2022-11-15)\n").unwrap(); + assert_eq!( + Version { + major: 1, + minor: 66, + patch: 0, + is_nightly: false, + year: 2022, + month: 11, + day: 15 + }, + version_1_66_0 + ); + } +} diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index a3038c4e934c9..c45a40a6b9202 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -632,7 +632,7 @@ fn build_project( let mut build_cmd = cargo_cmd.command(); let rustflags = format!( - "-C link-arg=--export-table {} {}", + "-C target-cpu=mvp -C target-feature=-sign-ext -C link-arg=--export-table {} {}", default_rustflags, env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), ); diff --git a/zombienet/0000-block-building/transaction-gets-finalized.js 
b/zombienet/0000-block-building/transaction-gets-finalized.js index a38cf603fc57e..d16e85f2c4e60 100644 --- a/zombienet/0000-block-building/transaction-gets-finalized.js +++ b/zombienet/0000-block-building/transaction-gets-finalized.js @@ -14,7 +14,7 @@ async function run(nodeName, networkInfo, args) { const bob = '5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty'; // Create a extrinsic, transferring 10^20 units to Bob - const transfer = api.tx.balances.transfer(bob, 10n**20n); + const transfer = api.tx.balances.transferAllowDeath(bob, 10n**20n); let transaction_success_event = false; try {