From 87762fc9a7d15fd14e513b1b16085e369032ab90 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 23 Aug 2021 11:16:54 +0200 Subject: [PATCH 01/14] Remove spurious debug_assert! (#9603) --- Cargo.lock | 30 ++-- client/consensus/pow/Cargo.toml | 2 + client/consensus/pow/src/lib.rs | 237 ++++++++++++++++++++--------- client/consensus/pow/src/worker.rs | 209 ++----------------------- client/service/src/builder.rs | 1 - 5 files changed, 199 insertions(+), 280 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e7fce2b8d1a2..c52e99341967b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2682,7 +2682,7 @@ dependencies = [ "itoa", "pin-project-lite 0.2.6", "socket2 0.4.0", - "tokio 1.10.0", + "tokio 1.10.1", "tower-service", "tracing", "want", @@ -2700,7 +2700,7 @@ dependencies = [ "log 0.4.14", "rustls", "rustls-native-certs", - "tokio 1.10.0", + "tokio 1.10.1", "tokio-rustls 0.22.0", "webpki", ] @@ -2714,7 +2714,7 @@ dependencies = [ "bytes 1.0.1", "hyper 0.14.11", "native-tls", - "tokio 1.10.0", + "tokio 1.10.1", "tokio-native-tls", ] @@ -2941,7 +2941,7 @@ dependencies = [ "log 0.4.14", "serde", "serde_json", - "tokio 1.10.0", + "tokio 1.10.1", "url 1.7.2", "websocket", ] @@ -3041,7 +3041,7 @@ dependencies = [ "jsonrpc-core", "lazy_static", "log 0.4.14", - "tokio 1.10.0", + "tokio 1.10.1", "tokio-stream", "tokio-util 0.6.7", "unicase 2.6.0", @@ -5967,7 +5967,7 @@ dependencies = [ "libc", "log 0.4.14", "rand 0.7.3", - "tokio 1.10.0", + "tokio 1.10.1", "winapi 0.3.9", ] @@ -7705,6 +7705,8 @@ dependencies = [ "sp-inherents", "sp-runtime", "substrate-prometheus-endpoint", + "tokio 1.10.1", + "tokio-stream", ] [[package]] @@ -8086,7 +8088,7 @@ dependencies = [ "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 1.10.0", + "tokio 1.10.1", ] [[package]] @@ -9825,7 +9827,7 @@ dependencies = [ "hyper 0.14.11", "log 0.4.14", "prometheus", - "tokio 1.10.0", + "tokio 1.10.1", ] [[package]] @@ -10286,9 +10288,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cf844b23c6131f624accf65ce0e4e9956a8bb329400ea5bcc26ae3a5c20b0b" +checksum = "92036be488bb6594459f2e03b60e42df6f937fe6ca5c5ffdcb539c6b84dc40f5" dependencies = [ "autocfg 1.0.1", "bytes 1.0.1", @@ -10386,7 +10388,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" dependencies = [ "native-tls", - "tokio 1.10.0", + "tokio 1.10.1", ] [[package]] @@ -10427,7 +10429,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.10.0", + "tokio 1.10.1", "webpki", ] @@ -10439,7 +10441,7 @@ checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ "futures-core", "pin-project-lite 0.2.6", - "tokio 1.10.0", + "tokio 1.10.1", ] [[package]] @@ -10565,7 +10567,7 @@ dependencies = [ "futures-sink", "log 0.4.14", "pin-project-lite 0.2.6", - "tokio 1.10.0", + "tokio 1.10.1", ] [[package]] diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index c71e11aef275e..b3e941a5fbc4d 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -31,3 +31,5 @@ parking_lot = "0.11.1" derive_more = "0.99.2" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} async-trait = 
"0.1.50" +tokio = { version = "1.10.1", features = ["sync"] } +tokio-stream = "0.1.7" diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 1f5781434ef71..a03d13a37fe13 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -41,24 +41,24 @@ mod worker; -pub use crate::worker::{MiningBuild, MiningMetadata, MiningWorker}; +pub use crate::worker::{MiningBuild, MiningData, MiningMetadata}; -use crate::worker::UntilImportedOrTimeout; use codec::{Decode, Encode}; use futures::{Future, StreamExt}; use log::*; -use parking_lot::Mutex; use prometheus_endpoint::Registry; use sc_client_api::{self, backend::AuxStore, BlockOf, BlockchainEvents}; use sc_consensus::{ BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxBlockImport, - BoxJustificationImport, ForkChoiceStrategy, ImportResult, Verifier, + BoxJustificationImport, ForkChoiceStrategy, ImportResult, StateAction, StorageChanges, + Verifier, }; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache}; use sp_consensus::{ - CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, + BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, + SyncOracle, }; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_core::ExecutionContext; @@ -72,6 +72,7 @@ use std::{ borrow::Cow, cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, time::Duration, }; +use tokio_stream::wrappers::ReceiverStream; #[derive(derive_more::Display, Debug)] pub enum Error { @@ -276,7 +277,7 @@ where execution_context: ExecutionContext, ) -> Result<(), Error> { if *block.header().number() < self.check_inherents_after { - return Ok(()) + return Ok(()); } if let Err(e) = self.can_author_with.can_author_with(&block_id) { @@ -286,7 +287,7 @@ where e, ); - return Ok(()) + return Ok(()); } let inherent_data = inherent_data_providers @@ -388,7 +389,7 @@ where &inner_seal, difficulty, )? { - return Err(Error::::InvalidSeal.into()) + return Err(Error::::InvalidSeal.into()); } aux.difficulty = difficulty; @@ -406,7 +407,7 @@ where fetch_seal::(best_header.digest().logs.last(), best_hash)?; self.algorithm.break_tie(&best_inner_seal, &inner_seal) - }, + } }, )); } @@ -436,19 +437,20 @@ impl PowVerifier { let hash = header.hash(); let (seal, inner_seal) = match header.digest_mut().pop() { - Some(DigestItem::Seal(id, seal)) => + Some(DigestItem::Seal(id, seal)) => { if id == POW_ENGINE_ID { (DigestItem::Seal(id, seal.clone()), seal) } else { - return Err(Error::WrongEngine(id)) - }, + return Err(Error::WrongEngine(id)); + } + } _ => return Err(Error::HeaderUnsealed(hash)), }; let pre_hash = header.hash(); if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { - return Err(Error::FailedPreliminaryVerify) + return Err(Error::FailedPreliminaryVerify); } Ok((header, seal)) @@ -502,6 +504,7 @@ where Ok(BasicQueue::new(verifier, block_import, justification_import, spawner, registry)) } +type SealStream = ReceiverStream; /// Start the mining worker for PoW. This function provides the necessary helper functions that can /// be used to implement a miner. However, it does not do the CPU-intensive mining itself. /// @@ -511,56 +514,72 @@ where /// /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted /// for blocks being built. 
This can encode authorship information, or just be a graffiti. -pub fn start_mining_worker( - block_import: BoxBlockImport>, +pub fn start_mining_worker( + mut block_import: BoxBlockImport>, client: Arc, select_chain: S, - algorithm: Algorithm, + algorithm: A, mut env: E, mut sync_oracle: SO, - justification_sync_link: L, + mut justification_sync_link: L, pre_runtime: Option>, create_inherent_data_providers: CIDP, - timeout: Duration, build_time: Duration, can_author_with: CAW, ) -> ( - Arc>::Proof>>>, + tokio::sync::watch::Receiver>>, impl Future, ) where - Block: BlockT, - C: ProvideRuntimeApi + BlockchainEvents + 'static, - S: SelectChain + 'static, - Algorithm: PowAlgorithm + Clone, - Algorithm::Difficulty: Send + 'static, - E: Environment + Send + Sync + 'static, + B: BlockT, + C: ProvideRuntimeApi + BlockchainEvents + 'static, + S: SelectChain + 'static, + A: PowAlgorithm + Clone, + A::Difficulty: Send + 'static, + E: Environment + Send + Sync + 'static, E::Error: std::fmt::Debug, - E::Proposer: Proposer>, + E::Proposer: Proposer>, SO: SyncOracle + Clone + Send + Sync + 'static, - L: sc_consensus::JustificationSyncLink, - CIDP: CreateInherentDataProviders, - CAW: CanAuthorWith + Clone + Send + 'static, + L: sc_consensus::JustificationSyncLink, + CIDP: CreateInherentDataProviders, + CAW: CanAuthorWith + Clone + Send + 'static, { - let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); - let worker = Arc::new(Mutex::new(MiningWorker { - build: None, - algorithm: algorithm.clone(), - block_import, - justification_sync_link, - })); - let worker_ret = worker.clone(); + use futures::future::Either; + + // Create a spmc channel here + let (producer, consumer) = tokio::sync::watch::channel(None); + + // Create channel for receiving a seal from the node + let mut seal_channel: Option = None; + let mut import_stream = client.import_notification_stream(); + let mut build = None; let task = async move { loop { - if timer.next().await.is_none() { - break - } + if let Some(ref mut channel) = seal_channel { + let result = futures::future::select(channel.next(), import_stream.next()).await; + + match (result, build.take()) { + // we only care about these two cases. + (Either::Left((Some(seal), _)), Some(mining_build)) => { + do_import_block( + seal, + mining_build, + &algorithm, + &mut block_import, + &mut justification_sync_link, + ) + .await + } + _ => {} + } + // we're done, + seal_channel = None; + }; if sync_oracle.is_major_syncing() { debug!(target: "pow", "Skipping proposal due to sync."); - worker.lock().on_major_syncing(); - continue + continue; } let best_header = match select_chain.best_chain().await { @@ -572,8 +591,8 @@ where Select best chain error: {:?}", err ); - continue - }, + continue; + } }; let best_hash = best_header.hash(); @@ -584,11 +603,7 @@ where Probably a node update is required!", err, ); - continue - } - - if worker.lock().best_hash() == Some(best_hash) { - continue + continue; } // The worker is locked for the duration of the whole proposing period. 
Within this @@ -603,8 +618,8 @@ where Fetch difficulty failed: {:?}", err, ); - continue - }, + continue; + } }; let inherent_data_providers = match create_inherent_data_providers @@ -619,8 +634,8 @@ where Creating inherent data providers failed: {:?}", err, ); - continue - }, + continue; + } }; let inherent_data = match inherent_data_providers.create_inherent_data() { @@ -632,11 +647,11 @@ where Creating inherent data failed: {:?}", e, ); - continue - }, + continue; + } }; - let mut inherent_digest = Digest::::default(); + let mut inherent_digest = Digest::::default(); if let Some(pre_runtime) = &pre_runtime { inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); } @@ -652,8 +667,8 @@ where Creating proposer failed: {:?}", err, ); - continue - }, + continue; + } }; let proposal = match proposer @@ -668,11 +683,15 @@ where Creating proposal failed: {:?}", err, ); - continue - }, + continue; + } }; - let build = MiningBuild:: { + let (sender, consumer) = tokio::sync::mpsc::channel(10); + + seal_channel = Some(ReceiverStream::new(consumer)); + + let mining_build = MiningBuild:: { metadata: MiningMetadata { best_hash, pre_hash: proposal.block.header().hash(), @@ -682,11 +701,16 @@ where proposal, }; - worker.lock().on_build(build); + let _res = producer.send(Some(MiningData { + metadata: mining_build.metadata.clone(), + sender: sender.clone(), + })); + + build = Some(mining_build); } }; - (worker_ret, task) + (consumer, task) } /// Find PoW pre-runtime. @@ -695,11 +719,12 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err for log in header.digest().logs() { trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); match (log, pre_digest.is_some()) { - (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => - return Err(Error::MultiplePreRuntimeDigests), + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { + return Err(Error::MultiplePreRuntimeDigests) + } (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { pre_digest = Some(v.clone()); - }, + } (_, _) => trace!(target: "pow", "Ignoring digest not meant for us"), } } @@ -713,12 +738,86 @@ fn fetch_seal( hash: B::Hash, ) -> Result, Error> { match digest { - Some(DigestItem::Seal(id, seal)) => + Some(DigestItem::Seal(id, seal)) => { if id == &POW_ENGINE_ID { Ok(seal.clone()) } else { - return Err(Error::::WrongEngine(*id).into()) - }, + return Err(Error::::WrongEngine(*id).into()); + } + } _ => return Err(Error::::HeaderUnsealed(hash).into()), } } + +pub async fn do_import_block( + seal: Seal, + build: MiningBuild, + algorithm: &A, + block_import: &mut BoxBlockImport>, + justification_sync_link: &mut L, +) where + B: BlockT, + C: ProvideRuntimeApi + BlockchainEvents + 'static, + A: PowAlgorithm + Clone, + A::Difficulty: Send + 'static, + L: sc_consensus::JustificationSyncLink, +{ + match algorithm.verify( + &BlockId::Hash(build.metadata.best_hash), + &build.metadata.pre_hash, + build.metadata.pre_runtime.as_ref().map(|v| &v[..]), + &seal, + build.metadata.difficulty, + ) { + Ok(true) => (), + Ok(false) => { + warn!( + target: "pow", + "Unable to import mined block: seal is invalid", + ); + } + Err(err) => { + warn!( + target: "pow", + "Unable to import mined block: {:?}", + err, + ); + } + } + + let seal = DigestItem::Seal(POW_ENGINE_ID, seal); + let (header, body) = build.proposal.block.deconstruct(); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(seal); + import_block.body = Some(body); + import_block.state_action = + 
StateAction::ApplyChanges(StorageChanges::Changes(build.proposal.storage_changes)); + + let intermediate = + PowIntermediate:: { difficulty: Some(build.metadata.difficulty) }; + + import_block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); + + let header = import_block.post_header(); + match block_import.import_block(import_block, HashMap::default()).await { + Ok(res) => { + res.handle_justification(&header.hash(), *header.number(), justification_sync_link); + + info!( + target: "pow", + "✅ Successfully mined block on top of: {}", + build.metadata.best_hash + ); + } + Err(err) => { + warn!( + target: "pow", + "Unable to import mined block: {:?}", + err, + ); + } + } +} diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index c0ca16ccad3aa..f3782005ef62a 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -16,23 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use futures::{ - prelude::*, - task::{Context, Poll}, -}; -use futures_timer::Delay; -use log::*; -use sc_client_api::ImportNotifications; -use sc_consensus::{BlockImportParams, BoxBlockImport, StateAction, StorageChanges}; -use sp_consensus::{BlockOrigin, Proposal}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, - DigestItem, -}; -use std::{borrow::Cow, collections::HashMap, pin::Pin, time::Duration}; +use sp_consensus::Proposal; +use sp_runtime::traits::Block as BlockT; -use crate::{PowAlgorithm, PowIntermediate, Seal, INTERMEDIATE_KEY, POW_ENGINE_ID}; +use crate::{PowAlgorithm, Seal}; /// Mining metadata. This is the information needed to start an actual mining loop. #[derive(Clone, Eq, PartialEq)] @@ -47,187 +34,17 @@ pub struct MiningMetadata { pub difficulty: D, } +#[derive(Clone)] +pub struct MiningData { + pub metadata: MiningMetadata, + /// sink to send the seal back to the authorship task + pub sender: tokio::sync::mpsc::Sender, +} + /// A build of mining, containing the metadata and the block proposal. -pub struct MiningBuild< - Block: BlockT, - Algorithm: PowAlgorithm, - C: sp_api::ProvideRuntimeApi, - Proof, -> { +pub struct MiningBuild, C: sp_api::ProvideRuntimeApi, P> { /// Mining metadata. - pub metadata: MiningMetadata, + pub metadata: MiningMetadata, /// Mining proposal. - pub proposal: Proposal, Proof>, -} - -/// Mining worker that exposes structs to query the current mining build and submit mined blocks. -pub struct MiningWorker< - Block: BlockT, - Algorithm: PowAlgorithm, - C: sp_api::ProvideRuntimeApi, - L: sc_consensus::JustificationSyncLink, - Proof, -> { - pub(crate) build: Option>, - pub(crate) algorithm: Algorithm, - pub(crate) block_import: BoxBlockImport>, - pub(crate) justification_sync_link: L, -} - -impl MiningWorker -where - Block: BlockT, - C: sp_api::ProvideRuntimeApi, - Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static + Send, - L: sc_consensus::JustificationSyncLink, - sp_api::TransactionFor: Send + 'static, -{ - /// Get the current best hash. `None` if the worker has just started or the client is doing - /// major syncing. - pub fn best_hash(&self) -> Option { - self.build.as_ref().map(|b| b.metadata.best_hash) - } - - pub(crate) fn on_major_syncing(&mut self) { - self.build = None; - } - - pub(crate) fn on_build(&mut self, build: MiningBuild) { - self.build = Some(build); - } - - /// Get a copy of the current mining metadata, if available. 
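The patch replaces the shared, mutex-guarded `MiningWorker` with a pair of channels: the authorship task publishes each fresh `MiningData` on a `tokio::sync::watch` channel, and the miner hands a found seal back through the per-build `mpsc` sender, after which `do_import_block` verifies and imports the block. Below is a minimal, self-contained sketch of how a miner task could consume that API; the `MiningData`/`MiningMetadata` structs and `solve` here are simplified stand-ins (fixed-size arrays, a `u128` difficulty, a dummy seal), not the generic Substrate types from `worker.rs`, and only the channel flow mirrors the patch.

```rust
use tokio::sync::{mpsc, watch};

type Seal = Vec<u8>;

#[derive(Clone)]
struct MiningMetadata {
    pre_hash: [u8; 32],
    difficulty: u128,
}

#[derive(Clone)]
struct MiningData {
    metadata: MiningMetadata,
    // Sink used to hand a found seal back to the authorship task.
    sender: mpsc::Sender<Seal>,
}

// Placeholder for the actual CPU-intensive sealing; always "finds" a dummy seal.
fn solve(_pre_hash: &[u8; 32], _difficulty: u128) -> Option<Seal> {
    Some(vec![0u8; 8])
}

// Miner loop: wait for the authorship task to publish a fresh build, try to seal it, and
// hand any found seal back through the per-build `mpsc` sender.
async fn mine(mut new_builds: watch::Receiver<Option<MiningData>>) {
    loop {
        if new_builds.changed().await.is_err() {
            break; // The authorship task (and thus the sender) is gone.
        }
        let data = match new_builds.borrow().clone() {
            Some(data) => data,
            None => continue,
        };
        if let Some(seal) = solve(&data.metadata.pre_hash, data.metadata.difficulty) {
            let _ = data.sender.send(seal).await;
        }
    }
}

#[tokio::main]
async fn main() {
    let (builds_tx, builds_rx) = watch::channel(None);
    let (seal_tx, mut seal_rx) = mpsc::channel(8);
    tokio::spawn(mine(builds_rx));

    // Publish one dummy build, then wait for the seal the miner sends back.
    let _ = builds_tx.send(Some(MiningData {
        metadata: MiningMetadata { pre_hash: [0u8; 32], difficulty: 1 },
        sender: seal_tx,
    }));
    if let Some(seal) = seal_rx.recv().await {
        println!("received a {}-byte seal", seal.len());
    }
}
```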
- pub fn metadata(&self) -> Option> { - self.build.as_ref().map(|b| b.metadata.clone()) - } - - /// Submit a mined seal. The seal will be validated again. Returns true if the submission is - /// successful. - pub async fn submit(&mut self, seal: Seal) -> bool { - if let Some(build) = self.build.take() { - match self.algorithm.verify( - &BlockId::Hash(build.metadata.best_hash), - &build.metadata.pre_hash, - build.metadata.pre_runtime.as_ref().map(|v| &v[..]), - &seal, - build.metadata.difficulty, - ) { - Ok(true) => (), - Ok(false) => { - warn!( - target: "pow", - "Unable to import mined block: seal is invalid", - ); - return false - }, - Err(err) => { - warn!( - target: "pow", - "Unable to import mined block: {:?}", - err, - ); - return false - }, - } - - let seal = DigestItem::Seal(POW_ENGINE_ID, seal); - let (header, body) = build.proposal.block.deconstruct(); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(seal); - import_block.body = Some(body); - import_block.state_action = - StateAction::ApplyChanges(StorageChanges::Changes(build.proposal.storage_changes)); - - let intermediate = PowIntermediate:: { - difficulty: Some(build.metadata.difficulty), - }; - - import_block - .intermediates - .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); - - let header = import_block.post_header(); - match self.block_import.import_block(import_block, HashMap::default()).await { - Ok(res) => { - res.handle_justification( - &header.hash(), - *header.number(), - &mut self.justification_sync_link, - ); - - info!( - target: "pow", - "✅ Successfully mined block on top of: {}", - build.metadata.best_hash - ); - true - }, - Err(err) => { - warn!( - target: "pow", - "Unable to import mined block: {:?}", - err, - ); - false - }, - } - } else { - warn!( - target: "pow", - "Unable to import mined block: build does not exist", - ); - false - } - } -} - -/// A stream that waits for a block import or timeout. -pub struct UntilImportedOrTimeout { - import_notifications: ImportNotifications, - timeout: Duration, - inner_delay: Option, -} - -impl UntilImportedOrTimeout { - /// Create a new stream using the given import notification and timeout duration. - pub fn new(import_notifications: ImportNotifications, timeout: Duration) -> Self { - Self { import_notifications, timeout, inner_delay: None } - } -} - -impl Stream for UntilImportedOrTimeout { - type Item = (); - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let mut fire = false; - - loop { - match Stream::poll_next(Pin::new(&mut self.import_notifications), cx) { - Poll::Pending => break, - Poll::Ready(Some(_)) => { - fire = true; - }, - Poll::Ready(None) => return Poll::Ready(None), - } - } - - let timeout = self.timeout.clone(); - let inner_delay = self.inner_delay.get_or_insert_with(|| Delay::new(timeout)); - - match Future::poll(Pin::new(inner_delay), cx) { - Poll::Pending => (), - Poll::Ready(()) => { - fire = true; - }, - } - - if fire { - self.inner_delay = None; - Poll::Ready(Some(())) - } else { - Poll::Pending - } - } + pub proposal: Proposal, P>, } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index fb83fdb00ca43..dea9953633199 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1026,7 +1026,6 @@ where // future using `spawn_blocking`. 
spawn_handle.spawn_blocking("network-worker", async move { if network_start_rx.await.is_err() { - debug_assert!(false); log::warn!( "The NetworkStart returned as part of `build_network` has been silently dropped" ); From ed08cfeaa094efdfeb253039ce5f0b71b6ad4d83 Mon Sep 17 00:00:00 2001 From: Zeke Mostov <32168567+emostov@users.noreply.github.com> Date: Mon, 23 Aug 2021 17:15:27 -0700 Subject: [PATCH 02/14] pallet-vesting: Support multiple, merge-able vesting schedules (#9202) * Support multiple, mergable vesting schedules * Update node runtime * Remove some TODO design questions and put them as commennts * Update frame/vesting/src/benchmarking.rs * Syntax and comment clean up * Create filter enum for removing schedules * Dry vesting calls with do_vest * Improve old benchmarks to account for max schedules * Update WeightInfo trait and make dummy fns * Add merge_schedule weights * Explicitly test multiple vesting scheudles * Make new vesting tests more more clear * Apply suggestions from code review * Update remove_vesting_schedule to error with no index * Try reduce spacing diff * Apply suggestions from code review * Use get on vesting for bounds check; check origin first * No filter tuple; various simplifications * unwrap or default when getting user schedules * spaces be gone * ReadMe fixes * Update frame/vesting/src/lib.rs Co-authored-by: Peter Goodspeed-Niklaus * address some comments for docs * merge sched docs * Apply suggestions from code review Co-authored-by: Shawn Tabrizi * log error when trying to push to vesting vec * use let Some, not is_some * remove_vesting_schedule u32, not optin * new not try_new, create validate builder; VestingInfo * Merge prep: break out tests and mock * Add files forgot to include in merge * revert some accidental changes to merged files * Revert remaining accidental file changes * More revert of accidental file change * Try to reduce diff on tests * namespace Vesting; check key when key should not exist; * ending_block throws error on per_block of 0 * Try improve merge vesting info comment * Update frame/vesting/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * add validate + correct; handle duration > blocknumber * Move vesting_info module to its own file * Seperate Vesting/locks updates from writing * Add can_add_vesting schedule * Adjust min vested transfer to be greater than all ED * Initial integrity test impl * merge_finished_and_yet_to_be_started_schedules * Make sure to assert storage items are cleaned up * Migration initial impl (not tested) * Correct try-runtime hooks * Apply suggestions from code review Co-authored-by: Shawn Tabrizi * header * WIP: improve benchmarks * Benchmarking working * benchmarking: step over max schedules * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_vesting --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/vesting/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Simplify APIs by accepting vec; convert to bounded on write * Test: build_genesis_has_storage_version_v1 * Test more error cases * Hack to get polkadot weights to work; should revert later * Improve benchmarking; works on polkadot * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_vesting --extrinsic=* --execution=wasm --wasm-execution=compiled 
--heap-pages=4096 --output=./frame/vesting/src/weights.rs --template=./.maintain/frame-weight-template.hbs * WIP override storage * Set storage not working example * Remove unused tests * VestingInfo: make public, derive MaxEndcodedLen * Rename ending_block to ending_block_as_balance * Superificial improvements * Check for end block infinite, not just duration * More superficial update * Update tests * Test vest with multi schedule * Don't use half max balance in benchmarks * Use debug_assert when locked is unexpected 0 * Implement exec_action * Simplify per_block calc in vesting_info * VestingInfo.validate in add_vesting_schedule & can_add_vesting_schedule * Simplify post migrate check * Remove merge event * Minor benchmarking updates * Remove VestingInfo.correct * per_block accesor max with 1 * Improve comment * Remoe debug * Fix add schedule comment * Apply suggestions from code review Co-authored-by: Peter Goodspeed-Niklaus * no ref for should_remove param * Remove unused vestingaction derive * Asserts to show balance unlock in merge benchmark * Remove unused imports * trivial * Fix benchmark asserts to handle non-multiple of 20 locked * Add generate_storage_info * migration :facepalm * Remove per_block 0 logic * Update frame/vesting/src/lib.rs * Do not check for ending later than greatest block * Apply suggestions from code review * Benchmarks: simplify vesting schedule creation * Add log back for migration * Add note in ext docs explaining that all schedules will vest * Make integrity test work * Improve integrity test * Remove unnescary type param from VestingInfo::new * Remove unnescary resut for ending_block_as_balance * Remove T param from ending_block_as_balance * Reduce visibility of raw_per_block * Remove unused type param for validate * update old comment * Make log a dep; log warn in migrate * VestingInfo.validate returns Err(()), no T type param * Try improve report_schedule_updates * is_valid, not validate * revert node runtime reorg; * change schedule validity check to just warning * Simplify merge_vesting_info return type * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Add warning for migration * Fix indentation * Delete duplicate warnings * Reduce diff in node runtime * Fix benchmark build * Upgrade cargo.toml to use 4.0.0-dev * Cleanup * MaxVestingSchedulesGetter initial impl * MinVestedTransfer getter inintial impl * Test MaxVestingSchedules & MinVestedTransfer getters; use getters in benchmarks * Run cargo fmt * Revert MinVestedTransfer & MaxVestingSchedules getters; Add integrity test * Make MAX_VESTING_SCHEDULES a const * fmt * WIP: benchmark improvements * Finish benchmark update * Add test for transfer to account with less than ed * Rm min_new_account_transfer; move sp-io to dev-dep * Reduce cargo.toml diff * Explain MAX_VESTING_SCHEDULES choice * Fix after merge * Try fix CI complaints * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_vesting --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/vesting/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_vesting --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 
--output=./frame/vesting/src/weights.rs --template=./.maintain/frame-weight-template.hbs * fmt * trigger * fmt Co-authored-by: Parity Bot Co-authored-by: Peter Goodspeed-Niklaus Co-authored-by: Shawn Tabrizi Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: kianenigma --- Cargo.lock | 1 + bin/node/runtime/src/lib.rs | 3 + .../src/traits/tokens/currency/lockable.rs | 14 +- frame/vesting/Cargo.toml | 3 +- frame/vesting/src/benchmarking.rs | 269 ++++- frame/vesting/src/lib.rs | 581 ++++++++-- frame/vesting/src/migrations.rs | 95 ++ frame/vesting/src/mock.rs | 29 +- frame/vesting/src/tests.rs | 1026 +++++++++++++++-- frame/vesting/src/vesting_info.rs | 114 ++ frame/vesting/src/weights.rs | 200 +++- 11 files changed, 1981 insertions(+), 354 deletions(-) create mode 100644 frame/vesting/src/migrations.rs create mode 100644 frame/vesting/src/vesting_info.rs diff --git a/Cargo.lock b/Cargo.lock index c52e99341967b..1ffc6ebd5d0ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5898,6 +5898,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log 0.4.14", "pallet-balances", "parity-scale-codec", "sp-core", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e690ce8a3b3a4..909ff931756ad 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1062,6 +1062,9 @@ impl pallet_vesting::Config for Runtime { type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = pallet_vesting::weights::SubstrateWeight; + // `VestingInfo` encode length is 36bytes. 28 schedules gets encoded as 1009 bytes, which is the + // highest number of schedules that encodes less than 2^10. + const MAX_VESTING_SCHEDULES: u32 = 28; } impl pallet_mmr::Config for Runtime { diff --git a/frame/support/src/traits/tokens/currency/lockable.rs b/frame/support/src/traits/tokens/currency/lockable.rs index 94bce216dcbcb..26463864a6471 100644 --- a/frame/support/src/traits/tokens/currency/lockable.rs +++ b/frame/support/src/traits/tokens/currency/lockable.rs @@ -80,8 +80,8 @@ pub trait VestingSchedule { /// Adds a vesting schedule to a given account. /// - /// If there already exists a vesting schedule for the given account, an `Err` is returned - /// and nothing is updated. + /// If the account has `MaxVestingSchedules`, an Error is returned and nothing + /// is updated. /// /// Is a no-op if the amount to be vested is zero. /// @@ -93,8 +93,16 @@ pub trait VestingSchedule { starting_block: Self::Moment, ) -> DispatchResult; + /// Checks if `add_vesting_schedule` would work against `who`. + fn can_add_vesting_schedule( + who: &AccountId, + locked: >::Balance, + per_block: >::Balance, + starting_block: Self::Moment, + ) -> DispatchResult; + /// Remove a vesting schedule for a given account. /// /// NOTE: This doesn't alter the free balance of the account. 
- fn remove_vesting_schedule(who: &AccountId); + fn remove_vesting_schedule(who: &AccountId, schedule_index: u32) -> DispatchResult; } diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 9d818d7a33de4..96af259959c3e 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -21,9 +21,10 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../pr frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.0", default-features = false } [dev-dependencies] -sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index fba4369dba9d3..5cdc14c8fdaca 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -19,12 +19,12 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; - use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::assert_ok; use frame_system::{Pallet as System, RawOrigin}; -use sp_runtime::traits::Bounded; +use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; +use super::*; use crate::Pallet as Vesting; const SEED: u32 = 0; @@ -35,42 +35,63 @@ type BalanceOf = fn add_locks(who: &T::AccountId, n: u8) { for id in 0..n { let lock_id = [id; 8]; - let locked = 100u32; + let locked = 256u32; let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(lock_id, who, locked.into(), reasons); } } -fn add_vesting_schedule(who: &T::AccountId) -> Result<(), &'static str> { - let locked = 100u32; - let per_block = 10u32; +fn add_vesting_schedules( + target: ::Source, + n: u32, +) -> Result, &'static str> { + let min_transfer = T::MinVestedTransfer::get(); + let locked = min_transfer.checked_mul(&20u32.into()).unwrap(); + // Schedule has a duration of 20. + let per_block = min_transfer; let starting_block = 1u32; - System::::set_block_number(0u32.into()); + let source: T::AccountId = account("source", 0, SEED); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + + System::::set_block_number(T::BlockNumber::zero()); + + let mut total_locked: BalanceOf = Zero::zero(); + for _ in 0..n { + total_locked += locked; + + let schedule = VestingInfo::new(locked, per_block, starting_block.into()); + assert_ok!(Vesting::::do_vested_transfer( + source_lookup.clone(), + target.clone(), + schedule + )); + + // Top up to guarantee we can always transfer another schedule. + T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + } - // Add schedule to avoid `NotVesting` error. - Vesting::::add_vesting_schedule( - &who, - locked.into(), - per_block.into(), - starting_block.into(), - )?; - Ok(()) + Ok(total_locked.into()) } benchmarks! { vest_locked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. 
T::MAX_VESTING_SCHEDULES; + + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); - let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); - add_vesting_schedule::(&caller)?; + let expected_balance = add_vesting_schedules::(caller_lookup, s)?; + // At block zero, everything is vested. - System::::set_block_number(T::BlockNumber::zero()); + assert_eq!(System::::block_number(), T::BlockNumber::zero()); assert_eq!( Vesting::::vesting_balance(&caller), - Some(100u32.into()), + Some(expected_balance.into()), "Vesting schedule not added", ); }: vest(RawOrigin::Signed(caller.clone())) @@ -78,20 +99,24 @@ benchmarks! { // Nothing happened since everything is still vested. assert_eq!( Vesting::::vesting_balance(&caller), - Some(100u32.into()), + Some(expected_balance.into()), "Vesting schedule was removed", ); } vest_unlocked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. T::MAX_VESTING_SCHEDULES; + + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); - let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); - add_vesting_schedule::(&caller)?; - // At block 20, everything is unvested. - System::::set_block_number(20u32.into()); + add_vesting_schedules::(caller_lookup, s)?; + + // At block 21, everything is unlocked. + System::::set_block_number(21u32.into()); assert_eq!( Vesting::::vesting_balance(&caller), Some(BalanceOf::::zero()), @@ -108,18 +133,20 @@ benchmarks! { } vest_other_locked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); - T::Currency::make_free_balance_be(&other, BalanceOf::::max_value()); + add_locks::(&other, l as u8); - add_vesting_schedule::(&other)?; + let expected_balance = add_vesting_schedules::(other_lookup.clone(), s)?; + // At block zero, everything is vested. - System::::set_block_number(T::BlockNumber::zero()); + assert_eq!(System::::block_number(), T::BlockNumber::zero()); assert_eq!( Vesting::::vesting_balance(&other), - Some(100u32.into()), + Some(expected_balance), "Vesting schedule not added", ); @@ -129,21 +156,23 @@ benchmarks! { // Nothing happened since everything is still vested. assert_eq!( Vesting::::vesting_balance(&other), - Some(100u32.into()), + Some(expected_balance.into()), "Vesting schedule was removed", ); } vest_other_unlocked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); - T::Currency::make_free_balance_be(&other, BalanceOf::::max_value()); + add_locks::(&other, l as u8); - add_vesting_schedule::(&other)?; - // At block 20, everything is unvested. - System::::set_block_number(20u32.into()); + add_vesting_schedules::(other_lookup.clone(), s)?; + // At block 21 everything is unlocked. 
+ System::::set_block_number(21u32.into()); + assert_eq!( Vesting::::vesting_balance(&other), Some(BalanceOf::::zero()), @@ -153,7 +182,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) verify { - // Vesting schedule is removed! + // Vesting schedule is removed. assert_eq!( Vesting::::vesting_balance(&other), None, @@ -162,65 +191,187 @@ benchmarks! { } vested_transfer { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 0 .. T::MAX_VESTING_SCHEDULES - 1; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let target: T::AccountId = account("target", 0, SEED); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); + // Add one vesting schedules. + let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; let transfer_amount = T::MinVestedTransfer::get(); + let per_block = transfer_amount.checked_div(&20u32.into()).unwrap(); + expected_balance += transfer_amount; - let vesting_schedule = VestingInfo { - locked: transfer_amount, - per_block: 10u32.into(), - starting_block: 1u32.into(), - }; + let vesting_schedule = VestingInfo::new( + transfer_amount, + per_block, + 1u32.into(), + ); }: _(RawOrigin::Signed(caller), target_lookup, vesting_schedule) verify { assert_eq!( - T::MinVestedTransfer::get(), + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); assert_eq!( Vesting::::vesting_balance(&target), - Some(T::MinVestedTransfer::get()), - "Lock not created", + Some(expected_balance), + "Lock not correctly updated", ); } force_vested_transfer { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 0 .. T::MAX_VESTING_SCHEDULES - 1; let source: T::AccountId = account("source", 0, SEED); let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + let target: T::AccountId = account("target", 0, SEED); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); + // Add one less than max vesting schedules + let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; let transfer_amount = T::MinVestedTransfer::get(); + let per_block = transfer_amount.checked_div(&20u32.into()).unwrap(); + expected_balance += transfer_amount; - let vesting_schedule = VestingInfo { - locked: transfer_amount, - per_block: 10u32.into(), - starting_block: 1u32.into(), - }; + let vesting_schedule = VestingInfo::new( + transfer_amount, + per_block, + 1u32.into(), + ); }: _(RawOrigin::Root, source_lookup, target_lookup, vesting_schedule) verify { assert_eq!( - T::MinVestedTransfer::get(), + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); assert_eq!( Vesting::::vesting_balance(&target), - Some(T::MinVestedTransfer::get()), - "Lock not created", + Some(expected_balance.into()), + "Lock not correctly updated", + ); + } + + not_unlocking_merge_schedules { + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 2 .. T::MAX_VESTING_SCHEDULES; + + let caller: T::AccountId = account("caller", 0, SEED); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + // Give target existing locks. + add_locks::(&caller, l as u8); + // Add max vesting schedules. 
+ let expected_balance = add_vesting_schedules::(caller_lookup.clone(), s)?; + + // Schedules are not vesting at block 0. + assert_eq!(System::::block_number(), T::BlockNumber::zero()); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should equal sum locked of all schedules", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + s as usize, + "There should be exactly max vesting schedules" + ); + }: merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1) + verify { + let expected_schedule = VestingInfo::new( + T::MinVestedTransfer::get() * 20u32.into() * 2u32.into(), + T::MinVestedTransfer::get() * 2u32.into(), + 1u32.into(), + ); + let expected_index = (s - 2) as usize; + assert_eq!( + Vesting::::vesting(&caller).unwrap()[expected_index], + expected_schedule + ); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should equal total locked of all schedules", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + (s - 1) as usize, + "Schedule count should reduce by 1" + ); + } + + unlocking_merge_schedules { + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 2 .. T::MAX_VESTING_SCHEDULES; + + // Destination used just for currency transfers in asserts. + let test_dest: T::AccountId = account("test_dest", 0, SEED); + + let caller: T::AccountId = account("caller", 0, SEED); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + // Give target other locks. + add_locks::(&caller, l as u8); + // Add max vesting schedules. + let total_transferred = add_vesting_schedules::(caller_lookup.clone(), s)?; + + // Go to about half way through all the schedules duration. (They all start at 1, and have a duration of 20 or 21). + System::::set_block_number(11u32.into()); + // We expect half the original locked balance (+ any remainder that vests on the last block). + let expected_balance = total_transferred / 2u32.into(); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should reflect that we are half way through all schedules duration", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + s as usize, + "There should be exactly max vesting schedules" + ); + // The balance is not actually transferable because it has not been unlocked. + assert!(T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath).is_err()); + }: merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1) + verify { + let expected_schedule = VestingInfo::new( + T::MinVestedTransfer::get() * 2u32.into() * 10u32.into(), + T::MinVestedTransfer::get() * 2u32.into(), + 11u32.into(), + ); + let expected_index = (s - 2) as usize; + assert_eq!( + Vesting::::vesting(&caller).unwrap()[expected_index], + expected_schedule, + "New schedule is properly created and placed" + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap()[expected_index], + expected_schedule + ); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should equal half total locked of all schedules", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + (s - 1) as usize, + "Schedule count should reduce by 1" + ); + // Since merge unlocks all schedules we can now transfer the balance. 
+ assert_ok!( + T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath) ); } } diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 8a2651a84c647..7e4a11fbd5c36 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -45,14 +45,16 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +mod migrations; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +mod vesting_info; pub mod weights; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, pallet_prelude::*, @@ -64,10 +66,14 @@ use frame_support::{ use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; pub use pallet::*; use sp_runtime::{ - traits::{AtLeast32BitUnsigned, Convert, MaybeSerializeDeserialize, StaticLookup, Zero}, + traits::{ + AtLeast32BitUnsigned, Bounded, Convert, MaybeSerializeDeserialize, One, Saturating, + StaticLookup, Zero, + }, RuntimeDebug, }; -use sp_std::{fmt::Debug, prelude::*}; +use sp_std::{convert::TryInto, fmt::Debug, prelude::*}; +pub use vesting_info::*; pub use weights::WeightInfo; type BalanceOf = @@ -77,37 +83,62 @@ type MaxLocksOf = const VESTING_ID: LockIdentifier = *b"vesting "; -/// Struct to encode the vesting schedule of an individual account. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct VestingInfo { - /// Locked amount at genesis. - pub locked: Balance, - /// Amount that gets unlocked every block after `starting_block`. - pub per_block: Balance, - /// Starting block for unlocking(vesting). - pub starting_block: BlockNumber, +// A value placed in storage that represents the current version of the Vesting storage. +// This value is used by `on_runtime_upgrade` to determine whether we run storage migration logic. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen)] +enum Releases { + V0, + V1, } -impl - VestingInfo -{ - /// Amount locked at block `n`. - pub fn locked_at>( - &self, - n: BlockNumber, - ) -> Balance { - // Number of blocks that count toward vesting - // Saturating to 0 when n < starting_block - let vested_block_count = n.saturating_sub(self.starting_block); - let vested_block_count = BlockNumberToBalance::convert(vested_block_count); - // Return amount that is still locked in vesting - let maybe_balance = vested_block_count.checked_mul(&self.per_block); - if let Some(balance) = maybe_balance { - self.locked.saturating_sub(balance) - } else { - Zero::zero() +impl Default for Releases { + fn default() -> Self { + Releases::V0 + } +} + +/// Actions to take against a user's `Vesting` storage entry. +#[derive(Clone, Copy)] +enum VestingAction { + /// Do not actively remove any schedules. + Passive, + /// Remove the schedule specified by the index. + Remove(usize), + /// Remove the two schedules, specified by index, so they can be merged. + Merge(usize, usize), +} + +impl VestingAction { + /// Whether or not the filter says the schedule index should be removed. + fn should_remove(&self, index: usize) -> bool { + match self { + Self::Passive => false, + Self::Remove(index1) => *index1 == index, + Self::Merge(index1, index2) => *index1 == index || *index2 == index, } } + + /// Pick the schedules that this action dictates should continue vesting undisturbed. 
+ fn pick_schedules<'a, T: Config>( + &'a self, + schedules: Vec, T::BlockNumber>>, + ) -> impl Iterator, T::BlockNumber>> + 'a { + schedules.into_iter().enumerate().filter_map(move |(index, schedule)| { + if self.should_remove(index) { + None + } else { + Some(schedule) + } + }) + } +} + +// Wrapper for `T::MAX_VESTING_SCHEDULES` to satisfy `trait Get`. +pub struct MaxVestingSchedulesGet(PhantomData); +impl Get for MaxVestingSchedulesGet { + fn get() -> u32 { + T::MAX_VESTING_SCHEDULES + } } #[frame_support::pallet] @@ -131,16 +162,65 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// Maximum number of vesting schedules an account may have at a given moment. + const MAX_VESTING_SCHEDULES: u32; + } + + #[pallet::extra_constants] + impl Pallet { + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + #[allow(non_snake_case)] + fn MaxVestingSchedules() -> u32 { + T::MAX_VESTING_SCHEDULES + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + migrations::v1::pre_migrate::() + } + + fn on_runtime_upgrade() -> Weight { + if StorageVersion::::get() == Releases::V0 { + StorageVersion::::put(Releases::V1); + migrations::v1::migrate::().saturating_add(T::DbWeight::get().reads_writes(1, 1)) + } else { + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + migrations::v1::post_migrate::() + } + + fn integrity_test() { + assert!(T::MAX_VESTING_SCHEDULES > 0, "`MaxVestingSchedules` must ge greater than 0"); + } } /// Information regarding the vesting of a given account. #[pallet::storage] #[pallet::getter(fn vesting)] - pub type Vesting = - StorageMap<_, Blake2_128Concat, T::AccountId, VestingInfo, T::BlockNumber>>; + pub type Vesting = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + BoundedVec, T::BlockNumber>, MaxVestingSchedulesGet>, + >; + + /// Storage version of the pallet. + /// + /// New networks start with latest version, as determined by the genesis build. + #[pallet::storage] + pub(crate) type StorageVersion = StorageValue<_, Releases, ValueQuery>; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] pub struct Pallet(_); #[pallet::genesis_config] @@ -160,6 +240,9 @@ pub mod pallet { fn build(&self) { use sp_runtime::traits::Saturating; + // Genesis uses the latest storage version. 
+ StorageVersion::::put(Releases::V1); + // Generate initial vesting configuration // * who - Account which we are generating vesting configuration for // * begin - Block when the account will start to vest @@ -172,8 +255,14 @@ pub mod pallet { let locked = balance.saturating_sub(liquid); let length_as_balance = T::BlockNumberToBalance::convert(length); let per_block = locked / length_as_balance.max(sp_runtime::traits::One::one()); + let vesting_info = VestingInfo::new(locked, per_block, begin); + if !vesting_info.is_valid() { + panic!("Invalid VestingInfo params at genesis") + }; + + Vesting::::try_append(who, vesting_info) + .expect("Too many vesting schedules at genesis."); - Vesting::::insert(who, VestingInfo { locked, per_block, starting_block: begin }); let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(VESTING_ID, who, locked, reasons); } @@ -182,13 +271,15 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] - #[pallet::metadata(T::AccountId = "AccountId", BalanceOf = "Balance")] + #[pallet::metadata( + T::AccountId = "AccountId", BalanceOf = "Balance", T::BlockNumber = "BlockNumber" + )] pub enum Event { - /// The amount vested has been updated. This could indicate more funds are available. The - /// balance given is the amount which is left unvested (and thus locked). + /// The amount vested has been updated. This could indicate a change in funds available. + /// The balance given is the amount which is left unvested (and thus locked). /// \[account, unvested\] VestingUpdated(T::AccountId, BalanceOf), - /// An \[account\] has become fully vested. No further vesting can happen. + /// An \[account\] has become fully vested. VestingCompleted(T::AccountId), } @@ -197,10 +288,15 @@ pub mod pallet { pub enum Error { /// The account given is not vesting. NotVesting, - /// An existing vesting schedule already exists for this account that cannot be clobbered. - ExistingVestingSchedule, + /// The account already has `MaxVestingSchedules` count of schedules and thus + /// cannot add another one. Consider merging existing schedules in order to add another. + AtMaxVestingSchedules, /// Amount being transferred is too low to create a vesting schedule. AmountLow, + /// An index was out of bounds of the vesting schedules. + ScheduleIndexOutOfBounds, + /// Failed to create a new schedule because some parameter was invalid. + InvalidScheduleParams, } #[pallet::call] @@ -218,12 +314,12 @@ pub mod pallet { /// - Reads: Vesting Storage, Balances Locks, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, [Sender Account] /// # - #[pallet::weight(T::WeightInfo::vest_locked(MaxLocksOf::::get()) - .max(T::WeightInfo::vest_unlocked(MaxLocksOf::::get())) + #[pallet::weight(T::WeightInfo::vest_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + .max(T::WeightInfo::vest_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) )] pub fn vest(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; - Self::update_lock(who) + Self::do_vest(who) } /// Unlock any vested funds of a `target` account. 
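The genesis builder above derives each schedule from a `(who, begin, length, liquid)` tuple: everything above the liquid amount is locked and released linearly over `length` blocks. A standalone sketch of that derivation, together with the `locked_at` amount the vesting lock tracks at a given block, is below; it uses plain `u64` balances and block numbers rather than the pallet's generic types, and the example figures are illustrative only.

```rust
// Mirrors the genesis arithmetic for one `(who, begin, length, liquid)` entry.
fn genesis_schedule(balance: u64, liquid: u64, begin: u64, length: u64) -> (u64, u64, u64) {
    let locked = balance.saturating_sub(liquid);
    // One unlock step per block over `length` blocks (at least 1 to avoid division by zero).
    let per_block = locked / length.max(1);
    (locked, per_block, begin)
}

// Amount still locked at block `n`, mirroring `VestingInfo::locked_at`.
fn locked_at(locked: u64, per_block: u64, starting_block: u64, n: u64) -> u64 {
    let vested_blocks = n.saturating_sub(starting_block);
    locked.saturating_sub(per_block.saturating_mul(vested_blocks))
}

fn main() {
    // E.g. 1_000 total balance, 100 immediately liquid, vesting from block 10 over 100 blocks.
    let (locked, per_block, begin) = genesis_schedule(1_000, 100, 10, 100);
    assert_eq!((locked, per_block, begin), (900, 9, 10));
    assert_eq!(locked_at(locked, per_block, begin, 10), 900); // Nothing vested yet.
    assert_eq!(locked_at(locked, per_block, begin, 60), 450); // 50 blocks in: 50 * 9 unlocked.
    assert_eq!(locked_at(locked, per_block, begin, 110), 0); // Fully vested.
}
```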
@@ -241,61 +337,46 @@ pub mod pallet { /// - Reads: Vesting Storage, Balances Locks, Target Account /// - Writes: Vesting Storage, Balances Locks, Target Account /// # - #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get()) - .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get())) + #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) )] pub fn vest_other( origin: OriginFor, target: ::Source, ) -> DispatchResult { ensure_signed(origin)?; - Self::update_lock(T::Lookup::lookup(target)?) + let who = T::Lookup::lookup(target)?; + Self::do_vest(who) } /// Create a vested transfer. /// /// The dispatch origin for this call must be _Signed_. /// - /// - `target`: The account that should be transferred the vested funds. - /// - `amount`: The amount of funds to transfer and will be vested. + /// - `target`: The account receiving the vested funds. /// - `schedule`: The vesting schedule attached to the transfer. /// /// Emits `VestingCreated`. /// + /// NOTE: This will unlock all schedules through the current block. + /// /// # /// - `O(1)`. /// - DbWeight: 3 Reads, 3 Writes /// - Reads: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// # - #[pallet::weight(T::WeightInfo::vested_transfer(MaxLocksOf::::get()))] + #[pallet::weight( + T::WeightInfo::vested_transfer(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + )] pub fn vested_transfer( origin: OriginFor, target: ::Source, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { let transactor = ensure_signed(origin)?; - ensure!(schedule.locked >= T::MinVestedTransfer::get(), Error::::AmountLow); - - let who = T::Lookup::lookup(target)?; - ensure!(!Vesting::::contains_key(&who), Error::::ExistingVestingSchedule); - - T::Currency::transfer( - &transactor, - &who, - schedule.locked, - ExistenceRequirement::AllowDeath, - )?; - - Self::add_vesting_schedule( - &who, - schedule.locked, - schedule.per_block, - schedule.starting_block, - ) - .expect("user does not have an existing vesting schedule; q.e.d."); - - Ok(()) + let transactor = ::unlookup(transactor); + Self::do_vested_transfer(transactor, target, schedule) } /// Force a vested transfer. @@ -304,18 +385,21 @@ pub mod pallet { /// /// - `source`: The account whose funds should be transferred. /// - `target`: The account that should be transferred the vested funds. - /// - `amount`: The amount of funds to transfer and will be vested. /// - `schedule`: The vesting schedule attached to the transfer. /// /// Emits `VestingCreated`. /// + /// NOTE: This will unlock all schedules through the current block. + /// /// # /// - `O(1)`. 
/// - DbWeight: 4 Reads, 4 Writes /// - Reads: Vesting Storage, Balances Locks, Target Account, Source Account /// - Writes: Vesting Storage, Balances Locks, Target Account, Source Account /// # - #[pallet::weight(T::WeightInfo::force_vested_transfer(MaxLocksOf::::get()))] + #[pallet::weight( + T::WeightInfo::force_vested_transfer(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + )] pub fn force_vested_transfer( origin: OriginFor, source: ::Source, @@ -323,26 +407,53 @@ pub mod pallet { schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { ensure_root(origin)?; - ensure!(schedule.locked >= T::MinVestedTransfer::get(), Error::::AmountLow); - - let target = T::Lookup::lookup(target)?; - let source = T::Lookup::lookup(source)?; - ensure!(!Vesting::::contains_key(&target), Error::::ExistingVestingSchedule); - - T::Currency::transfer( - &source, - &target, - schedule.locked, - ExistenceRequirement::AllowDeath, - )?; - - Self::add_vesting_schedule( - &target, - schedule.locked, - schedule.per_block, - schedule.starting_block, - ) - .expect("user does not have an existing vesting schedule; q.e.d."); + Self::do_vested_transfer(source, target, schedule) + } + + /// Merge two vesting schedules together, creating a new vesting schedule that unlocks over + /// the highest possible start and end blocks. If both schedules have already started the + /// current block will be used as the schedule start; with the caveat that if one schedule + /// is finished by the current block, the other will be treated as the new merged schedule, + /// unmodified. + /// + /// NOTE: If `schedule1_index == schedule2_index` this is a no-op. + /// NOTE: This will unlock all schedules through the current block prior to merging. + /// NOTE: If both schedules have ended by the current block, no new schedule will be created + /// and both will be removed. + /// + /// Merged schedule attributes: + /// - `starting_block`: `MAX(schedule1.starting_block, scheduled2.starting_block, + /// current_block)`. + /// - `ending_block`: `MAX(schedule1.ending_block, schedule2.ending_block)`. + /// - `locked`: `schedule1.locked_at(current_block) + schedule2.locked_at(current_block)`. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `schedule1_index`: index of the first schedule to merge. + /// - `schedule2_index`: index of the second schedule to merge. + #[pallet::weight( + T::WeightInfo::not_unlocking_merge_schedules(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + .max(T::WeightInfo::unlocking_merge_schedules(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) + )] + pub fn merge_schedules( + origin: OriginFor, + schedule1_index: u32, + schedule2_index: u32, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + if schedule1_index == schedule2_index { + return Ok(()) + }; + let schedule1_index = schedule1_index as usize; + let schedule2_index = schedule2_index as usize; + + let schedules = Self::vesting(&who).ok_or(Error::::NotVesting)?; + let merge_action = VestingAction::Merge(schedule1_index, schedule2_index); + + let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), merge_action)?; + + Self::write_vesting(&who, schedules)?; + Self::write_lock(&who, locked_now); Ok(()) } @@ -350,39 +461,233 @@ pub mod pallet { } impl Pallet { - /// (Re)set or remove the pallet's currency lock on `who`'s account in accordance with their - /// current unvested amount. 
- fn update_lock(who: T::AccountId) -> DispatchResult { - let vesting = Self::vesting(&who).ok_or(Error::::NotVesting)?; + // Create a new `VestingInfo`, based off of two other `VestingInfo`s. + // NOTE: We assume both schedules have had funds unlocked up through the current block. + fn merge_vesting_info( + now: T::BlockNumber, + schedule1: VestingInfo, T::BlockNumber>, + schedule2: VestingInfo, T::BlockNumber>, + ) -> Option, T::BlockNumber>> { + let schedule1_ending_block = schedule1.ending_block_as_balance::(); + let schedule2_ending_block = schedule2.ending_block_as_balance::(); + let now_as_balance = T::BlockNumberToBalance::convert(now); + + // Check if one or both schedules have ended. + match (schedule1_ending_block <= now_as_balance, schedule2_ending_block <= now_as_balance) { + // If both schedules have ended, we don't merge and exit early. + (true, true) => return None, + // If one schedule has ended, we treat the one that has not ended as the new + // merged schedule. + (true, false) => return Some(schedule2), + (false, true) => return Some(schedule1), + // If neither schedule has ended don't exit early. + _ => {}, + } + + let locked = schedule1 + .locked_at::(now) + .saturating_add(schedule2.locked_at::(now)); + // This shouldn't happen because we know at least one ending block is greater than now, + // thus at least a schedule a some locked balance. + debug_assert!( + !locked.is_zero(), + "merge_vesting_info validation checks failed to catch a locked of 0" + ); + + let ending_block = schedule1_ending_block.max(schedule2_ending_block); + let starting_block = now.max(schedule1.starting_block()).max(schedule2.starting_block()); + + let per_block = { + let duration = ending_block + .saturating_sub(T::BlockNumberToBalance::convert(starting_block)) + .max(One::one()); + (locked / duration).max(One::one()) + }; + + let schedule = VestingInfo::new(locked, per_block, starting_block); + debug_assert!(schedule.is_valid(), "merge_vesting_info schedule validation check failed"); + + Some(schedule) + } + + // Execute a vested transfer from `source` to `target` with the given `schedule`. + fn do_vested_transfer( + source: ::Source, + target: ::Source, + schedule: VestingInfo, T::BlockNumber>, + ) -> DispatchResult { + // Validate user inputs. + ensure!(schedule.locked() >= T::MinVestedTransfer::get(), Error::::AmountLow); + if !schedule.is_valid() { + return Err(Error::::InvalidScheduleParams.into()) + }; + let target = T::Lookup::lookup(target)?; + let source = T::Lookup::lookup(source)?; + + // Check we can add to this account prior to any storage writes. + Self::can_add_vesting_schedule( + &target, + schedule.locked(), + schedule.per_block(), + schedule.starting_block(), + )?; + + T::Currency::transfer( + &source, + &target, + schedule.locked(), + ExistenceRequirement::AllowDeath, + )?; + + // We can't let this fail because the currency transfer has already happened. + let res = Self::add_vesting_schedule( + &target, + schedule.locked(), + schedule.per_block(), + schedule.starting_block(), + ); + debug_assert!(res.is_ok(), "Failed to add a schedule when we had to succeed."); + + Ok(()) + } + + /// Iterate through the schedules to track the current locked amount and + /// filter out completed and specified schedules. + /// + /// Returns a tuple that consists of: + /// - Vec of vesting schedules, where completed schedules and those specified + /// by filter are removed. (Note the vec is not checked for respecting + /// bounded length.) 
+ /// - The amount locked at the current block number based on the given schedules. + /// + /// NOTE: the amount locked does not include any schedules that are filtered out via `action`. + fn report_schedule_updates( + schedules: Vec, T::BlockNumber>>, + action: VestingAction, + ) -> (Vec, T::BlockNumber>>, BalanceOf) { let now = >::block_number(); - let locked_now = vesting.locked_at::(now); - if locked_now.is_zero() { - T::Currency::remove_lock(VESTING_ID, &who); - Vesting::::remove(&who); - Self::deposit_event(Event::::VestingCompleted(who)); + let mut total_locked_now: BalanceOf = Zero::zero(); + let filtered_schedules = action + .pick_schedules::(schedules) + .filter_map(|schedule| { + let locked_now = schedule.locked_at::(now); + if locked_now.is_zero() { + None + } else { + total_locked_now = total_locked_now.saturating_add(locked_now); + Some(schedule) + } + }) + .collect::>(); + + (filtered_schedules, total_locked_now) + } + + /// Write an accounts updated vesting lock to storage. + fn write_lock(who: &T::AccountId, total_locked_now: BalanceOf) { + if total_locked_now.is_zero() { + T::Currency::remove_lock(VESTING_ID, who); + Self::deposit_event(Event::::VestingCompleted(who.clone())); } else { let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; - T::Currency::set_lock(VESTING_ID, &who, locked_now, reasons); - Self::deposit_event(Event::::VestingUpdated(who, locked_now)); + T::Currency::set_lock(VESTING_ID, who, total_locked_now, reasons); + Self::deposit_event(Event::::VestingUpdated(who.clone(), total_locked_now)); + }; + } + + /// Write an accounts updated vesting schedules to storage. + fn write_vesting( + who: &T::AccountId, + schedules: Vec, T::BlockNumber>>, + ) -> Result<(), DispatchError> { + let schedules: BoundedVec< + VestingInfo, T::BlockNumber>, + MaxVestingSchedulesGet, + > = schedules.try_into().map_err(|_| Error::::AtMaxVestingSchedules)?; + + if schedules.len() == 0 { + Vesting::::remove(&who); + } else { + Vesting::::insert(who, schedules) } + + Ok(()) + } + + /// Unlock any vested funds of `who`. + fn do_vest(who: T::AccountId) -> DispatchResult { + let schedules = Self::vesting(&who).ok_or(Error::::NotVesting)?; + + let (schedules, locked_now) = + Self::exec_action(schedules.to_vec(), VestingAction::Passive)?; + + Self::write_vesting(&who, schedules)?; + Self::write_lock(&who, locked_now); + Ok(()) } + + /// Execute a `VestingAction` against the given `schedules`. Returns the updated schedules + /// and locked amount. + fn exec_action( + schedules: Vec, T::BlockNumber>>, + action: VestingAction, + ) -> Result<(Vec, T::BlockNumber>>, BalanceOf), DispatchError> { + let (schedules, locked_now) = match action { + VestingAction::Merge(idx1, idx2) => { + // The schedule index is based off of the schedule ordering prior to filtering out + // any schedules that may be ending at this block. + let schedule1 = *schedules.get(idx1).ok_or(Error::::ScheduleIndexOutOfBounds)?; + let schedule2 = *schedules.get(idx2).ok_or(Error::::ScheduleIndexOutOfBounds)?; + + // The length of `schedules` decreases by 2 here since we filter out 2 schedules. + // Thus we know below that we can push the new merged schedule without error + // (assuming initial state was valid). 
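The filter-and-sum performed by `report_schedule_updates` above boils down to the following pattern (a standalone sketch over plain integers; the real code works on `VestingInfo` values and a saturating `BalanceOf` accumulator):

// Sketch of the `report_schedule_updates` pattern: drop finished schedules,
// sum the locked amount of everything that remains.
fn filter_and_sum(locked_now_per_schedule: Vec<u64>) -> (Vec<u64>, u64) {
    let mut total_locked_now = 0u64;
    let remaining: Vec<u64> = locked_now_per_schedule
        .into_iter()
        .filter_map(|locked_now| {
            if locked_now == 0 {
                // Finished schedules are dropped entirely.
                None
            } else {
                total_locked_now += locked_now;
                Some(locked_now)
            }
        })
        .collect();
    (remaining, total_locked_now)
}

For example, `filter_and_sum(vec![0, 500, 300])` returns `(vec![500, 300], 800)`.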
+ let (mut schedules, mut locked_now) = + Self::report_schedule_updates(schedules.to_vec(), action); + + let now = >::block_number(); + if let Some(new_schedule) = Self::merge_vesting_info(now, schedule1, schedule2) { + // Merging created a new schedule so we: + // 1) need to add it to the accounts vesting schedule collection, + schedules.push(new_schedule); + // (we use `locked_at` in case this is a schedule that started in the past) + let new_schedule_locked = + new_schedule.locked_at::(now); + // and 2) update the locked amount to reflect the schedule we just added. + locked_now = locked_now.saturating_add(new_schedule_locked); + } // In the None case there was no new schedule to account for. + + (schedules, locked_now) + }, + _ => Self::report_schedule_updates(schedules.to_vec(), action), + }; + + debug_assert!( + locked_now > Zero::zero() && schedules.len() > 0 || + locked_now == Zero::zero() && schedules.len() == 0 + ); + + Ok((schedules, locked_now)) + } } impl VestingSchedule for Pallet where BalanceOf: MaybeSerializeDeserialize + Debug, { - type Moment = T::BlockNumber; type Currency = T::Currency; + type Moment = T::BlockNumber; /// Get the amount that is currently being vested and cannot be transferred out of this account. fn vesting_balance(who: &T::AccountId) -> Option> { if let Some(v) = Self::vesting(who) { let now = >::block_number(); - let locked_now = v.locked_at::(now); - Some(T::Currency::free_balance(who).min(locked_now)) + let total_locked_now = v.iter().fold(Zero::zero(), |total, schedule| { + schedule.locked_at::(now).saturating_add(total) + }); + Some(T::Currency::free_balance(who).min(total_locked_now)) } else { None } @@ -390,14 +695,16 @@ where /// Adds a vesting schedule to a given account. /// - /// If there already exists a vesting schedule for the given account, an `Err` is returned - /// and nothing is updated. + /// If the account has `MaxVestingSchedules`, an Error is returned and nothing + /// is updated. /// /// On success, a linearly reducing amount of funds will be locked. In order to realise any /// reduction of the lock over time as it diminishes, the account owner must use `vest` or /// `vest_other`. /// /// Is a no-op if the amount to be vested is zero. + /// + /// NOTE: This doesn't alter the free balance of the account. fn add_vesting_schedule( who: &T::AccountId, locked: BalanceOf, @@ -407,22 +714,58 @@ where if locked.is_zero() { return Ok(()) } - if Vesting::::contains_key(who) { - Err(Error::::ExistingVestingSchedule)? + + let vesting_schedule = VestingInfo::new(locked, per_block, starting_block); + // Check for `per_block` or `locked` of 0. + if !vesting_schedule.is_valid() { + return Err(Error::::InvalidScheduleParams.into()) + }; + + let mut schedules = Self::vesting(who).unwrap_or_default(); + + // NOTE: we must push the new schedule so that `exec_action` + // will give the correct new locked amount. + ensure!(schedules.try_push(vesting_schedule).is_ok(), Error::::AtMaxVestingSchedules); + + let (schedules, locked_now) = + Self::exec_action(schedules.to_vec(), VestingAction::Passive)?; + + Self::write_vesting(&who, schedules)?; + Self::write_lock(who, locked_now); + + Ok(()) + } + + // Ensure we can call `add_vesting_schedule` without error. This should always + // be called prior to `add_vesting_schedule`. + fn can_add_vesting_schedule( + who: &T::AccountId, + locked: BalanceOf, + per_block: BalanceOf, + starting_block: T::BlockNumber, + ) -> DispatchResult { + // Check for `per_block` or `locked` of 0. 
+ if !VestingInfo::new(locked, per_block, starting_block).is_valid() { + return Err(Error::::InvalidScheduleParams.into()) } - let vesting_schedule = VestingInfo { locked, per_block, starting_block }; - Vesting::::insert(who, vesting_schedule); - // it can't fail, but even if somehow it did, we don't really care. - let res = Self::update_lock(who.clone()); - debug_assert!(res.is_ok()); + + ensure!( + (Vesting::::decode_len(who).unwrap_or_default() as u32) < T::MAX_VESTING_SCHEDULES, + Error::::AtMaxVestingSchedules + ); + Ok(()) } /// Remove a vesting schedule for a given account. - fn remove_vesting_schedule(who: &T::AccountId) { - Vesting::::remove(who); - // it can't fail, but even if somehow it did, we don't really care. - let res = Self::update_lock(who.clone()); - debug_assert!(res.is_ok()); + fn remove_vesting_schedule(who: &T::AccountId, schedule_index: u32) -> DispatchResult { + let schedules = Self::vesting(who).ok_or(Error::::NotVesting)?; + let remove_action = VestingAction::Remove(schedule_index as usize); + + let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), remove_action)?; + + Self::write_vesting(&who, schedules)?; + Self::write_lock(who, locked_now); + Ok(()) } } diff --git a/frame/vesting/src/migrations.rs b/frame/vesting/src/migrations.rs new file mode 100644 index 0000000000000..086257d285ea0 --- /dev/null +++ b/frame/vesting/src/migrations.rs @@ -0,0 +1,95 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage migrations for the vesting pallet. + +use super::*; + +// Migration from single schedule to multiple schedules. +pub(crate) mod v1 { + use super::*; + + #[cfg(feature = "try-runtime")] + pub(crate) fn pre_migrate() -> Result<(), &'static str> { + assert!(StorageVersion::::get() == Releases::V0, "Storage version too high."); + + log::debug!( + target: "runtime::vesting", + "migration: Vesting storage version v1 PRE migration checks succesful!" + ); + + Ok(()) + } + + /// Migrate from single schedule to multi schedule storage. + /// WARNING: This migration will delete schedules if `MaxVestingSchedules < 1`. 
+ pub(crate) fn migrate() -> Weight { + let mut reads_writes = 0; + + Vesting::::translate::, T::BlockNumber>, _>( + |_key, vesting_info| { + reads_writes += 1; + let v: Option< + BoundedVec< + VestingInfo, T::BlockNumber>, + MaxVestingSchedulesGet, + >, + > = vec![vesting_info].try_into().ok(); + + if v.is_none() { + log::warn!( + target: "runtime::vesting", + "migration: Failed to move a vesting schedule into a BoundedVec" + ); + } + + v + }, + ); + + T::DbWeight::get().reads_writes(reads_writes, reads_writes) + } + + #[cfg(feature = "try-runtime")] + pub(crate) fn post_migrate() -> Result<(), &'static str> { + assert_eq!(StorageVersion::::get(), Releases::V1); + + for (_key, schedules) in Vesting::::iter() { + assert!( + schedules.len() == 1, + "A bounded vec with incorrect count of items was created." + ); + + for s in schedules { + // It is ok if this does not pass, but ideally pre-existing schedules would pass + // this validation logic so we can be more confident about edge cases. + if !s.is_valid() { + log::warn!( + target: "runtime::vesting", + "migration: A schedule does not pass new validation logic.", + ) + } + } + } + + log::debug!( + target: "runtime::vesting", + "migration: Vesting storage version v1 POST migration checks successful!" + ); + Ok(()) + } +} diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index 4efbabefe688f..cb8961150003b 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -92,24 +92,33 @@ impl Config for Test { type BlockNumberToBalance = Identity; type Currency = Balances; type Event = Event; + const MAX_VESTING_SCHEDULES: u32 = 3; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); } pub struct ExtBuilder { existential_deposit: u64, + vesting_genesis_config: Option>, } + impl Default for ExtBuilder { fn default() -> Self { - Self { existential_deposit: 1 } + Self { existential_deposit: 1, vesting_genesis_config: None } } } + impl ExtBuilder { pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { self.existential_deposit = existential_deposit; self } + pub fn vesting_genesis_config(mut self, config: Vec<(u64, u64, u64, u64)>) -> Self { + self.vesting_genesis_config = Some(config); + self + } + pub fn build(self) -> sp_io::TestExternalities { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -120,19 +129,25 @@ impl ExtBuilder { (3, 30 * self.existential_deposit), (4, 40 * self.existential_deposit), (12, 10 * self.existential_deposit), + (13, 9999 * self.existential_deposit), ], } .assimilate_storage(&mut t) .unwrap(); - pallet_vesting::GenesisConfig:: { - vesting: vec![ + + let vesting = if let Some(vesting_config) = self.vesting_genesis_config { + vesting_config + } else { + vec![ (1, 0, 10, 5 * self.existential_deposit), (2, 10, 20, 0), (12, 10, 20, 5 * self.existential_deposit), - ], - } - .assimilate_storage(&mut t) - .unwrap(); + ] + }; + + pallet_vesting::GenesisConfig:: { vesting } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext diff --git a/frame/vesting/src/tests.rs b/frame/vesting/src/tests.rs index 2ee0e83933cb6..2a6dd0520c3b0 100644 --- a/frame/vesting/src/tests.rs +++ b/frame/vesting/src/tests.rs @@ -15,47 +15,62 @@ // See the License for the specific language governing permissions and // limitations under the License. 
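A standalone illustration of the `try_into` conversion the v1 migration and `write_vesting` rely on (a sketch assuming `frame_support`'s `BoundedVec` re-export and a made-up bound of 3; with a bound below 1 the conversion returns `None`, which is exactly the data-loss case the migration's WARNING refers to):

use core::convert::TryInto;
use frame_support::{traits::Get, BoundedVec};

// Hypothetical bound standing in for the runtime's MaxVestingSchedules.
pub struct MaxSchedules;
impl Get<u32> for MaxSchedules {
    fn get() -> u32 {
        3
    }
}

// Wrap a single legacy schedule (a u64 stand-in here) the way the migration does:
// `try_into` only succeeds while the BoundedVec's bound is respected.
fn wrap_single_schedule(old: u64) -> Option<BoundedVec<u64, MaxSchedules>> {
    vec![old].try_into().ok()
}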
-use frame_support::{assert_noop, assert_ok}; +use frame_support::{assert_noop, assert_ok, assert_storage_noop, dispatch::EncodeLike}; use frame_system::RawOrigin; -use sp_runtime::traits::BadOrigin; +use sp_runtime::traits::{BadOrigin, Identity}; -use super::*; +use super::{Vesting as VestingStorage, *}; use crate::mock::{Balances, ExtBuilder, System, Test, Vesting}; +/// A default existential deposit. +const ED: u64 = 256; + +/// Calls vest, and asserts that there is no entry for `account` +/// in the `Vesting` storage item. +fn vest_and_assert_no_vesting(account: u64) +where + u64: EncodeLike<::AccountId>, + T: pallet::Config, +{ + // Its ok for this to fail because the user may already have no schedules. + let _result = Vesting::vest(Some(account).into()); + assert!(!>::contains_key(account)); +} + #[test] fn check_vesting_status() { - ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { let user1_free_balance = Balances::free_balance(&1); let user2_free_balance = Balances::free_balance(&2); let user12_free_balance = Balances::free_balance(&12); - assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance - assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance - assert_eq!(user12_free_balance, 256 * 10); // Account 12 has free balance - let user1_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 128, // Vesting over 10 blocks - starting_block: 0, - }; - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule - - // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 + assert_eq!(user1_free_balance, ED * 10); // Account 1 has free balance + assert_eq!(user2_free_balance, ED * 20); // Account 2 has free balance + assert_eq!(user12_free_balance, ED * 10); // Account 12 has free balance + let user1_vesting_schedule = VestingInfo::new( + ED * 5, + 128, // Vesting over 10 blocks + 0, + ); + let user2_vesting_schedule = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + let user12_vesting_schedule = VestingInfo::new( + ED * 5, + 64, // Vesting over 20 blocks + 10, + ); + assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_vesting_schedule]); // Account 1 has a vesting schedule + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); // Account 2 has a vesting schedule + assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 has a vesting schedule + + // Account 1 has only 128 units vested from their illiquid ED * 5 units at block 1 assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); // Account 2 has their full balance locked assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); // Account 12 has only their illiquid funds locked - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - ED * 5)); System::set_block_number(10); 
assert_eq!(System::block_number(), 10); @@ -65,7 +80,7 @@ fn check_vesting_status() { // Account 2 has started vesting by block 10 assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); // Account 12 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - ED * 5)); System::set_block_number(30); assert_eq!(System::block_number(), 30); @@ -73,6 +88,88 @@ fn check_vesting_status() { assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 + + // Once we unlock the funds, they are removed from storage. + vest_and_assert_no_vesting::(1); + vest_and_assert_no_vesting::(2); + vest_and_assert_no_vesting::(12); + }); +} + +#[test] +fn check_vesting_status_for_multi_schedule_account() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert_eq!(System::block_number(), 1); + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + // Account 2 already has a vesting schedule. + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + // Account 2's free balance is from sched0. + let free_balance = Balances::free_balance(&2); + assert_eq!(free_balance, ED * (20)); + assert_eq!(Vesting::vesting_balance(&2), Some(free_balance)); + + // Add a 2nd schedule that is already unlocking by block #1. + let sched1 = VestingInfo::new( + ED * 10, + ED, // Vesting over 10 blocks + 0, + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); + // Free balance is equal to the two existing schedules total amount. + let free_balance = Balances::free_balance(&2); + assert_eq!(free_balance, ED * (10 + 20)); + // The most recently added schedule exists. + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + // sched1 has free funds at block #1, but nothing else. + assert_eq!(Vesting::vesting_balance(&2), Some(free_balance - sched1.per_block())); + + // Add a 3rd schedule. + let sched2 = VestingInfo::new( + ED * 30, + ED, // Vesting over 30 blocks + 5, + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched2)); + + System::set_block_number(9); + // Free balance is equal to the 3 existing schedules total amount. + let free_balance = Balances::free_balance(&2); + assert_eq!(free_balance, ED * (10 + 20 + 30)); + // sched1 and sched2 are freeing funds at block #9. + assert_eq!( + Vesting::vesting_balance(&2), + Some(free_balance - sched1.per_block() * 9 - sched2.per_block() * 4) + ); + + System::set_block_number(20); + // At block #20 sched1 is fully unlocked while sched2 and sched0 are partially unlocked. + assert_eq!( + Vesting::vesting_balance(&2), + Some( + free_balance - sched1.locked() - sched2.per_block() * 15 - sched0.per_block() * 10 + ) + ); + + System::set_block_number(30); + // At block #30 sched0 and sched1 are fully unlocked while sched2 is partially unlocked. + assert_eq!( + Vesting::vesting_balance(&2), + Some(free_balance - sched1.locked() - sched2.per_block() * 25 - sched0.locked()) + ); + + // At block #35 sched2 fully unlocks and thus all schedules funds are unlocked. 
+ System::set_block_number(35); + assert_eq!(Vesting::vesting_balance(&2), Some(0)); + // Since we have not called any extrinsics that would unlock funds the schedules + // are still in storage, + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + // but once we unlock the funds, they are removed from storage. + vest_and_assert_no_vesting::(2); }); } @@ -102,6 +199,32 @@ fn vested_balance_should_transfer() { }); } +#[test] +fn vested_balance_should_transfer_with_multi_sched() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let sched0 = VestingInfo::new(5 * ED, 128, 0); + assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0)); + // Total 10*ED locked for all the schedules. + assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]); + + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 3840); // Account 1 has free balance + + // Account 1 has only 256 units unlocking at block 1 (plus 1280 already fee). + assert_eq!(Vesting::vesting_balance(&1), Some(2304)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 2, 1536)); + }); +} + +#[test] +fn non_vested_cannot_vest() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert!(!>::contains_key(4)); + assert_noop!(Vesting::vest(Some(4).into()), Error::::NotVesting); + }); +} + #[test] fn vested_balance_should_transfer_using_vest_other() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { @@ -114,6 +237,32 @@ fn vested_balance_should_transfer_using_vest_other() { }); } +#[test] +fn vested_balance_should_transfer_using_vest_other_with_multi_sched() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let sched0 = VestingInfo::new(5 * ED, 128, 0); + assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0)); + // Total of 10*ED of locked for all the schedules. + assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]); + + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 3840); // Account 1 has free balance + + // Account 1 has only 256 units unlocking at block 1 (plus 1280 already free). + assert_eq!(Vesting::vesting_balance(&1), Some(2304)); + assert_ok!(Vesting::vest_other(Some(2).into(), 1)); + assert_ok!(Balances::transfer(Some(1).into(), 2, 1536)); + }); +} + +#[test] +fn non_vested_cannot_vest_other() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert!(!>::contains_key(4)); + assert_noop!(Vesting::vest_other(Some(3).into(), 4), Error::::NotVesting); + }); +} + #[test] fn extra_balance_should_transfer() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { @@ -148,12 +297,12 @@ fn liquid_funds_should_transfer_with_delayed_vesting() { assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); // Account 12 has delayed vesting - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); + let user12_vesting_schedule = VestingInfo::new( + 256 * 5, + 64, // Vesting over 20 blocks + 10, + ); + assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 can still send liquid funds assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); @@ -170,14 +319,14 @@ fn vested_transfer_works() { // Account 4 should not have any vesting yet. 
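Since the figures 3840 / 2304 / 1536 in the two multi-schedule transfer tests above are a bit opaque, here is how they decompose (a sketch using the mock's ED of 256; the two identical schedules are the genesis schedule plus the vested transfer):

fn multi_sched_numbers_example() {
    const ED: u64 = 256;
    // Genesis balance of account 1 plus the 5 * ED vested transfer.
    let free = 10 * ED + 5 * ED; // 3840
    // Two identical schedules: locked 5 * ED, per_block 128, starting at block 0.
    let locked_at_block_1 = 2 * (5 * ED - 128); // 2 * 1152 = 2304
    let usable_after_vest = free - locked_at_block_1; // 1536

    assert_eq!((free, locked_at_block_1, usable_after_vest), (3840, 2304, 1536));
}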
assert_eq!(Vesting::vesting(&4), None); // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; + let new_vesting_schedule = VestingInfo::new( + 256 * 5, + 64, // Vesting over 20 blocks + 10, + ); assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + assert_eq!(Vesting::vesting(&4).unwrap(), vec![new_vesting_schedule]); // Ensure the transfer happened correctly. let user3_free_balance_updated = Balances::free_balance(&3); assert_eq!(user3_free_balance_updated, 256 * 25); @@ -195,66 +344,117 @@ fn vested_transfer_works() { System::set_block_number(30); assert_eq!(System::block_number(), 30); - // Account 4 has fully vested. + // Account 4 has fully vested, assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. + vest_and_assert_no_vesting::(4); }); } #[test] fn vested_transfer_correctly_fails() { - ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { let user2_free_balance = Balances::free_balance(&2); let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); + assert_eq!(user2_free_balance, ED * 20); + assert_eq!(user4_free_balance, ED * 40); + // Account 2 should already have a vesting schedule. - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); - - // The vesting schedule we will try to create, fails due to pre-existence of schedule. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!( - Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule), - Error::::ExistingVestingSchedule, + let user2_vesting_schedule = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); // Fails due to too low transfer amount. let new_vesting_schedule_too_low = - VestingInfo { locked: 256 * 1, per_block: 64, starting_block: 10 }; + VestingInfo::new(::MinVestedTransfer::get() - 1, 64, 10); assert_noop!( Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), Error::::AmountLow, ); - // Verify no currency transfer happened. - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); + // `per_block` is 0, which would result in a schedule with infinite duration. + let schedule_per_block_0 = + VestingInfo::new(::MinVestedTransfer::get(), 0, 10); + assert_noop!( + Vesting::vested_transfer(Some(13).into(), 4, schedule_per_block_0), + Error::::InvalidScheduleParams, + ); + + // `locked` is 0. + let schedule_locked_0 = VestingInfo::new(0, 1, 10); + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, schedule_locked_0), + Error::::AmountLow, + ); + + // Free balance has not changed. + assert_eq!(user2_free_balance, Balances::free_balance(&2)); + assert_eq!(user4_free_balance, Balances::free_balance(&4)); + // Account 4 has no schedules. 
+ vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn vested_transfer_allows_max_schedules() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let mut user_4_free_balance = Balances::free_balance(&4); + let max_schedules = ::MAX_VESTING_SCHEDULES; + let sched = VestingInfo::new( + ::MinVestedTransfer::get(), + 1, // Vest over 2 * 256 blocks. + 10, + ); + + // Add max amount schedules to user 4. + for _ in 0..max_schedules { + assert_ok!(Vesting::vested_transfer(Some(13).into(), 4, sched)); + } + + // The schedules count towards vesting balance + let transferred_amount = ::MinVestedTransfer::get() * max_schedules as u64; + assert_eq!(Vesting::vesting_balance(&4), Some(transferred_amount)); + // and free balance. + user_4_free_balance += transferred_amount; + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Cannot insert a 4th vesting schedule when `MaxVestingSchedules` === 3, + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, sched), + Error::::AtMaxVestingSchedules, + ); + // so the free balance does not change. + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Account 4 has fully vested when all the schedules end, + System::set_block_number( + ::MinVestedTransfer::get() + sched.starting_block(), + ); + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. + vest_and_assert_no_vesting::(4); }); } #[test] fn force_vested_transfer_works() { - ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { let user3_free_balance = Balances::free_balance(&3); let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); + assert_eq!(user3_free_balance, ED * 30); + assert_eq!(user4_free_balance, ED * 40); // Account 4 should not have any vesting yet. assert_eq!(Vesting::vesting(&4), None); // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; + let new_vesting_schedule = VestingInfo::new( + ED * 5, + 64, // Vesting over 20 blocks + 10, + ); + assert_noop!( Vesting::force_vested_transfer(Some(4).into(), 3, 4, new_vesting_schedule), BadOrigin @@ -266,14 +466,15 @@ fn force_vested_transfer_works() { new_vesting_schedule )); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + assert_eq!(Vesting::vesting(&4).unwrap()[0], new_vesting_schedule); + assert_eq!(Vesting::vesting(&4).unwrap().len(), 1); // Ensure the transfer happened correctly. let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); + assert_eq!(user3_free_balance_updated, ED * 25); let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + assert_eq!(user4_free_balance_updated, ED * 45); + // Account 4 has 5 * ED locked. + assert_eq!(Vesting::vesting_balance(&4), Some(ED * 5)); System::set_block_number(20); assert_eq!(System::block_number(), 20); @@ -284,40 +485,31 @@ fn force_vested_transfer_works() { System::set_block_number(30); assert_eq!(System::block_number(), 30); - // Account 4 has fully vested. 
+ // Account 4 has fully vested, assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. + vest_and_assert_no_vesting::(4); }); } #[test] fn force_vested_transfer_correctly_fails() { - ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { let user2_free_balance = Balances::free_balance(&2); let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); + assert_eq!(user2_free_balance, ED * 20); + assert_eq!(user4_free_balance, ED * 40); // Account 2 should already have a vesting schedule. - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); - - // The vesting schedule we will try to create, fails due to pre-existence of schedule. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!( - Vesting::force_vested_transfer(RawOrigin::Root.into(), 4, 2, new_vesting_schedule), - Error::::ExistingVestingSchedule, + let user2_vesting_schedule = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); - // Fails due to too low transfer amount. + // Too low transfer amount. let new_vesting_schedule_too_low = - VestingInfo { locked: 256 * 1, per_block: 64, starting_block: 10 }; + VestingInfo::new(::MinVestedTransfer::get() - 1, 64, 10); assert_noop!( Vesting::force_vested_transfer( RawOrigin::Root.into(), @@ -328,8 +520,638 @@ fn force_vested_transfer_correctly_fails() { Error::::AmountLow, ); + // `per_block` is 0. + let schedule_per_block_0 = + VestingInfo::new(::MinVestedTransfer::get(), 0, 10); + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 13, 4, schedule_per_block_0), + Error::::InvalidScheduleParams, + ); + + // `locked` is 0. + let schedule_locked_0 = VestingInfo::new(0, 1, 10); + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, schedule_locked_0), + Error::::AmountLow, + ); + // Verify no currency transfer happened. - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); + assert_eq!(user2_free_balance, Balances::free_balance(&2)); + assert_eq!(user4_free_balance, Balances::free_balance(&4)); + // Account 4 has no schedules. + vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn force_vested_transfer_allows_max_schedules() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let mut user_4_free_balance = Balances::free_balance(&4); + let max_schedules = ::MAX_VESTING_SCHEDULES; + let sched = VestingInfo::new( + ::MinVestedTransfer::get(), + 1, // Vest over 2 * 256 blocks. + 10, + ); + + // Add max amount schedules to user 4. + for _ in 0..max_schedules { + assert_ok!(Vesting::force_vested_transfer(RawOrigin::Root.into(), 13, 4, sched)); + } + + // The schedules count towards vesting balance. + let transferred_amount = ::MinVestedTransfer::get() * max_schedules as u64; + assert_eq!(Vesting::vesting_balance(&4), Some(transferred_amount)); + // and free balance. 
+ user_4_free_balance += transferred_amount; + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Cannot insert a 4th vesting schedule when `MaxVestingSchedules` === 3 + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, sched), + Error::::AtMaxVestingSchedules, + ); + // so the free balance does not change. + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Account 4 has fully vested when all the schedules end, + System::set_block_number(::MinVestedTransfer::get() + 10); + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. + vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn merge_schedules_that_have_not_started() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vest over 20 blocks. + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(Balances::usable_balance(&2), 0); + + // Add a schedule that is identical to the one that already exists. + assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched0)); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]); + assert_eq!(Balances::usable_balance(&2), 0); + assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + + // Since we merged identical schedules, the new schedule finishes at the same + // time as the original, just with double the amount. + let sched1 = VestingInfo::new( + sched0.locked() * 2, + sched0.per_block() * 2, + 10, // Starts at the block the schedules are merged/ + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched1]); + + assert_eq!(Balances::usable_balance(&2), 0); + }); +} + +#[test] +fn merge_ongoing_schedules() { + // Merging two schedules that have started will vest both before merging. + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vest over 20 blocks. + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + let sched1 = VestingInfo::new( + ED * 10, + ED, // Vest over 10 blocks. + sched0.starting_block() + 5, // Start at block 15. + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + + // Got to half way through the second schedule where both schedules are actively vesting. + let cur_block = 20; + System::set_block_number(cur_block); + + // Account 2 has no usable balances prior to the merge because they have not unlocked + // with `vest` yet. + assert_eq!(Balances::usable_balance(&2), 0); + + assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + + // Merging schedules un-vests all pre-existing schedules prior to merging, which is + // reflected in account 2's updated usable balance. + let sched0_vested_now = sched0.per_block() * (cur_block - sched0.starting_block()); + let sched1_vested_now = sched1.per_block() * (cur_block - sched1.starting_block()); + assert_eq!(Balances::usable_balance(&2), sched0_vested_now + sched1_vested_now); + + // The locked amount is the sum of what both schedules have locked at the current block. + let sched2_locked = sched1 + .locked_at::(cur_block) + .saturating_add(sched0.locked_at::(cur_block)); + // End block of the new schedule is the greater of either merged schedule. 
+ let sched2_end = sched1 + .ending_block_as_balance::() + .max(sched0.ending_block_as_balance::()); + let sched2_duration = sched2_end - cur_block; + // Based off the new schedules total locked and its duration, we can calculate the + // amount to unlock per block. + let sched2_per_block = sched2_locked / sched2_duration; + + let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, cur_block); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]); + + // And just to double check, we assert the new merged schedule we be cleaned up as expected. + System::set_block_number(30); + vest_and_assert_no_vesting::(2); + }); +} + +#[test] +fn merging_shifts_other_schedules_index() { + // Schedules being merged are filtered out, schedules to the right of any merged + // schedule shift left and the merged schedule is always last. + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let sched0 = VestingInfo::new( + ED * 10, + ED, // Vesting over 10 blocks. + 10, + ); + let sched1 = VestingInfo::new( + ED * 11, + ED, // Vesting over 11 blocks. + 11, + ); + let sched2 = VestingInfo::new( + ED * 12, + ED, // Vesting over 12 blocks. + 12, + ); + + // Account 3 starts out with no schedules, + assert_eq!(Vesting::vesting(&3), None); + // and some usable balance. + let usable_balance = Balances::usable_balance(&3); + assert_eq!(usable_balance, 30 * ED); + + let cur_block = 1; + assert_eq!(System::block_number(), cur_block); + + // Transfer the above 3 schedules to account 3. + assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched0)); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched1)); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched2)); + + // With no schedules vested or merged they are in the order they are created + assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched0, sched1, sched2]); + // and the usable balance has not changed. + assert_eq!(usable_balance, Balances::usable_balance(&3)); + + assert_ok!(Vesting::merge_schedules(Some(3).into(), 0, 2)); + + // Create the merged schedule of sched0 & sched2. + // The merged schedule will have the max possible starting block, + let sched3_start = sched1.starting_block().max(sched2.starting_block()); + // `locked` equal to the sum of the two schedules locked through the current block, + let sched3_locked = + sched2.locked_at::(cur_block) + sched0.locked_at::(cur_block); + // and will end at the max possible block. + let sched3_end = sched2 + .ending_block_as_balance::() + .max(sched0.ending_block_as_balance::()); + let sched3_duration = sched3_end - sched3_start; + let sched3_per_block = sched3_locked / sched3_duration; + let sched3 = VestingInfo::new(sched3_locked, sched3_per_block, sched3_start); + + // The not touched schedule moves left and the new merged schedule is appended. + assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched1, sched3]); + // The usable balance hasn't changed since none of the schedules have started. + assert_eq!(Balances::usable_balance(&3), usable_balance); + }); +} + +#[test] +fn merge_ongoing_and_yet_to_be_started_schedules() { + // Merge an ongoing schedule that has had `vest` called and a schedule that has not already + // started. + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + // Fast forward to half way through the life of sched1. 
+ let mut cur_block = + (sched0.starting_block() + sched0.ending_block_as_balance::()) / 2; + assert_eq!(cur_block, 20); + System::set_block_number(cur_block); + + // Prior to vesting there is no usable balance. + let mut usable_balance = 0; + assert_eq!(Balances::usable_balance(&2), usable_balance); + // Vest the current schedules (which is just sched0 now). + Vesting::vest(Some(2).into()).unwrap(); + + // After vesting the usable balance increases by the unlocked amount. + let sched0_vested_now = sched0.locked() - sched0.locked_at::(cur_block); + usable_balance += sched0_vested_now; + assert_eq!(Balances::usable_balance(&2), usable_balance); + + // Go forward a block. + cur_block += 1; + System::set_block_number(cur_block); + + // And add a schedule that starts after this block, but before sched0 finishes. + let sched1 = VestingInfo::new( + ED * 10, + 1, // Vesting over 256 * 10 (2560) blocks + cur_block + 1, + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); + + // Merge the schedules before sched1 starts. + assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + // After merging, the usable balance only changes by the amount sched0 vested since we + // last called `vest` (which is just 1 block). The usable balance is not affected by + // sched1 because it has not started yet. + usable_balance += sched0.per_block(); + assert_eq!(Balances::usable_balance(&2), usable_balance); + + // The resulting schedule will have the later starting block of the two, + let sched2_start = sched1.starting_block(); + // `locked` equal to the sum of the two schedules locked through the current block, + let sched2_locked = + sched0.locked_at::(cur_block) + sched1.locked_at::(cur_block); + // and will end at the max possible block. + let sched2_end = sched0 + .ending_block_as_balance::() + .max(sched1.ending_block_as_balance::()); + let sched2_duration = sched2_end - sched2_start; + let sched2_per_block = sched2_locked / sched2_duration; + + let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, sched2_start); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]); + }); +} + +#[test] +fn merge_finished_and_ongoing_schedules() { + // If a schedule finishes by the current block we treat the ongoing schedule, + // without any alterations, as the merged one. + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks. + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + let sched1 = VestingInfo::new( + ED * 40, + ED, // Vesting over 40 blocks. + 10, + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); + + // Transfer a 3rd schedule, so we can demonstrate how schedule indices change. + // (We are not merging this schedule.) + let sched2 = VestingInfo::new( + ED * 30, + ED, // Vesting over 30 blocks. + 10, + ); + assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched2)); + + // The schedules are in expected order prior to merging. + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + + // Fast forward to sched0's end block. + let cur_block = sched0.ending_block_as_balance::(); + System::set_block_number(cur_block); + assert_eq!(System::block_number(), 30); + + // Prior to `merge_schedules` and with no vest/vest_other called the user has no usable + // balance. 
+ assert_eq!(Balances::usable_balance(&2), 0); + assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + + // sched2 is now the first, since sched0 & sched1 get filtered out while "merging". + // sched1 gets treated like the new merged schedule by getting pushed onto back + // of the vesting schedules vec. Note: sched0 finished at the current block. + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]); + + // sched0 has finished, so its funds are fully unlocked. + let sched0_unlocked_now = sched0.locked(); + // The remaining schedules are ongoing, so their funds are partially unlocked. + let sched1_unlocked_now = sched1.locked() - sched1.locked_at::(cur_block); + let sched2_unlocked_now = sched2.locked() - sched2.locked_at::(cur_block); + + // Since merging also vests all the schedules, the users usable balance after merging + // includes all pre-existing schedules unlocked through the current block, including + // schedules not merged. + assert_eq!( + Balances::usable_balance(&2), + sched0_unlocked_now + sched1_unlocked_now + sched2_unlocked_now + ); + }); +} + +#[test] +fn merge_finishing_schedules_does_not_create_a_new_one() { + // If both schedules finish by the current block we don't create new one + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // 20 block duration. + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + // Create sched1 and transfer it to account 2. + let sched1 = VestingInfo::new( + ED * 30, + ED, // 30 block duration. + 10, + ); + assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched1)); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + + let all_scheds_end = sched0 + .ending_block_as_balance::() + .max(sched1.ending_block_as_balance::()); + + assert_eq!(all_scheds_end, 40); + System::set_block_number(all_scheds_end); + + // Prior to merge_schedules and with no vest/vest_other called the user has no usable + // balance. + assert_eq!(Balances::usable_balance(&2), 0); + + // Merge schedule 0 and 1. + assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + // The user no longer has any more vesting schedules because they both ended at the + // block they where merged, + assert!(!>::contains_key(&2)); + // and their usable balance has increased by the total amount locked in the merged + // schedules. + assert_eq!(Balances::usable_balance(&2), sched0.locked() + sched1.locked()); + }); +} + +#[test] +fn merge_finished_and_yet_to_be_started_schedules() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // 20 block duration. + 10, // Ends at block 30 + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + let sched1 = VestingInfo::new( + ED * 30, + ED * 2, // 30 block duration. + 35, + ); + assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched1)); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + + let sched2 = VestingInfo::new( + ED * 40, + ED, // 40 block duration. + 30, + ); + // Add a 3rd schedule to demonstrate how sched1 shifts. 
+ assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched2)); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + + System::set_block_number(30); + + // At block 30, sched0 has finished unlocking while sched1 and sched2 are still fully + // locked, + assert_eq!(Vesting::vesting_balance(&2), Some(sched1.locked() + sched2.locked())); + // but since we have not vested usable balance is still 0. + assert_eq!(Balances::usable_balance(&2), 0); + + // Merge schedule 0 and 1. + assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + + // sched0 is removed since it finished, and sched1 is removed and then pushed on the back + // because it is treated as the merged schedule + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]); + + // The usable balance is updated because merging fully unlocked sched0. + assert_eq!(Balances::usable_balance(&2), sched0.locked()); + }); +} + +#[test] +fn merge_schedules_throws_proper_errors() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // 20 block duration. + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + // Account 2 only has 1 vesting schedule. + assert_noop!( + Vesting::merge_schedules(Some(2).into(), 0, 1), + Error::::ScheduleIndexOutOfBounds + ); + + // Account 4 has 0 vesting schedules. + assert_eq!(Vesting::vesting(&4), None); + assert_noop!(Vesting::merge_schedules(Some(4).into(), 0, 1), Error::::NotVesting); + + // There are enough schedules to merge but an index is non-existent. + Vesting::vested_transfer(Some(3).into(), 2, sched0).unwrap(); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]); + assert_noop!( + Vesting::merge_schedules(Some(2).into(), 0, 2), + Error::::ScheduleIndexOutOfBounds + ); + + // It is a storage noop with no errors if the indexes are the same. + assert_storage_noop!(Vesting::merge_schedules(Some(2).into(), 0, 0).unwrap()); + }); +} + +#[test] +fn generates_multiple_schedules_from_genesis_config() { + let vesting_config = vec![ + // 5 * existential deposit locked. + (1, 0, 10, 5 * ED), + // 1 * existential deposit locked. + (2, 10, 20, 19 * ED), + // 2 * existential deposit locked. + (2, 10, 20, 18 * ED), + // 1 * existential deposit locked. + (12, 10, 20, 9 * ED), + // 2 * existential deposit locked. + (12, 10, 20, 8 * ED), + // 3 * existential deposit locked. + (12, 10, 20, 7 * ED), + ]; + ExtBuilder::default() + .existential_deposit(ED) + .vesting_genesis_config(vesting_config) + .build() + .execute_with(|| { + let user1_sched1 = VestingInfo::new(5 * ED, 128, 0u64); + assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_sched1]); + + let user2_sched1 = VestingInfo::new(1 * ED, 12, 10u64); + let user2_sched2 = VestingInfo::new(2 * ED, 25, 10u64); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_sched1, user2_sched2]); + + let user12_sched1 = VestingInfo::new(1 * ED, 12, 10u64); + let user12_sched2 = VestingInfo::new(2 * ED, 25, 10u64); + let user12_sched3 = VestingInfo::new(3 * ED, 38, 10u64); + assert_eq!( + Vesting::vesting(&12).unwrap(), + vec![user12_sched1, user12_sched2, user12_sched3] + ); + }); +} + +#[test] +#[should_panic] +fn multiple_schedules_from_genesis_config_errors() { + // MaxVestingSchedules is 3, but this config has 4 for account 12 so we panic when building + // from genesis. 
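Assuming the mock genesis tuples are `(who, begin, length, liquid_free)` with `locked = free_balance - liquid` and `per_block = locked / length` (an inference from the expected schedules, not spelled out in this diff), the per-block figures 12 / 25 / 38 above fall out of integer division:

fn genesis_schedule_example() {
    const ED: u64 = 256;
    // Account 12 holds 10 * ED in the mock balances genesis.
    let free = 10 * ED;
    // Third schedule for account 12: (12, 10, 20, 7 * ED).
    let (begin, length, liquid) = (10u64, 20u64, 7 * ED);

    let locked = free - liquid; // 3 * ED = 768
    let per_block = locked / length; // 768 / 20 = 38 (integer division)

    assert_eq!((locked, per_block, begin), (3 * ED, 38, 10));
}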
+ let vesting_config = + vec![(12, 10, 20, ED), (12, 10, 20, ED), (12, 10, 20, ED), (12, 10, 20, ED)]; + ExtBuilder::default() + .existential_deposit(ED) + .vesting_genesis_config(vesting_config) + .build(); +} + +#[test] +fn build_genesis_has_storage_version_v1() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert_eq!(StorageVersion::::get(), Releases::V1); + }); +} + +#[test] +fn merge_vesting_handles_per_block_0() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let sched0 = VestingInfo::new( + ED, 0, // Vesting over 256 blocks. + 1, + ); + assert_eq!(sched0.ending_block_as_balance::(), 257); + let sched1 = VestingInfo::new( + ED * 2, + 0, // Vesting over 512 blocks. + 10, + ); + assert_eq!(sched1.ending_block_as_balance::(), 512u64 + 10); + + let merged = VestingInfo::new(764, 1, 10); + assert_eq!(Vesting::merge_vesting_info(5, sched0, sched1), Some(merged)); + }); +} + +#[test] +fn vesting_info_validate_works() { + let min_transfer = ::MinVestedTransfer::get(); + // Does not check for min transfer. + assert_eq!(VestingInfo::new(min_transfer - 1, 1u64, 10u64).is_valid(), true); + + // `locked` cannot be 0. + assert_eq!(VestingInfo::new(0, 1u64, 10u64).is_valid(), false); + + // `per_block` cannot be 0. + assert_eq!(VestingInfo::new(min_transfer + 1, 0u64, 10u64).is_valid(), false); + + // With valid inputs it does not error. + assert_eq!(VestingInfo::new(min_transfer, 1u64, 10u64).is_valid(), true); +} + +#[test] +fn vesting_info_ending_block_as_balance_works() { + // Treats `per_block` 0 as 1. + let per_block_0 = VestingInfo::new(256u32, 0u32, 10u32); + assert_eq!(per_block_0.ending_block_as_balance::(), 256 + 10); + + // `per_block >= locked` always results in a schedule ending the block after it starts + let per_block_gt_locked = VestingInfo::new(256u32, 256 * 2u32, 10u32); + assert_eq!( + per_block_gt_locked.ending_block_as_balance::(), + 1 + per_block_gt_locked.starting_block() + ); + let per_block_eq_locked = VestingInfo::new(256u32, 256u32, 10u32); + assert_eq!( + per_block_gt_locked.ending_block_as_balance::(), + per_block_eq_locked.ending_block_as_balance::() + ); + + // Correctly calcs end if `locked % per_block != 0`. (We need a block to unlock the remainder). + let imperfect_per_block = VestingInfo::new(256u32, 250u32, 10u32); + assert_eq!( + imperfect_per_block.ending_block_as_balance::(), + imperfect_per_block.starting_block() + 2u32, + ); + assert_eq!( + imperfect_per_block + .locked_at::(imperfect_per_block.ending_block_as_balance::()), + 0 + ); +} + +#[test] +fn per_block_works() { + let per_block_0 = VestingInfo::new(256u32, 0u32, 10u32); + assert_eq!(per_block_0.per_block(), 1u32); + assert_eq!(per_block_0.raw_per_block(), 0u32); + + let per_block_1 = VestingInfo::new(256u32, 1u32, 10u32); + assert_eq!(per_block_1.per_block(), 1u32); + assert_eq!(per_block_1.raw_per_block(), 1u32); +} + +// When an accounts free balance + schedule.locked is less than ED, the vested transfer will fail. +#[test] +fn vested_transfer_less_than_existential_deposit_fails() { + ExtBuilder::default().existential_deposit(4 * ED).build().execute_with(|| { + // MinVestedTransfer is less the ED. + assert!( + ::Currency::minimum_balance() > + ::MinVestedTransfer::get() + ); + + let sched = + VestingInfo::new(::MinVestedTransfer::get() as u64, 1u64, 10u64); + // The new account balance with the schedule's locked amount would be less than ED. 
+ assert!( + Balances::free_balance(&99) + sched.locked() < + <Test as Config>::Currency::minimum_balance() + ); + + // vested_transfer fails. + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 99, sched), + pallet_balances::Error::<Test, _>::ExistentialDeposit, + ); + // force_vested_transfer fails. + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 99, sched), + pallet_balances::Error::<Test, _>::ExistentialDeposit, + ); }); } diff --git a/frame/vesting/src/vesting_info.rs b/frame/vesting/src/vesting_info.rs new file mode 100644 index 0000000000000..72171910086cd --- /dev/null +++ b/frame/vesting/src/vesting_info.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Module to enforce private fields on `VestingInfo`. + +use super::*; + +/// Struct to encode the vesting schedule of an individual account. +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen)] +pub struct VestingInfo<Balance, BlockNumber> { + /// Locked amount at genesis. + locked: Balance, + /// Amount that gets unlocked every block after `starting_block`. + per_block: Balance, + /// Starting block for unlocking(vesting). + starting_block: BlockNumber, +} + +impl<Balance, BlockNumber> VestingInfo<Balance, BlockNumber> +where + Balance: AtLeast32BitUnsigned + Copy, + BlockNumber: AtLeast32BitUnsigned + Copy + Bounded, +{ + /// Instantiate a new `VestingInfo`. + pub fn new( + locked: Balance, + per_block: Balance, + starting_block: BlockNumber, + ) -> VestingInfo<Balance, BlockNumber> { + VestingInfo { locked, per_block, starting_block } + } + + /// Validate parameters for `VestingInfo`. Note that this does not check + /// against `MinVestedTransfer`. + pub fn is_valid(&self) -> bool { + !self.locked.is_zero() && !self.raw_per_block().is_zero() + } + + /// Locked amount at schedule creation. + pub fn locked(&self) -> Balance { + self.locked + } + + /// Amount that gets unlocked every block after `starting_block`. Corrects for `per_block` of 0. + /// We don't let `per_block` be less than 1, or else the vesting will never end. + /// This should be used whenever accessing `per_block` unless explicitly checking for 0 values. + pub fn per_block(&self) -> Balance { + self.per_block.max(One::one()) + } + + /// Get the unmodified `per_block`. Generally should not be used, but is useful for + /// validating `per_block`. + pub(crate) fn raw_per_block(&self) -> Balance { + self.per_block + } + + /// Starting block for unlocking(vesting). + pub fn starting_block(&self) -> BlockNumber { + self.starting_block + } + + /// Amount locked at block `n`. + pub fn locked_at<BlockNumberToBalance: Convert<BlockNumber, Balance>>( + &self, + n: BlockNumber, + ) -> Balance { + // Number of blocks that count toward vesting; + // saturating to 0 when n < starting_block. + let vested_block_count = n.saturating_sub(self.starting_block); + let vested_block_count = BlockNumberToBalance::convert(vested_block_count); + // Return amount that is still locked in vesting.
+ vested_block_count + .checked_mul(&self.per_block()) // `per_block` accessor guarantees at least 1. + .map(|to_unlock| self.locked.saturating_sub(to_unlock)) + .unwrap_or(Zero::zero()) + } + + /// Block number at which the schedule ends (as type `Balance`). + pub fn ending_block_as_balance<BlockNumberToBalance: Convert<BlockNumber, Balance>>( + &self, + ) -> Balance { + let starting_block = BlockNumberToBalance::convert(self.starting_block); + let duration = if self.per_block() >= self.locked { + // If `per_block` is bigger than `locked`, the schedule will end + // the block after starting. + One::one() + } else { + self.locked / self.per_block() + + if (self.locked % self.per_block()).is_zero() { + Zero::zero() + } else { + // `per_block` does not perfectly divide `locked`, so we need an extra block to + // unlock some amount less than `per_block`. + One::one() + } + }; + + starting_block.saturating_add(duration) + } +} diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 50f72b44d6cf5..3ccc1a5bda362 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_vesting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-08-10, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -45,135 +45,209 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_vesting. pub trait WeightInfo { - fn vest_locked(l: u32, ) -> Weight; - fn vest_unlocked(l: u32, ) -> Weight; - fn vest_other_locked(l: u32, ) -> Weight; - fn vest_other_unlocked(l: u32, ) -> Weight; - fn vested_transfer(l: u32, ) -> Weight; - fn force_vested_transfer(l: u32, ) -> Weight; + fn vest_locked(l: u32, s: u32, ) -> Weight; + fn vest_unlocked(l: u32, s: u32, ) -> Weight; + fn vest_other_locked(l: u32, s: u32, ) -> Weight; + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight; + fn vested_transfer(l: u32, s: u32, ) -> Weight; + fn force_vested_transfer(l: u32, s: u32, ) -> Weight; + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight; + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight; } /// Weights for pallet_vesting using the Substrate node and recommended hardware.
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Vesting Vesting (r:1 w:0) + // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vest_locked(l: u32, ) -> Weight { - (42_983_000 as Weight) - // Standard Error: 9_000 - .saturating_add((190_000 as Weight).saturating_mul(l as Weight)) + fn vest_locked(l: u32, s: u32, ) -> Weight { + (50_642_000 as Weight) + // Standard Error: 1_000 + .saturating_add((144_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((177_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vest_unlocked(l: u32, ) -> Weight { - (46_213_000 as Weight) - // Standard Error: 5_000 - .saturating_add((158_000 as Weight).saturating_mul(l as Weight)) + fn vest_unlocked(l: u32, s: u32, ) -> Weight { + (50_830_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((112_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - // Storage: Vesting Vesting (r:1 w:0) + // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn vest_other_locked(l: u32, ) -> Weight { - (42_644_000 as Weight) - // Standard Error: 11_000 - .saturating_add((202_000 as Weight).saturating_mul(l as Weight)) + fn vest_other_locked(l: u32, s: u32, ) -> Weight { + (52_151_000 as Weight) + // Standard Error: 1_000 + .saturating_add((130_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((162_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn vest_other_unlocked(l: u32, ) -> Weight { - (45_765_000 as Weight) - // Standard Error: 5_000 - .saturating_add((159_000 as Weight).saturating_mul(l as Weight)) + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { + (51_009_000 as Weight) + // Standard Error: 4_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 9_000 + .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vested_transfer(l: u32, ) -> Weight { - (97_417_000 as Weight) - // Standard Error: 11_000 - .saturating_add((235_000 as Weight).saturating_mul(l as Weight)) + fn vested_transfer(l: u32, s: u32, ) -> Weight { + (89_517_000 as Weight) + // Standard Error: 5_000 + .saturating_add((114_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 10_000 + .saturating_add((23_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: 
Balances Locks (r:1 w:1) - fn force_vested_transfer(l: u32, ) -> Weight { - (97_661_000 as Weight) - // Standard Error: 16_000 - .saturating_add((239_000 as Weight).saturating_mul(l as Weight)) + fn force_vested_transfer(l: u32, s: u32, ) -> Weight { + (87_903_000 as Weight) + // Standard Error: 6_000 + .saturating_add((121_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 12_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (54_463_000 as Weight) + // Standard Error: 2_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 5_000 + .saturating_add((149_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (53_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((137_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 4_000 + .saturating_add((152_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } } // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Vesting Vesting (r:1 w:0) + // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vest_locked(l: u32, ) -> Weight { - (42_983_000 as Weight) - // Standard Error: 9_000 - .saturating_add((190_000 as Weight).saturating_mul(l as Weight)) + fn vest_locked(l: u32, s: u32, ) -> Weight { + (50_642_000 as Weight) + // Standard Error: 1_000 + .saturating_add((144_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((177_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vest_unlocked(l: u32, ) -> Weight { - (46_213_000 as Weight) - // Standard Error: 5_000 - .saturating_add((158_000 as Weight).saturating_mul(l as Weight)) + fn vest_unlocked(l: u32, s: u32, ) -> Weight { + (50_830_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((112_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } - // Storage: Vesting Vesting (r:1 w:0) + // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn vest_other_locked(l: u32, ) -> Weight { - (42_644_000 as Weight) - // Standard Error: 11_000 - .saturating_add((202_000 as Weight).saturating_mul(l as Weight)) + fn vest_other_locked(l: u32, s: u32, ) -> Weight { + (52_151_000 as Weight) + // Standard Error: 1_000 + .saturating_add((130_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((162_000 as 
Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn vest_other_unlocked(l: u32, ) -> Weight { - (45_765_000 as Weight) - // Standard Error: 5_000 - .saturating_add((159_000 as Weight).saturating_mul(l as Weight)) + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { + (51_009_000 as Weight) + // Standard Error: 4_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 9_000 + .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vested_transfer(l: u32, ) -> Weight { - (97_417_000 as Weight) - // Standard Error: 11_000 - .saturating_add((235_000 as Weight).saturating_mul(l as Weight)) + fn vested_transfer(l: u32, s: u32, ) -> Weight { + (89_517_000 as Weight) + // Standard Error: 5_000 + .saturating_add((114_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 10_000 + .saturating_add((23_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) - fn force_vested_transfer(l: u32, ) -> Weight { - (97_661_000 as Weight) - // Standard Error: 16_000 - .saturating_add((239_000 as Weight).saturating_mul(l as Weight)) + fn force_vested_transfer(l: u32, s: u32, ) -> Weight { + (87_903_000 as Weight) + // Standard Error: 6_000 + .saturating_add((121_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 12_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (54_463_000 as Weight) + // Standard Error: 2_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 5_000 + .saturating_add((149_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (53_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((137_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 4_000 + .saturating_add((152_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } } From 7c265c03d493010c4c3be825fd79421e770ee0f8 Mon Sep 17 00:00:00 2001 From: brenzi Date: Tue, 24 Aug 2021 09:30:39 +0200 Subject: [PATCH 03/14] rebranding SubstraTEE to integritee (#9248) prefix 13 for parachain prefix 113 for sidechains and offchain workers involving a runtime --- ss58-registry.json | 23 
++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/ss58-registry.json b/ss58-registry.json index 23aab7ea0c711..fc5de10335661 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -129,12 +129,12 @@ }, { "prefix": 13, - "network": "substratee", - "displayName": "SubstraTEE", - "symbols": null, - "decimals": null, + "network": "integritee", + "displayName": "Integritee", + "symbols": ["TEER"], + "decimals": [12], "standardAccount": "*25519", - "website": "https://www.substratee.com" + "website": "https://integritee.network" }, { "prefix": 14, @@ -541,7 +541,7 @@ "standardAccount": "secp256k1", "website": "https://origintrail.io" }, - { + { "prefix": 110, "network": "heiko", "displayName": "Heiko", @@ -549,7 +549,16 @@ "decimals": [12], "standardAccount": "*25519", "website": "https://parallel.fi/" - }, + }, + { + "prefix": 113, + "network": "integritee-incognito", + "displayName": "Integritee Incognito", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": "https://integritee.network" + }, { "prefix": 136, "network": "altair", From 2a1c7be144dd6c231aa604d5d463a8506dec71c0 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Tue, 24 Aug 2021 10:37:14 +0200 Subject: [PATCH 04/14] Fix spelling (#9614) --- frame/uniques/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 37855253ffcae..b4a0b9821683a 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -202,9 +202,9 @@ pub mod pallet { ForceCreated(T::ClassId, T::AccountId), /// An asset `class` was destroyed. \[ class \] Destroyed(T::ClassId), - /// An asset `instace` was issued. \[ class, instance, owner \] + /// An asset `instance` was issued. \[ class, instance, owner \] Issued(T::ClassId, T::InstanceId, T::AccountId), - /// An asset `instace` was transferred. \[ class, instance, from, to \] + /// An asset `instance` was transferred. \[ class, instance, from, to \] Transferred(T::ClassId, T::InstanceId, T::AccountId, T::AccountId), /// An asset `instance` was destroyed. \[ class, instance, owner \] Burned(T::ClassId, T::InstanceId, T::AccountId), From 2b70250d309959f5dbc006510314c391f22e89b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Tue, 24 Aug 2021 11:54:30 +0200 Subject: [PATCH 05/14] Remove useless borrow (#9615) --- client/executor/common/src/util.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs index 995424bfa8399..3ea29540f98ee 100644 --- a/client/executor/common/src/util.rs +++ b/client/executor/common/src/util.rs @@ -105,7 +105,7 @@ pub mod wasmi { let range = checked_range(dest_addr.into(), source.len(), destination.len()) .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - &mut destination[range].copy_from_slice(source); + destination[range].copy_from_slice(source); Ok(()) }) } From 52e799a6bfcd9166d5893ee7fd2f6af6e6526104 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Aug 2021 11:54:53 +0200 Subject: [PATCH 06/14] Fetch runtime code from storage cache when using proofing backend (#9611) Before we fetched the runtime code from the `TrieBackend` and this lead to not using the storage cache. Thus, we recalculated the storage hash for the runtime code on every call into the runtime and this killed the performance on parachains block authoring. 
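Roughly, the proving path used to pull the runtime code straight out of the raw trie backend, below the caching layer, along these lines (condensed from the removed lines in the diff below; error handling trimmed):

    let trie_state = state.as_trie_backend().ok_or_else(|| /* ... */)?;
    let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_state);
    let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?;

Because `trie_state` sits below the storage cache, every such call recomputed the storage hash of the runtime code.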
The solution is to fetch the runtime code from the storage cache, to make sure we use the cached storage cache. --- client/api/src/cht.rs | 2 +- client/db/src/bench.rs | 6 ----- client/db/src/lib.rs | 2 +- client/db/src/storage_cache.rs | 6 ++--- client/light/src/backend.rs | 4 +-- client/service/src/client/call_executor.rs | 26 ++++++++----------- primitives/state-machine/src/backend.rs | 2 +- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/state-machine/src/lib.rs | 16 ++++++------ .../state-machine/src/proving_backend.rs | 4 +-- primitives/state-machine/src/trie_backend.rs | 2 +- 11 files changed, 31 insertions(+), 41 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 83bc84c6ec9b5..ee7854b5d8297 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -117,7 +117,7 @@ where .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); let trie_storage = storage .as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index a4b8f6696ea6a..1b7826f973999 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -454,12 +454,6 @@ impl StateBackend> for BenchmarkingState { .map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } - fn as_trie_backend( - &mut self, - ) -> Option<&sp_state_machine::TrieBackend>> { - None - } - fn commit( &self, storage_root: as Hasher>::Out, diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9f1dd4c0ec07f..c7d6029c5356d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -274,7 +274,7 @@ impl StateBackend> for RefTrackingState { } fn as_trie_backend( - &mut self, + &self, ) -> Option<&sp_state_machine::TrieBackend>> { self.state.as_trie_backend() } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 3193d34796196..a895324a2e7b9 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -703,7 +703,7 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + fn as_trie_backend(&self) -> Option<&TrieBackend>> { self.state.as_trie_backend() } @@ -901,9 +901,9 @@ impl>, B: BlockT> StateBackend> self.caching_state().child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + fn as_trie_backend(&self) -> Option<&TrieBackend>> { self.caching_state - .as_mut() + .as_ref() .expect("`caching_state` is valid for the lifetime of the object; qed") .as_trie_backend() } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 87d7dba3ddfb1..3091dce625a3f 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -569,9 +569,9 @@ where sp_state_machine::UsageInfo::empty() } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&self) -> Option<&TrieBackend> { match self { - GenesisOrUnavailableState::Genesis(ref mut state) => state.as_trie_backend(), + GenesisOrUnavailableState::Genesis(ref state) => state.as_trie_backend(), GenesisOrUnavailableState::Unavailable => None, } } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 41cc1526fa3e0..9b8774ce6d497 100644 --- a/client/service/src/client/call_executor.rs +++ 
b/client/service/src/client/call_executor.rs @@ -212,7 +212,7 @@ where backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - let mut state = self.backend.state_at(*at)?; + let state = self.backend.state_at(*at)?; let changes = &mut *changes.borrow_mut(); @@ -220,6 +220,15 @@ where sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) })?; + // It is important to extract the runtime code here before we create the proof + // recorder to not record it. We also need to fetch the runtime code from `state` to + // make sure we use the caching layers. + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; + match recorder { Some(recorder) => { let trie_state = state.as_trie_backend().ok_or_else(|| { @@ -227,14 +236,6 @@ where as Box })?; - let state_runtime_code = - sp_state_machine::backend::BackendRuntimeCode::new(trie_state); - // It is important to extract the runtime code here before we create the proof - // recorder. - let runtime_code = - state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, at)?; - let backend = sp_state_machine::ProvingBackend::new_with_recorder( trie_state, recorder.clone(), @@ -259,11 +260,6 @@ where ) }, None => { - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = - state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, at)?; - let mut state_machine = StateMachine::new( &state, changes_trie_state, @@ -309,7 +305,7 @@ where method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let mut state = self.backend.state_at(*at)?; + let state = self.backend.state_at(*at)?; let trie_backend = state.as_trie_backend().ok_or_else(|| { Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index de4ff33b51fe8..1b1a732f8d0fc 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -173,7 +173,7 @@ pub trait Backend: sp_std::fmt::Debug { } /// Try convert into trie backend. 
- fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&self) -> Option<&TrieBackend> { None } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 4daf1004a85fc..3e75ff5126a61 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -175,7 +175,7 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let mut storage = storage + let storage = storage .update(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 3c4acdccb10c5..07d7e54530ea2 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -719,7 +719,7 @@ mod execution { } /// Generate storage read proof. - pub fn prove_read(mut backend: B, keys: I) -> Result> + pub fn prove_read(backend: B, keys: I) -> Result> where B: Backend, H: Hasher, @@ -735,7 +735,7 @@ mod execution { /// Generate range storage read proof. pub fn prove_range_read_with_size( - mut backend: B, + backend: B, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, size_limit: usize, @@ -794,7 +794,7 @@ mod execution { /// Generate child storage read proof. pub fn prove_child_read( - mut backend: B, + backend: B, child_info: &ChildInfo, keys: I, ) -> Result> @@ -1197,7 +1197,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let mut state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from(initial); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1350,7 +1350,7 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1372,7 +1372,7 @@ mod tests { fn append_storage_works() { let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1427,7 +1427,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1696,7 +1696,7 @@ mod tests { b"aaa".to_vec() => b"0".to_vec(), b"bbb".to_vec() => b"".to_vec() ]; - let mut state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from(initial); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 3a242313a65c7..690266dab1e72 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -433,7 +433,7 @@ mod tests { fn 
proof_recorded_and_checked() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); + let in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -464,7 +464,7 @@ mod tests { (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(contents); + let in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory .full_storage_root( diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 95007653321cc..4cdf1d3b75e9a 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -253,7 +253,7 @@ where (root, is_default, write_overlay) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&self) -> Option<&TrieBackend> { Some(self) } From 0989d181c4fd506960d0e72dc1394d2e3860f886 Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Tue, 24 Aug 2021 13:22:50 +0300 Subject: [PATCH 07/14] Run tests for the wasmer sandbox (#9610) --- .gitlab-ci.yml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5a2d8c5b48440..98b29fa65e374 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -113,6 +113,26 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs +.test-refs-wasmer-sandbox: &test-refs-wasmer-sandbox + rules: + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + changes: + - client/executor/**/* + - frame/contracts/**/* + - primitives/sandbox/**/* + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + changes: + - client/executor/**/* + - frame/contracts/**/* + - primitives/sandbox/**/* + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + changes: + - client/executor/**/* + - frame/contracts/**/* + - primitives/sandbox/**/* + .build-refs: &build-refs rules: - if: $CI_PIPELINE_SOURCE == "pipeline" @@ -426,6 +446,16 @@ test-full-crypto-feature: - time cargo +nightly build --verbose --no-default-features --features full_crypto - sccache -s +test-wasmer-sandbox: + stage: test + <<: *docker-env + <<: *test-refs-wasmer-sandbox + variables: + <<: *default-vars + script: + - time cargo test --release --features runtime-benchmarks,wasmer-sandbox + - sccache -s + cargo-check-macos: stage: test # shell runner on mac ignores the image set in *docker-env From 6e016995551a6804832da4cccb721a0e2a19fbdc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 24 Aug 2021 12:48:23 +0200 Subject: [PATCH 08/14] Better RPC prometheus metrics. (#9358) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Better RPC prometehus metrics. * Add session metrics. * Add counting requests as well. * Fix type for web build. * Fix browser-node * Filter out unknown method names. * Change Gauge to Counters * Use micros instead of millis. 
* cargo fmt * Update client/rpc-servers/src/lib.rs Co-authored-by: Bastian Köcher * Apply suggestions from code review Co-authored-by: Bastian Köcher * move log to separate lines. * Fix compilation. * cargo +nightly fmt --all Co-authored-by: Bastian Köcher --- client/rpc-servers/src/lib.rs | 55 ++++++- client/rpc-servers/src/middleware.rs | 221 +++++++++++++++++++++++---- client/service/src/builder.rs | 7 +- client/service/src/lib.rs | 29 +++- 4 files changed, 274 insertions(+), 38 deletions(-) diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index a833002fcdbff..6e09a0ea36ac0 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -24,6 +24,7 @@ mod middleware; use jsonrpc_core::{IoHandlerExtension, MetaIoHandler}; use log::error; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use pubsub::PubSubMetadata; use std::io; @@ -42,7 +43,7 @@ const HTTP_THREADS: usize = 4; pub type RpcHandler = pubsub::PubSubHandler; pub use self::inner::*; -pub use middleware::{RpcMetrics, RpcMiddleware}; +pub use middleware::{method_names, RpcMetrics, RpcMiddleware}; /// Construct rpc `IoHandler` pub fn rpc_handler( @@ -73,6 +74,43 @@ pub fn rpc_handler( io } +/// RPC server-specific prometheus metrics. +#[derive(Debug, Clone, Default)] +pub struct ServerMetrics { + /// Number of sessions opened. + session_opened: Option>, + /// Number of sessions closed. + session_closed: Option>, +} + +impl ServerMetrics { + /// Create new WebSocket RPC server metrics. + pub fn new(registry: Option<&Registry>) -> Result { + registry + .map(|r| { + Ok(Self { + session_opened: register( + Counter::new( + "rpc_sessions_opened", + "Number of persistent RPC sessions opened", + )?, + r, + )? + .into(), + session_closed: register( + Counter::new( + "rpc_sessions_closed", + "Number of persistent RPC sessions closed", + )?, + r, + )? + .into(), + }) + }) + .unwrap_or_else(|| Ok(Default::default())) + } +} + #[cfg(not(target_os = "unknown"))] mod inner { use super::*; @@ -84,6 +122,16 @@ mod inner { /// Type alias for ws server pub type WsServer = ws::Server; + impl ws::SessionStats for ServerMetrics { + fn open_session(&self, _id: ws::SessionId) { + self.session_opened.as_ref().map(|m| m.inc()); + } + + fn close_session(&self, _id: ws::SessionId) { + self.session_closed.as_ref().map(|m| m.inc()); + } + } + /// Start HTTP server listening on given address. /// /// **Note**: Only available if `not(target_os = "unknown")`. @@ -114,6 +162,7 @@ mod inner { pub fn start_ipc( addr: &str, io: RpcHandler, + server_metrics: ServerMetrics, ) -> io::Result { let builder = ipc::ServerBuilder::new(io); #[cfg(target_os = "unix")] @@ -122,7 +171,7 @@ mod inner { security_attributes.set_mode(0o600)?; security_attributes }); - builder.start(addr) + builder.session_stats(server_metrics).start(addr) } /// Start WS server listening on given address. 
@@ -136,6 +185,7 @@ mod inner { cors: Option<&Vec>, io: RpcHandler, maybe_max_payload_mb: Option, + server_metrics: ServerMetrics, ) -> io::Result { let rpc_max_payload = maybe_max_payload_mb .map(|mb| mb.saturating_mul(MEGABYTE)) @@ -147,6 +197,7 @@ mod inner { .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) .allowed_origins(map_cors(cors)) .allowed_hosts(hosts_filtering(cors.is_some())) + .session_stats(server_metrics) .start(addr) .map_err(|err| match err { ws::Error::Io(io) => io, diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 5ba5c18a8e953..43380977455df 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -18,41 +18,104 @@ //! Middleware for RPC requests. -use jsonrpc_core::{ - FutureOutput, FutureResponse, Metadata, Middleware as RequestMiddleware, Request, Response, +use std::collections::HashSet; + +use jsonrpc_core::{FutureOutput, FutureResponse, Metadata, Middleware as RequestMiddleware}; +use prometheus_endpoint::{ + register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; -use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; -use futures::{future::Either, Future}; +use futures::{future::Either, Future, FutureExt}; +use pubsub::PubSubMetadata; + +use crate::RpcHandler; /// Metrics for RPC middleware #[derive(Debug, Clone)] pub struct RpcMetrics { - rpc_calls: Option>, + requests_started: CounterVec, + requests_finished: CounterVec, + calls_time: HistogramVec, + calls_started: CounterVec, + calls_finished: CounterVec, } impl RpcMetrics { /// Create an instance of metrics - pub fn new(metrics_registry: Option<&Registry>) -> Result { - Ok(Self { - rpc_calls: metrics_registry - .map(|r| { - register( - CounterVec::new( - Opts::new("rpc_calls_total", "Number of rpc calls received"), - &["protocol"], - )?, - r, - ) - }) - .transpose()?, - }) + pub fn new(metrics_registry: Option<&Registry>) -> Result, PrometheusError> { + if let Some(r) = metrics_registry { + Ok(Some(Self { + requests_started: register( + CounterVec::new( + Opts::new( + "rpc_requests_started", + "Number of RPC requests (not calls) received by the server.", + ), + &["protocol"], + )?, + r, + )?, + requests_finished: register( + CounterVec::new( + Opts::new( + "rpc_requests_finished", + "Number of RPC requests (not calls) processed by the server.", + ), + &["protocol"], + )?, + r, + )?, + calls_time: register( + HistogramVec::new( + HistogramOpts::new( + "rpc_calls_time", + "Total time [μs] of processed RPC calls", + ), + &["protocol", "method"], + )?, + r, + )?, + calls_started: register( + CounterVec::new( + Opts::new( + "rpc_calls_started", + "Number of received RPC calls (unique un-batched requests)", + ), + &["protocol", "method"], + )?, + r, + )?, + calls_finished: register( + CounterVec::new( + Opts::new( + "rpc_calls_finished", + "Number of processed RPC calls (unique un-batched requests)", + ), + &["protocol", "method", "is_error"], + )?, + r, + )?, + })) + } else { + Ok(None) + } } } +/// Instantiates a dummy `IoHandler` given a builder function to extract supported method names. 
+pub fn method_names(gen_handler: F) -> Result, E> +where + F: FnOnce(RpcMiddleware) -> Result, E>, + M: PubSubMetadata, +{ + let io = gen_handler(RpcMiddleware::new(None, HashSet::new(), "dummy"))?; + Ok(io.iter().map(|x| x.0.clone()).collect()) +} + /// Middleware for RPC calls pub struct RpcMiddleware { - metrics: RpcMetrics, + metrics: Option, + known_rpc_method_names: HashSet, transport_label: String, } @@ -61,8 +124,12 @@ impl RpcMiddleware { /// /// - `metrics`: Will be used to report statistics. /// - `transport_label`: The label that is used when reporting the statistics. - pub fn new(metrics: RpcMetrics, transport_label: &str) -> Self { - RpcMiddleware { metrics, transport_label: String::from(transport_label) } + pub fn new( + metrics: Option, + known_rpc_method_names: HashSet, + transport_label: &str, + ) -> Self { + RpcMiddleware { metrics, known_rpc_method_names, transport_label: transport_label.into() } } } @@ -70,15 +137,113 @@ impl RequestMiddleware for RpcMiddleware { type Future = FutureResponse; type CallFuture = FutureOutput; - fn on_request(&self, request: Request, meta: M, next: F) -> Either + fn on_request( + &self, + request: jsonrpc_core::Request, + meta: M, + next: F, + ) -> Either where - F: Fn(Request, M) -> X + Send + Sync, - X: Future> + Send + 'static, + F: Fn(jsonrpc_core::Request, M) -> X + Send + Sync, + X: Future> + Send + 'static, { - if let Some(ref rpc_calls) = self.metrics.rpc_calls { - rpc_calls.with_label_values(&[self.transport_label.as_str()]).inc(); + let metrics = self.metrics.clone(); + let transport_label = self.transport_label.clone(); + if let Some(ref metrics) = metrics { + metrics.requests_started.with_label_values(&[transport_label.as_str()]).inc(); } + let r = next(request, meta); + Either::Left( + async move { + let r = r.await; + if let Some(ref metrics) = metrics { + metrics.requests_finished.with_label_values(&[transport_label.as_str()]).inc(); + } + r + } + .boxed(), + ) + } + + fn on_call( + &self, + call: jsonrpc_core::Call, + meta: M, + next: F, + ) -> Either + where + F: Fn(jsonrpc_core::Call, M) -> X + Send + Sync, + X: Future> + Send + 'static, + { + #[cfg(not(target_os = "unknown"))] + let start = std::time::Instant::now(); + let name = call_name(&call, &self.known_rpc_method_names).to_owned(); + let metrics = self.metrics.clone(); + let transport_label = self.transport_label.clone(); + log::trace!(target: "rpc_metrics", "[{}] {} call: {:?}", transport_label, name, &call); + if let Some(ref metrics) = metrics { + metrics + .calls_started + .with_label_values(&[transport_label.as_str(), name.as_str()]) + .inc(); + } + let r = next(call, meta); + Either::Left( + async move { + let r = r.await; + #[cfg(not(target_os = "unknown"))] + let micros = start.elapsed().as_micros(); + // seems that std::time is not implemented for browser target + #[cfg(target_os = "unknown")] + let micros = 1; + if let Some(ref metrics) = metrics { + metrics + .calls_time + .with_label_values(&[transport_label.as_str(), name.as_str()]) + .observe(micros as _); + metrics + .calls_finished + .with_label_values(&[ + transport_label.as_str(), + name.as_str(), + if is_success(&r) { "true" } else { "false" }, + ]) + .inc(); + } + log::debug!( + target: "rpc_metrics", + "[{}] {} call took {} μs", + transport_label, + name, + micros, + ); + r + } + .boxed(), + ) + } +} + +fn call_name<'a>(call: &'a jsonrpc_core::Call, known_methods: &HashSet) -> &'a str { + // To prevent bloating metric with all invalid method names we filter them out here. 
+ let only_known = |method: &'a String| { + if known_methods.contains(method) { + method.as_str() + } else { + "invalid method" + } + }; + + match call { + jsonrpc_core::Call::Invalid { .. } => "invalid call", + jsonrpc_core::Call::MethodCall(ref call) => only_known(&call.method), + jsonrpc_core::Call::Notification(ref notification) => only_known(¬ification.method), + } +} - Either::Right(next(request, meta)) +fn is_success(output: &Option) -> bool { + match output { + Some(jsonrpc_core::Output::Success(..)) => true, + _ => false, } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index dea9953633199..a1fb1b909773f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -639,12 +639,15 @@ where ) }; let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; - let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.clone())?; + let server_metrics = sc_rpc_server::ServerMetrics::new(config.prometheus_registry())?; + let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.clone(), server_metrics)?; // This is used internally, so don't restrict access to unsafe RPC + let known_rpc_method_names = + sc_rpc_server::method_names(|m| gen_handler(sc_rpc::DenyUnsafe::No, m))?; let rpc_handlers = RpcHandlers(Arc::new( gen_handler( sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics, "inbrowser"), + sc_rpc_server::RpcMiddleware::new(rpc_metrics, known_rpc_method_names, "inbrowser"), )? .into(), )); diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 24506a977e1f7..883ece42362b0 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -349,7 +349,8 @@ fn start_rpc_servers< >( config: &Configuration, mut gen_handler: H, - rpc_metrics: sc_rpc_server::RpcMetrics, + rpc_metrics: Option, + server_metrics: sc_rpc_server::ServerMetrics, ) -> Result, Error> { fn maybe_start_server( address: Option, @@ -383,6 +384,7 @@ fn start_rpc_servers< } } + let rpc_method_names = sc_rpc_server::method_names(|m| gen_handler(sc_rpc::DenyUnsafe::No, m))?; Ok(Box::new(( config .rpc_ipc @@ -392,8 +394,13 @@ fn start_rpc_servers< &*path, gen_handler( sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ipc"), + sc_rpc_server::RpcMiddleware::new( + rpc_metrics.clone(), + rpc_method_names.clone(), + "ipc", + ), )?, + server_metrics.clone(), ) .map_err(Error::from) }) @@ -405,7 +412,11 @@ fn start_rpc_servers< config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "http"), + sc_rpc_server::RpcMiddleware::new( + rpc_metrics.clone(), + rpc_method_names.clone(), + "http", + ), )?, config.rpc_max_payload, ) @@ -419,9 +430,14 @@ fn start_rpc_servers< config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ws"), + sc_rpc_server::RpcMiddleware::new( + rpc_metrics.clone(), + rpc_method_names.clone(), + "ws", + ), )?, config.rpc_max_payload, + server_metrics.clone(), ) .map_err(Error::from) })? 
@@ -440,8 +456,9 @@ fn start_rpc_servers< >( _: &Configuration, _: H, - _: sc_rpc_server::RpcMetrics, -) -> Result, error::Error> { + _: Option, + _: sc_rpc_server::ServerMetrics, +) -> Result, error::Error> { Ok(Box::new(())) } From b7316025ab25213d9d79bbe5292a4350b95bbd02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Aug 2021 16:31:19 +0200 Subject: [PATCH 09/14] Upgrade tokio to 1.10 (#9575) * Upgrade tokio to 1.10 * Fix test runner * Try fix it * Update Cargo.lock * Review feedback * ahhhh * FML * FMT * Fix tests --- Cargo.lock | 296 +++----------------- Cargo.toml | 1 - bin/node/cli/Cargo.toml | 9 +- bin/node/test-runner-example/src/lib.rs | 2 +- client/cli/Cargo.toml | 2 +- client/cli/src/runner.rs | 7 +- client/consensus/manual-seal/Cargo.toml | 2 +- client/consensus/manual-seal/src/lib.rs | 6 +- client/finality-grandpa/Cargo.toml | 2 +- client/finality-grandpa/src/tests.rs | 2 +- client/service/Cargo.toml | 2 +- client/service/src/lib.rs | 2 +- client/service/src/task_manager/mod.rs | 8 +- client/service/src/task_manager/tests.rs | 62 ++-- client/service/test/Cargo.toml | 3 +- client/service/test/src/lib.rs | 65 ++--- test-utils/Cargo.toml | 2 +- test-utils/derive/Cargo.toml | 1 + test-utils/derive/src/lib.rs | 31 +- test-utils/test-crate/Cargo.toml | 2 +- test-utils/test-runner/Cargo.toml | 2 +- test-utils/tests/basic.rs | 8 +- utils/frame/remote-externalities/Cargo.toml | 4 +- utils/frame/rpc/support/Cargo.toml | 2 +- 24 files changed, 144 insertions(+), 379 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ffc6ebd5d0ae..43aa746599d66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1202,17 +1202,6 @@ dependencies = [ "crossbeam-utils 0.8.3", ] -[[package]] -name = "crossbeam-deque" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - [[package]] name = "crossbeam-deque" version = "0.8.0" @@ -1220,25 +1209,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.3", + "crossbeam-epoch", "crossbeam-utils 0.8.3", ] -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg 1.0.1", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", -] - [[package]] name = "crossbeam-epoch" version = "0.9.3" @@ -1248,21 +1222,10 @@ dependencies = [ "cfg-if 1.0.0", "crossbeam-utils 0.8.3", "lazy_static", - "memoffset 0.6.1", + "memoffset", "scopeguard", ] -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - [[package]] name = "crossbeam-utils" version = "0.7.2" @@ -2682,7 +2645,7 @@ dependencies = [ "itoa", "pin-project-lite 0.2.6", "socket2 0.4.0", - "tokio 1.10.1", + "tokio", "tower-service", "tracing", "want", @@ -2700,8 +2663,8 @@ dependencies = [ "log 0.4.14", "rustls", "rustls-native-certs", - "tokio 1.10.1", - "tokio-rustls 0.22.0", + "tokio", 
+ "tokio-rustls", "webpki", ] @@ -2714,7 +2677,7 @@ dependencies = [ "bytes 1.0.1", "hyper 0.14.11", "native-tls", - "tokio 1.10.1", + "tokio", "tokio-native-tls", ] @@ -2941,7 +2904,7 @@ dependencies = [ "log 0.4.14", "serde", "serde_json", - "tokio 1.10.1", + "tokio", "url 1.7.2", "websocket", ] @@ -3041,9 +3004,9 @@ dependencies = [ "jsonrpc-core", "lazy_static", "log 0.4.14", - "tokio 1.10.1", + "tokio", "tokio-stream", - "tokio-util 0.6.7", + "tokio-util", "unicase 2.6.0", ] @@ -3112,9 +3075,9 @@ dependencies = [ "serde_json", "soketto 0.6.0", "thiserror", - "tokio 0.2.25", - "tokio-rustls 0.15.0", - "tokio-util 0.3.1", + "tokio", + "tokio-rustls", + "tokio-util", "url 2.2.1", ] @@ -3977,15 +3940,6 @@ dependencies = [ "libc", ] -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg 1.0.1", -] - [[package]] name = "memoffset" version = "0.6.1" @@ -4107,17 +4061,6 @@ dependencies = [ "slab", ] -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio 0.6.23", -] - [[package]] name = "miow" version = "0.2.2" @@ -5968,7 +5911,7 @@ dependencies = [ "libc", "log 0.4.14", "rand 0.7.3", - "tokio 1.10.1", + "tokio", "winapi 0.3.9", ] @@ -6963,7 +6906,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ "autocfg 1.0.1", - "crossbeam-deque 0.8.0", + "crossbeam-deque", "either", "rayon-core", ] @@ -6975,7 +6918,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel", - "crossbeam-deque 0.8.0", + "crossbeam-deque", "crossbeam-utils 0.8.3", "lazy_static", "num_cpus", @@ -7103,7 +7046,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-version", - "tokio 0.2.25", + "tokio", ] [[package]] @@ -7433,7 +7376,7 @@ dependencies = [ "tempfile", "thiserror", "tiny-bip39", - "tokio 0.2.25", + "tokio", ] [[package]] @@ -7681,7 +7624,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", - "tokio 0.2.25", + "tokio", ] [[package]] @@ -7706,7 +7649,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "substrate-prometheus-endpoint", - "tokio 1.10.1", + "tokio", "tokio-stream", ] @@ -7877,7 +7820,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.25", + "tokio", ] [[package]] @@ -8089,7 +8032,7 @@ dependencies = [ "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 1.10.1", + "tokio", ] [[package]] @@ -8264,7 +8207,7 @@ dependencies = [ "substrate-test-runtime-client", "tempfile", "thiserror", - "tokio 0.2.25", + "tokio", "tracing", "tracing-futures", ] @@ -8274,7 +8217,6 @@ name = "sc-service-test" version = "2.0.0" dependencies = [ "fdlimit", - "futures 0.1.31", "futures 0.3.16", "hex-literal", "log 0.4.14", @@ -8303,7 +8245,7 @@ dependencies = [ "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", - "tokio 0.1.22", + "tokio", ] [[package]] @@ -9791,7 +9733,7 @@ dependencies = [ "sc-rpc-api", "serde", "sp-storage", - "tokio 0.2.25", + "tokio", ] [[package]] @@ 
-9828,7 +9770,7 @@ dependencies = [ "hyper 0.14.11", "log 0.4.14", "prometheus", - "tokio 1.10.1", + "tokio", ] [[package]] @@ -9943,7 +9885,7 @@ dependencies = [ "futures 0.3.16", "sc-service", "substrate-test-utils-derive", - "tokio 0.2.25", + "tokio", "trybuild", ] @@ -9952,6 +9894,7 @@ name = "substrate-test-utils-derive" version = "0.10.0-dev" dependencies = [ "proc-macro-crate 1.0.0", + "proc-macro2", "quote", "syn", ] @@ -9962,7 +9905,7 @@ version = "0.1.0" dependencies = [ "sc-service", "substrate-test-utils", - "tokio 0.2.25", + "tokio", ] [[package]] @@ -10107,7 +10050,7 @@ dependencies = [ "sp-state-machine", "sp-transaction-pool", "sp-wasm-interface", - "tokio 0.2.25", + "tokio", ] [[package]] @@ -10242,51 +10185,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" -[[package]] -name = "tokio" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "mio 0.6.23", - "num_cpus", - "tokio-codec", - "tokio-current-thread", - "tokio-executor", - "tokio-fs", - "tokio-io", - "tokio-reactor", - "tokio-sync", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "tokio-udp", - "tokio-uds", -] - -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "fnv", - "iovec", - "lazy_static", - "libc", - "mio 0.6.23", - "mio-uds", - "num_cpus", - "pin-project-lite 0.1.12", - "signal-hook-registry", - "slab", - "tokio-macros 0.2.6", - "winapi 0.3.9", -] - [[package]] name = "tokio" version = "1.10.1" @@ -10303,7 +10201,7 @@ dependencies = [ "parking_lot 0.11.1", "pin-project-lite 0.2.6", "signal-hook-registry", - "tokio-macros 1.3.0", + "tokio-macros", "winapi 0.3.9", ] @@ -10318,16 +10216,6 @@ dependencies = [ "tokio-io", ] -[[package]] -name = "tokio-current-thread" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" -dependencies = [ - "futures 0.1.31", - "tokio-executor", -] - [[package]] name = "tokio-executor" version = "0.1.10" @@ -10338,17 +10226,6 @@ dependencies = [ "futures 0.1.31", ] -[[package]] -name = "tokio-fs" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" -dependencies = [ - "futures 0.1.31", - "tokio-io", - "tokio-threadpool", -] - [[package]] name = "tokio-io" version = "0.1.13" @@ -10360,17 +10237,6 @@ dependencies = [ "log 0.4.14", ] -[[package]] -name = "tokio-macros" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tokio-macros" version = "1.3.0" @@ -10389,7 +10255,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" dependencies = [ "native-tls", - "tokio 1.10.1", + "tokio", ] [[package]] @@ -10411,18 +10277,6 @@ dependencies = [ "tokio-sync", ] -[[package]] -name = "tokio-rustls" -version = "0.15.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d15e5669243a45f630a5167d101b942174ca94b615445b2057eace1c818736" -dependencies = [ - "futures-core", - "rustls", - "tokio 0.2.25", - "webpki", -] - [[package]] name = "tokio-rustls" version = "0.22.0" @@ -10430,7 +10284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.10.1", + "tokio", "webpki", ] @@ -10442,7 +10296,7 @@ checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ "futures-core", "pin-project-lite 0.2.6", - "tokio 1.10.1", + "tokio", ] [[package]] @@ -10469,35 +10323,6 @@ dependencies = [ "tokio-reactor", ] -[[package]] -name = "tokio-threadpool" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" -dependencies = [ - "crossbeam-deque 0.7.3", - "crossbeam-queue", - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - "log 0.4.14", - "num_cpus", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-timer" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "slab", - "tokio-executor", -] - [[package]] name = "tokio-tls" version = "0.2.1" @@ -10509,54 +10334,6 @@ dependencies = [ "tokio-io", ] -[[package]] -name = "tokio-udp" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "log 0.4.14", - "mio 0.6.23", - "tokio-codec", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-uds" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "libc", - "log 0.4.14", - "mio 0.6.23", - "mio-uds", - "tokio-codec", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -dependencies = [ - "bytes 0.5.6", - "futures-core", - "futures-io", - "futures-sink", - "log 0.4.14", - "pin-project-lite 0.1.12", - "tokio 0.2.25", -] - [[package]] name = "tokio-util" version = "0.6.7" @@ -10565,10 +10342,11 @@ checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" dependencies = [ "bytes 1.0.1", "futures-core", + "futures-io", "futures-sink", "log 0.4.14", "pin-project-lite 0.2.6", - "tokio 1.10.1", + "tokio", ] [[package]] @@ -11347,7 +11125,7 @@ dependencies = [ "cfg-if 0.1.10", "indexmap", "libc", - "memoffset 0.6.1", + "memoffset", "more-asserts", "region", "serde", @@ -11573,7 +11351,7 @@ dependencies = [ "libc", "log 0.4.14", "mach", - "memoffset 0.6.1", + "memoffset", "more-asserts", "rand 0.8.4", "region", diff --git a/Cargo.toml b/Cargo.toml index 6a1c26e952120..f583c2b087c0c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,7 +227,6 @@ cranelift-codegen = { opt-level = 3 } cranelift-wasm = { opt-level = 3 } crc32fast = { opt-level = 3 } crossbeam-deque = { opt-level = 3 } -crossbeam-queue = { 
opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } ed25519-dalek = { opt-level = 3 } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 2caefebbbf3b3..75ac03266cff9 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -113,7 +113,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } -futures = "0.3.9" +futures = "0.3.16" tempfile = "3.1.0" assert_cmd = "1.0" nix = "0.19" @@ -130,12 +130,7 @@ frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../.. substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../../../utils/build-script-utils" } substrate-frame-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/frame-utilities-cli" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } - -[build-dependencies.sc-cli] -version = "0.10.0-dev" -package = "sc-cli" -path = "../../../client/cli" -optional = true +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", optional = true } [features] default = [ "cli" ] diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs index 04c099a2f4c23..6164372ab4f2f 100644 --- a/bin/node/test-runner-example/src/lib.rs +++ b/bin/node/test-runner-example/src/lib.rs @@ -92,7 +92,7 @@ mod tests { #[test] fn test_runner() { - let mut tokio_runtime = build_runtime().unwrap(); + let tokio_runtime = build_runtime().unwrap(); let task_executor = task_executor(tokio_runtime.handle().clone()); let (rpc, task_manager, client, pool, command_sink, backend) = client_parts::< NodeTemplateChainInfo, diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index aadbdef79551b..e2d27b95eca23 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" regex = "1.4.2" -tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } +tokio = { version = "1.10", features = [ "signal", "rt-multi-thread" ] } futures = "0.3.9" fdlimit = "0.2.1" libp2p = "0.39.1" diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 9c5d160c37aa9..686b6b3c05fe4 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -73,8 +73,7 @@ where /// Build a tokio runtime with all features pub fn build_runtime() -> std::result::Result { - tokio::runtime::Builder::new() - .threaded_scheduler() + tokio::runtime::Builder::new_multi_thread() .on_thread_start(|| { TOKIO_THREADS_ALIVE.inc(); TOKIO_THREADS_TOTAL.inc(); @@ -87,7 +86,7 @@ pub fn build_runtime() -> std::result::Result( - mut tokio_runtime: tokio::runtime::Runtime, + tokio_runtime: tokio::runtime::Runtime, future: F, task_manager: TaskManager, ) -> std::result::Result<(), E> @@ -152,7 +151,7 @@ impl Runner { /// A helper function that runs a node with tokio and stops if the process receives the signal /// `SIGTERM` or `SIGINT`. 
pub fn run_node_until_exit( - mut self, + self, initialize: impl FnOnce(Configuration) -> F, ) -> std::result::Result<(), E> where diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 26172f634fa7c..d9ae8521c12f6 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -45,7 +45,7 @@ sp-timestamp = { path = "../../../primitives/timestamp", version = "4.0.0-dev" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" } [dev-dependencies] -tokio = { version = "0.2", features = ["rt-core", "macros"] } +tokio = { version = "1.10.0", features = ["rt-multi-thread", "macros"] } sc-basic-authorship = { path = "../../basic-authorship", version = "0.10.0-dev" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0" } substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0" } diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index f6994e4520246..390c23fe032f1 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -308,7 +308,7 @@ mod tests { consensus_data_provider: None, }); std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task rt.block_on(future); }); @@ -365,7 +365,7 @@ mod tests { create_inherent_data_providers: |_, _| async { Ok(()) }, }); std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task rt.block_on(future); }); @@ -443,7 +443,7 @@ mod tests { create_inherent_data_providers: |_, _| async { Ok(()) }, }); std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task rt.block_on(future); }); diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 37d07c791948b..63a8c9aff225d 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -57,5 +57,5 @@ sc-network-test = { version = "0.8.0", path = "../network/test" } sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } -tokio = { version = "0.2", features = ["rt-core"] } +tokio = "1.10" tempfile = "3.1.0" diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 6b151f314b5c5..1aef7cd1b017a 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1244,7 +1244,7 @@ fn finalize_3_voters_1_light_observer() { #[test] fn voter_catches_up_to_latest_round_when_behind() { sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); + let runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index fe94d57d96e88..94be302ca2708 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -87,5 +87,5 @@ directories = "3.0.2" [dev-dependencies] substrate-test-runtime-client = { 
version = "2.0.0", path = "../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime/" } -tokio = { version = "0.2.25", default-features = false } +tokio = { version = "1.10", features = ["time"] } async-std = { version = "1.6.5", default-features = false } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 883ece42362b0..c8d5a9af35653 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -304,8 +304,8 @@ async fn build_network_future< } } -#[cfg(not(target_os = "unknown"))] // Wrapper for HTTP and WS servers that makes sure they are properly shut down. +#[cfg(not(target_os = "unknown"))] mod waiting { pub struct HttpServer(pub Option); impl Drop for HttpServer { diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index ae89b785870f0..25b08b37a3a10 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -311,7 +311,13 @@ impl TaskManager { Box::pin(async move { join_all(children_shutdowns).await; completion_future.await; - drop(keep_alive); + + // The keep_alive stuff is holding references to some RPC handles etc. These + // RPC handles spawn their own tokio stuff and that doesn't like to be closed in an + // async context. So, we move the deletion to some other thread. + std::thread::spawn(move || { + let _ = keep_alive; + }); }) } diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs index d8789e556e1e9..5b6cd7acdd4ab 100644 --- a/client/service/src/task_manager/tests.rs +++ b/client/service/src/task_manager/tests.rs @@ -35,6 +35,12 @@ impl DropTester { *self.0.lock() += 1; DropTesterRef(self.clone()) } + + fn wait_on_drop(&self) { + while *self != 0 { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + } } impl PartialEq for DropTester { @@ -65,7 +71,7 @@ fn ensure_drop_tester_working() { async fn run_background_task(_keep_alive: impl Any) { loop { - tokio::time::delay_for(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; } } @@ -74,7 +80,7 @@ async fn run_background_task_blocking(duration: Duration, _keep_alive: impl Any) // block for X sec (not interruptible) std::thread::sleep(duration); // await for 1 sec (interruptible) - tokio::time::delay_for(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; } } @@ -84,7 +90,7 @@ fn new_task_manager(task_executor: TaskExecutor) -> TaskManager { #[test] fn ensure_tasks_are_awaited_on_shutdown() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -95,15 +101,15 @@ fn ensure_tasks_are_awaited_on_shutdown() { spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_keep_alive_during_shutdown() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let 
task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -114,15 +120,15 @@ fn ensure_keep_alive_during_shutdown() { spawn_handle.spawn("task1", run_background_task(())); assert_eq!(drop_tester, 1); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 1); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_blocking_futures_are_awaited_on_shutdown() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -139,7 +145,7 @@ fn ensure_blocking_futures_are_awaited_on_shutdown() { ); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); runtime.block_on(task_manager.clean_shutdown()); assert_eq!(drop_tester, 0); @@ -147,7 +153,7 @@ fn ensure_blocking_futures_are_awaited_on_shutdown() { #[test] fn ensure_no_task_can_be_spawn_after_terminate() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -158,17 +164,17 @@ fn ensure_no_task_can_be_spawn_after_terminate() { spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); task_manager.terminate(); spawn_handle.spawn("task3", run_background_task(drop_tester.new_ref())); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_ends_when_task_manager_terminated() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -179,7 +185,7 @@ fn ensure_task_manager_future_ends_when_task_manager_terminated() { spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); task_manager.terminate(); runtime.block_on(task_manager.future()).expect("future has ended without error"); @@ -189,7 +195,7 @@ fn ensure_task_manager_future_ends_when_task_manager_terminated() { #[test] fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| 
())).into(); @@ -201,7 +207,7 @@ fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); spawn_essential_handle.spawn("task3", async { panic!("task failed") }); runtime @@ -209,12 +215,12 @@ fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { .expect_err("future()'s Result must be Err"); assert_eq!(drop_tester, 2); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_children_tasks_ends_when_task_manager_terminated() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -233,17 +239,17 @@ fn ensure_children_tasks_ends_when_task_manager_terminated() { spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 4); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 4); task_manager.terminate(); runtime.block_on(task_manager.future()).expect("future has ended without error"); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -263,7 +269,7 @@ fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 4); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 4); spawn_essential_handle_child_1.spawn("task5", async { panic!("task failed") }); runtime @@ -271,12 +277,12 @@ fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() .expect_err("future()'s Result must be Err"); assert_eq!(drop_tester, 4); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -295,12 +301,12 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 4); // allow the tasks to even start - runtime.block_on(async { 
tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 4); spawn_handle_child_1.spawn("task5", async { panic!("task failed") }); runtime.block_on(async { let t1 = task_manager.future().fuse(); - let t2 = tokio::time::delay_for(Duration::from_secs(3)).fuse(); + let t2 = tokio::time::sleep(Duration::from_secs(3)).fuse(); pin_mut!(t1, t2); @@ -311,5 +317,5 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { }); assert_eq!(drop_tester, 4); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index e64bb30045bb1..85a6dcc9e8b29 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -14,8 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] hex-literal = "0.3.1" tempfile = "3.1.0" -tokio = "0.1.22" -futures01 = { package = "futures", version = "0.1.29" } +tokio = { version = "1.10.0", features = ["time"] } log = "0.4.8" fdlimit = "0.2.1" parking_lot = "0.11.1" diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 6e86b9fcfdb2c..61313b4488cb4 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -18,8 +18,7 @@ //! Service integration test utils. -use futures::{FutureExt as _, TryFutureExt as _}; -use futures01::{Future, Poll, Stream}; +use futures::{task::Poll, Future, FutureExt, TryFutureExt as _}; use log::{debug, info}; use parking_lot::Mutex; use sc_client_api::{Backend, CallExecutor}; @@ -36,9 +35,9 @@ use sc_service::{ use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderBackend; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::{iter, net::Ipv4Addr, pin::Pin, sync::Arc, time::Duration}; +use std::{iter, net::Ipv4Addr, pin::Pin, sync::Arc, task::Context, time::Duration}; use tempfile::TempDir; -use tokio::{prelude::FutureExt, runtime::Runtime, timer::Interval}; +use tokio::{runtime::Runtime, time}; #[cfg(test)] mod client; @@ -57,7 +56,7 @@ struct TestNet { } pub trait TestNetNode: - Clone + Future + Send + 'static + Clone + Future> + Send + 'static { type Block: BlockT; type Backend: Backend; @@ -109,11 +108,10 @@ impl Clone impl Future for TestNetComponents { - type Item = (); - type Error = sc_service::Error; + type Output = Result<(), sc_service::Error>; - fn poll(&mut self) -> Poll { - futures::compat::Compat::new(&mut self.task_manager.lock().future()).poll() + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + Pin::new(&mut self.task_manager.lock().future()).poll(cx) } } @@ -161,33 +159,36 @@ where { let full_nodes = self.full_nodes.clone(); let light_nodes = self.light_nodes.clone(); - let interval = Interval::new_interval(Duration::from_millis(100)) - .map_err(|_| ()) - .for_each(move |_| { + let future = async move { + let mut interval = time::interval(Duration::from_millis(100)); + + loop { + interval.tick().await; + let full_ready = full_nodes .iter() .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)); if !full_ready { - return Ok(()) + continue } let light_ready = light_nodes .iter() .all(|&(ref id, ref service, _)| light_predicate(*id, service)); - if !light_ready { - Ok(()) - } else { - Err(()) + if light_ready { + return } - }) - .timeout(MAX_WAIT_TIME); + } + }; - match self.runtime.block_on(interval) { - Ok(()) => unreachable!("interval always 
fails; qed"), - Err(ref err) if err.is_inner() => (), - Err(_) => panic!("Waited for too long"), + if self + .runtime + .block_on(async move { time::timeout(MAX_WAIT_TIME, future).await }) + .is_err() + { + panic!("Waited for too long"); } } } @@ -306,11 +307,11 @@ where light: impl Iterator Result>, authorities: impl Iterator Result<(F, U), Error>)>, ) { - let executor = self.runtime.executor(); + let handle = self.runtime.handle().clone(); let task_executor: TaskExecutor = { - let executor = executor.clone(); + let executor = handle.clone(); (move |fut: Pin + Send>>, _| { - executor.spawn(fut.unit_error().compat()); + executor.spawn(fut.unit_error()); async {} }) .into() @@ -330,7 +331,7 @@ where let (service, user_data) = authority(node_config).expect("Error creating test node service"); - executor.spawn(service.clone().map_err(|_| ())); + handle.spawn(service.clone().map_err(|_| ())); let addr = addr .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.authority_nodes.push((self.nodes, service, user_data, addr)); @@ -350,7 +351,7 @@ where let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let (service, user_data) = full(node_config).expect("Error creating test node service"); - executor.spawn(service.clone().map_err(|_| ())); + handle.spawn(service.clone().map_err(|_| ())); let addr = addr .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.full_nodes.push((self.nodes, service, user_data, addr)); @@ -370,7 +371,7 @@ where let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let service = light(node_config).expect("Error creating test node service"); - executor.spawn(service.clone().map_err(|_| ())); + handle.spawn(service.clone().map_err(|_| ())); let addr = addr .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.light_nodes.push((self.nodes, service, addr)); @@ -406,7 +407,7 @@ pub fn connectivity( { let temp = tempdir_with_prefix("substrate-connectivity-test"); - let runtime = { + { let mut network = TestNet::new( &temp, spec.clone(), @@ -444,12 +445,8 @@ pub fn connectivity( connected == expected_light_connections }, ); - - network.runtime }; - runtime.shutdown_now().wait().expect("Error shutting down runtime"); - temp.close().expect("Error removing temp dir"); } { diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 560dbb26684c7..4eed6e5e29133 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.16" substrate-test-utils-derive = { version = "0.10.0-dev", path = "./derive" } -tokio = { version = "0.2.13", features = ["macros", "rt-core", "time"] } +tokio = { version = "1.10", features = ["macros", "time"] } [dev-dependencies] sc-service = { version = "0.10.0-dev", path = "../client/service" } diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index 991183edf4aba..566c83f881127 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -12,6 +12,7 @@ description = "Substrate test utilities macros" quote = "1.0.6" syn = { version = "1.0.58", features = ["full"] } proc-macro-crate = "1.0.0" +proc-macro2 = "1.0.28" [lib] proc-macro = true diff --git a/test-utils/derive/src/lib.rs b/test-utils/derive/src/lib.rs index 877792f82de6a..2205b259e3e6c 100644 --- a/test-utils/derive/src/lib.rs +++ b/test-utils/derive/src/lib.rs @@ -22,19 +22,14 @@ use quote::quote; 
#[proc_macro_attribute] pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { - impl_test(args, item) -} - -fn impl_test(args: TokenStream, item: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(item as syn::ItemFn); - let args = syn::parse_macro_input!(args as syn::AttributeArgs); - parse_knobs(input, args).unwrap_or_else(|e| e.to_compile_error().into()) + parse_knobs(input, args.into()).unwrap_or_else(|e| e.to_compile_error().into()) } fn parse_knobs( mut input: syn::ItemFn, - args: syn::AttributeArgs, + args: proc_macro2::TokenStream, ) -> Result { let sig = &mut input.sig; let body = &input.block; @@ -62,7 +57,7 @@ fn parse_knobs( let header = { quote! { - #[#crate_name::tokio::test(#(#args)*)] + #[#crate_name::tokio::test( #args )] } }; @@ -76,25 +71,15 @@ fn parse_knobs( #crate_name::tokio::spawn(fut).map(drop) }) .into(); - let timeout_task = #crate_name::tokio::time::delay_for( + if #crate_name::tokio::time::timeout( std::time::Duration::from_secs( std::env::var("SUBSTRATE_TEST_TIMEOUT") .ok() .and_then(|x| x.parse().ok()) - .unwrap_or(600)) - ).fuse(); - let actual_test_task = async move { - #body - } - .fuse(); - - #crate_name::futures::pin_mut!(timeout_task, actual_test_task); - - #crate_name::futures::select! { - _ = timeout_task => { - panic!("The test took too long!"); - }, - _ = actual_test_task => {}, + .unwrap_or(600)), + async move { #body }, + ).await.is_err() { + panic!("The test took too long!"); } } }; diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index 6ab53fc752eae..fff39c3964ad8 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -12,6 +12,6 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -tokio = { version = "0.2.13", features = ["macros"] } +tokio = { version = "1.10", features = ["macros"] } test-utils = { version = "4.0.0-dev", path = "..", package = "substrate-test-utils" } sc-service = { version = "0.10.0-dev", path = "../../client/service" } diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml index 1debd6fb01646..b5b115771b539 100644 --- a/test-utils/test-runner/Cargo.toml +++ b/test-utils/test-runner/Cargo.toml @@ -48,7 +48,7 @@ frame-system = { path = "../../frame/system" } log = "0.4.8" futures = "0.3.16" -tokio = { version = "0.2", features = ["signal"] } +tokio = { version = "1.10", features = ["signal"] } # Calling RPC jsonrpc-core = "18.0" num-traits = "0.2.14" diff --git a/test-utils/tests/basic.rs b/test-utils/tests/basic.rs index 3273d0386e8a4..b94f85ccba574 100644 --- a/test-utils/tests/basic.rs +++ b/test-utils/tests/basic.rs @@ -29,7 +29,7 @@ async fn panicking_test(_: TaskExecutor) { panic!("boo!"); } -#[substrate_test_utils::test(max_threads = 2)] +#[substrate_test_utils::test(flavor = "multi_thread", worker_threads = 1)] async fn basic_test_with_args(_: TaskExecutor) { assert!(true); } @@ -41,14 +41,14 @@ async fn rename_argument(ex: TaskExecutor) { assert!(true); } -#[substrate_test_utils::test] -#[should_panic(expected = "test took too long")] // NOTE: enable this test only after setting SUBSTRATE_TEST_TIMEOUT to a smaller value // // SUBSTRATE_TEST_TIMEOUT=1 cargo test -- --ignored timeout +#[substrate_test_utils::test] +#[should_panic(expected = "test took too long")] #[ignore] async fn timeout(_: TaskExecutor) { - tokio::time::delay_for(std::time::Duration::from_secs( + tokio::time::sleep(std::time::Duration::from_secs( std::env::var("SUBSTRATE_TEST_TIMEOUT") .expect("env var 
SUBSTRATE_TEST_TIMEOUT has been provided by the user") .parse::() diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index f849c89d7053d..d255499d6c3ad 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = [ - "tokio02", + "tokio1", ] } jsonrpsee-proc-macros = "0.3.0" @@ -30,7 +30,7 @@ sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-version = { version = "4.0.0-dev", path = "../../../primitives/version" } [dev-dependencies] -tokio = { version = "0.2", features = ["macros", "rt-threaded"] } +tokio = { version = "1.10", features = ["macros", "rt-multi-thread"] } pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", version = "5.0.0-dev" } [features] diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 827afb090c8f5..aa9f1bbef8024 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -25,4 +25,4 @@ sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } [dev-dependencies] frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } -tokio = "0.2" +tokio = "1.10" From 0a4c54613411e90433dcfbceef7431990fa9fd2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 25 Aug 2021 09:13:00 +0200 Subject: [PATCH 10/14] Fix `state_subscribeRuntimeVersion` for parachains (#9617) The old implementation was listening for storage changes and every time a block changed the `CODE` storage field, it checked if the runtime version changed. It used the best block to compare against the latest known runtime version. It could happen that you processed the storage notification of block Y and checked the runtime version of block X (the current best block). This is also what happened on parachains. Parachains import blocks and set the new best block in a later step. This means we imported the block that changed the code, got notified and checked the runtime version of the current best block (which would still be the parent of the block that changed the runtime). As the parent did not change the runtime, the runtime version also did not change and we never notified the subscribers. The new implementation now switches to listening for best imported blocks. Every time we import a new best block, we check its runtime version against the latest known runtime version. As we also send a notification when a parachain sets a block as the new best block, we will trigger this code path correctly. It moves some computation from checking if the key was modified to getting the runtime version. As fetching the runtime version is a rather common pattern, it should not make any big difference performance-wise. 
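For readers who want the shape of the new logic without parsing the diff below: it amounts to "take the import notification stream, keep only new-best blocks, and forward the runtime version whenever it differs from the last one seen". The following sketch is illustrative only; it uses plain `futures` streams with stand-in types (`Notification`, `runtime_version_at`) rather than the actual Substrate client API from `state_full.rs`:

// Illustrative sketch only, not part of the patch. Block numbers stand in for hashes.
use futures::{future, stream, StreamExt};

struct Notification { hash: u64, is_new_best: bool }

// Stand-in for `client.runtime_version_at(..)`: pretend the runtime was upgraded at block 3.
fn runtime_version_at(hash: u64) -> u32 { if hash >= 3 { 2 } else { 1 } }

fn main() {
    let imports = stream::iter(vec![
        Notification { hash: 1, is_new_best: true },
        Notification { hash: 2, is_new_best: false }, // imported, but not (yet) the best block
        Notification { hash: 3, is_new_best: true },  // becomes best and carries the new runtime
    ]);

    let mut previous = runtime_version_at(0);
    let changes = imports
        // Only react to blocks that are marked as the new best block.
        .filter(|n| future::ready(n.is_new_best))
        // Emit the runtime version only when it differs from the last known one.
        .filter_map(move |n| {
            let version = runtime_version_at(n.hash);
            future::ready(if version != previous {
                previous = version;
                Some(version)
            } else {
                None
            })
        });

    futures::executor::block_on(changes.for_each(|v| {
        println!("runtime version changed to {}", v);
        future::ready(())
    }));
}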
--- client/rpc/src/state/state_full.rs | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index ef008700f6d5a..0d9a35fd26ec9 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -35,8 +35,7 @@ use sp_blockchain::{ }; use sp_core::{ storage::{ - well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChangeSet, StorageData, - StorageKey, + ChildInfo, ChildType, PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey, }, Bytes, }; @@ -470,17 +469,6 @@ where _meta: crate::Metadata, subscriber: Subscriber, ) { - let stream = match self.client.storage_changes_notification_stream( - Some(&[StorageKey(well_known_keys::CODE.to_vec())]), - None, - ) { - Ok(stream) => stream, - Err(err) => { - let _ = subscriber.reject(Error::from(client_err(err)).into()); - return - }, - }; - self.subscriptions.add(subscriber, |sink| { let version = self .block_or_best(None) @@ -493,12 +481,16 @@ where let client = self.client.clone(); let mut previous_version = version.clone(); - let stream = stream.filter_map(move |_| { - let info = client.info(); + // A stream of all best blocks. + let stream = + client.import_notification_stream().filter(|n| future::ready(n.is_new_best)); + + let stream = stream.filter_map(move |n| { let version = client - .runtime_version_at(&BlockId::hash(info.best_hash)) + .runtime_version_at(&BlockId::hash(n.hash)) .map_err(|e| Error::Client(Box::new(e))) .map_err(Into::into); + if previous_version != version { previous_version = version.clone(); future::ready(Some(Ok::<_, ()>(version))) From 61347ee38b8bc094c039ef71ae561652a73dab83 Mon Sep 17 00:00:00 2001 From: Shaun Wang Date: Wed, 25 Aug 2021 22:27:56 +1200 Subject: [PATCH 11/14] pallet-proxy: emit events on proxy added. (#9546) * pallet-proxy: emit events on proxy added. * Apply review suggestions. --- frame/proxy/src/lib.rs | 21 +++++++++++++++++++-- frame/proxy/src/tests.rs | 1 + 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 6a853c8e2b8e1..0537ed4a32392 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -534,7 +534,12 @@ pub mod pallet { } #[pallet::event] - #[pallet::metadata(T::AccountId = "AccountId", T::ProxyType = "ProxyType", CallHashOf = "Hash")] + #[pallet::metadata( + T::AccountId = "AccountId", + T::ProxyType = "ProxyType", + CallHashOf = "Hash", + T::BlockNumber = "BlockNumber", + )] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A proxy was executed correctly, with the given \[result\]. @@ -545,6 +550,8 @@ pub mod pallet { AnonymousCreated(T::AccountId, T::AccountId, T::ProxyType, u16), /// An announcement was placed to make a call in the future. \[real, proxy, call_hash\] Announced(T::AccountId, T::AccountId, CallHashOf), + /// A proxy was added. \[delegator, delegatee, proxy_type, delay\] + ProxyAdded(T::AccountId, T::AccountId, T::ProxyType, T::BlockNumber), } /// Old name generated by `decl_event`. 
@@ -646,7 +653,11 @@ impl Pallet { ) -> DispatchResult { ensure!(delegator != &delegatee, Error::::NoSelfProxy); Proxies::::try_mutate(delegator, |(ref mut proxies, ref mut deposit)| { - let proxy_def = ProxyDefinition { delegate: delegatee, proxy_type, delay }; + let proxy_def = ProxyDefinition { + delegate: delegatee.clone(), + proxy_type: proxy_type.clone(), + delay, + }; let i = proxies.binary_search(&proxy_def).err().ok_or(Error::::Duplicate)?; proxies.try_insert(i, proxy_def).map_err(|_| Error::::TooMany)?; let new_deposit = Self::deposit(proxies.len() as u32); @@ -656,6 +667,12 @@ impl Pallet { T::Currency::unreserve(delegator, *deposit - new_deposit); } *deposit = new_deposit; + Self::deposit_event(Event::::ProxyAdded( + delegator.clone(), + delegatee, + proxy_type, + delay, + )); Ok(()) }) } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index f3fe1d674a87d..eb4193a18d935 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -194,6 +194,7 @@ fn expect_events(e: Vec) { fn announcement_works() { new_test_ext().execute_with(|| { assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + System::assert_last_event(ProxyEvent::ProxyAdded(1, 3, ProxyType::Any, 1).into()); assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); assert_eq!(Balances::reserved_balance(3), 0); From ba7eb264c0114dfcb4a01d7ad18c04ae6a86477e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 25 Aug 2021 13:37:15 +0200 Subject: [PATCH 12/14] Remove dependency on sandboxing host functions (#9592) * Embed wasmi into the runtime * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Benchmarking Bot --- Cargo.lock | 2 + frame/contracts/src/weights.rs | 1250 ++++++++--------- primitives/sandbox/Cargo.toml | 9 + .../{with_std.rs => embedded_executor.rs} | 208 ++- .../{without_std.rs => host_executor.rs} | 0 primitives/sandbox/src/lib.rs | 13 +- 6 files changed, 743 insertions(+), 739 deletions(-) rename primitives/sandbox/{with_std.rs => embedded_executor.rs} (69%) rename primitives/sandbox/{without_std.rs => host_executor.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index 43aa746599d66..37784ba8f301b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9354,6 +9354,7 @@ name = "sp-sandbox" version = "0.10.0-dev" dependencies = [ "assert_matches", + "log 0.4.14", "parity-scale-codec", "sp-core", "sp-io", @@ -11143,6 +11144,7 @@ dependencies = [ "downcast-rs", "errno", "libc", + "libm", "memory_units", "num-rational 0.2.4", "num-traits", diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index cffdb6ca9f006..b7e711a37aa2b 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-08-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -155,47 +155,47 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize() -> Weight { - (3_175_000 as Weight) + (3_227_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) // Standard Error: 3_000 - .saturating_add((2_201_000 as Weight).saturating_mul(k as Weight)) + .saturating_add((2_273_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (66_035_000 as Weight) - // Standard Error: 6_000 - .saturating_add((38_159_000 as Weight).saturating_mul(q as Weight)) + (50_365_000 as Weight) + // Standard Error: 7_000 + .saturating_add((39_799_000 as Weight).saturating_mul(q as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) fn instrument(c: u32, ) -> Weight { - (35_007_000 as Weight) - // Standard Error: 110_000 - .saturating_add((75_739_000 as Weight).saturating_mul(c as Weight)) + (40_033_000 as Weight) + // Standard Error: 109_000 + .saturating_add((76_424_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:0) fn code_load(c: u32, ) -> Weight { - (6_238_000 as Weight) - // Standard Error: 0 - .saturating_add((1_671_000 as Weight).saturating_mul(c as Weight)) + (6_675_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_668_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) fn code_refcount(c: u32, ) -> Weight { - (10_080_000 as Weight) - // Standard Error: 0 - .saturating_add((2_694_000 as Weight).saturating_mul(c as Weight)) + (10_560_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_704_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -206,11 +206,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (182_161_000 as Weight) - // Standard Error: 115_000 - .saturating_add((113_515_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 7_000 - .saturating_add((2_314_000 as Weight).saturating_mul(s as Weight)) + (479_578_000 as Weight) + // Standard Error: 166_000 + .saturating_add((187_167_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 10_000 + .saturating_add((2_450_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -220,9 +220,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) fn instantiate(s: u32, ) -> Weight { - (183_914_000 as 
Weight) - // Standard Error: 1_000 - .saturating_add((2_224_000 as Weight).saturating_mul(s as Weight)) + (237_664_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_249_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -231,7 +231,7 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) fn call() -> Weight { - (166_507_000 as Weight) + (223_426_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -305,9 +305,9 @@ impl WeightInfo for SubstrateWeight { // Storage: unknown [0x3a6368696c645f73746f726167653a64656661756c743aeb9db1dfeed3a7b47b] (r:1 w:0) // Storage: unknown [0x3a6368696c645f73746f726167653a64656661756c743abdc9db5edf43ffcb0d] (r:1 w:0) fn claim_surcharge(c: u32, ) -> Weight { - (126_115_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_829_000 as Weight).saturating_mul(c as Weight)) + (130_759_000 as Weight) + // Standard Error: 3_000 + .saturating_add((2_850_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -316,9 +316,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_caller(r: u32, ) -> Weight { - (134_110_000 as Weight) - // Standard Error: 130_000 - .saturating_add((230_337_000 as Weight).saturating_mul(r as Weight)) + (492_555_000 as Weight) + // Standard Error: 174_000 + .saturating_add((136_915_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -327,9 +327,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_address(r: u32, ) -> Weight { - (131_212_000 as Weight) - // Standard Error: 116_000 - .saturating_add((230_568_000 as Weight).saturating_mul(r as Weight)) + (487_655_000 as Weight) + // Standard Error: 165_000 + .saturating_add((137_827_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -338,9 +338,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas_left(r: u32, ) -> Weight { - (135_149_000 as Weight) - // Standard Error: 149_000 - .saturating_add((224_830_000 as Weight).saturating_mul(r as Weight)) + (488_993_000 as Weight) + // Standard Error: 195_000 + .saturating_add((137_040_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -349,9 +349,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_balance(r: u32, ) -> Weight { - (148_463_000 as Weight) - // Standard Error: 246_000 - .saturating_add((480_930_000 as Weight).saturating_mul(r as Weight)) + (500_062_000 as Weight) + // Standard Error: 208_000 + .saturating_add((392_337_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -360,9 +360,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: 
Timestamp Now (r:1 w:0) fn seal_value_transferred(r: u32, ) -> Weight { - (137_790_000 as Weight) - // Standard Error: 152_000 - .saturating_add((224_961_000 as Weight).saturating_mul(r as Weight)) + (492_064_000 as Weight) + // Standard Error: 156_000 + .saturating_add((137_082_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -371,9 +371,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_minimum_balance(r: u32, ) -> Weight { - (134_238_000 as Weight) - // Standard Error: 135_000 - .saturating_add((224_433_000 as Weight).saturating_mul(r as Weight)) + (496_566_000 as Weight) + // Standard Error: 159_000 + .saturating_add((137_377_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -382,9 +382,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_tombstone_deposit(r: u32, ) -> Weight { - (135_053_000 as Weight) - // Standard Error: 147_000 - .saturating_add((223_955_000 as Weight).saturating_mul(r as Weight)) + (491_566_000 as Weight) + // Standard Error: 163_000 + .saturating_add((137_586_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -393,9 +393,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_rent_allowance(r: u32, ) -> Weight { - (138_522_000 as Weight) - // Standard Error: 145_000 - .saturating_add((223_459_000 as Weight).saturating_mul(r as Weight)) + (491_459_000 as Weight) + // Standard Error: 150_000 + .saturating_add((137_402_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -404,9 +404,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_block_number(r: u32, ) -> Weight { - (133_568_000 as Weight) - // Standard Error: 143_000 - .saturating_add((224_792_000 as Weight).saturating_mul(r as Weight)) + (488_379_000 as Weight) + // Standard Error: 170_000 + .saturating_add((136_564_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -415,9 +415,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_now(r: u32, ) -> Weight { - (134_786_000 as Weight) - // Standard Error: 130_000 - .saturating_add((224_331_000 as Weight).saturating_mul(r as Weight)) + (494_827_000 as Weight) + // Standard Error: 175_000 + .saturating_add((137_178_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -427,9 +427,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) fn seal_weight_to_fee(r: u32, ) -> Weight { - (147_402_000 as Weight) - // Standard Error: 233_000 - .saturating_add((439_237_000 as Weight).saturating_mul(r as Weight)) + (497_508_000 as Weight) + // Standard Error: 191_000 + .saturating_add((323_559_000 as 
Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -438,9 +438,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas(r: u32, ) -> Weight { - (115_711_000 as Weight) - // Standard Error: 88_000 - .saturating_add((113_467_000 as Weight).saturating_mul(r as Weight)) + (179_076_000 as Weight) + // Standard Error: 124_000 + .saturating_add((62_013_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -449,9 +449,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input(r: u32, ) -> Weight { - (123_004_000 as Weight) - // Standard Error: 78_000 - .saturating_add((6_674_000 as Weight).saturating_mul(r as Weight)) + (480_920_000 as Weight) + // Standard Error: 182_000 + .saturating_add((3_254_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -460,9 +460,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input_per_kb(n: u32, ) -> Weight { - (131_611_000 as Weight) - // Standard Error: 0 - .saturating_add((1_035_000 as Weight).saturating_mul(n as Weight)) + (487_910_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_218_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -471,9 +471,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return(r: u32, ) -> Weight { - (118_327_000 as Weight) - // Standard Error: 84_000 - .saturating_add((4_274_000 as Weight).saturating_mul(r as Weight)) + (470_960_000 as Weight) + // Standard Error: 678_000 + .saturating_add((2_506_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -482,9 +482,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return_per_kb(n: u32, ) -> Weight { - (126_129_000 as Weight) - // Standard Error: 0 - .saturating_add((495_000 as Weight).saturating_mul(n as Weight)) + (478_623_000 as Weight) + // Standard Error: 1_000 + .saturating_add((749_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -494,9 +494,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts DeletionQueue (r:1 w:1) fn seal_terminate(r: u32, ) -> Weight { - (123_759_000 as Weight) - // Standard Error: 115_000 - .saturating_add((89_730_000 as Weight).saturating_mul(r as Weight)) + (481_930_000 as Weight) + // Standard Error: 511_000 + .saturating_add((84_726_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -508,9 +508,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown 
[0x3a6368696c645f73746f726167653a64656661756c743af3fd4cc2fc8d170b6d] (r:1 w:0) fn seal_restore_to(r: u32, ) -> Weight { - (151_364_000 as Weight) - // Standard Error: 263_000 - .saturating_add((99_367_000 as Weight).saturating_mul(r as Weight)) + (514_296_000 as Weight) + // Standard Error: 458_000 + .saturating_add((93_769_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -518,9 +518,9 @@ impl WeightInfo for SubstrateWeight { } // Storage: Skipped Metadata (r:0 w:0) fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_919_000 - .saturating_add((2_415_482_000 as Weight).saturating_mul(d as Weight)) + (313_520_000 as Weight) + // Standard Error: 1_783_000 + .saturating_add((2_435_407_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(T::DbWeight::get().writes(7 as Weight)) @@ -532,9 +532,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) fn seal_random(r: u32, ) -> Weight { - (137_660_000 as Weight) - // Standard Error: 204_000 - .saturating_add((563_042_000 as Weight).saturating_mul(r as Weight)) + (484_059_000 as Weight) + // Standard Error: 285_000 + .saturating_add((443_946_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -543,9 +543,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_deposit_event(r: u32, ) -> Weight { - (137_087_000 as Weight) - // Standard Error: 413_000 - .saturating_add((835_499_000 as Weight).saturating_mul(r as Weight)) + (491_593_000 as Weight) + // Standard Error: 386_000 + .saturating_add((733_958_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -555,11 +555,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:100 w:100) fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_117_515_000 as Weight) - // Standard Error: 2_167_000 - .saturating_add((494_145_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 427_000 - .saturating_add((150_093_000 as Weight).saturating_mul(n as Weight)) + (1_342_357_000 as Weight) + // Standard Error: 2_458_000 + .saturating_add((521_445_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 484_000 + .saturating_add((195_792_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -570,9 +570,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_set_rent_allowance(r: u32, ) -> Weight { - (132_070_000 as Weight) - // Standard Error: 129_000 - .saturating_add((155_669_000 as Weight).saturating_mul(r as Weight)) + (209_818_000 as Weight) + // Standard Error: 157_000 + .saturating_add((93_289_000 as Weight).saturating_mul(r as 
Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -581,17 +581,17 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_debug_message(r: u32, ) -> Weight { - (126_971_000 as Weight) - // Standard Error: 90_000 - .saturating_add((122_445_000 as Weight).saturating_mul(r as Weight)) + (200_027_000 as Weight) + // Standard Error: 145_000 + .saturating_add((79_038_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_set_storage(r: u32, ) -> Weight { - (125_746_000 as Weight) - // Standard Error: 610_000 - .saturating_add((501_265_000 as Weight).saturating_mul(r as Weight)) + (477_211_000 as Weight) + // Standard Error: 709_000 + .saturating_add((407_264_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -603,17 +603,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:1) fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (563_219_000 as Weight) - // Standard Error: 219_000 - .saturating_add((41_578_000 as Weight).saturating_mul(n as Weight)) + (832_538_000 as Weight) + // Standard Error: 262_000 + .saturating_add((87_211_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_clear_storage(r: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_727_000 - .saturating_add((1_001_461_000 as Weight).saturating_mul(r as Weight)) + (199_686_000 as Weight) + // Standard Error: 1_610_000 + .saturating_add((905_125_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -621,9 +621,9 @@ impl WeightInfo for SubstrateWeight { } // Storage: Skipped Metadata (r:0 w:0) fn seal_get_storage(r: u32, ) -> Weight { - (9_115_000 as Weight) - // Standard Error: 784_000 - .saturating_add((660_533_000 as Weight).saturating_mul(r as Weight)) + (335_052_000 as Weight) + // Standard Error: 885_000 + .saturating_add((545_754_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -634,9 +634,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:0) fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (563_175_000 as Weight) - // Standard Error: 206_000 - .saturating_add((89_626_000 as Weight).saturating_mul(n as Weight)) + (800_556_000 as Weight) + // Standard Error: 337_000 + .saturating_add((133_492_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -645,9 +645,9 @@ impl 
WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_transfer(r: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_750_000 - .saturating_add((4_820_493_000 as Weight).saturating_mul(r as Weight)) + (317_531_000 as Weight) + // Standard Error: 1_627_000 + .saturating_add((4_748_591_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -659,8 +659,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 6_692_000 - .saturating_add((11_477_937_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 8_848_000 + .saturating_add((46_947_679_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -671,13 +671,13 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:101 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (11_238_437_000 as Weight) - // Standard Error: 81_620_000 - .saturating_add((3_700_413_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 29_000 - .saturating_add((32_106_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 31_000 - .saturating_add((54_386_000 as Weight).saturating_mul(o as Weight)) + (47_469_660_000 as Weight) + // Standard Error: 45_192_000 + .saturating_add((3_691_145_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 16_000 + .saturating_add((75_339_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 17_000 + .saturating_add((121_494_000 as Weight).saturating_mul(o as Weight)) .saturating_add(T::DbWeight::get().reads(205 as Weight)) .saturating_add(T::DbWeight::get().writes(101 as Weight)) .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) @@ -689,8 +689,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts AccountCounter (r:1 w:1) fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 35_258_000 - .saturating_add((20_674_357_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 32_740_000 + .saturating_add((55_623_588_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) @@ -702,13 +702,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts AccountCounter (r:1 w:1) fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { - (14_725_288_000 as Weight) - // Standard Error: 53_000 - .saturating_add((33_848_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 53_000 - .saturating_add((57_054_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 53_000 - .saturating_add((180_033_000 as Weight).saturating_mul(s as Weight)) + (54_718_944_000 as Weight) + // Standard Error: 29_000 + .saturating_add((75_276_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 29_000 + .saturating_add((121_341_000 as 
Weight).saturating_mul(o as Weight)) + // Standard Error: 29_000 + .saturating_add((223_964_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(206 as Weight)) .saturating_add(T::DbWeight::get().writes(204 as Weight)) } @@ -717,9 +717,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256(r: u32, ) -> Weight { - (131_974_000 as Weight) - // Standard Error: 125_000 - .saturating_add((220_711_000 as Weight).saturating_mul(r as Weight)) + (485_310_000 as Weight) + // Standard Error: 169_000 + .saturating_add((143_364_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -728,9 +728,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (367_148_000 as Weight) - // Standard Error: 12_000 - .saturating_add((462_143_000 as Weight).saturating_mul(n as Weight)) + (632_820_000 as Weight) + // Standard Error: 29_000 + .saturating_add((511_722_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -739,9 +739,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256(r: u32, ) -> Weight { - (134_585_000 as Weight) - // Standard Error: 131_000 - .saturating_add((227_264_000 as Weight).saturating_mul(r as Weight)) + (484_331_000 as Weight) + // Standard Error: 195_000 + .saturating_add((151_617_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -750,9 +750,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (325_319_000 as Weight) - // Standard Error: 12_000 - .saturating_add((313_033_000 as Weight).saturating_mul(n as Weight)) + (565_213_000 as Weight) + // Standard Error: 28_000 + .saturating_add((359_762_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -761,9 +761,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256(r: u32, ) -> Weight { - (135_347_000 as Weight) - // Standard Error: 150_000 - .saturating_add((199_764_000 as Weight).saturating_mul(r as Weight)) + (481_843_000 as Weight) + // Standard Error: 186_000 + .saturating_add((122_838_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -772,9 +772,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (424_473_000 as Weight) - // Standard Error: 13_000 - .saturating_add((130_936_000 as Weight).saturating_mul(n as Weight)) + (582_445_000 as Weight) + // Standard Error: 28_000 + .saturating_add((176_329_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -783,9 +783,9 @@ impl WeightInfo 
for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128(r: u32, ) -> Weight { - (128_776_000 as Weight) - // Standard Error: 118_000 - .saturating_add((203_125_000 as Weight).saturating_mul(r as Weight)) + (486_320_000 as Weight) + // Standard Error: 147_000 + .saturating_add((123_460_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -794,266 +794,266 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (445_726_000 as Weight) - // Standard Error: 14_000 - .saturating_add((130_931_000 as Weight).saturating_mul(n as Weight)) + (515_967_000 as Weight) + // Standard Error: 33_000 + .saturating_add((176_423_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (22_161_000 as Weight) - // Standard Error: 36_000 - .saturating_add((3_329_000 as Weight).saturating_mul(r as Weight)) + (54_127_000 as Weight) + // Standard Error: 25_000 + .saturating_add((10_198_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (24_430_000 as Weight) - // Standard Error: 65_000 - .saturating_add((159_566_000 as Weight).saturating_mul(r as Weight)) + (55_411_000 as Weight) + // Standard Error: 148_000 + .saturating_add((22_916_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (24_443_000 as Weight) - // Standard Error: 62_000 - .saturating_add((232_854_000 as Weight).saturating_mul(r as Weight)) + (55_462_000 as Weight) + // Standard Error: 134_000 + .saturating_add((24_449_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (22_158_000 as Weight) - // Standard Error: 34_000 - .saturating_add((12_112_000 as Weight).saturating_mul(r as Weight)) + (54_114_000 as Weight) + // Standard Error: 18_000 + .saturating_add((26_214_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (22_178_000 as Weight) - // Standard Error: 23_000 - .saturating_add((11_374_000 as Weight).saturating_mul(r as Weight)) + (54_118_000 as Weight) + // Standard Error: 18_000 + .saturating_add((26_492_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (22_157_000 as Weight) - // Standard Error: 41_000 - .saturating_add((5_826_000 as Weight).saturating_mul(r as Weight)) + (54_119_000 as Weight) + // Standard Error: 304_000 + .saturating_add((18_424_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (22_182_000 as Weight) - // Standard Error: 34_000 - .saturating_add((13_647_000 as Weight).saturating_mul(r as Weight)) + (55_352_000 as Weight) + // Standard Error: 13_000 + .saturating_add((32_291_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (22_083_000 as Weight) - // Standard Error: 44_000 - .saturating_add((14_901_000 as Weight).saturating_mul(r as Weight)) + (54_115_000 as Weight) + // Standard Error: 16_000 + .saturating_add((27_785_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (32_689_000 as Weight) + (86_048_000 as Weight) // Standard Error: 1_000 - .saturating_add((154_000 as Weight).saturating_mul(e as Weight)) + 
.saturating_add((82_000 as Weight).saturating_mul(e as Weight))
 	}
 	fn instr_call(r: u32, ) -> Weight {
-		(22_313_000 as Weight)
-		// Standard Error: 383_000
-		.saturating_add((89_804_000 as Weight).saturating_mul(r as Weight))
+		(54_654_000 as Weight)
+		// Standard Error: 82_000
+		.saturating_add((199_159_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_call_indirect(r: u32, ) -> Weight {
-		(29_939_000 as Weight)
-		// Standard Error: 230_000
-		.saturating_add((185_309_000 as Weight).saturating_mul(r as Weight))
+		(67_478_000 as Weight)
+		// Standard Error: 113_000
+		.saturating_add((302_597_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_call_indirect_per_param(p: u32, ) -> Weight {
-		(221_596_000 as Weight)
-		// Standard Error: 3_000
-		.saturating_add((4_045_000 as Weight).saturating_mul(p as Weight))
+		(384_281_000 as Weight)
+		// Standard Error: 13_000
+		.saturating_add((9_984_000 as Weight).saturating_mul(p as Weight))
 	}
 	fn instr_local_get(r: u32, ) -> Weight {
-		(22_171_000 as Weight)
-		// Standard Error: 28_000
-		.saturating_add((3_362_000 as Weight).saturating_mul(r as Weight))
+		(55_473_000 as Weight)
+		// Standard Error: 16_000
+		.saturating_add((9_287_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_local_set(r: u32, ) -> Weight {
-		(22_182_000 as Weight)
-		// Standard Error: 31_000
-		.saturating_add((3_801_000 as Weight).saturating_mul(r as Weight))
+		(55_426_000 as Weight)
+		// Standard Error: 38_000
+		.saturating_add((10_559_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_local_tee(r: u32, ) -> Weight {
-		(22_200_000 as Weight)
-		// Standard Error: 27_000
-		.saturating_add((5_080_000 as Weight).saturating_mul(r as Weight))
+		(55_332_000 as Weight)
+		// Standard Error: 8_000
+		.saturating_add((15_640_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_global_get(r: u32, ) -> Weight {
-		(25_255_000 as Weight)
-		// Standard Error: 41_000
-		.saturating_add((8_875_000 as Weight).saturating_mul(r as Weight))
+		(74_497_000 as Weight)
+		// Standard Error: 22_000
+		.saturating_add((15_067_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_global_set(r: u32, ) -> Weight {
-		(25_145_000 as Weight)
-		// Standard Error: 37_000
-		.saturating_add((9_556_000 as Weight).saturating_mul(r as Weight))
+		(74_445_000 as Weight)
+		// Standard Error: 49_000
+		.saturating_add((17_650_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_memory_current(r: u32, ) -> Weight {
-		(24_435_000 as Weight)
-		// Standard Error: 49_000
-		.saturating_add((4_204_000 as Weight).saturating_mul(r as Weight))
+		(54_500_000 as Weight)
+		// Standard Error: 17_000
+		.saturating_add((9_307_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_memory_grow(r: u32, ) -> Weight {
-		(23_158_000 as Weight)
-		// Standard Error: 5_969_000
-		.saturating_add((2_339_630_000 as Weight).saturating_mul(r as Weight))
+		(54_382_000 as Weight)
+		// Standard Error: 5_644_000
+		.saturating_add((748_424_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_i64clz(r: u32, ) -> Weight {
-		(21_984_000 as Weight)
-		// Standard Error: 25_000
-		.saturating_add((5_421_000 as Weight).saturating_mul(r as Weight))
+		(54_133_000 as Weight)
+		// Standard Error: 20_000
+		.saturating_add((15_830_000 as Weight).saturating_mul(r as Weight))
 	}
 	fn instr_i64ctz(r: u32, ) -> Weight {
-		(22_069_000 as Weight)
-		// Standard Error: 26_000
-		.saturating_add((5_187_000 as Weight).saturating_mul(r as Weight))
+		(54_129_000 as Weight)
+		// Standard Error: 22_000
+		.saturating_add((15_894_000 as Weight).saturating_mul(r as Weight))
 	}
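
// Illustrative sketch only (not part of this patch): every generated function in
// this file has the same shape -- a constant base weight plus a per-repetition
// slope, optionally plus database read/write weights where the benchmark touches
// storage. The sketch below assumes `Weight = u64` and made-up DbWeight
// constants; the base and slope values are the new `instr_i64const` numbers
// from this diff, everything else is hypothetical.
type Weight = u64;

struct DbWeight { read: Weight, write: Weight }

impl DbWeight {
    fn reads(&self, n: Weight) -> Weight { self.read.saturating_mul(n) }
    fn writes(&self, n: Weight) -> Weight { self.write.saturating_mul(n) }
}

// Same formula shape as `instr_i64const(r)` above: base + slope * r.
fn instr_i64const_weight(r: u32) -> Weight {
    (54_127_000 as Weight)
        .saturating_add((10_198_000 as Weight).saturating_mul(r as Weight))
}

fn main() {
    // Hypothetical per-operation RocksDB weights, for illustration only.
    let db = DbWeight { read: 25_000_000, write: 100_000_000 };
    // e.g. 50 repetitions of the instruction benchmark plus one storage read
    // and one write, mirroring the `.saturating_add(T::DbWeight::get().reads(..))`
    // calls used by the seal_* functions above.
    let total = instr_i64const_weight(50)
        .saturating_add(db.reads(1))
        .saturating_add(db.writes(1));
    println!("total weight: {}", total);
}
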
fn instr_i64popcnt(r: u32, ) -> Weight { - (22_042_000 as Weight) - // Standard Error: 28_000 - .saturating_add((6_116_000 as Weight).saturating_mul(r as Weight)) + (54_181_000 as Weight) + // Standard Error: 22_000 + .saturating_add((15_847_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (22_018_000 as Weight) - // Standard Error: 34_000 - .saturating_add((5_130_000 as Weight).saturating_mul(r as Weight)) + (54_130_000 as Weight) + // Standard Error: 17_000 + .saturating_add((15_825_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (21_933_000 as Weight) - // Standard Error: 29_000 - .saturating_add((5_005_000 as Weight).saturating_mul(r as Weight)) + (54_122_000 as Weight) + // Standard Error: 19_000 + .saturating_add((15_803_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (22_066_000 as Weight) - // Standard Error: 34_000 - .saturating_add((4_877_000 as Weight).saturating_mul(r as Weight)) + (54_100_000 as Weight) + // Standard Error: 28_000 + .saturating_add((15_822_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (22_003_000 as Weight) - // Standard Error: 25_000 - .saturating_add((5_018_000 as Weight).saturating_mul(r as Weight)) + (54_143_000 as Weight) + // Standard Error: 19_000 + .saturating_add((15_868_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (22_130_000 as Weight) - // Standard Error: 35_000 - .saturating_add((7_071_000 as Weight).saturating_mul(r as Weight)) + (54_133_000 as Weight) + // Standard Error: 21_000 + .saturating_add((21_121_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (22_112_000 as Weight) - // Standard Error: 24_000 - .saturating_add((7_056_000 as Weight).saturating_mul(r as Weight)) + (54_177_000 as Weight) + // Standard Error: 14_000 + .saturating_add((21_003_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (22_114_000 as Weight) - // Standard Error: 27_000 - .saturating_add((6_974_000 as Weight).saturating_mul(r as Weight)) + (54_164_000 as Weight) + // Standard Error: 31_000 + .saturating_add((21_041_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (22_111_000 as Weight) - // Standard Error: 32_000 - .saturating_add((7_183_000 as Weight).saturating_mul(r as Weight)) + (54_171_000 as Weight) + // Standard Error: 21_000 + .saturating_add((21_101_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (22_148_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_044_000 as Weight).saturating_mul(r as Weight)) + (54_177_000 as Weight) + // Standard Error: 12_000 + .saturating_add((21_074_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (22_158_000 as Weight) - // Standard Error: 33_000 - .saturating_add((7_116_000 as Weight).saturating_mul(r as Weight)) + (54_073_000 as Weight) + // Standard Error: 13_000 + .saturating_add((21_136_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (22_194_000 as Weight) - // Standard Error: 31_000 - .saturating_add((7_039_000 as Weight).saturating_mul(r as Weight)) + (54_116_000 as Weight) + // Standard Error: 17_000 + .saturating_add((21_140_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (22_219_000 as Weight) - // Standard Error: 23_000 - 
.saturating_add((7_076_000 as Weight).saturating_mul(r as Weight)) + (54_115_000 as Weight) + // Standard Error: 21_000 + .saturating_add((21_164_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (22_170_000 as Weight) - // Standard Error: 50_000 - .saturating_add((7_122_000 as Weight).saturating_mul(r as Weight)) + (54_261_000 as Weight) + // Standard Error: 123_000 + .saturating_add((20_921_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (22_113_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_069_000 as Weight).saturating_mul(r as Weight)) + (54_090_000 as Weight) + // Standard Error: 38_000 + .saturating_add((21_171_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (22_090_000 as Weight) - // Standard Error: 29_000 - .saturating_add((6_956_000 as Weight).saturating_mul(r as Weight)) + (54_129_000 as Weight) + // Standard Error: 27_000 + .saturating_add((21_086_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (22_006_000 as Weight) - // Standard Error: 30_000 - .saturating_add((7_094_000 as Weight).saturating_mul(r as Weight)) + (54_126_000 as Weight) + // Standard Error: 11_000 + .saturating_add((21_051_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (22_111_000 as Weight) - // Standard Error: 29_000 - .saturating_add((6_825_000 as Weight).saturating_mul(r as Weight)) + (54_153_000 as Weight) + // Standard Error: 22_000 + .saturating_add((21_021_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (22_041_000 as Weight) - // Standard Error: 29_000 - .saturating_add((13_164_000 as Weight).saturating_mul(r as Weight)) + (54_168_000 as Weight) + // Standard Error: 19_000 + .saturating_add((27_336_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (21_989_000 as Weight) - // Standard Error: 28_000 - .saturating_add((12_808_000 as Weight).saturating_mul(r as Weight)) + (54_124_000 as Weight) + // Standard Error: 22_000 + .saturating_add((24_783_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (22_045_000 as Weight) - // Standard Error: 39_000 - .saturating_add((13_387_000 as Weight).saturating_mul(r as Weight)) + (54_203_000 as Weight) + // Standard Error: 21_000 + .saturating_add((27_539_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (22_075_000 as Weight) - // Standard Error: 40_000 - .saturating_add((12_791_000 as Weight).saturating_mul(r as Weight)) + (54_176_000 as Weight) + // Standard Error: 19_000 + .saturating_add((24_686_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (22_044_000 as Weight) - // Standard Error: 32_000 - .saturating_add((7_090_000 as Weight).saturating_mul(r as Weight)) + (54_111_000 as Weight) + // Standard Error: 356_000 + .saturating_add((22_077_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (22_133_000 as Weight) - // Standard Error: 40_000 - .saturating_add((6_967_000 as Weight).saturating_mul(r as Weight)) + (54_124_000 as Weight) + // Standard Error: 15_000 + .saturating_add((21_060_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (22_069_000 as Weight) - // Standard Error: 41_000 - .saturating_add((7_026_000 as Weight).saturating_mul(r as Weight)) + (54_153_000 as Weight) + // Standard Error: 24_000 + 
.saturating_add((21_064_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (22_165_000 as Weight) - // Standard Error: 44_000 - .saturating_add((7_440_000 as Weight).saturating_mul(r as Weight)) + (54_122_000 as Weight) + // Standard Error: 23_000 + .saturating_add((21_187_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (22_063_000 as Weight) - // Standard Error: 34_000 - .saturating_add((7_309_000 as Weight).saturating_mul(r as Weight)) + (54_149_000 as Weight) + // Standard Error: 18_000 + .saturating_add((21_110_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (22_086_000 as Weight) - // Standard Error: 36_000 - .saturating_add((7_188_000 as Weight).saturating_mul(r as Weight)) + (54_136_000 as Weight) + // Standard Error: 13_000 + .saturating_add((21_066_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (22_109_000 as Weight) - // Standard Error: 45_000 - .saturating_add((7_169_000 as Weight).saturating_mul(r as Weight)) + (54_231_000 as Weight) + // Standard Error: 30_000 + .saturating_add((21_073_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (22_076_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_070_000 as Weight).saturating_mul(r as Weight)) + (54_139_000 as Weight) + // Standard Error: 17_000 + .saturating_add((21_097_000 as Weight).saturating_mul(r as Weight)) } } @@ -1061,47 +1061,47 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize() -> Weight { - (3_175_000 as Weight) + (3_227_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) // Standard Error: 3_000 - .saturating_add((2_201_000 as Weight).saturating_mul(k as Weight)) + .saturating_add((2_273_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (66_035_000 as Weight) - // Standard Error: 6_000 - .saturating_add((38_159_000 as Weight).saturating_mul(q as Weight)) + (50_365_000 as Weight) + // Standard Error: 7_000 + .saturating_add((39_799_000 as Weight).saturating_mul(q as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) fn instrument(c: u32, ) -> Weight { - (35_007_000 as Weight) - // Standard Error: 110_000 - .saturating_add((75_739_000 as Weight).saturating_mul(c as Weight)) + (40_033_000 as Weight) + // Standard Error: 109_000 + .saturating_add((76_424_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:0) fn code_load(c: u32, ) -> Weight { - (6_238_000 as Weight) - // Standard Error: 0 - .saturating_add((1_671_000 as Weight).saturating_mul(c as Weight)) + (6_675_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_668_000 as Weight).saturating_mul(c as Weight)) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) fn code_refcount(c: u32, ) -> Weight { - (10_080_000 as Weight) - // Standard Error: 0 - .saturating_add((2_694_000 as Weight).saturating_mul(c as Weight)) + (10_560_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_704_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1112,11 +1112,11 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (182_161_000 as Weight) - // Standard Error: 115_000 - .saturating_add((113_515_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 7_000 - .saturating_add((2_314_000 as Weight).saturating_mul(s as Weight)) + (479_578_000 as Weight) + // Standard Error: 166_000 + .saturating_add((187_167_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 10_000 + .saturating_add((2_450_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } @@ -1126,9 +1126,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) fn instantiate(s: u32, ) -> Weight { - (183_914_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_224_000 as Weight).saturating_mul(s as Weight)) + (237_664_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_249_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -1137,7 +1137,7 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) fn call() -> Weight { - (166_507_000 as Weight) + (223_426_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -1211,9 +1211,9 @@ impl WeightInfo for () { // Storage: unknown [0x3a6368696c645f73746f726167653a64656661756c743aeb9db1dfeed3a7b47b] (r:1 w:0) // Storage: unknown [0x3a6368696c645f73746f726167653a64656661756c743abdc9db5edf43ffcb0d] (r:1 w:0) fn claim_surcharge(c: u32, ) -> Weight { - (126_115_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_829_000 as Weight).saturating_mul(c as Weight)) + (130_759_000 as Weight) + // Standard Error: 3_000 + .saturating_add((2_850_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } @@ -1222,9 +1222,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_caller(r: u32, ) -> Weight { - (134_110_000 as Weight) - // Standard Error: 130_000 - .saturating_add((230_337_000 as Weight).saturating_mul(r as Weight)) + (492_555_000 as Weight) + // Standard Error: 174_000 + .saturating_add((136_915_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1233,9 +1233,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_address(r: u32, ) -> Weight { - (131_212_000 as Weight) - // Standard Error: 116_000 - .saturating_add((230_568_000 as Weight).saturating_mul(r as Weight)) + (487_655_000 
as Weight) + // Standard Error: 165_000 + .saturating_add((137_827_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1244,9 +1244,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas_left(r: u32, ) -> Weight { - (135_149_000 as Weight) - // Standard Error: 149_000 - .saturating_add((224_830_000 as Weight).saturating_mul(r as Weight)) + (488_993_000 as Weight) + // Standard Error: 195_000 + .saturating_add((137_040_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1255,9 +1255,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_balance(r: u32, ) -> Weight { - (148_463_000 as Weight) - // Standard Error: 246_000 - .saturating_add((480_930_000 as Weight).saturating_mul(r as Weight)) + (500_062_000 as Weight) + // Standard Error: 208_000 + .saturating_add((392_337_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1266,9 +1266,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_value_transferred(r: u32, ) -> Weight { - (137_790_000 as Weight) - // Standard Error: 152_000 - .saturating_add((224_961_000 as Weight).saturating_mul(r as Weight)) + (492_064_000 as Weight) + // Standard Error: 156_000 + .saturating_add((137_082_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1277,9 +1277,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_minimum_balance(r: u32, ) -> Weight { - (134_238_000 as Weight) - // Standard Error: 135_000 - .saturating_add((224_433_000 as Weight).saturating_mul(r as Weight)) + (496_566_000 as Weight) + // Standard Error: 159_000 + .saturating_add((137_377_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1288,9 +1288,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_tombstone_deposit(r: u32, ) -> Weight { - (135_053_000 as Weight) - // Standard Error: 147_000 - .saturating_add((223_955_000 as Weight).saturating_mul(r as Weight)) + (491_566_000 as Weight) + // Standard Error: 163_000 + .saturating_add((137_586_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1299,9 +1299,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_rent_allowance(r: u32, ) -> Weight { - (138_522_000 as Weight) - // Standard Error: 145_000 - .saturating_add((223_459_000 as Weight).saturating_mul(r as Weight)) + (491_459_000 as Weight) + // Standard Error: 150_000 + .saturating_add((137_402_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1310,9 +1310,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn 
seal_block_number(r: u32, ) -> Weight { - (133_568_000 as Weight) - // Standard Error: 143_000 - .saturating_add((224_792_000 as Weight).saturating_mul(r as Weight)) + (488_379_000 as Weight) + // Standard Error: 170_000 + .saturating_add((136_564_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1321,9 +1321,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_now(r: u32, ) -> Weight { - (134_786_000 as Weight) - // Standard Error: 130_000 - .saturating_add((224_331_000 as Weight).saturating_mul(r as Weight)) + (494_827_000 as Weight) + // Standard Error: 175_000 + .saturating_add((137_178_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1333,9 +1333,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) fn seal_weight_to_fee(r: u32, ) -> Weight { - (147_402_000 as Weight) - // Standard Error: 233_000 - .saturating_add((439_237_000 as Weight).saturating_mul(r as Weight)) + (497_508_000 as Weight) + // Standard Error: 191_000 + .saturating_add((323_559_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1344,9 +1344,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas(r: u32, ) -> Weight { - (115_711_000 as Weight) - // Standard Error: 88_000 - .saturating_add((113_467_000 as Weight).saturating_mul(r as Weight)) + (179_076_000 as Weight) + // Standard Error: 124_000 + .saturating_add((62_013_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1355,9 +1355,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input(r: u32, ) -> Weight { - (123_004_000 as Weight) - // Standard Error: 78_000 - .saturating_add((6_674_000 as Weight).saturating_mul(r as Weight)) + (480_920_000 as Weight) + // Standard Error: 182_000 + .saturating_add((3_254_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1366,9 +1366,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input_per_kb(n: u32, ) -> Weight { - (131_611_000 as Weight) - // Standard Error: 0 - .saturating_add((1_035_000 as Weight).saturating_mul(n as Weight)) + (487_910_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_218_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1377,9 +1377,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return(r: u32, ) -> Weight { - (118_327_000 as Weight) - // Standard Error: 84_000 - .saturating_add((4_274_000 as Weight).saturating_mul(r as Weight)) + (470_960_000 as Weight) + // Standard Error: 678_000 + .saturating_add((2_506_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 
as Weight)) } @@ -1388,9 +1388,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return_per_kb(n: u32, ) -> Weight { - (126_129_000 as Weight) - // Standard Error: 0 - .saturating_add((495_000 as Weight).saturating_mul(n as Weight)) + (478_623_000 as Weight) + // Standard Error: 1_000 + .saturating_add((749_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1400,9 +1400,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts DeletionQueue (r:1 w:1) fn seal_terminate(r: u32, ) -> Weight { - (123_759_000 as Weight) - // Standard Error: 115_000 - .saturating_add((89_730_000 as Weight).saturating_mul(r as Weight)) + (481_930_000 as Weight) + // Standard Error: 511_000 + .saturating_add((84_726_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1414,9 +1414,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x3a6368696c645f73746f726167653a64656661756c743af3fd4cc2fc8d170b6d] (r:1 w:0) fn seal_restore_to(r: u32, ) -> Weight { - (151_364_000 as Weight) - // Standard Error: 263_000 - .saturating_add((99_367_000 as Weight).saturating_mul(r as Weight)) + (514_296_000 as Weight) + // Standard Error: 458_000 + .saturating_add((93_769_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1424,9 +1424,9 @@ impl WeightInfo for () { } // Storage: Skipped Metadata (r:0 w:0) fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_919_000 - .saturating_add((2_415_482_000 as Weight).saturating_mul(d as Weight)) + (313_520_000 as Weight) + // Standard Error: 1_783_000 + .saturating_add((2_435_407_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(RocksDbWeight::get().writes(7 as Weight)) @@ -1438,9 +1438,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) fn seal_random(r: u32, ) -> Weight { - (137_660_000 as Weight) - // Standard Error: 204_000 - .saturating_add((563_042_000 as Weight).saturating_mul(r as Weight)) + (484_059_000 as Weight) + // Standard Error: 285_000 + .saturating_add((443_946_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1449,9 +1449,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_deposit_event(r: u32, ) -> Weight { - (137_087_000 as Weight) - // Standard Error: 413_000 - .saturating_add((835_499_000 as Weight).saturating_mul(r as Weight)) + (491_593_000 as Weight) + // Standard Error: 386_000 + .saturating_add((733_958_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1461,11 +1461,11 @@ impl WeightInfo for () { 
// Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:100 w:100) fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_117_515_000 as Weight) - // Standard Error: 2_167_000 - .saturating_add((494_145_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 427_000 - .saturating_add((150_093_000 as Weight).saturating_mul(n as Weight)) + (1_342_357_000 as Weight) + // Standard Error: 2_458_000 + .saturating_add((521_445_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 484_000 + .saturating_add((195_792_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1476,9 +1476,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_set_rent_allowance(r: u32, ) -> Weight { - (132_070_000 as Weight) - // Standard Error: 129_000 - .saturating_add((155_669_000 as Weight).saturating_mul(r as Weight)) + (209_818_000 as Weight) + // Standard Error: 157_000 + .saturating_add((93_289_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1487,17 +1487,17 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_debug_message(r: u32, ) -> Weight { - (126_971_000 as Weight) - // Standard Error: 90_000 - .saturating_add((122_445_000 as Weight).saturating_mul(r as Weight)) + (200_027_000 as Weight) + // Standard Error: 145_000 + .saturating_add((79_038_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_set_storage(r: u32, ) -> Weight { - (125_746_000 as Weight) - // Standard Error: 610_000 - .saturating_add((501_265_000 as Weight).saturating_mul(r as Weight)) + (477_211_000 as Weight) + // Standard Error: 709_000 + .saturating_add((407_264_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1509,17 +1509,17 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:1) fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (563_219_000 as Weight) - // Standard Error: 219_000 - .saturating_add((41_578_000 as Weight).saturating_mul(n as Weight)) + (832_538_000 as Weight) + // Standard Error: 262_000 + .saturating_add((87_211_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_clear_storage(r: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_727_000 - .saturating_add((1_001_461_000 as Weight).saturating_mul(r as Weight)) + (199_686_000 as Weight) + // Standard Error: 1_610_000 + .saturating_add((905_125_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as 
Weight)) @@ -1527,9 +1527,9 @@ impl WeightInfo for () { } // Storage: Skipped Metadata (r:0 w:0) fn seal_get_storage(r: u32, ) -> Weight { - (9_115_000 as Weight) - // Standard Error: 784_000 - .saturating_add((660_533_000 as Weight).saturating_mul(r as Weight)) + (335_052_000 as Weight) + // Standard Error: 885_000 + .saturating_add((545_754_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1540,9 +1540,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:0) fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (563_175_000 as Weight) - // Standard Error: 206_000 - .saturating_add((89_626_000 as Weight).saturating_mul(n as Weight)) + (800_556_000 as Weight) + // Standard Error: 337_000 + .saturating_add((133_492_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1551,9 +1551,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_transfer(r: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_750_000 - .saturating_add((4_820_493_000 as Weight).saturating_mul(r as Weight)) + (317_531_000 as Weight) + // Standard Error: 1_627_000 + .saturating_add((4_748_591_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -1565,8 +1565,8 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 6_692_000 - .saturating_add((11_477_937_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 8_848_000 + .saturating_add((46_947_679_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1577,13 +1577,13 @@ impl WeightInfo for () { // Storage: System Account (r:101 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (11_238_437_000 as Weight) - // Standard Error: 81_620_000 - .saturating_add((3_700_413_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 29_000 - .saturating_add((32_106_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 31_000 - .saturating_add((54_386_000 as Weight).saturating_mul(o as Weight)) + (47_469_660_000 as Weight) + // Standard Error: 45_192_000 + .saturating_add((3_691_145_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 16_000 + .saturating_add((75_339_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 17_000 + .saturating_add((121_494_000 as Weight).saturating_mul(o as Weight)) .saturating_add(RocksDbWeight::get().reads(205 as Weight)) .saturating_add(RocksDbWeight::get().writes(101 as Weight)) .saturating_add(RocksDbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) @@ -1595,8 +1595,8 @@ impl WeightInfo for () { // Storage: Contracts AccountCounter (r:1 w:1) fn seal_instantiate(r: u32, ) -> 
Weight { (0 as Weight) - // Standard Error: 35_258_000 - .saturating_add((20_674_357_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 32_740_000 + .saturating_add((55_623_588_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) @@ -1608,13 +1608,13 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts AccountCounter (r:1 w:1) fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { - (14_725_288_000 as Weight) - // Standard Error: 53_000 - .saturating_add((33_848_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 53_000 - .saturating_add((57_054_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 53_000 - .saturating_add((180_033_000 as Weight).saturating_mul(s as Weight)) + (54_718_944_000 as Weight) + // Standard Error: 29_000 + .saturating_add((75_276_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 29_000 + .saturating_add((121_341_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 29_000 + .saturating_add((223_964_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(206 as Weight)) .saturating_add(RocksDbWeight::get().writes(204 as Weight)) } @@ -1623,9 +1623,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256(r: u32, ) -> Weight { - (131_974_000 as Weight) - // Standard Error: 125_000 - .saturating_add((220_711_000 as Weight).saturating_mul(r as Weight)) + (485_310_000 as Weight) + // Standard Error: 169_000 + .saturating_add((143_364_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1634,9 +1634,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (367_148_000 as Weight) - // Standard Error: 12_000 - .saturating_add((462_143_000 as Weight).saturating_mul(n as Weight)) + (632_820_000 as Weight) + // Standard Error: 29_000 + .saturating_add((511_722_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1645,9 +1645,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256(r: u32, ) -> Weight { - (134_585_000 as Weight) - // Standard Error: 131_000 - .saturating_add((227_264_000 as Weight).saturating_mul(r as Weight)) + (484_331_000 as Weight) + // Standard Error: 195_000 + .saturating_add((151_617_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1656,9 +1656,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (325_319_000 as Weight) - // Standard Error: 12_000 - .saturating_add((313_033_000 as Weight).saturating_mul(n as Weight)) + (565_213_000 as Weight) + // Standard Error: 28_000 + .saturating_add((359_762_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1667,9 +1667,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256(r: u32, ) -> Weight { - (135_347_000 as Weight) - // Standard Error: 150_000 - .saturating_add((199_764_000 as Weight).saturating_mul(r as Weight)) + (481_843_000 as Weight) + // Standard Error: 186_000 + .saturating_add((122_838_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1678,9 +1678,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (424_473_000 as Weight) - // Standard Error: 13_000 - .saturating_add((130_936_000 as Weight).saturating_mul(n as Weight)) + (582_445_000 as Weight) + // Standard Error: 28_000 + .saturating_add((176_329_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1689,9 +1689,9 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128(r: u32, ) -> Weight { - (128_776_000 as Weight) - // Standard Error: 118_000 - .saturating_add((203_125_000 as Weight).saturating_mul(r as Weight)) + (486_320_000 as Weight) + // Standard Error: 147_000 + .saturating_add((123_460_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1700,265 +1700,265 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (445_726_000 as Weight) - // Standard Error: 14_000 - .saturating_add((130_931_000 as Weight).saturating_mul(n as Weight)) + (515_967_000 as Weight) + // Standard Error: 33_000 + .saturating_add((176_423_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (22_161_000 as Weight) - // Standard Error: 36_000 - .saturating_add((3_329_000 as Weight).saturating_mul(r as Weight)) + (54_127_000 as Weight) + // Standard Error: 25_000 + .saturating_add((10_198_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (24_430_000 as Weight) - // Standard Error: 65_000 - .saturating_add((159_566_000 as Weight).saturating_mul(r as Weight)) + (55_411_000 as Weight) + // Standard Error: 148_000 + .saturating_add((22_916_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (24_443_000 as Weight) - // Standard Error: 62_000 - .saturating_add((232_854_000 as Weight).saturating_mul(r as Weight)) + (55_462_000 as Weight) + // Standard Error: 134_000 + .saturating_add((24_449_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (22_158_000 as Weight) - // Standard Error: 34_000 - .saturating_add((12_112_000 as Weight).saturating_mul(r as Weight)) + (54_114_000 as Weight) + // Standard Error: 18_000 + .saturating_add((26_214_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (22_178_000 as Weight) - // Standard Error: 23_000 - .saturating_add((11_374_000 as Weight).saturating_mul(r as Weight)) + (54_118_000 as Weight) + // 
Standard Error: 18_000 + .saturating_add((26_492_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (22_157_000 as Weight) - // Standard Error: 41_000 - .saturating_add((5_826_000 as Weight).saturating_mul(r as Weight)) + (54_119_000 as Weight) + // Standard Error: 304_000 + .saturating_add((18_424_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (22_182_000 as Weight) - // Standard Error: 34_000 - .saturating_add((13_647_000 as Weight).saturating_mul(r as Weight)) + (55_352_000 as Weight) + // Standard Error: 13_000 + .saturating_add((32_291_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (22_083_000 as Weight) - // Standard Error: 44_000 - .saturating_add((14_901_000 as Weight).saturating_mul(r as Weight)) + (54_115_000 as Weight) + // Standard Error: 16_000 + .saturating_add((27_785_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (32_689_000 as Weight) + (86_048_000 as Weight) // Standard Error: 1_000 - .saturating_add((154_000 as Weight).saturating_mul(e as Weight)) + .saturating_add((82_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (22_313_000 as Weight) - // Standard Error: 383_000 - .saturating_add((89_804_000 as Weight).saturating_mul(r as Weight)) + (54_654_000 as Weight) + // Standard Error: 82_000 + .saturating_add((199_159_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (29_939_000 as Weight) - // Standard Error: 230_000 - .saturating_add((185_309_000 as Weight).saturating_mul(r as Weight)) + (67_478_000 as Weight) + // Standard Error: 113_000 + .saturating_add((302_597_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (221_596_000 as Weight) - // Standard Error: 3_000 - .saturating_add((4_045_000 as Weight).saturating_mul(p as Weight)) + (384_281_000 as Weight) + // Standard Error: 13_000 + .saturating_add((9_984_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (22_171_000 as Weight) - // Standard Error: 28_000 - .saturating_add((3_362_000 as Weight).saturating_mul(r as Weight)) + (55_473_000 as Weight) + // Standard Error: 16_000 + .saturating_add((9_287_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (22_182_000 as Weight) - // Standard Error: 31_000 - .saturating_add((3_801_000 as Weight).saturating_mul(r as Weight)) + (55_426_000 as Weight) + // Standard Error: 38_000 + .saturating_add((10_559_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (22_200_000 as Weight) - // Standard Error: 27_000 - .saturating_add((5_080_000 as Weight).saturating_mul(r as Weight)) + (55_332_000 as Weight) + // Standard Error: 8_000 + .saturating_add((15_640_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (25_255_000 as Weight) - // Standard Error: 41_000 - .saturating_add((8_875_000 as Weight).saturating_mul(r as Weight)) + (74_497_000 as Weight) + // Standard Error: 22_000 + .saturating_add((15_067_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (25_145_000 as Weight) - // Standard Error: 37_000 - .saturating_add((9_556_000 as Weight).saturating_mul(r as Weight)) + (74_445_000 as Weight) + // Standard Error: 49_000 + .saturating_add((17_650_000 as Weight).saturating_mul(r as Weight)) } fn 
instr_memory_current(r: u32, ) -> Weight { - (24_435_000 as Weight) - // Standard Error: 49_000 - .saturating_add((4_204_000 as Weight).saturating_mul(r as Weight)) + (54_500_000 as Weight) + // Standard Error: 17_000 + .saturating_add((9_307_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (23_158_000 as Weight) - // Standard Error: 5_969_000 - .saturating_add((2_339_630_000 as Weight).saturating_mul(r as Weight)) + (54_382_000 as Weight) + // Standard Error: 5_644_000 + .saturating_add((748_424_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (21_984_000 as Weight) - // Standard Error: 25_000 - .saturating_add((5_421_000 as Weight).saturating_mul(r as Weight)) + (54_133_000 as Weight) + // Standard Error: 20_000 + .saturating_add((15_830_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (22_069_000 as Weight) - // Standard Error: 26_000 - .saturating_add((5_187_000 as Weight).saturating_mul(r as Weight)) + (54_129_000 as Weight) + // Standard Error: 22_000 + .saturating_add((15_894_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (22_042_000 as Weight) - // Standard Error: 28_000 - .saturating_add((6_116_000 as Weight).saturating_mul(r as Weight)) + (54_181_000 as Weight) + // Standard Error: 22_000 + .saturating_add((15_847_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (22_018_000 as Weight) - // Standard Error: 34_000 - .saturating_add((5_130_000 as Weight).saturating_mul(r as Weight)) + (54_130_000 as Weight) + // Standard Error: 17_000 + .saturating_add((15_825_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (21_933_000 as Weight) - // Standard Error: 29_000 - .saturating_add((5_005_000 as Weight).saturating_mul(r as Weight)) + (54_122_000 as Weight) + // Standard Error: 19_000 + .saturating_add((15_803_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (22_066_000 as Weight) - // Standard Error: 34_000 - .saturating_add((4_877_000 as Weight).saturating_mul(r as Weight)) + (54_100_000 as Weight) + // Standard Error: 28_000 + .saturating_add((15_822_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (22_003_000 as Weight) - // Standard Error: 25_000 - .saturating_add((5_018_000 as Weight).saturating_mul(r as Weight)) + (54_143_000 as Weight) + // Standard Error: 19_000 + .saturating_add((15_868_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (22_130_000 as Weight) - // Standard Error: 35_000 - .saturating_add((7_071_000 as Weight).saturating_mul(r as Weight)) + (54_133_000 as Weight) + // Standard Error: 21_000 + .saturating_add((21_121_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (22_112_000 as Weight) - // Standard Error: 24_000 - .saturating_add((7_056_000 as Weight).saturating_mul(r as Weight)) + (54_177_000 as Weight) + // Standard Error: 14_000 + .saturating_add((21_003_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (22_114_000 as Weight) - // Standard Error: 27_000 - .saturating_add((6_974_000 as Weight).saturating_mul(r as Weight)) + (54_164_000 as Weight) + // Standard Error: 31_000 + .saturating_add((21_041_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (22_111_000 as Weight) - // Standard Error: 32_000 - 
.saturating_add((7_183_000 as Weight).saturating_mul(r as Weight)) + (54_171_000 as Weight) + // Standard Error: 21_000 + .saturating_add((21_101_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (22_148_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_044_000 as Weight).saturating_mul(r as Weight)) + (54_177_000 as Weight) + // Standard Error: 12_000 + .saturating_add((21_074_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (22_158_000 as Weight) - // Standard Error: 33_000 - .saturating_add((7_116_000 as Weight).saturating_mul(r as Weight)) + (54_073_000 as Weight) + // Standard Error: 13_000 + .saturating_add((21_136_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (22_194_000 as Weight) - // Standard Error: 31_000 - .saturating_add((7_039_000 as Weight).saturating_mul(r as Weight)) + (54_116_000 as Weight) + // Standard Error: 17_000 + .saturating_add((21_140_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (22_219_000 as Weight) - // Standard Error: 23_000 - .saturating_add((7_076_000 as Weight).saturating_mul(r as Weight)) + (54_115_000 as Weight) + // Standard Error: 21_000 + .saturating_add((21_164_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (22_170_000 as Weight) - // Standard Error: 50_000 - .saturating_add((7_122_000 as Weight).saturating_mul(r as Weight)) + (54_261_000 as Weight) + // Standard Error: 123_000 + .saturating_add((20_921_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (22_113_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_069_000 as Weight).saturating_mul(r as Weight)) + (54_090_000 as Weight) + // Standard Error: 38_000 + .saturating_add((21_171_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (22_090_000 as Weight) - // Standard Error: 29_000 - .saturating_add((6_956_000 as Weight).saturating_mul(r as Weight)) + (54_129_000 as Weight) + // Standard Error: 27_000 + .saturating_add((21_086_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (22_006_000 as Weight) - // Standard Error: 30_000 - .saturating_add((7_094_000 as Weight).saturating_mul(r as Weight)) + (54_126_000 as Weight) + // Standard Error: 11_000 + .saturating_add((21_051_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (22_111_000 as Weight) - // Standard Error: 29_000 - .saturating_add((6_825_000 as Weight).saturating_mul(r as Weight)) + (54_153_000 as Weight) + // Standard Error: 22_000 + .saturating_add((21_021_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (22_041_000 as Weight) - // Standard Error: 29_000 - .saturating_add((13_164_000 as Weight).saturating_mul(r as Weight)) + (54_168_000 as Weight) + // Standard Error: 19_000 + .saturating_add((27_336_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (21_989_000 as Weight) - // Standard Error: 28_000 - .saturating_add((12_808_000 as Weight).saturating_mul(r as Weight)) + (54_124_000 as Weight) + // Standard Error: 22_000 + .saturating_add((24_783_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (22_045_000 as Weight) - // Standard Error: 39_000 - .saturating_add((13_387_000 as Weight).saturating_mul(r as Weight)) + (54_203_000 as Weight) + // Standard Error: 21_000 + 
.saturating_add((27_539_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (22_075_000 as Weight) - // Standard Error: 40_000 - .saturating_add((12_791_000 as Weight).saturating_mul(r as Weight)) + (54_176_000 as Weight) + // Standard Error: 19_000 + .saturating_add((24_686_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (22_044_000 as Weight) - // Standard Error: 32_000 - .saturating_add((7_090_000 as Weight).saturating_mul(r as Weight)) + (54_111_000 as Weight) + // Standard Error: 356_000 + .saturating_add((22_077_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (22_133_000 as Weight) - // Standard Error: 40_000 - .saturating_add((6_967_000 as Weight).saturating_mul(r as Weight)) + (54_124_000 as Weight) + // Standard Error: 15_000 + .saturating_add((21_060_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (22_069_000 as Weight) - // Standard Error: 41_000 - .saturating_add((7_026_000 as Weight).saturating_mul(r as Weight)) + (54_153_000 as Weight) + // Standard Error: 24_000 + .saturating_add((21_064_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (22_165_000 as Weight) - // Standard Error: 44_000 - .saturating_add((7_440_000 as Weight).saturating_mul(r as Weight)) + (54_122_000 as Weight) + // Standard Error: 23_000 + .saturating_add((21_187_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (22_063_000 as Weight) - // Standard Error: 34_000 - .saturating_add((7_309_000 as Weight).saturating_mul(r as Weight)) + (54_149_000 as Weight) + // Standard Error: 18_000 + .saturating_add((21_110_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (22_086_000 as Weight) - // Standard Error: 36_000 - .saturating_add((7_188_000 as Weight).saturating_mul(r as Weight)) + (54_136_000 as Weight) + // Standard Error: 13_000 + .saturating_add((21_066_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (22_109_000 as Weight) - // Standard Error: 45_000 - .saturating_add((7_169_000 as Weight).saturating_mul(r as Weight)) + (54_231_000 as Weight) + // Standard Error: 30_000 + .saturating_add((21_073_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (22_076_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_070_000 as Weight).saturating_mul(r as Weight)) + (54_139_000 as Weight) + // Standard Error: 17_000 + .saturating_add((21_097_000 as Weight).saturating_mul(r as Weight)) } } diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index f15f1c02d511c..a4d4a4d5d031a 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -12,6 +12,12 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[target.'cfg(target_arch = "wasm32")'.dependencies] +wasmi = { version = "0.9.0", default-features = false, features = ["core"] } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +wasmi = "0.9.0" + [dependencies] wasmi = { version = "0.9.0", optional = true } sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } @@ -19,6 +25,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } sp-io = { version = "4.0.0-dev", default-features = false, path = "../io" } sp-wasm-interface = { version = "4.0.0-dev", default-features = false, path = 
"../wasm-interface" } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4", default-features = false } [dev-dependencies] wat = "1.0" @@ -33,5 +40,7 @@ std = [ "codec/std", "sp-io/std", "sp-wasm-interface/std", + "log/std", ] strict = [] +wasmer-sandbox = [] diff --git a/primitives/sandbox/with_std.rs b/primitives/sandbox/embedded_executor.rs similarity index 69% rename from primitives/sandbox/with_std.rs rename to primitives/sandbox/embedded_executor.rs index d5f87f165137e..678da3c3aeaf5 100755 --- a/primitives/sandbox/with_std.rs +++ b/primitives/sandbox/embedded_executor.rs @@ -15,16 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_std::collections::btree_map::BTreeMap; -use sp_std::fmt; - +use super::{Error, HostError, HostFuncType, ReturnValue, Value, TARGET}; +use alloc::string::String; +use log::debug; +use sp_std::{ + borrow::ToOwned, collections::btree_map::BTreeMap, fmt, marker::PhantomData, prelude::*, +}; use wasmi::{ - Externals, FuncInstance, FuncRef, GlobalDescriptor, GlobalRef, ImportResolver, - MemoryDescriptor, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, - RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, TrapKind + memory_units::Pages, Externals, FuncInstance, FuncRef, GlobalDescriptor, GlobalRef, + ImportResolver, MemoryDescriptor, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, + RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, TrapKind, }; -use wasmi::memory_units::Pages; -use super::{Error, Value, ReturnValue, HostFuncType, HostError}; #[derive(Clone)] pub struct Memory { @@ -37,7 +38,8 @@ impl Memory { memref: MemoryInstance::alloc( Pages(initial as usize), maximum.map(|m| Pages(m as usize)), - ).map_err(|_| Error::Module)?, + ) + .map_err(|_| Error::Module)?, }) } @@ -60,17 +62,13 @@ struct DefinedHostFunctions { impl Clone for DefinedHostFunctions { fn clone(&self) -> DefinedHostFunctions { - DefinedHostFunctions { - funcs: self.funcs.clone(), - } + DefinedHostFunctions { funcs: self.funcs.clone() } } } impl DefinedHostFunctions { fn new() -> DefinedHostFunctions { - DefinedHostFunctions { - funcs: Vec::new(), - } + DefinedHostFunctions { funcs: Vec::new() } } fn define(&mut self, f: HostFuncType) -> HostFuncIndex { @@ -102,16 +100,12 @@ impl<'a, T> Externals for GuestExternals<'a, T> { index: usize, args: RuntimeArgs, ) -> Result, Trap> { - let args = args.as_ref() - .iter() - .cloned() - .map(Into::into) - .collect::>(); + let args = args.as_ref().iter().cloned().map(to_interface).collect::>(); let result = (self.defined_host_functions.funcs[index])(self.state, &args); match result { Ok(value) => Ok(match value { - ReturnValue::Value(v) => Some(v.into()), + ReturnValue::Value(v) => Some(to_wasmi(v)), ReturnValue::Unit => None, }), Err(HostError) => Err(TrapKind::Host(Box::new(DummyHostError)).into()), @@ -143,8 +137,7 @@ impl EnvironmentDefinitionBuilder { N2: Into>, { let idx = self.defined_host_functions.define(f); - self.map - .insert((module.into(), field.into()), ExternVal::HostFunc(idx)); + self.map.insert((module.into(), field.into()), ExternVal::HostFunc(idx)); } pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) @@ -152,8 +145,7 @@ impl EnvironmentDefinitionBuilder { N1: Into>, N2: Into>, { - self.map - .insert((module.into(), field.into()), ExternVal::Memory(mem)); + self.map.insert((module.into(), field.into()), 
ExternVal::Memory(mem)); } } @@ -164,21 +156,17 @@ impl ImportResolver for EnvironmentDefinitionBuilder { field_name: &str, signature: &Signature, ) -> Result { - let key = ( - module_name.as_bytes().to_owned(), - field_name.as_bytes().to_owned(), - ); + let key = (module_name.as_bytes().to_owned(), field_name.as_bytes().to_owned()); let externval = self.map.get(&key).ok_or_else(|| { - wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)) + debug!(target: TARGET, "Export {}:{} not found", module_name, field_name); + wasmi::Error::Instantiation(String::new()) })?; let host_func_idx = match *externval { ExternVal::HostFunc(ref idx) => idx, _ => { - return Err(wasmi::Error::Instantiation(format!( - "Export {}:{} is not a host func", - module_name, field_name - ))) - } + debug!(target: TARGET, "Export {}:{} is not a host func", module_name, field_name); + return Err(wasmi::Error::Instantiation(String::new())) + }, }; Ok(FuncInstance::alloc_host(signature.clone(), host_func_idx.0)) } @@ -189,9 +177,8 @@ impl ImportResolver for EnvironmentDefinitionBuilder { _field_name: &str, _global_type: &GlobalDescriptor, ) -> Result { - Err(wasmi::Error::Instantiation(format!( - "Importing globals is not supported yet" - ))) + debug!(target: TARGET, "Importing globals is not supported yet"); + Err(wasmi::Error::Instantiation(String::new())) } fn resolve_memory( @@ -200,21 +187,17 @@ impl ImportResolver for EnvironmentDefinitionBuilder { field_name: &str, _memory_type: &MemoryDescriptor, ) -> Result { - let key = ( - module_name.as_bytes().to_owned(), - field_name.as_bytes().to_owned(), - ); + let key = (module_name.as_bytes().to_owned(), field_name.as_bytes().to_owned()); let externval = self.map.get(&key).ok_or_else(|| { - wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)) + debug!(target: TARGET, "Export {}:{} not found", module_name, field_name); + wasmi::Error::Instantiation(String::new()) })?; let memory = match *externval { ExternVal::Memory(ref m) => m, _ => { - return Err(wasmi::Error::Instantiation(format!( - "Export {}:{} is not a memory", - module_name, field_name - ))) - } + debug!(target: TARGET, "Export {}:{} is not a memory", module_name, field_name); + return Err(wasmi::Error::Instantiation(String::new())) + }, }; Ok(memory.memref.clone()) } @@ -225,16 +208,15 @@ impl ImportResolver for EnvironmentDefinitionBuilder { _field_name: &str, _table_type: &TableDescriptor, ) -> Result { - Err(wasmi::Error::Instantiation(format!( - "Importing tables is not supported yet" - ))) + debug!("Importing tables is not supported yet"); + Err(wasmi::Error::Instantiation(String::new())) } } pub struct Instance { instance: ModuleRef, defined_host_functions: DefinedHostFunctions, - _marker: std::marker::PhantomData, + _marker: PhantomData, } impl Instance { @@ -244,26 +226,19 @@ impl Instance { state: &mut T, ) -> Result, Error> { let module = Module::from_buffer(code).map_err(|_| Error::Module)?; - let not_started_instance = ModuleInstance::new(&module, env_def_builder) - .map_err(|_| Error::Module)?; - + let not_started_instance = + ModuleInstance::new(&module, env_def_builder).map_err(|_| Error::Module)?; let defined_host_functions = env_def_builder.defined_host_functions.clone(); let instance = { - let mut externals = GuestExternals { - state, - defined_host_functions: &defined_host_functions, - }; - let instance = not_started_instance.run_start(&mut externals) - .map_err(|_| Error::Execution)?; + let mut externals = + GuestExternals { 
state, defined_host_functions: &defined_host_functions }; + let instance = + not_started_instance.run_start(&mut externals).map_err(|_| Error::Execution)?; instance }; - Ok(Instance { - instance, - defined_host_functions, - _marker: std::marker::PhantomData::, - }) + Ok(Instance { instance, defined_host_functions, _marker: PhantomData:: }) } pub fn invoke( @@ -272,35 +247,49 @@ impl Instance { args: &[Value], state: &mut T, ) -> Result { - let args = args.iter().cloned().map(Into::into).collect::>(); + let args = args.iter().cloned().map(to_wasmi).collect::>(); - let mut externals = GuestExternals { - state, - defined_host_functions: &self.defined_host_functions, - }; - let result = self.instance - .invoke_export(&name, &args, &mut externals); + let mut externals = + GuestExternals { state, defined_host_functions: &self.defined_host_functions }; + let result = self.instance.invoke_export(&name, &args, &mut externals); match result { Ok(None) => Ok(ReturnValue::Unit), - Ok(Some(val)) => Ok(ReturnValue::Value(val.into())), + Ok(Some(val)) => Ok(ReturnValue::Value(to_interface(val))), Err(_err) => Err(Error::Execution), } } pub fn get_global_val(&self, name: &str) -> Option { - let global = self.instance - .export_by_name(name)? - .as_global()? - .get(); + let global = self.instance.export_by_name(name)?.as_global()?.get(); + + Some(to_interface(global)) + } +} + +/// Convert the substrate value type to the wasmi value type. +fn to_wasmi(value: Value) -> RuntimeValue { + match value { + Value::I32(val) => RuntimeValue::I32(val), + Value::I64(val) => RuntimeValue::I64(val), + Value::F32(val) => RuntimeValue::F32(val.into()), + Value::F64(val) => RuntimeValue::F64(val.into()), + } +} - Some(global.into()) +/// Convert the wasmi value type to the substrate value type. +fn to_interface(value: RuntimeValue) -> Value { + match value { + RuntimeValue::I32(val) => Value::I32(val), + RuntimeValue::I64(val) => Value::I64(val), + RuntimeValue::F32(val) => Value::F32(val.into()), + RuntimeValue::F64(val) => Value::F64(val.into()), } } #[cfg(test)] mod tests { - use crate::{Error, Value, ReturnValue, HostError, EnvironmentDefinitionBuilder, Instance}; + use crate::{EnvironmentDefinitionBuilder, Error, HostError, Instance, ReturnValue, Value}; use assert_matches::assert_matches; fn execute_sandboxed(code: &[u8], args: &[Value]) -> Result { @@ -310,7 +299,7 @@ mod tests { fn env_assert(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } let condition = args[0].as_i32().ok_or_else(|| HostError)?; if condition != 0 { @@ -321,7 +310,7 @@ mod tests { } fn env_inc_counter(e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } let inc_by = args[0].as_i32().ok_or_else(|| HostError)?; e.counter += inc_by as u32; @@ -330,7 +319,7 @@ mod tests { /// Function that takes one argument of any type and returns that value. 
fn env_polymorphic_id(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } Ok(ReturnValue::Value(args[0])) } @@ -350,7 +339,8 @@ mod tests { #[test] fn invoke_args() { - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) @@ -371,21 +361,19 @@ mod tests { ) ) ) - "#).unwrap(); - - let result = execute_sandboxed( - &code, - &[ - Value::I32(0x12345678), - Value::I64(0x1234567887654321), - ] - ); + "#, + ) + .unwrap(); + + let result = + execute_sandboxed(&code, &[Value::I32(0x12345678), Value::I64(0x1234567887654321)]); assert!(result.is_ok()); } #[test] fn return_value() { - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -394,20 +382,18 @@ mod tests { ) ) ) - "#).unwrap(); - - let return_val = execute_sandboxed( - &code, - &[ - Value::I32(0x1336), - ] - ).unwrap(); + "#, + ) + .unwrap(); + + let return_val = execute_sandboxed(&code, &[Value::I32(0x1336)]).unwrap(); assert_eq!(return_val, ReturnValue::Value(Value::I32(0x1337))); } #[test] fn signatures_dont_matter() { - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "polymorphic_id" (func $id_i32 (param i32) (result i32))) (import "env" "polymorphic_id" (func $id_i64 (param i64) (result i64))) @@ -434,7 +420,9 @@ mod tests { ) ) ) - "#).unwrap(); + "#, + ) + .unwrap(); let return_val = execute_sandboxed(&code, &[]).unwrap(); assert_eq!(return_val, ReturnValue::Unit); @@ -449,7 +437,8 @@ mod tests { let mut env_builder = EnvironmentDefinitionBuilder::new(); env_builder.add_host_func("env", "returns_i32", env_returns_i32); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module ;; It's actually returns i32, but imported as if it returned i64 (import "env" "returns_i32" (func $returns_i32 (result i64))) @@ -460,15 +449,14 @@ mod tests { ) ) ) - "#).unwrap(); + "#, + ) + .unwrap(); // It succeeds since we are able to import functions with types we want. let mut instance = Instance::new(&code, &env_builder, &mut ()).unwrap(); // But this fails since we imported a function that returns i32 as if it returned i64. - assert_matches!( - instance.invoke("call", &[], &mut ()), - Err(Error::Execution) - ); + assert_matches!(instance.invoke("call", &[], &mut ()), Err(Error::Execution)); } } diff --git a/primitives/sandbox/without_std.rs b/primitives/sandbox/host_executor.rs similarity index 100% rename from primitives/sandbox/without_std.rs rename to primitives/sandbox/host_executor.rs diff --git a/primitives/sandbox/src/lib.rs b/primitives/sandbox/src/lib.rs index 94cb676b51edc..1724b4152ff3d 100755 --- a/primitives/sandbox/src/lib.rs +++ b/primitives/sandbox/src/lib.rs @@ -38,17 +38,22 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use sp_std::prelude::*; pub use sp_core::sandbox::HostError; pub use sp_wasm_interface::{ReturnValue, Value}; +/// The target used for logging. +const TARGET: &str = "runtime::sandbox"; + mod imp { - #[cfg(feature = "std")] - include!("../with_std.rs"); + #[cfg(all(feature = "wasmer-sandbox", not(feature = "std")))] + include!("../host_executor.rs"); - #[cfg(not(feature = "std"))] - include!("../without_std.rs"); + #[cfg(not(all(feature = "wasmer-sandbox", not(feature = "std"))))] + include!("../embedded_executor.rs"); } /// Error that can occur while using this crate. 
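Note on the sandbox patch above: it renames the `std` executor to `embedded_executor.rs`, introduces explicit `to_wasmi`/`to_interface` value conversions, and only selects the wasmer-backed `host_executor.rs` when the new `wasmer-sandbox` feature is enabled without `std`. As a rough usage sketch of the API exercised by the tests in that file (hedged: the `State` struct, host function and wat module below are made up for illustration; the `EnvironmentDefinitionBuilder`/`Instance` calls mirror the test code above, assuming they are re-exported from the crate root as in the tests):

use sp_sandbox::{EnvironmentDefinitionBuilder, HostError, Instance, ReturnValue, Value};

struct State {
	counter: u32,
}

// Host function exposed to the guest module; the signature follows the tests above.
fn env_inc(state: &mut State, args: &[Value]) -> Result<ReturnValue, HostError> {
	let by = args.get(0).and_then(|v| v.as_i32()).ok_or(HostError)?;
	state.counter += by as u32;
	Ok(ReturnValue::Value(Value::I32(state.counter as i32)))
}

fn main() {
	// Illustrative guest module that calls the imported host function once.
	let code = wat::parse_str(
		r#"
		(module
			(import "env" "inc" (func $inc (param i32) (result i32)))
			(func (export "call")
				(drop (call $inc (i32.const 5)))
			)
		)
		"#,
	)
	.unwrap();

	let mut state = State { counter: 0 };
	let mut env_builder = EnvironmentDefinitionBuilder::new();
	env_builder.add_host_func("env", "inc", env_inc);

	let mut instance = Instance::new(&code, &env_builder, &mut state).unwrap();
	let result = instance.invoke("call", &[], &mut state).unwrap();

	// The exported function returns nothing, so the result is `Unit`; the host
	// function side effect is visible in the shared state.
	assert_eq!(result, ReturnValue::Unit);
	assert_eq!(state.counter, 5);
}
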
From e6787aefa12416ef7ab5811acb0dbb270225252b Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 25 Aug 2021 15:16:47 -0400 Subject: [PATCH 13/14] Expose `storage_prefix` logic, and remove duplicate code (#9621) * expose storage prefix generation, remove duplicate code * remove more duplicate code * clean up import * fix io test * remove slicing * Update frame/support/src/storage/mod.rs Co-authored-by: Guillaume Thiolliere Co-authored-by: Guillaume Thiolliere fix: revert unneccessary changes --- frame/support/src/migrations.rs | 9 +--- .../src/storage/generator/double_map.rs | 49 ++++++------------- frame/support/src/storage/generator/map.rs | 39 +++++---------- frame/support/src/storage/generator/nmap.rs | 45 +++++------------ frame/support/src/storage/generator/value.rs | 6 +-- frame/support/src/storage/migration.rs | 48 +++++++++--------- frame/support/src/storage/mod.rs | 23 ++++++--- frame/support/src/traits/hooks.rs | 9 +--- frame/support/src/traits/metadata.rs | 10 +--- frame/support/test/tests/decl_storage.rs | 19 ++----- frame/support/test/tests/pallet.rs | 19 +------ .../tests/pallet_ui/hooks_invalid_item.stderr | 4 +- 12 files changed, 89 insertions(+), 191 deletions(-) diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index cf1ba81982424..dc3402440fdd4 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -30,14 +30,7 @@ impl PalletVersionToStorageVersionHelpe const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; fn pallet_version_key(name: &str) -> [u8; 32] { - let pallet_name = sp_io::hashing::twox_128(name.as_bytes()); - let postfix = sp_io::hashing::twox_128(PALLET_VERSION_STORAGE_KEY_POSTFIX); - - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&pallet_name); - final_key[16..].copy_from_slice(&postfix); - - final_key + crate::storage::storage_prefix(name.as_bytes(), PALLET_VERSION_STORAGE_KEY_POSTFIX) } sp_io::storage::clear(&pallet_version_key(::name())); diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index cec5bf57e50ce..d28e42028de53 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -16,8 +16,8 @@ // limitations under the License. use crate::{ - hash::{ReversibleStorageHasher, StorageHasher, Twox128}, - storage::{self, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, + hash::{ReversibleStorageHasher, StorageHasher}, + storage::{self, storage_prefix, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, Never, }; use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; @@ -62,16 +62,8 @@ pub trait StorageDoubleMap { /// The full prefix; just the hash of `module_prefix` concatenated to the hash of /// `storage_prefix`. fn prefix_hash() -> Vec { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - - let mut result = - Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len()); - - result.extend_from_slice(&module_prefix_hashed[..]); - result.extend_from_slice(&storage_prefix_hashed[..]); - - result + let result = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + result.to_vec() } /// Convert an optional value retrieved from storage to the type queried. 
@@ -85,16 +77,12 @@ pub trait StorageDoubleMap { where KArg1: EncodeLike, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), - ); + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key @@ -106,20 +94,15 @@ pub trait StorageDoubleMap { KArg1: EncodeLike, KArg2: EncodeLike, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key1_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); let key2_hashed = k2.borrow().using_encoded(Self::Hasher2::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + - storage_prefix_hashed.len() + - key1_hashed.as_ref().len() + - key2_hashed.as_ref().len(), + storage_prefix.len() + key1_hashed.as_ref().len() + key2_hashed.as_ref().len(), ); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key1_hashed.as_ref()); final_key.extend_from_slice(key2_hashed.as_ref()); @@ -319,20 +302,16 @@ where key2: KeyArg2, ) -> Option { let old_key = { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + let key1_hashed = key1.borrow().using_encoded(OldHasher1::hash); let key2_hashed = key2.borrow().using_encoded(OldHasher2::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + - storage_prefix_hashed.len() + - key1_hashed.as_ref().len() + - key2_hashed.as_ref().len(), + storage_prefix.len() + key1_hashed.as_ref().len() + key2_hashed.as_ref().len(), ); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key1_hashed.as_ref()); final_key.extend_from_slice(key2_hashed.as_ref()); diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index b78e9f96496fa..3fd3b9a0ea7b8 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -16,8 +16,8 @@ // limitations under the License. use crate::{ - hash::{ReversibleStorageHasher, StorageHasher, Twox128}, - storage::{self, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, + hash::{ReversibleStorageHasher, StorageHasher}, + storage::{self, storage_prefix, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, Never, }; use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; @@ -52,16 +52,8 @@ pub trait StorageMap { /// The full prefix; just the hash of `module_prefix` concatenated to the hash of /// `storage_prefix`. 
fn prefix_hash() -> Vec { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - - let mut result = - Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len()); - - result.extend_from_slice(&module_prefix_hashed[..]); - result.extend_from_slice(&storage_prefix_hashed[..]); - - result + let result = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + result.to_vec() } /// Convert an optional value retrieved from storage to the type queried. @@ -75,16 +67,12 @@ pub trait StorageMap { where KeyArg: EncodeLike, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = key.borrow().using_encoded(Self::Hasher::hash); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), - ); + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key @@ -330,18 +318,13 @@ impl> storage::StorageMap fn migrate_key>(key: KeyArg) -> Option { let old_key = { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = key.borrow().using_encoded(OldHasher::hash); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + - storage_prefix_hashed.len() + - key_hashed.as_ref().len(), - ); + let mut final_key = + Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 2ea401f44e96f..592bcc81341bf 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -30,9 +30,8 @@ //! be compromised. use crate::{ - hash::{StorageHasher, Twox128}, storage::{ - self, + self, storage_prefix, types::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, KeyGenerator, ReversibleKeyGenerator, TupleToEncodedIter, @@ -71,16 +70,8 @@ pub trait StorageNMap { /// The full prefix; just the hash of `module_prefix` concatenated to the hash of /// `storage_prefix`. fn prefix_hash() -> Vec { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - - let mut result = - Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len()); - - result.extend_from_slice(&module_prefix_hashed[..]); - result.extend_from_slice(&storage_prefix_hashed[..]); - - result + let result = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + result.to_vec() } /// Convert an optional value retrieved from storage to the type queried. 
@@ -94,16 +85,12 @@ pub trait StorageNMap { where K: HasKeyPrefix, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = >::partial_key(key); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(), - ); + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key @@ -115,16 +102,12 @@ pub trait StorageNMap { KG: KeyGenerator, KArg: EncodeLikeTuple + TupleToEncodedIter, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = KG::final_key(key); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(), - ); + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key @@ -286,16 +269,12 @@ where KArg: EncodeLikeTuple + TupleToEncodedIter, { let old_key = { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = K::migrate_key(&key, hash_fns); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(), - ); + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key diff --git a/frame/support/src/storage/generator/value.rs b/frame/support/src/storage/generator/value.rs index c765e059ec149..3486eaa005c06 100644 --- a/frame/support/src/storage/generator/value.rs +++ b/frame/support/src/storage/generator/value.rs @@ -16,7 +16,6 @@ // limitations under the License. use crate::{ - hash::{StorageHasher, Twox128}, storage::{self, unhashed, StorageAppend}, Never, }; @@ -46,10 +45,7 @@ pub trait StorageValue { /// Generate the full key used in top storage. fn storage_value_final_key() -> [u8; 32] { - let mut final_key = [0u8; 32]; - final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix())); - final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix())); - final_key + crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix()) } } diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index 0f10c5cbb47d3..eae45b1e96ad0 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -17,7 +17,11 @@ //! Some utilities for helping access storage with arbitrary key types. 
-use crate::{hash::ReversibleStorageHasher, storage::unhashed, StorageHasher, Twox128}; +use crate::{ + hash::ReversibleStorageHasher, + storage::{storage_prefix, unhashed}, + StorageHasher, Twox128, +}; use codec::{Decode, Encode}; use sp_std::prelude::*; @@ -47,8 +51,8 @@ impl StorageIterator { )] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + prefix.extend_from_slice(&storage_prefix); prefix.extend_from_slice(suffix); let previous_key = prefix.clone(); Self { prefix, previous_key, drain: false, _phantom: Default::default() } @@ -112,8 +116,8 @@ impl StorageKeyIterator { )] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + prefix.extend_from_slice(&storage_prefix); prefix.extend_from_slice(suffix); let previous_key = prefix.clone(); Self { prefix, previous_key, drain: false, _phantom: Default::default() } @@ -173,8 +177,8 @@ pub fn storage_iter_with_suffix( suffix: &[u8], ) -> PrefixIterator<(Vec, T)> { let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + prefix.extend_from_slice(&storage_prefix); prefix.extend_from_slice(suffix); let previous_key = prefix.clone(); let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { @@ -204,8 +208,9 @@ pub fn storage_key_iter_with_suffix< suffix: &[u8], ) -> PrefixIterator<(K, T)> { let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + + prefix.extend_from_slice(&storage_prefix); prefix.extend_from_slice(suffix); let previous_key = prefix.clone(); let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { @@ -225,8 +230,8 @@ pub fn have_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> bool { /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn get_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> Option { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); frame_support::storage::unhashed::get::(&key) } @@ -234,8 +239,8 @@ pub fn get_storage_value(module: &[u8], item: &[u8], hash: &[ /// Take a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn take_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> Option { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); frame_support::storage::unhashed::take::(&key) } @@ -243,8 +248,8 @@ pub fn take_storage_value(module: &[u8], item: &[u8], hash: & /// Put a particular value into storage by the `module`, the map's `item` name and the key `hash`. 
pub fn put_storage_value(module: &[u8], item: &[u8], hash: &[u8], value: T) { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); frame_support::storage::unhashed::put(&key, &value); } @@ -253,8 +258,8 @@ pub fn put_storage_value(module: &[u8], item: &[u8], hash: &[u8], val /// `hash`. pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); frame_support::storage::unhashed::kill_prefix(&key, None); } @@ -293,13 +298,8 @@ pub fn move_storage_from_pallet( old_pallet_name: &[u8], new_pallet_name: &[u8], ) { - let mut new_prefix = Vec::new(); - new_prefix.extend_from_slice(&Twox128::hash(new_pallet_name)); - new_prefix.extend_from_slice(&Twox128::hash(storage_name)); - - let mut old_prefix = Vec::new(); - old_prefix.extend_from_slice(&Twox128::hash(old_pallet_name)); - old_prefix.extend_from_slice(&Twox128::hash(storage_name)); + let new_prefix = storage_prefix(new_pallet_name, storage_name); + let old_prefix = storage_prefix(old_pallet_name, storage_name); move_prefix(&old_prefix, &new_prefix); diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index ac2ddaa73c3b6..8cee9faf6e81d 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -18,7 +18,7 @@ //! Stuff to do with the runtime's storage. use crate::{ - hash::{ReversibleStorageHasher, StorageHasher, Twox128}, + hash::{ReversibleStorageHasher, StorageHasher}, storage::types::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, KeyGenerator, ReversibleKeyGenerator, TupleToEncodedIter, @@ -1108,10 +1108,7 @@ pub trait StoragePrefixedMap { /// Final full prefix that prefixes all keys. fn final_prefix() -> [u8; 32] { - let mut final_key = [0u8; 32]; - final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix())); - final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix())); - final_key + crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix()) } /// Remove all value of the storage. @@ -1361,10 +1358,24 @@ where } } +/// Returns the storage prefix for a specific pallet name and storage name. +/// +/// The storage prefix is `concat(twox_128(pallet_name), twox_128(storage_name))`. 
+pub fn storage_prefix(pallet_name: &[u8], storage_name: &[u8]) -> [u8; 32] { + let pallet_hash = sp_io::hashing::twox_128(pallet_name); + let storage_hash = sp_io::hashing::twox_128(storage_name); + + let mut final_key = [0u8; 32]; + final_key[..16].copy_from_slice(&pallet_hash); + final_key[16..].copy_from_slice(&storage_hash); + + final_key +} + #[cfg(test)] mod test { use super::*; - use crate::{assert_ok, hash::Identity}; + use crate::{assert_ok, hash::Identity, Twox128}; use bounded_vec::BoundedVec; use core::convert::{TryFrom, TryInto}; use generator::StorageValue as _; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 965cce234288f..adba88e5acbf3 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -124,14 +124,7 @@ pub trait OnRuntimeUpgradeHelpersExt { /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. #[cfg(feature = "try-runtime")] fn storage_key(ident: &str) -> [u8; 32] { - let prefix = sp_io::hashing::twox_128(ON_RUNTIME_UPGRADE_PREFIX); - let ident = sp_io::hashing::twox_128(ident.as_bytes()); - - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&prefix); - final_key[16..].copy_from_slice(&ident); - - final_key + crate::storage::storage_prefix(ON_RUNTIME_UPGRADE_PREFIX, ident.as_bytes()) } /// Get temporary storage data written by [`Self::set_temp_storage`]. diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index 8b1707855f7b0..e877f29e0a137 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -92,15 +92,7 @@ impl StorageVersion { /// See [`STORAGE_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. pub fn storage_key() -> [u8; 32] { let pallet_name = P::name(); - - let pallet_name = sp_io::hashing::twox_128(pallet_name.as_bytes()); - let postfix = sp_io::hashing::twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX); - - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&pallet_name); - final_key[16..].copy_from_slice(&postfix); - - final_key + crate::storage::storage_prefix(pallet_name.as_bytes(), STORAGE_VERSION_STORAGE_KEY_POSTFIX) } /// Put this storage version for the given pallet into the storage. 
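For reference, a minimal sketch of how the new `storage_prefix` helper composes keys (the pallet and storage item names here are arbitrary examples, not taken from the patch):

use frame_support::storage::storage_prefix;
use sp_io::hashing::twox_128;

fn main() {
	// The prefix is twox_128(pallet_name) ++ twox_128(storage_name), 32 bytes in
	// total, which is exactly what the hand-rolled code removed by this patch built.
	let prefix = storage_prefix(b"System", b"Number");

	let mut expected = [0u8; 32];
	expected[..16].copy_from_slice(&twox_128(b"System"));
	expected[16..].copy_from_slice(&twox_128(b"Number"));
	assert_eq!(prefix, expected);

	// A StorageValue uses the prefix directly as its final key; maps append the
	// hashed, SCALE-encoded map key to it, as in the `*_final_key` helpers above.
	println!("prefix = {:?}", prefix);
}
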
diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index 666dda49935ef..50c8387bca555 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -428,16 +428,10 @@ mod tests { #[test] fn storage_info() { use frame_support::{ - pallet_prelude::*, + storage::storage_prefix as prefix, traits::{StorageInfo, StorageInfoTrait}, - StorageHasher, - }; - let prefix = |pallet_name, storage_name| { - let mut res = [0u8; 32]; - res[0..16].copy_from_slice(&Twox128::hash(pallet_name)); - res[16..32].copy_from_slice(&Twox128::hash(storage_name)); - res }; + pretty_assertions::assert_eq!( >::storage_info(), vec![ @@ -717,15 +711,8 @@ mod test2 { #[test] fn storage_info() { use frame_support::{ - pallet_prelude::*, + storage::storage_prefix as prefix, traits::{StorageInfo, StorageInfoTrait}, - StorageHasher, - }; - let prefix = |pallet_name, storage_name| { - let mut res = [0u8; 32]; - res[0..16].copy_from_slice(&Twox128::hash(pallet_name)); - res[16..32].copy_from_slice(&Twox128::hash(storage_name)); - res }; pretty_assertions::assert_eq!( >::storage_info(), diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 00af4d261c659..80bae000b6c77 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -934,14 +934,7 @@ fn migrate_from_pallet_version_to_storage_version() { const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; fn pallet_version_key(name: &str) -> [u8; 32] { - let pallet_name = sp_io::hashing::twox_128(name.as_bytes()); - let postfix = sp_io::hashing::twox_128(PALLET_VERSION_STORAGE_KEY_POSTFIX); - - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&pallet_name); - final_key[16..].copy_from_slice(&postfix); - - final_key + frame_support::storage::storage_prefix(name.as_bytes(), PALLET_VERSION_STORAGE_KEY_POSTFIX) } TestExternalities::default().execute_with(|| { @@ -1274,16 +1267,8 @@ fn test_pallet_info_access() { #[test] fn test_storage_info() { use frame_support::{ - pallet_prelude::*, + storage::storage_prefix as prefix, traits::{StorageInfo, StorageInfoTrait}, - StorageHasher, - }; - - let prefix = |pallet_name, storage_name| { - let mut res = [0u8; 32]; - res[0..16].copy_from_slice(&Twox128::hash(pallet_name)); - res[16..32].copy_from_slice(&Twox128::hash(storage_name)); - res }; assert_eq!( diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index f3677113dabeb..3d7303fafdcf5 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -5,9 +5,9 @@ error[E0107]: missing generics for trait `Hooks` | ^^^^^ expected 1 type argument | note: trait defined here, with 1 type parameter: `BlockNumber` - --> $DIR/hooks.rs:221:11 + --> $DIR/hooks.rs:214:11 | -221 | pub trait Hooks { +214 | pub trait Hooks { | ^^^^^ ----------- help: use angle brackets to add missing type argument | From d5dd9153fab3bc7a6a15bd0c6fd0bc1dc81cee7a Mon Sep 17 00:00:00 2001 From: david Date: Wed, 25 Aug 2021 22:07:56 +0100 Subject: [PATCH 14/14] implemented pow multithreaded worker --- client/consensus/pow/src/lib.rs | 40 ++++++++++++++++----------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index a03d13a37fe13..a03e008a743fd 100644 --- a/client/consensus/pow/src/lib.rs +++ 
b/client/consensus/pow/src/lib.rs @@ -277,7 +277,7 @@ where execution_context: ExecutionContext, ) -> Result<(), Error> { if *block.header().number() < self.check_inherents_after { - return Ok(()); + return Ok(()) } if let Err(e) = self.can_author_with.can_author_with(&block_id) { @@ -287,7 +287,7 @@ where e, ); - return Ok(()); + return Ok(()) } let inherent_data = inherent_data_providers @@ -389,7 +389,7 @@ where &inner_seal, difficulty, )? { - return Err(Error::::InvalidSeal.into()); + return Err(Error::::InvalidSeal.into()) } aux.difficulty = difficulty; @@ -407,7 +407,7 @@ where fetch_seal::(best_header.digest().logs.last(), best_hash)?; self.algorithm.break_tie(&best_inner_seal, &inner_seal) - } + }, }, )); } @@ -437,20 +437,20 @@ impl PowVerifier { let hash = header.hash(); let (seal, inner_seal) = match header.digest_mut().pop() { - Some(DigestItem::Seal(id, seal)) => { + Some(DigestItem::Seal(id, seal)) => if id == POW_ENGINE_ID { (DigestItem::Seal(id, seal.clone()), seal) } else { return Err(Error::WrongEngine(id)); } - } + _ => return Err(Error::HeaderUnsealed(hash)), }; let pre_hash = header.hash(); if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { - return Err(Error::FailedPreliminaryVerify); + return Err(Error::FailedPreliminaryVerify) } Ok((header, seal)) @@ -579,7 +579,7 @@ where if sync_oracle.is_major_syncing() { debug!(target: "pow", "Skipping proposal due to sync."); - continue; + continue } let best_header = match select_chain.best_chain().await { @@ -591,7 +591,7 @@ where Select best chain error: {:?}", err ); - continue; + continue } }; let best_hash = best_header.hash(); @@ -603,7 +603,7 @@ where Probably a node update is required!", err, ); - continue; + continue } // The worker is locked for the duration of the whole proposing period. Within this @@ -618,7 +618,7 @@ where Fetch difficulty failed: {:?}", err, ); - continue; + continue } }; @@ -634,7 +634,7 @@ where Creating inherent data providers failed: {:?}", err, ); - continue; + continue } }; @@ -647,7 +647,7 @@ where Creating inherent data failed: {:?}", e, ); - continue; + continue } }; @@ -667,7 +667,7 @@ where Creating proposer failed: {:?}", err, ); - continue; + continue } }; @@ -683,7 +683,7 @@ where Creating proposal failed: {:?}", err, ); - continue; + continue } }; @@ -719,12 +719,12 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err for log in header.digest().logs() { trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); match (log, pre_digest.is_some()) { - (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { - return Err(Error::MultiplePreRuntimeDigests) - } + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => + return Err(Error::MultiplePreRuntimeDigests), + (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { pre_digest = Some(v.clone()); - } + }, (_, _) => trace!(target: "pow", "Ignoring digest not meant for us"), } } @@ -742,7 +742,7 @@ fn fetch_seal( if id == &POW_ENGINE_ID { Ok(seal.clone()) } else { - return Err(Error::::WrongEngine(*id).into()); + return Err(Error::::WrongEngine(*id).into()) } } _ => return Err(Error::::HeaderUnsealed(hash).into()),
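The hunks above are largely mechanical restyling of the mining loop; the commit title refers to running the nonce search on several threads. As a rough, self-contained illustration of that idea only (this is not the sc-consensus-pow API: the hash, difficulty check and threading layout below are toy stand-ins):

use std::{
	sync::{
		atomic::{AtomicBool, Ordering},
		Arc,
	},
	thread,
};

// Toy difficulty check standing in for the real PoW verification: the mixed
// value must end in at least `difficulty` zero bits.
fn meets_difficulty(pre_hash: u64, nonce: u64, difficulty: u32) -> bool {
	let mut x = pre_hash ^ nonce.wrapping_mul(0x9e37_79b9_7f4a_7c15);
	x ^= x >> 33;
	x = x.wrapping_mul(0xff51_afd7_ed55_8ccd);
	x ^= x >> 33;
	x.trailing_zeros() >= difficulty
}

fn main() {
	let pre_hash = 0xdead_beef_u64;
	let difficulty = 20;
	let threads = 4u64;
	let found = Arc::new(AtomicBool::new(false));

	let handles: Vec<_> = (0..threads)
		.map(|id| {
			let found = Arc::clone(&found);
			thread::spawn(move || {
				// Each thread searches a disjoint stride of nonces: id, id + threads, ...
				let mut nonce = id;
				while !found.load(Ordering::Relaxed) {
					if meets_difficulty(pre_hash, nonce, difficulty) {
						found.store(true, Ordering::Relaxed);
						return Some(nonce)
					}
					nonce += threads;
				}
				None
			})
		})
		.collect();

	for handle in handles {
		if let Some(nonce) = handle.join().unwrap() {
			println!("found nonce {}", nonce);
		}
	}
}
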