From f0b945c3a42f966721b223ef59bcd243069c9506 Mon Sep 17 00:00:00 2001
From: Benjamin Kampmann
Date: Fri, 6 Jul 2018 15:44:26 +0200
Subject: [PATCH 1/2] adding rustfmt.toml

---
 .rustfmt.toml | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)
 create mode 100644 .rustfmt.toml

diff --git a/.rustfmt.toml b/.rustfmt.toml
new file mode 100644
index 0000000000000..35fc4954dc42b
--- /dev/null
+++ b/.rustfmt.toml
@@ -0,0 +1,75 @@
+# SEE https://github.com/rust-lang-nursery/rustfmt/blob/master/Configurations.md
+# activate unstable features, so we can configure a few of them
+unstable_features = true
+
+
+# everything without a comment is at the default value, but we could
+# also always pin the rustfmt version with the following line
+# (at the time of writing):
+#
+# required_version = "0.8.2"
+#
+indent_style = "Block"
+use_small_heuristics = "Default"
+binop_separator = "Front"
+combine_control_expr = true
+comment_width = 100
+condense_wildcard_suffixes = false
+control_brace_style = "AlwaysSameLine"
+disable_all_formatting = false
+error_on_line_overflow = false
+error_on_unformatted = false
+fn_args_density = "Tall"
+brace_style = "SameLineWhere"
+empty_item_single_line = true
+fn_single_line = false
+where_single_line = false
+force_explicit_abi = true
+format_strings = false
+# use tabs not spaces
+hard_tabs = true
+imports_indent = "Block"
+imports_layout = "Mixed"
+# do keep imports as they were written
+merge_imports = false
+# put a trailing comma in match blocks
+match_block_trailing_comma = true
+max_width = 100
+merge_derives = true
+force_multiline_blocks = false
+newline_style = "Unix"
+normalize_comments = false
+remove_nested_parens = true
+reorder_imports = true
+# our module order is intentional
+reorder_modules = false
+reorder_impl_items = false
+report_todo = "Never"
+report_fixme = "Never"
+skip_children = false
+space_after_colon = true
+space_before_colon = false
+struct_field_align_threshold = 0
+spaces_around_ranges = false
+struct_lit_single_line = true
+tab_spaces = 4
+trailing_comma = "Vertical"
+# do not leave unneeded semicolons around
+trailing_semicolon = false
+type_punctuation_density = "Wide"
+# we use field shorthands
+use_field_init_shorthand = true
+# we prefer `?` over `try!`
+use_try_shorthand = true
+# we prefer to have a block over long lines
+# even in comments :D
+wrap_comments = true
+# do not create unnecessary arm blocks
+match_arm_blocks = false
+blank_lines_upper_bound = 1
+blank_lines_lower_bound = 0
+hide_parse_errors = false
+color = "Auto"
+# license_template_path = ""
+ignore = []
\ No newline at end of file

From 0b778c27c9904f2e5971d2e5d3c22b8dc3cf17a1 Mon Sep 17 00:00:00 2001
From: Benjamin Kampmann
Date: Fri, 6 Jul 2018 15:45:15 +0200
Subject: [PATCH 2/2] Rustfmt our codebase

---
 demo/cli/src/error.rs | 2 +-
 demo/cli/src/lib.rs | 94 +--
 demo/executor/src/lib.rs | 223 +++++--
 demo/primitives/src/lib.rs | 10 +-
 demo/runtime/src/lib.rs | 17 +-
 polkadot/api/src/full.rs | 121 ++--
 polkadot/api/src/lib.rs | 39 +-
 polkadot/api/src/light.rs | 53 +-
 polkadot/cli/src/error.rs | 2 +-
 polkadot/cli/src/informant.rs | 63 +-
 polkadot/cli/src/lib.rs | 262 +++++---
 polkadot/collator/src/lib.rs | 129 ++--
 polkadot/consensus/src/collation.rs | 61 +-
 polkadot/consensus/src/dynamic_inclusion.rs | 53 +-
 polkadot/consensus/src/evaluation.rs | 31 +-
 polkadot/consensus/src/lib.rs | 330 ++++++----
 polkadot/consensus/src/service.rs | 280 +++++---
 .../consensus/src/shared_table/includable.rs | 28 +-
 polkadot/consensus/src/shared_table/mod.rs | 209 ++--
 polkadot/executor/src/lib.rs | 3 +-
 polkadot/parachain/src/lib.rs | 12 +-
 polkadot/parachain/src/wasm.rs | 45 +-
 polkadot/parachain/tests/basic_add.rs | 42 +-
 polkadot/primitives/src/lib.rs | 9 +-
 polkadot/primitives/src/parachain.rs | 63 +-
 polkadot/runtime/src/checked_block.rs | 84 ++-
 polkadot/runtime/src/lib.rs | 108 +--
 polkadot/runtime/src/parachains.rs | 230 ++---
 polkadot/runtime/src/utils.rs | 19 +-
 polkadot/service/src/chain_spec.rs | 180 +++--
 polkadot/service/src/components.rs | 207 ++--
 polkadot/service/src/config.rs | 6 +-
 polkadot/service/src/error.rs | 2 +-
 polkadot/service/src/lib.rs | 80 ++-
 polkadot/statement-table/src/generic.rs | 350 +++++-----
 polkadot/statement-table/src/lib.rs | 20 +-
 polkadot/transaction-pool/src/lib.rs | 493 +++++++++-----
 safe-mix/src/lib.rs | 27 +-
 subkey/src/main.rs | 34 +-
 substrate/bft/src/generic/accumulator.rs | 532 +++++++++------
 substrate/bft/src/generic/mod.rs | 232 +++----
 substrate/bft/src/generic/tests.rs | 101 +--
 substrate/bft/src/lib.rs | 385 +++++++----
 substrate/client/db/src/lib.rs | 380 +++++++----
 substrate/client/db/src/light.rs | 80 ++-
 substrate/client/db/src/utils.rs | 109 ++-
 substrate/client/src/backend.rs | 20 +-
 substrate/client/src/block_builder.rs | 59 +-
 substrate/client/src/blockchain.rs | 9 +-
 substrate/client/src/call_executor.rs | 109 ++-
 substrate/client/src/client.rs | 405 ++++++++----
 substrate/client/src/error.rs | 4 +-
 substrate/client/src/genesis.rs | 117 ++--
 substrate/client/src/in_mem.rs | 136 ++--
 substrate/client/src/lib.rs | 33 +-
 substrate/client/src/light/backend.rs | 72 +-
 substrate/client/src/light/blockchain.rs | 34 +-
 substrate/client/src/light/call_executor.rs | 121 ++--
 substrate/client/src/light/fetcher.rs | 31 +-
 substrate/client/src/light/mod.rs | 29 +-
 substrate/codec/src/joiner.rs | 7 +-
 substrate/codec/src/keyedvec.rs | 4 +-
 substrate/codec/src/lib.rs | 2 +-
 substrate/codec/src/slicable.rs | 59 +-
 substrate/ed25519/src/lib.rs | 49 +-
 substrate/environmental/src/lib.rs | 91 +--
 substrate/executor/src/lib.rs | 32 +-
 substrate/executor/src/native_executor.rs | 59 +-
 substrate/executor/src/sandbox.rs | 123 ++--
 substrate/executor/src/wasm_executor.rs | 157 +++--
 substrate/executor/src/wasm_utils.rs | 95 ++-
 substrate/extrinsic-pool/src/api.rs | 8 +-
 substrate/extrinsic-pool/src/listener.rs | 28 +-
 substrate/extrinsic-pool/src/pool.rs | 59 +-
 substrate/extrinsic-pool/src/watcher.rs | 7 +-
 substrate/keyring/src/lib.rs | 40 +-
 substrate/keystore/src/lib.rs | 41 +-
 substrate/misbehavior-check/src/lib.rs | 130 ++--
 substrate/network/src/blocks.rs | 245 +++++--
 substrate/network/src/chain.rs | 82 ++-
 substrate/network/src/config.rs | 4 +-
 substrate/network/src/consensus.rs | 113 ++--
 substrate/network/src/error.rs | 2 +-
 substrate/network/src/io.rs | 10 +-
 substrate/network/src/lib.rs | 71 +-
 substrate/network/src/message.rs | 99 ++-
 substrate/network/src/on_demand.rs | 268 +++++---
 substrate/network/src/protocol.rs | 232 ++++---
 substrate/network/src/service.rs | 173 +++--
 substrate/network/src/sync.rs | 232 ++++---
 substrate/network/src/test/consensus.rs | 16 +-
 substrate/network/src/test/mod.rs | 82 ++-
 substrate/network/src/test/sync.rs | 93 ++-
 substrate/primitives/src/authority_id.rs | 15 +-
 substrate/primitives/src/bytes.rs | 49 +-
 substrate/primitives/src/hash.rs | 92 ++-
 substrate/primitives/src/hashing.rs | 4 +-
 substrate/primitives/src/hexdisplay.rs | 26 +-
 substrate/primitives/src/lib.rs | 21 +-
 substrate/primitives/src/sandbox.rs | 60 +-
 substrate/primitives/src/storage.rs | 4 +-
 substrate/primitives/src/uint.rs | 30 +-
 substrate/rpc-servers/src/lib.rs | 23 +-
 substrate/rpc/src/author/mod.rs | 27 +-
 substrate/rpc/src/author/tests.rs | 27 +-
 substrate/rpc/src/chain/mod.rs | 34 +-
 substrate/rpc/src/chain/tests.rs | 13 +-
 substrate/rpc/src/lib.rs | 2 +-
 substrate/rpc/src/metadata.rs | 2 +-
 substrate/rpc/src/state/mod.rs | 22 +-
 substrate/rpc/src/state/tests.rs | 4 +-
 substrate/rpc/src/subscriptions.rs | 11 +-
 substrate/rpc/src/system/tests.rs | 7 +-
 substrate/runtime-io/build.rs | 12 +-
 substrate/runtime-io/src/lib.rs | 11 +-
 substrate/runtime-sandbox/build.rs | 12 +-
 substrate/runtime-sandbox/src/lib.rs | 11 +-
 substrate/runtime-std/src/lib.rs | 19 +-
 substrate/runtime-support/src/dispatch.rs | 8 +-
 substrate/runtime-support/src/lib.rs | 37 +-
 .../runtime-support/src/storage/generator.rs | 74 ++-
 substrate/runtime-support/src/storage/mod.rs | 66 +-
 substrate/runtime/consensus/src/lib.rs | 31 +-
 substrate/runtime/contract/src/lib.rs | 153 ++---
 substrate/runtime/council/src/lib.rs | 620 ++++++++++------
 substrate/runtime/council/src/voting.rs | 194 ++++--
 substrate/runtime/democracy/src/lib.rs | 255 ++++---
 .../runtime/democracy/src/vote_threshold.rs | 47 +-
 substrate/runtime/executive/src/lib.rs | 219 ++++---
 substrate/runtime/primitives/src/bft.rs | 71 +-
 substrate/runtime/primitives/src/generic.rs | 172 +++--
 substrate/runtime/primitives/src/lib.rs | 43 +-
 substrate/runtime/primitives/src/testing.rs | 144 +++-
 substrate/runtime/primitives/src/traits.rs | 183 ++++--
 substrate/runtime/session/src/lib.rs | 84 ++-
 substrate/runtime/staking/src/account_db.rs | 27 +-
 substrate/runtime/staking/src/address.rs | 43 +-
 substrate/runtime/staking/src/double_map.rs | 4 +-
 .../runtime/staking/src/genesis_config.rs | 55 +-
 substrate/runtime/staking/src/lib.rs | 200 ++++--
 substrate/runtime/staking/src/mock.rs | 124 ++--
 substrate/runtime/staking/src/tests.rs | 98 ++-
 substrate/runtime/system/src/lib.rs | 78 ++-
 substrate/runtime/timestamp/src/lib.rs | 50 +-
 substrate/runtime/version/src/lib.rs | 30 +-
 substrate/serializer/src/lib.rs | 7 +-
 substrate/state-db/src/lib.rs | 170 +++--
 substrate/state-db/src/pruning.rs | 78 ++-
 substrate/state-db/src/test.rs | 15 +-
 substrate/state-db/src/unfinalized.rs | 101 ++-
 substrate/state-machine/src/backend.rs | 45 +-
 substrate/state-machine/src/ext.rs | 32 +-
 substrate/state-machine/src/lib.rs | 88 ++-
 .../state-machine/src/proving_backend.rs | 52 +-
 substrate/state-machine/src/testing.rs | 21 +-
 substrate/state-machine/src/trie_backend.rs | 74 ++-
 substrate/telemetry/src/lib.rs | 42 +-
 substrate/test-client/src/client_ext.rs | 66 +-
 substrate/test-client/src/lib.rs | 7 +-
 substrate/test-runtime/src/genesismap.rs | 30 +-
 substrate/test-runtime/src/lib.rs | 36 +-
 substrate/test-runtime/src/system.rs | 116 ++--
 162 files changed, 9297 insertions(+), 5063 deletions(-)

diff --git a/demo/cli/src/error.rs b/demo/cli/src/error.rs
index 84fa2f1092987..8de348c4ef1b2 100644
--- a/demo/cli/src/error.rs
+++ b/demo/cli/src/error.rs
@@ -25,5 +25,5 @@ error_chain!
{ } links { Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"]; - } + } } diff --git a/demo/cli/src/lib.rs b/demo/cli/src/lib.rs index 9dc871fa9f3ce..b9b2bf6f1766d 100644 --- a/demo/cli/src/lib.rs +++ b/demo/cli/src/lib.rs @@ -19,22 +19,22 @@ #![warn(missing_docs)] extern crate ctrlc; +extern crate demo_executor; +extern crate demo_primitives; +extern crate demo_runtime; extern crate ed25519; extern crate env_logger; extern crate futures; -extern crate tokio_core; -extern crate triehash; extern crate substrate_client as client; extern crate substrate_codec as codec; +extern crate substrate_extrinsic_pool as extrinsic_pool; extern crate substrate_primitives as primitives; extern crate substrate_rpc; extern crate substrate_rpc_servers as rpc; extern crate substrate_runtime_io as runtime_io; extern crate substrate_state_machine as state_machine; -extern crate substrate_extrinsic_pool as extrinsic_pool; -extern crate demo_executor; -extern crate demo_primitives; -extern crate demo_runtime; +extern crate tokio_core; +extern crate triehash; #[macro_use] extern crate hex_literal; @@ -47,20 +47,23 @@ extern crate log; pub mod error; -use std::sync::Arc; use demo_primitives::Hash; -use demo_runtime::{Block, BlockId, UncheckedExtrinsic, GenesisConfig, - ConsensusConfig, CouncilConfig, DemocracyConfig, SessionConfig, StakingConfig, - TimestampConfig}; +use demo_runtime::{ + Block, BlockId, ConsensusConfig, CouncilConfig, DemocracyConfig, GenesisConfig, SessionConfig, + StakingConfig, TimestampConfig, UncheckedExtrinsic, +}; use futures::{Future, Sink, Stream}; +use std::sync::Arc; struct DummyPool; impl extrinsic_pool::api::ExtrinsicPool for DummyPool { type Error = extrinsic_pool::txpool::Error; - fn submit(&self, _block: BlockId, _: Vec) - -> Result, Self::Error> - { + fn submit( + &self, + _block: BlockId, + _: Vec, + ) -> Result, Self::Error> { Err("unimplemented".into()) } } @@ -86,12 +89,15 @@ impl substrate_rpc::system::SystemApi for DummySystem { /// 9556-9591 Unassigned /// 9803-9874 Unassigned /// 9926-9949 Unassigned -pub fn run(args: I) -> error::Result<()> where +pub fn run(args: I) -> error::Result<()> +where I: IntoIterator, T: Into + Clone, { let yaml = load_yaml!("./cli.yml"); - let matches = clap::App::from_yaml(yaml).version(crate_version!()).get_matches_from_safe(args)?; + let matches = clap::App::from_yaml(yaml) + .version(crate_version!()) + .get_matches_from_safe(args)?; // TODO [ToDr] Split parameters parsing from actual execution. let log_pattern = matches.value_of("log").unwrap_or(""); @@ -103,13 +109,13 @@ pub fn run(args: I) -> error::Result<()> where let god_key = hex!["3d866ec8a9190c8343c2fc593d21d8a6d0c5c4763aaab2349de3a6111d64d124"]; let genesis_config = GenesisConfig { consensus: Some(ConsensusConfig { - code: vec![], // TODO + code: vec![], // TODO authorities: vec![god_key.clone().into()], }), system: None, session: Some(SessionConfig { validators: vec![god_key.clone().into()], - session_length: 720, // that's 1 hour per session. + session_length: 720, // that's 1 hour per session. broken_percent_late: 30, }), staking: Some(StakingConfig { @@ -122,35 +128,41 @@ pub fn run(args: I) -> error::Result<()> where contract_fee: 0, reclaim_rebate: 0, existential_deposit: 500, - balances: vec![(god_key.clone().into(), 1u64 << 63)].into_iter().collect(), + balances: vec![(god_key.clone().into(), 1u64 << 63)] + .into_iter() + .collect(), validator_count: 12, - sessions_per_era: 24, // 24 hours per era. - bonding_duration: 90, // 90 days per bond. 
+ sessions_per_era: 24, // 24 hours per era. + bonding_duration: 90, // 90 days per bond. early_era_slash: 10000, session_reward: 100, }), democracy: Some(DemocracyConfig { - launch_period: 120 * 24 * 14, // 2 weeks per public referendum - voting_period: 120 * 24 * 28, // 4 weeks to discuss & vote on an active referendum - minimum_deposit: 1000, // 1000 as the minimum deposit for a referendum + launch_period: 120 * 24 * 14, // 2 weeks per public referendum + voting_period: 120 * 24 * 28, // 4 weeks to discuss & vote on an active referendum + minimum_deposit: 1000, // 1000 as the minimum deposit for a referendum }), council: Some(CouncilConfig { active_council: vec![], - candidacy_bond: 1000, // 1000 to become a council candidate - voter_bond: 100, // 100 down to vote for a candidate - present_slash_per_voter: 1, // slash by 1 per voter for an invalid presentation. - carry_count: 24, // carry over the 24 runners-up to the next council election - presentation_duration: 120 * 24, // one day for presenting winners. - approval_voting_period: 7 * 120 * 24, // one week period between possible council elections. - term_duration: 180 * 120 * 24, // 180 day term duration for the council. - desired_seats: 0, // start with no council: we'll raise this once the stake has been dispersed a bit. - inactive_grace_period: 1, // one addition vote should go by before an inactive voter can be reaped. - - cooloff_period: 90 * 120 * 24, // 90 day cooling off period if council member vetoes a proposal. + candidacy_bond: 1000, // 1000 to become a council candidate + voter_bond: 100, // 100 down to vote for a candidate + present_slash_per_voter: 1, /* slash by 1 per voter for an invalid + * presentation. */ + carry_count: 24, // carry over the 24 runners-up to the next council election + presentation_duration: 120 * 24, // one day for presenting winners. + approval_voting_period: 7 * 120 * 24, // one week period between possible council elections. + term_duration: 180 * 120 * 24, // 180 day term duration for the council. + desired_seats: 0, /* start with no council: we'll raise this once the stake has been + * dispersed a bit. */ + inactive_grace_period: 1, /* one addition vote should go by before an inactive voter + * can be reaped. */ + + cooloff_period: 90 * 120 * 24, /* 90 day cooling off period if council member vetoes + * a proposal. */ voting_period: 7 * 120 * 24, // 7 day voting period for council members. }), timestamp: Some(TimestampConfig { - period: 5, // 5 second block time. + period: 5, // 5 second block time. }), }; @@ -168,7 +180,7 @@ pub fn run(args: I) -> error::Result<()> where ( rpc::start_http(&http_address, handler())?, - rpc::start_ws(&ws_address, handler())? 
+ rpc::start_ws(&ws_address, handler())?, ) }; @@ -176,9 +188,14 @@ pub fn run(args: I) -> error::Result<()> where info!("Starting validator."); let (exit_send, exit) = futures::sync::mpsc::channel(1); ctrlc::CtrlC::set_handler(move || { - exit_send.clone().send(()).wait().expect("Error sending exit notification"); + exit_send + .clone() + .send(()) + .wait() + .expect("Error sending exit notification"); }); - core.run(exit.into_future()).expect("Error running informant event loop"); + core.run(exit.into_future()) + .expect("Error running informant event loop"); return Ok(()) } @@ -201,6 +218,5 @@ fn init_logger(pattern: &str) { builder.parse(pattern); - builder.init().expect("Logger initialized only once."); } diff --git a/demo/executor/src/lib.rs b/demo/executor/src/lib.rs index 0b914b396c4ef..3a6ceab492bdc 100644 --- a/demo/executor/src/lib.rs +++ b/demo/executor/src/lib.rs @@ -18,44 +18,58 @@ //! executed is equivalent to the natively compiled code. extern crate demo_runtime; -#[macro_use] extern crate substrate_executor; -extern crate substrate_codec as codec; -extern crate substrate_state_machine as state_machine; -extern crate substrate_runtime_io as runtime_io; -extern crate substrate_primitives as primitives; +#[macro_use] +extern crate substrate_executor; extern crate demo_primitives; extern crate ed25519; +extern crate substrate_codec as codec; +extern crate substrate_primitives as primitives; +extern crate substrate_runtime_io as runtime_io; +extern crate substrate_state_machine as state_machine; extern crate triehash; -#[cfg(test)] extern crate substrate_keyring as keyring; -#[cfg(test)] extern crate substrate_runtime_primitives as runtime_primitives; -#[cfg(test)] extern crate substrate_runtime_support as runtime_support; -#[cfg(test)] extern crate substrate_runtime_staking as staking; -#[cfg(test)] extern crate substrate_runtime_system as system; -#[cfg(test)] #[macro_use] extern crate hex_literal; +#[cfg(test)] +extern crate substrate_keyring as keyring; +#[cfg(test)] +extern crate substrate_runtime_primitives as runtime_primitives; +#[cfg(test)] +extern crate substrate_runtime_staking as staking; +#[cfg(test)] +extern crate substrate_runtime_support as runtime_support; +#[cfg(test)] +extern crate substrate_runtime_system as system; +#[cfg(test)] +#[macro_use] +extern crate hex_literal; native_executor_instance!(pub Executor, demo_runtime::api::dispatch, demo_runtime::VERSION, include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm")); #[cfg(test)] mod tests { - use runtime_io; use super::Executor; - use substrate_executor::WasmExecutor; - use codec::{Slicable, Joiner}; + use codec::{Joiner, Slicable}; + use demo_primitives::{AccountId, BlockNumber, Hash}; + use demo_runtime::{ + BareExtrinsic, Block, BuildStorage, Call, Concrete, Extrinsic, GenesisConfig, Header, + SessionConfig, Staking, StakingConfig, UncheckedExtrinsic, + }; + use ed25519::{Pair, Public}; use keyring::Keyring; - use runtime_support::{Hashable, StorageValue, StorageMap}; - use state_machine::{CodeExecutor, TestExternalities}; use primitives::twox_128; - use demo_primitives::{Hash, BlockNumber, AccountId}; + use runtime_io; use runtime_primitives::traits::Header as HeaderT; - use runtime_primitives::{ApplyOutcome, ApplyError, ApplyResult, MaybeUnsigned}; + use runtime_primitives::{ApplyError, ApplyOutcome, ApplyResult, MaybeUnsigned}; + use runtime_support::{Hashable, StorageMap, StorageValue}; + use state_machine::{CodeExecutor, TestExternalities}; + use 
substrate_executor::WasmExecutor; use {staking, system}; - use demo_runtime::{Header, Block, UncheckedExtrinsic, Extrinsic, Call, Concrete, Staking, - BuildStorage, GenesisConfig, SessionConfig, StakingConfig, BareExtrinsic}; - use ed25519::{Public, Pair}; - const BLOATY_CODE: &[u8] = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm"); - const COMPACT_CODE: &[u8] = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm"); + const BLOATY_CODE: &[u8] = include_bytes!( + "../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm" + ); + const COMPACT_CODE: &[u8] = include_bytes!( + "../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm" + ); // TODO: move into own crate. macro_rules! map { @@ -78,8 +92,12 @@ mod tests { index: 0, function: Call::Staking(staking::Call::transfer::(bob().into(), 69)), }; - let signature = MaybeUnsigned(Keyring::from_raw_public(extrinsic.signed.0.clone()).unwrap() - .sign(&extrinsic.encode()).into()); + let signature = MaybeUnsigned( + Keyring::from_raw_public(extrinsic.signed.0.clone()) + .unwrap() + .sign(&extrinsic.encode()) + .into(), + ); let extrinsic = Extrinsic { signed: extrinsic.signed.into(), index: extrinsic.index, @@ -89,7 +107,13 @@ mod tests { } fn from_block_number(n: u64) -> Header { - Header::new(n, Default::default(), Default::default(), [69; 32].into(), Default::default()) + Header::new( + n, + Default::default(), + Default::default(), + [69; 32].into(), + Default::default(), + ) } #[test] @@ -104,9 +128,16 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]; - let r = Executor::new().call(&mut t, BLOATY_CODE, "initialise_block", &vec![].and(&from_block_number(1u64))); + let r = Executor::new().call( + &mut t, + BLOATY_CODE, + "initialise_block", + &vec![].and(&from_block_number(1u64)), + ); assert!(r.is_ok()); - let v = Executor::new().call(&mut t, BLOATY_CODE, "apply_extrinsic", &vec![].and(&xt())).unwrap(); + let v = Executor::new() + .call(&mut t, BLOATY_CODE, "apply_extrinsic", &vec![].and(&xt())) + .unwrap(); let r = ApplyResult::decode(&mut &v[..]).unwrap(); assert_eq!(r, Err(ApplyError::CantPay)); } @@ -123,9 +154,16 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]; - let r = Executor::new().call(&mut t, COMPACT_CODE, "initialise_block", &vec![].and(&from_block_number(1u64))); + let r = Executor::new().call( + &mut t, + COMPACT_CODE, + "initialise_block", + &vec![].and(&from_block_number(1u64)), + ); assert!(r.is_ok()); - let v = Executor::new().call(&mut t, COMPACT_CODE, "apply_extrinsic", &vec![].and(&xt())).unwrap(); + let v = Executor::new() + .call(&mut t, COMPACT_CODE, "apply_extrinsic", &vec![].and(&xt())) + .unwrap(); let r = ApplyResult::decode(&mut &v[..]).unwrap(); assert_eq!(r, Err(ApplyError::CantPay)); } @@ -142,7 +180,12 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]; - let r = Executor::new().call(&mut t, COMPACT_CODE, "initialise_block", &vec![].and(&from_block_number(1u64))); + let r = Executor::new().call( + &mut t, + COMPACT_CODE, + "initialise_block", + &vec![].and(&from_block_number(1u64)), + ); assert!(r.is_ok()); let r = Executor::new().call(&mut t, COMPACT_CODE, "apply_extrinsic", &vec![].and(&xt())); assert!(r.is_ok()); @@ -165,7 +208,12 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]; - let r = Executor::new().call(&mut t, BLOATY_CODE, "initialise_block", &vec![].and(&from_block_number(1u64))); + let r = 
Executor::new().call( + &mut t, + BLOATY_CODE, + "initialise_block", + &vec![].and(&from_block_number(1u64)), + ); assert!(r.is_ok()); let r = Executor::new().call(&mut t, BLOATY_CODE, "apply_extrinsic", &vec![].and(&xt())); assert!(r.is_ok()); @@ -184,7 +232,11 @@ mod tests { system: Some(Default::default()), session: Some(SessionConfig { session_length: 2, - validators: vec![One.to_raw_public().into(), Two.to_raw_public().into(), three], + validators: vec![ + One.to_raw_public().into(), + Two.to_raw_public().into(), + three, + ], broken_percent_late: 100, }), staking: Some(StakingConfig { @@ -207,24 +259,39 @@ mod tests { democracy: Some(Default::default()), council: Some(Default::default()), timestamp: Some(Default::default()), - }.build_storage().unwrap() + }.build_storage() + .unwrap() } - fn construct_block(number: BlockNumber, parent_hash: Hash, state_root: Hash, extrinsics: Vec) -> (Vec, Hash) { + fn construct_block( + number: BlockNumber, + parent_hash: Hash, + state_root: Hash, + extrinsics: Vec, + ) -> (Vec, Hash) { use triehash::ordered_trie_root; - let extrinsics = extrinsics.into_iter().map(|extrinsic| { - let signature = MaybeUnsigned(Pair::from(Keyring::from_public(Public::from_raw(extrinsic.signed.0.clone())).unwrap()) - .sign(&extrinsic.encode()).into()); - let extrinsic = Extrinsic { - signed: extrinsic.signed.into(), - index: extrinsic.index, - function: extrinsic.function, - }; - UncheckedExtrinsic::new(extrinsic, signature) - }).collect::>(); - - let extrinsics_root = ordered_trie_root(extrinsics.iter().map(Slicable::encode)).0.into(); + let extrinsics = extrinsics + .into_iter() + .map(|extrinsic| { + let signature = MaybeUnsigned( + Pair::from( + Keyring::from_public(Public::from_raw(extrinsic.signed.0.clone())).unwrap(), + ).sign(&extrinsic.encode()) + .into(), + ); + let extrinsic = Extrinsic { + signed: extrinsic.signed.into(), + index: extrinsic.index, + function: extrinsic.function, + }; + UncheckedExtrinsic::new(extrinsic, signature) + }) + .collect::>(); + + let extrinsics_root = ordered_trie_root(extrinsics.iter().map(Slicable::encode)) + .0 + .into(); let header = Header { parent_hash, @@ -247,7 +314,7 @@ mod tests { signed: alice(), index: 0, function: Call::Staking(staking::Call::transfer(bob().into(), 69)), - }] + }], ) } @@ -266,8 +333,8 @@ mod tests { signed: alice(), index: 1, function: Call::Staking(staking::Call::transfer(bob().into(), 15)), - } - ] + }, + ], ) } @@ -275,14 +342,18 @@ mod tests { fn full_native_block_import_works() { let mut t = new_test_ext(); - Executor::new().call(&mut t, COMPACT_CODE, "execute_block", &block1().0).unwrap(); + Executor::new() + .call(&mut t, COMPACT_CODE, "execute_block", &block1().0) + .unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq!(Staking::voting_balance(&alice()), 41); assert_eq!(Staking::voting_balance(&bob()), 69); }); - Executor::new().call(&mut t, COMPACT_CODE, "execute_block", &block2().0).unwrap(); + Executor::new() + .call(&mut t, COMPACT_CODE, "execute_block", &block2().0) + .unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq!(Staking::voting_balance(&alice()), 30); @@ -294,14 +365,18 @@ mod tests { fn full_wasm_block_import_works() { let mut t = new_test_ext(); - WasmExecutor.call(&mut t, COMPACT_CODE, "execute_block", &block1().0).unwrap(); + WasmExecutor + .call(&mut t, COMPACT_CODE, "execute_block", &block1().0) + .unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq!(Staking::voting_balance(&alice()), 41); assert_eq!(Staking::voting_balance(&bob()), 69); 
}); - WasmExecutor.call(&mut t, COMPACT_CODE, "execute_block", &block2().0).unwrap(); + WasmExecutor + .call(&mut t, COMPACT_CODE, "execute_block", &block2().0) + .unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq!(Staking::voting_balance(&alice()), 30); @@ -321,10 +396,24 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]; - let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm"); - let r = WasmExecutor.call(&mut t, &foreign_code[..], "initialise_block", &vec![].and(&from_block_number(1u64))); + let foreign_code = include_bytes!( + "../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm" + ); + let r = WasmExecutor.call( + &mut t, + &foreign_code[..], + "initialise_block", + &vec![].and(&from_block_number(1u64)), + ); assert!(r.is_ok()); - let r = WasmExecutor.call(&mut t, &foreign_code[..], "apply_extrinsic", &vec![].and(&xt())).unwrap(); + let r = WasmExecutor + .call( + &mut t, + &foreign_code[..], + "apply_extrinsic", + &vec![].and(&xt()), + ) + .unwrap(); let r = ApplyResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err(ApplyError::CantPay)); } @@ -341,10 +430,24 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]; - let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm"); - let r = WasmExecutor.call(&mut t, &foreign_code[..], "initialise_block", &vec![].and(&from_block_number(1u64))); + let foreign_code = include_bytes!( + "../../runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm" + ); + let r = WasmExecutor.call( + &mut t, + &foreign_code[..], + "initialise_block", + &vec![].and(&from_block_number(1u64)), + ); assert!(r.is_ok()); - let r = WasmExecutor.call(&mut t, &foreign_code[..], "apply_extrinsic", &vec![].and(&xt())).unwrap(); + let r = WasmExecutor + .call( + &mut t, + &foreign_code[..], + "apply_extrinsic", + &vec![].and(&xt()), + ) + .unwrap(); let r = ApplyResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Ok(ApplyOutcome::Success)); diff --git a/demo/primitives/src/lib.rs b/demo/primitives/src/lib.rs index a435edd3696e2..4d8a37cc74915 100644 --- a/demo/primitives/src/lib.rs +++ b/demo/primitives/src/lib.rs @@ -17,16 +17,16 @@ //! Low-level types used throughout the Substrate Demo code. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc))] -#[cfg(feature = "std")] extern crate serde; +#[cfg(feature = "std")] +extern crate serde; -extern crate substrate_runtime_std as rstd; -extern crate substrate_runtime_primitives as runtime_primitives; -extern crate substrate_primitives as primitives; extern crate substrate_codec as codec; +extern crate substrate_primitives as primitives; +extern crate substrate_runtime_primitives as runtime_primitives; +extern crate substrate_runtime_std as rstd; /// An index to a block. 
pub type BlockNumber = u64; diff --git a/demo/runtime/src/lib.rs b/demo/runtime/src/lib.rs index 94c2610aa824d..cfad429eae48a 100644 --- a/demo/runtime/src/lib.rs +++ b/demo/runtime/src/lib.rs @@ -34,23 +34,25 @@ extern crate serde_derive; #[cfg(feature = "std")] extern crate serde; -extern crate substrate_runtime_std as rstd; extern crate substrate_runtime_consensus as consensus; extern crate substrate_runtime_council as council; extern crate substrate_runtime_democracy as democracy; extern crate substrate_runtime_executive as executive; extern crate substrate_runtime_session as session; extern crate substrate_runtime_staking as staking; +extern crate substrate_runtime_std as rstd; extern crate substrate_runtime_system as system; extern crate substrate_runtime_timestamp as timestamp; #[macro_use] extern crate substrate_runtime_version as version; extern crate demo_primitives; +use demo_primitives::{ + AccountId, AccountIndex, Balance, BlockNumber, Hash, Index, SessionKey, Signature, +}; use rstd::prelude::*; -use demo_primitives::{AccountId, AccountIndex, Balance, BlockNumber, Hash, Index, SessionKey, Signature}; use runtime_primitives::generic; -use runtime_primitives::traits::{Convert, HasPublicAux, BlakeTwo256}; +use runtime_primitives::traits::{BlakeTwo256, Convert, HasPublicAux}; use version::RuntimeVersion; #[cfg(any(feature = "std", test))] @@ -191,8 +193,13 @@ pub type Extrinsic = generic::Extrinsic; /// Extrinsic type that is signed. pub type BareExtrinsic = generic::Extrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = executive::Executive; +pub type Executive = executive::Executive< + Concrete, + Block, + Staking, + Staking, + (((((), Council), Democracy), Staking), Session), +>; impl_outer_config! { pub struct GenesisConfig for Concrete { diff --git a/polkadot/api/src/full.rs b/polkadot/api/src/full.rs index 7c39bc3c6c42d..069456c8e6c6d 100644 --- a/polkadot/api/src/full.rs +++ b/polkadot/api/src/full.rs @@ -20,45 +20,55 @@ use client::backend::{Backend, LocalBackend}; use client::block_builder::BlockBuilder as ClientBlockBuilder; use client::{Client, LocalCallExecutor}; use polkadot_executor::Executor as LocalDispatch; -use substrate_executor::NativeExecutor; use state_machine; +use substrate_executor::NativeExecutor; +use primitives::parachain::{CandidateReceipt, DutyRoster, Id as ParaId}; +use primitives::{ + AccountId, Block, BlockId, Hash, Header, Index, SessionKey, Timestamp, UncheckedExtrinsic, +}; use runtime::Address; use runtime_primitives::traits::AuxLookup; -use primitives::{AccountId, Block, Header, BlockId, Hash, Index, SessionKey, Timestamp, UncheckedExtrinsic}; -use primitives::parachain::{CandidateReceipt, DutyRoster, Id as ParaId}; -use {BlockBuilder, PolkadotApi, LocalPolkadotApi, ErrorKind, Error, Result}; +use {BlockBuilder, Error, ErrorKind, LocalPolkadotApi, PolkadotApi, Result}; // set up the necessary scaffolding to execute a set of calls to the runtime. // this creates a new block on top of the given ID and initialises it. macro_rules! with_runtime { - ($client: ident, $at: expr, $exec: expr) => {{ + ($client:ident, $at:expr, $exec:expr) => {{ let parent = $at; let header = Header { - parent_hash: $client.block_hash_from_id(&parent)? + parent_hash: $client + .block_hash_from_id(&parent)? .ok_or_else(|| ErrorKind::UnknownBlock(format!("{:?}", parent)))?, - number: $client.block_number_from_id(&parent)? + number: $client + .block_number_from_id(&parent)? .ok_or_else(|| ErrorKind::UnknownBlock(format!("{:?}", parent)))? 
+ 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), - }; - - $client.state_at(&parent).map_err(Error::from).and_then(|state| { - let mut changes = Default::default(); - let mut ext = state_machine::Ext::new(&mut changes, &state); - - ::substrate_executor::with_native_environment(&mut ext, || { - ::runtime::Executive::initialise_block(&header); - ($exec)() - }).map_err(Into::into) - }) - }} + }; + + $client + .state_at(&parent) + .map_err(Error::from) + .and_then(|state| { + let mut changes = Default::default(); + let mut ext = state_machine::Ext::new(&mut changes, &state); + + ::substrate_executor::with_native_environment(&mut ext, || { + ::runtime::Executive::initialise_block(&header); + ($exec)() + }).map_err(Into::into) + }) + }}; } -impl> BlockBuilder for ClientBlockBuilder>, Block> - where ::client::error::Error: From<<>::State as state_machine::backend::Backend>::Error> +impl> BlockBuilder + for ClientBlockBuilder>, Block> +where + ::client::error::Error: + From<<>::State as state_machine::backend::Backend>::Error>, { fn push_extrinsic(&mut self, extrinsic: UncheckedExtrinsic) -> Result<()> { self.push(extrinsic).map_err(Into::into) @@ -70,10 +80,14 @@ impl> BlockBuilder for ClientBlockBuilder> PolkadotApi for Client>, Block> - where ::client::error::Error: From<<>::State as state_machine::backend::Backend>::Error> +impl> PolkadotApi + for Client>, Block> +where + ::client::error::Error: + From<<>::State as state_machine::backend::Backend>::Error>, { - type BlockBuilder = ClientBlockBuilder>, Block>; + type BlockBuilder = + ClientBlockBuilder>, Block>; fn session_keys(&self, at: &BlockId) -> Result> { with_runtime!(self, at, ::runtime::Consensus::authorities) @@ -96,9 +110,9 @@ impl> PolkadotApi for Client Result { - use substrate_executor::error::ErrorKind as ExecErrorKind; use codec::Slicable; use runtime::Block as RuntimeBlock; + use substrate_executor::error::ErrorKind as ExecErrorKind; let encoded = block.encode(); let runtime_block = match RuntimeBlock::decode(&mut &encoded[..]) { @@ -106,13 +120,15 @@ impl> PolkadotApi for Client return Ok(false), }; - let res = with_runtime!(self, at, || ::runtime::Executive::execute_block(runtime_block)); + let res = with_runtime!(self, at, || ::runtime::Executive::execute_block( + runtime_block + )); match res { Ok(()) => Ok(true), Err(err) => match err.kind() { &ErrorKind::Executor(ExecErrorKind::Runtime) => Ok(false), - _ => Err(err) - } + _ => Err(err), + }, } } @@ -121,7 +137,9 @@ impl> PolkadotApi for Client Result> { - with_runtime!(self, at, || <::runtime::Staking as AuxLookup>::lookup(address).ok()) + with_runtime!(self, at, || <::runtime::Staking as AuxLookup>::lookup( + address + ).ok()) } fn active_parachains(&self, at: &BlockId) -> Result> { @@ -129,14 +147,23 @@ impl> PolkadotApi for Client Result>> { - with_runtime!(self, at, || ::runtime::Parachains::parachain_code(parachain)) + with_runtime!(self, at, || ::runtime::Parachains::parachain_code( + parachain + )) } fn parachain_head(&self, at: &BlockId, parachain: ParaId) -> Result>> { - with_runtime!(self, at, || ::runtime::Parachains::parachain_head(parachain)) + with_runtime!(self, at, || ::runtime::Parachains::parachain_head( + parachain + )) } - fn build_block(&self, at: &BlockId, timestamp: Timestamp, new_heads: Vec) -> Result { + fn build_block( + &self, + at: &BlockId, + timestamp: Timestamp, + new_heads: Vec, + ) -> Result { let mut block_builder = self.new_block_at(at)?; for inherent in self.inherent_extrinsics(at, 
timestamp, new_heads)? { block_builder.push(inherent)?; @@ -145,7 +172,12 @@ impl> PolkadotApi for Client) -> Result> { + fn inherent_extrinsics( + &self, + at: &BlockId, + timestamp: Timestamp, + new_heads: Vec, + ) -> Result> { use codec::Slicable; with_runtime!(self, at, || { @@ -159,18 +191,21 @@ impl> PolkadotApi for Client> LocalPolkadotApi for Client>, Block> - where ::client::error::Error: From<<>::State as state_machine::backend::Backend>::Error> +impl> LocalPolkadotApi + for Client>, Block> +where + ::client::error::Error: + From<<>::State as state_machine::backend::Backend>::Error>, {} #[cfg(test)] mod tests { use super::*; - use keyring::Keyring; - use client::LocalCallExecutor; use client::in_mem::Backend as InMemory; + use client::LocalCallExecutor; + use keyring::Keyring; + use runtime::{ConsensusConfig, GenesisConfig, SessionConfig}; use substrate_executor::NativeExecutionDispatch; - use runtime::{GenesisConfig, ConsensusConfig, SessionConfig}; fn validators() -> Vec { vec![ @@ -186,7 +221,11 @@ mod tests { ] } - fn client() -> Client, LocalCallExecutor, NativeExecutor>, Block> { + fn client() -> Client< + InMemory, + LocalCallExecutor, NativeExecutor>, + Block, + > { let genesis_config = GenesisConfig { consensus: Some(ConsensusConfig { code: LocalDispatch::native_equivalent().to_vec(), @@ -233,7 +272,9 @@ mod tests { let client = client(); let id = BlockId::number(0); - let inherent = client.inherent_extrinsics(&id, 1_000_000, Vec::new()).unwrap(); + let inherent = client + .inherent_extrinsics(&id, 1_000_000, Vec::new()) + .unwrap(); let mut block_builder = client.new_block_at(&id).unwrap(); for extrinsic in inherent { diff --git a/polkadot/api/src/lib.rs b/polkadot/api/src/lib.rs index 27bea18a0d0a3..8547e55c82eb6 100644 --- a/polkadot/api/src/lib.rs +++ b/polkadot/api/src/lib.rs @@ -20,12 +20,12 @@ extern crate polkadot_executor; extern crate polkadot_primitives as primitives; extern crate polkadot_runtime as runtime; -extern crate substrate_codec as codec; -extern crate substrate_runtime_io as runtime_io; extern crate substrate_client as client; +extern crate substrate_codec as codec; extern crate substrate_executor as substrate_executor; -extern crate substrate_runtime_executive; extern crate substrate_primitives; +extern crate substrate_runtime_executive; +extern crate substrate_runtime_io as runtime_io; extern crate substrate_runtime_primitives as runtime_primitives; extern crate substrate_state_machine as state_machine; @@ -38,10 +38,11 @@ extern crate substrate_keyring as keyring; pub mod full; pub mod light; -use primitives::{AccountId, Block, BlockId, Hash, Index, SessionKey, Timestamp, - UncheckedExtrinsic}; -use runtime::Address; use primitives::parachain::{CandidateReceipt, DutyRoster, Id as ParaId}; +use primitives::{ + AccountId, Block, BlockId, Hash, Index, SessionKey, Timestamp, UncheckedExtrinsic, +}; +use runtime::Address; error_chain! { errors { @@ -71,7 +72,8 @@ error_chain! { impl From for Error { fn from(e: client::error::Error) -> Error { match e { - client::error::Error(client::error::ErrorKind::UnknownBlock(b), _) => Error::from_kind(ErrorKind::UnknownBlock(b)), + client::error::Error(client::error::ErrorKind::UnknownBlock(b), _) => + Error::from_kind(ErrorKind::UnknownBlock(b)), other => Error::from_kind(ErrorKind::Other(Box::new(other) as Box<_>)), } } @@ -117,10 +119,12 @@ pub trait PolkadotApi { /// Get the active parachains at a block. 
fn active_parachains(&self, at: &BlockId) -> Result>; - /// Get the validation code of a parachain at a block. If the parachain is active, this will always return `Some`. + /// Get the validation code of a parachain at a block. If the parachain is active, this will + /// always return `Some`. fn parachain_code(&self, at: &BlockId, parachain: ParaId) -> Result>>; - /// Get the chain head of a parachain. If the parachain is active, this will always return `Some`. + /// Get the chain head of a parachain. If the parachain is active, this will always return + /// `Some`. fn parachain_head(&self, at: &BlockId, parachain: ParaId) -> Result>>; /// Evaluate a block. Returns true if the block is good, false if it is known to be bad, @@ -128,15 +132,26 @@ pub trait PolkadotApi { fn evaluate_block(&self, at: &BlockId, block: Block) -> Result; /// Build a block on top of the given, with inherent extrinsics pre-pushed. - fn build_block(&self, at: &BlockId, timestamp: Timestamp, new_heads: Vec) -> Result; + fn build_block( + &self, + at: &BlockId, + timestamp: Timestamp, + new_heads: Vec, + ) -> Result; /// Attempt to produce the (encoded) inherent extrinsics for a block being built upon the given. /// This may vary by runtime and will fail if a runtime doesn't follow the same API. - fn inherent_extrinsics(&self, at: &BlockId, timestamp: Timestamp, new_heads: Vec) -> Result>; + fn inherent_extrinsics( + &self, + at: &BlockId, + timestamp: Timestamp, + new_heads: Vec, + ) -> Result>; } /// Mark for all Polkadot API implementations, that are making use of state data, stored locally. pub trait LocalPolkadotApi: PolkadotApi {} -/// Mark for all Polkadot API implementations, that are fetching required state data from remote nodes. +/// Mark for all Polkadot API implementations, that are fetching required state data from remote +/// nodes. pub trait RemotePolkadotApi: PolkadotApi {} diff --git a/polkadot/api/src/light.rs b/polkadot/api/src/light.rs index e20c1a245ec82..16219623be38f 100644 --- a/polkadot/api/src/light.rs +++ b/polkadot/api/src/light.rs @@ -16,15 +16,17 @@ //! Strongly typed API for light Polkadot client. -use std::sync::Arc; use client::backend::{Backend, RemoteBackend}; -use client::{Client, CallExecutor}; +use client::{CallExecutor, Client}; use codec::Slicable; -use state_machine; -use primitives::{AccountId, Block, BlockId, Hash, Index, SessionKey, Timestamp, UncheckedExtrinsic}; -use runtime::Address; use primitives::parachain::{CandidateReceipt, DutyRoster, Id as ParaId}; -use {PolkadotApi, BlockBuilder, RemotePolkadotApi, Result, ErrorKind}; +use primitives::{ + AccountId, Block, BlockId, Hash, Index, SessionKey, Timestamp, UncheckedExtrinsic, +}; +use runtime::Address; +use state_machine; +use std::sync::Arc; +use {BlockBuilder, ErrorKind, PolkadotApi, RemotePolkadotApi, Result}; /// Light block builder. TODO: make this work (efficiently) #[derive(Clone, Copy)] @@ -41,17 +43,25 @@ impl BlockBuilder for LightBlockBuilder { } /// Remote polkadot API implementation. 
-pub struct RemotePolkadotApiWrapper, E: CallExecutor>(pub Arc>); +pub struct RemotePolkadotApiWrapper, E: CallExecutor>( + pub Arc>, +); impl, E: CallExecutor> PolkadotApi for RemotePolkadotApiWrapper - where ::client::error::Error: From<<>::State as state_machine::backend::Backend>::Error> +where + ::client::error::Error: + From<<>::State as state_machine::backend::Backend>::Error>, { type BlockBuilder = LightBlockBuilder; fn session_keys(&self, at: &BlockId) -> Result> { - self.0.executor().call(at, "authorities", &[]) - .and_then(|r| Vec::::decode(&mut &r.return_data[..]) - .ok_or("error decoding session keys".into())) + self.0 + .executor() + .call(at, "authorities", &[]) + .and_then(|r| { + Vec::::decode(&mut &r.return_data[..]) + .ok_or("error decoding session keys".into()) + }) .map_err(Into::into) } @@ -95,15 +105,28 @@ impl, E: CallExecutor> PolkadotApi for RemotePolkadotAp Err(ErrorKind::UnknownRuntime.into()) } - fn build_block(&self, _at: &BlockId, _timestamp: Timestamp, _new_heads: Vec) -> Result { + fn build_block( + &self, + _at: &BlockId, + _timestamp: Timestamp, + _new_heads: Vec, + ) -> Result { Err(ErrorKind::UnknownRuntime.into()) } - fn inherent_extrinsics(&self, _at: &BlockId, _timestamp: Timestamp, _new_heads: Vec) -> Result>> { + fn inherent_extrinsics( + &self, + _at: &BlockId, + _timestamp: Timestamp, + _new_heads: Vec, + ) -> Result>> { Err(ErrorKind::UnknownRuntime.into()) } } -impl, E: CallExecutor> RemotePolkadotApi for RemotePolkadotApiWrapper - where ::client::error::Error: From<<>::State as state_machine::backend::Backend>::Error> +impl, E: CallExecutor> RemotePolkadotApi + for RemotePolkadotApiWrapper +where + ::client::error::Error: + From<<>::State as state_machine::backend::Backend>::Error>, {} diff --git a/polkadot/cli/src/error.rs b/polkadot/cli/src/error.rs index d7c690276ca8d..b458d9fc075a6 100644 --- a/polkadot/cli/src/error.rs +++ b/polkadot/cli/src/error.rs @@ -26,7 +26,7 @@ error_chain! { } links { Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"]; - } + } errors { /// Input error. Input(m: String) { diff --git a/polkadot/cli/src/informant.rs b/polkadot/cli/src/informant.rs index 26915da32e8e2..728d6df326798 100644 --- a/polkadot/cli/src/informant.rs +++ b/polkadot/cli/src/informant.rs @@ -16,49 +16,55 @@ //! Console informant. Prints sync progress and block events. Runs on the calling thread. 
-use std::time::{Duration, Instant}; +use client::{self, BlockchainEvents}; use futures::stream::Stream; -use service::{Service, Components}; -use tokio_core::reactor; -use network::{SyncState, SyncProvider}; +use network::{SyncProvider, SyncState}; use polkadot_primitives::Block; +use service::{Components, Service}; use state_machine; -use client::{self, BlockchainEvents}; +use std::time::{Duration, Instant}; +use tokio_core::reactor; const TIMER_INTERVAL_MS: u64 = 5000; /// Spawn informant on the event loop pub fn start(service: &Service, handle: reactor::Handle) - where - C: Components, - client::error::Error: From<<<::Backend as client::backend::Backend>::State as state_machine::Backend>::Error>, +where + C: Components, + client::error::Error: + From<<<::Backend as client::backend::Backend>::State as state_machine::Backend>::Error>, { - let interval = reactor::Interval::new_at(Instant::now(), Duration::from_millis(TIMER_INTERVAL_MS), &handle) - .expect("Error creating informant timer"); + let interval = reactor::Interval::new_at( + Instant::now(), + Duration::from_millis(TIMER_INTERVAL_MS), + &handle, + ).expect("Error creating informant timer"); let network = service.network(); let client = service.client(); let txpool = service.transaction_pool(); - let display_notifications = interval.map_err(|e| debug!("Timer error: {:?}", e)).for_each(move |_| { - let sync_status = network.status(); + let display_notifications = interval + .map_err(|e| debug!("Timer error: {:?}", e)) + .for_each(move |_| { + let sync_status = network.status(); - if let Ok(best_block) = client.best_block_header() { - let hash = best_block.hash(); - let num_peers = sync_status.num_peers; - let status = match (sync_status.sync.state, sync_status.sync.best_seen_block) { - (SyncState::Idle, _) => "Idle".into(), - (SyncState::Downloading, None) => "Syncing".into(), - (SyncState::Downloading, Some(n)) => format!("Syncing, target=#{}", n), - }; - let txpool_status = txpool.light_status(); - info!(target: "polkadot", "{} ({} peers), best: #{} ({})", status, sync_status.num_peers, best_block.number, hash); - telemetry!("system.interval"; "status" => status, "peers" => num_peers, "height" => best_block.number, "best" => ?hash, "txcount" => txpool_status.transaction_count); - } else { - warn!("Error getting best block information"); - } - Ok(()) - }); + if let Ok(best_block) = client.best_block_header() { + let hash = best_block.hash(); + let num_peers = sync_status.num_peers; + let status = match (sync_status.sync.state, sync_status.sync.best_seen_block) { + (SyncState::Idle, _) => "Idle".into(), + (SyncState::Downloading, None) => "Syncing".into(), + (SyncState::Downloading, Some(n)) => format!("Syncing, target=#{}", n), + }; + let txpool_status = txpool.light_status(); + info!(target: "polkadot", "{} ({} peers), best: #{} ({})", status, sync_status.num_peers, best_block.number, hash); + telemetry!("system.interval"; "status" => status, "peers" => num_peers, "height" => best_block.number, "best" => ?hash, "txcount" => txpool_status.transaction_count); + } else { + warn!("Error getting best block information"); + } + Ok(()) + }); let client = service.client(); let display_block_import = client.import_notification_stream().for_each(|n| { @@ -77,4 +83,3 @@ pub fn start(service: &Service, handle: reactor::Handle) handle.spawn(display_block_import); handle.spawn(display_txpool_import); } - diff --git a/polkadot/cli/src/lib.rs b/polkadot/cli/src/lib.rs index f96690f79eaa6..f8fbe2a99871a 100644 --- a/polkadot/cli/src/lib.rs +++ 
b/polkadot/cli/src/lib.rs @@ -18,35 +18,35 @@ #![warn(missing_docs)] +extern crate ansi_term; extern crate app_dirs; -extern crate env_logger; extern crate atty; -extern crate ansi_term; -extern crate regex; -extern crate time; -extern crate futures; -extern crate tokio_core; extern crate ctrlc; -extern crate fdlimit; extern crate ed25519; -extern crate triehash; +extern crate env_logger; +extern crate fdlimit; +extern crate futures; extern crate parking_lot; +extern crate regex; extern crate serde; extern crate serde_json; +extern crate time; +extern crate tokio_core; +extern crate triehash; +extern crate polkadot_primitives; +extern crate polkadot_runtime; +extern crate polkadot_service as service; extern crate substrate_client as client; -extern crate substrate_network as network; extern crate substrate_codec as codec; +extern crate substrate_network as network; extern crate substrate_primitives; extern crate substrate_rpc; extern crate substrate_rpc_servers as rpc; extern crate substrate_runtime_primitives as runtime_primitives; extern crate substrate_state_machine as state_machine; -extern crate polkadot_primitives; -extern crate polkadot_runtime; -extern crate polkadot_service as service; #[macro_use] -extern crate slog; // needed until we can reexport `slog_info` from `substrate_telemetry` +extern crate slog; // needed until we can reexport `slog_info` from `substrate_telemetry` #[macro_use] extern crate substrate_telemetry; extern crate polkadot_transaction_pool as txpool; @@ -66,20 +66,20 @@ mod chain_spec; pub use chain_spec::ChainSpec; -use std::io::{self, Write, Read, stdin, stdout}; +use client::BlockOrigin; +use codec::Slicable; +use polkadot_primitives::{Block, BlockId}; +use runtime_primitives::generic::SignedBlock; use std::fs::File; +use std::io::{self, stdin, stdout, Read, Write}; use std::net::SocketAddr; use std::path::{Path, PathBuf}; use substrate_telemetry::{init_telemetry, TelemetryConfig}; -use polkadot_primitives::{Block, BlockId}; -use codec::Slicable; -use client::BlockOrigin; -use runtime_primitives::generic::SignedBlock; use futures::sync::mpsc; -use futures::{Sink, Future, Stream}; -use tokio_core::reactor; +use futures::{Future, Sink, Stream}; use service::PruningMode; +use tokio_core::reactor; const DEFAULT_TELEMETRY_URL: &str = "ws://telemetry.polkadot.io:1024"; @@ -103,16 +103,24 @@ impl substrate_rpc::system::SystemApi for SystemConfiguration { } fn load_spec(matches: &clap::ArgMatches) -> Result { - let chain_spec = matches.value_of("chain") + let chain_spec = matches + .value_of("chain") .map(ChainSpec::from) - .unwrap_or_else(|| if matches.is_present("dev") { ChainSpec::Development } else { ChainSpec::PoC2Testnet }); + .unwrap_or_else(|| { + if matches.is_present("dev") { + ChainSpec::Development + } else { + ChainSpec::PoC2Testnet + } + }); let spec = chain_spec.load()?; info!("Chain specification: {}", spec.name()); Ok(spec) } fn base_path(matches: &clap::ArgMatches) -> PathBuf { - matches.value_of("base-path") + matches + .value_of("base-path") .map(|x| Path::new(x).to_owned()) .unwrap_or_else(default_base_path) } @@ -125,18 +133,22 @@ fn base_path(matches: &clap::ArgMatches) -> PathBuf { /// 9556-9591 Unassigned /// 9803-9874 Unassigned /// 9926-9949 Unassigned -pub fn run(args: I) -> error::Result<()> where +pub fn run(args: I) -> error::Result<()> +where I: IntoIterator, T: Into + Clone, { let yaml = load_yaml!("./cli.yml"); - let matches = match clap::App::from_yaml(yaml).version(&(crate_version!().to_owned() + 
"\n")[..]).get_matches_from_safe(args) { + let matches = match clap::App::from_yaml(yaml) + .version(&(crate_version!().to_owned() + "\n")[..]) + .get_matches_from_safe(args) + { Ok(m) => m, Err(ref e) if e.kind == clap::ErrorKind::VersionDisplayed => return Ok(()), Err(ref e) if e.kind == clap::ErrorKind::HelpDisplayed => { print!("{}", e); return Ok(()) - } + }, Err(e) => e.exit(), }; @@ -150,15 +162,15 @@ pub fn run(args: I) -> error::Result<()> where info!(" by Parity Technologies, 2017, 2018"); if let Some(matches) = matches.subcommand_matches("build-spec") { - return build_spec(matches); + return build_spec(matches) } if let Some(matches) = matches.subcommand_matches("export-blocks") { - return export_blocks(matches); + return export_blocks(matches) } if let Some(matches) = matches.subcommand_matches("import-blocks") { - return import_blocks(matches); + return import_blocks(matches) } let spec = load_spec(&matches)?; @@ -170,7 +182,8 @@ pub fn run(args: I) -> error::Result<()> where } let base_path = base_path(&matches); - config.keystore_path = matches.value_of("keystore") + config.keystore_path = matches + .value_of("keystore") .map(|x| Path::new(x).to_owned()) .unwrap_or_else(|| keystore_path(&base_path)) .to_string_lossy() @@ -181,30 +194,35 @@ pub fn run(args: I) -> error::Result<()> where config.pruning = match matches.value_of("pruning") { Some("archive") => PruningMode::ArchiveAll, None => PruningMode::keep_blocks(256), - Some(s) => PruningMode::keep_blocks(s.parse() - .map_err(|_| error::ErrorKind::Input("Invalid pruning mode specified".to_owned()))?), + Some(s) => PruningMode::keep_blocks( + s.parse() + .map_err(|_| error::ErrorKind::Input("Invalid pruning mode specified".to_owned()))?, + ), }; - let role = - if matches.is_present("collator") { - info!("Starting collator"); - service::Role::COLLATOR - } else if matches.is_present("light") { - info!("Starting (light)"); - service::Role::LIGHT - } else if matches.is_present("validator") || matches.is_present("dev") { - info!("Starting validator"); - service::Role::VALIDATOR - } else { - info!("Starting (heavy)"); - service::Role::FULL - }; + let role = if matches.is_present("collator") { + info!("Starting collator"); + service::Role::COLLATOR + } else if matches.is_present("light") { + info!("Starting (light)"); + service::Role::LIGHT + } else if matches.is_present("validator") || matches.is_present("dev") { + info!("Starting validator"); + service::Role::VALIDATOR + } else { + info!("Starting (heavy)"); + service::Role::FULL + }; config.roles = role; { - config.network.boot_nodes.extend(matches - .values_of("bootnodes") - .map_or(Default::default(), |v| v.map(|n| n.to_owned()).collect::>())); + config.network.boot_nodes.extend( + matches + .values_of("bootnodes") + .map_or(Default::default(), |v| { + v.map(|n| n.to_owned()).collect::>() + }), + ); config.network.config_path = Some(network_path(&base_path).to_string_lossy().into()); config.network.net_config_path = config.network.config_path.clone(); @@ -222,7 +240,11 @@ pub fn run(args: I) -> error::Result<()> where }; } - config.keys = matches.values_of("key").unwrap_or_default().map(str::to_owned).collect(); + config.keys = matches + .values_of("key") + .unwrap_or_default() + .map(str::to_owned) + .collect(); if matches.is_present("dev") { config.keys.push("Alice".into()); } @@ -235,7 +257,10 @@ pub fn run(args: I) -> error::Result<()> where let name = config.name.clone(); let chain_name = config.chain_spec.name().to_owned(); Some(init_telemetry(TelemetryConfig { - url: 
matches.value_of("telemetry-url").unwrap_or(DEFAULT_TELEMETRY_URL).into(), + url: matches + .value_of("telemetry-url") + .unwrap_or(DEFAULT_TELEMETRY_URL) + .into(), on_connect: Box::new(move || { telemetry!("system.connected"; "name" => name.clone(), @@ -274,7 +299,10 @@ fn export_blocks(matches: &clap::ArgMatches) -> error::Result<()> { let client = service::new_client(config)?; let (exit_send, exit) = std::sync::mpsc::channel(); ctrlc::CtrlC::set_handler(move || { - exit_send.clone().send(()).expect("Error sending exit notification"); + exit_send + .clone() + .send(()) + .expect("Error sending exit notification"); }); info!("Exporting blocks"); let mut block: u32 = match matches.value_of("from") { @@ -288,7 +316,7 @@ fn export_blocks(matches: &clap::ArgMatches) -> error::Result<()> { }; if last < block { - return Err("Invalid block range specified".into()); + return Err("Invalid block range specified".into()) } let json = matches.is_present("json"); @@ -304,15 +332,14 @@ fn export_blocks(matches: &clap::ArgMatches) -> error::Result<()> { loop { if exit.try_recv().is_ok() { - break; + break } match client.block(&BlockId::number(block as u64))? { - Some(block) => { - if json { - serde_json::to_writer(&mut *file, &block).map_err(|e| format!("Eror writing JSON: {}", e))?; - } else { - file.write(&block.encode())?; - } + Some(block) => if json { + serde_json::to_writer(&mut *file, &block) + .map_err(|e| format!("Eror writing JSON: {}", e))?; + } else { + file.write(&block.encode())?; }, None => break, } @@ -320,7 +347,7 @@ fn export_blocks(matches: &clap::ArgMatches) -> error::Result<()> { info!("#{}", block); } if block == last { - break; + break } block += 1; } @@ -335,7 +362,10 @@ fn import_blocks(matches: &clap::ArgMatches) -> error::Result<()> { let client = service::new_client(config)?; let (exit_send, exit) = std::sync::mpsc::channel(); ctrlc::CtrlC::set_handler(move || { - exit_send.clone().send(()).expect("Error sending exit notification"); + exit_send + .clone() + .send(()) + .expect("Error sending exit notification"); }); let mut file: Box = match matches.value_of("INPUT") { @@ -346,26 +376,31 @@ fn import_blocks(matches: &clap::ArgMatches) -> error::Result<()> { info!("Importing blocks"); let count: u32 = Slicable::decode(&mut file).ok_or("Error reading file")?; let mut block = 0; - for _ in 0 .. count { + for _ in 0..count { if exit.try_recv().is_ok() { - break; + break } match SignedBlock::decode(&mut file) { Some(block) => { - let header = client.check_justification(block.block.header, block.justification.into())?; + let header = + client.check_justification(block.block.header, block.justification.into())?; client.import_block(BlockOrigin::File, header, Some(block.block.extrinsics))?; }, None => { warn!("Error reading block data."); - break; - } + break + }, } block += 1; if block % 10000 == 0 { info!("#{}", block); } } - info!("Imported {} blocks. Best: #{}", block, client.info()?.chain.best_number); + info!( + "Imported {} blocks. Best: #{}", + block, + client.info()?.chain.best_number + ); Ok(()) } @@ -379,7 +414,11 @@ fn run_until_exit(mut core: reactor::Core, service: service::Service, matc // can't use signal directly here because CtrlC takes only `Fn`. 
let (exit_send, exit) = mpsc::channel(1); ctrlc::CtrlC::set_handler(move || { - exit_send.clone().send(()).wait().expect("Error sending exit notification"); + exit_send + .clone() + .send(()) + .wait() + .expect("Error sending exit notification"); }); exit @@ -393,13 +432,9 @@ fn run_until_exit(mut core: reactor::Core, service: service::Service, matc let handler = || { let chain = rpc::apis::chain::Chain::new(service.client(), core.remote()); - let author = rpc::apis::author::Author::new(service.client(), service.transaction_pool()); - rpc::rpc_handler::( - service.client(), - chain, - author, - sys_conf.clone(), - ) + let author = + rpc::apis::author::Author::new(service.client(), service.transaction_pool()); + rpc::rpc_handler::(service.client(), chain, author, sys_conf.clone()) }; ( start_server(http_address, |address| rpc::start_http(address, handler())), @@ -407,29 +442,39 @@ fn run_until_exit(mut core: reactor::Core, service: service::Service, matc ) }; - core.run(exit.into_future()).expect("Error running informant event loop"); + core.run(exit.into_future()) + .expect("Error running informant event loop"); Ok(()) } -fn start_server(mut address: SocketAddr, start: F) -> Result where +fn start_server(mut address: SocketAddr, start: F) -> Result +where F: Fn(&SocketAddr) -> Result, { - start(&address) - .or_else(|e| match e.kind() { - io::ErrorKind::AddrInUse | - io::ErrorKind::PermissionDenied => { - warn!("Unable to bind server to {}. Trying random port.", address); - address.set_port(0); - start(&address) - }, - _ => Err(e), - }) + start(&address).or_else(|e| match e.kind() { + io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => { + warn!("Unable to bind server to {}. Trying random port.", address); + address.set_port(0); + start(&address) + }, + _ => Err(e), + }) } -fn parse_address(default: &str, port_param: &str, matches: &clap::ArgMatches) -> Result { - let mut address: SocketAddr = default.parse().ok().ok_or(format!("Invalid address specified for --{}.", port_param))?; +fn parse_address( + default: &str, + port_param: &str, + matches: &clap::ArgMatches, +) -> Result { + let mut address: SocketAddr = default + .parse() + .ok() + .ok_or(format!("Invalid address specified for --{}.", port_param))?; if let Some(port) = matches.value_of(port_param) { - let port: u16 = port.parse().ok().ok_or(format!("Invalid port for --{} specified.", port_param))?; + let port: u16 = port + .parse() + .ok() + .ok_or(format!("Invalid port for --{} specified.", port_param))?; address.set_port(port); } @@ -455,17 +500,15 @@ fn network_path(base_path: &Path) -> PathBuf { } fn default_base_path() -> PathBuf { - use app_dirs::{AppInfo, AppDataType}; + use app_dirs::{AppDataType, AppInfo}; let app_info = AppInfo { name: "Polkadot", author: "Parity Technologies", }; - app_dirs::get_app_root( - AppDataType::UserData, - &app_info, - ).expect("app directories exist on all supported platforms; qed") + app_dirs::get_app_root(AppDataType::UserData, &app_info) + .expect("app directories exist on all supported platforms; qed") } fn init_logger(pattern: &str) { @@ -487,13 +530,29 @@ fn init_logger(pattern: &str) { let enable_color = isatty; let format = move |record: &log::LogRecord| { - let timestamp = time::strftime("%Y-%m-%d %H:%M:%S", &time::now()).expect("Error formatting log timestamp"); + let timestamp = time::strftime("%Y-%m-%d %H:%M:%S", &time::now()) + .expect("Error formatting log timestamp"); let mut output = if log::max_log_level() <= log::LogLevelFilter::Info { - format!("{} {}", 
Colour::Black.bold().paint(timestamp), record.args()) + format!( + "{} {}", + Colour::Black.bold().paint(timestamp), + record.args() + ) } else { - let name = ::std::thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x))); - format!("{} {} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args()) + let name = ::std::thread::current() + .name() + .map_or_else(Default::default, |x| { + format!("{}", Colour::Blue.bold().paint(x)) + }); + format!( + "{} {} {} {} {}", + Colour::Black.bold().paint(timestamp), + name, + record.level(), + record.target(), + record.args() + ) }; if !enable_color { @@ -513,7 +572,8 @@ fn init_logger(pattern: &str) { fn kill_color(s: &str) -> String { lazy_static! { - static ref RE: regex::Regex = regex::Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); + static ref RE: regex::Regex = + regex::Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); } RE.replace_all(s, "").to_string() } diff --git a/polkadot/collator/src/lib.rs b/polkadot/collator/src/lib.rs index 55eca734d269b..bfc7581731bee 100644 --- a/polkadot/collator/src/lib.rs +++ b/polkadot/collator/src/lib.rs @@ -45,25 +45,31 @@ //! to be performed, as the collation logic itself. extern crate futures; +extern crate polkadot_primitives; +extern crate polkadot_runtime; extern crate substrate_codec as codec; extern crate substrate_primitives as primitives; -extern crate polkadot_runtime; -extern crate polkadot_primitives; -use std::collections::{BTreeSet, BTreeMap}; +use std::collections::{BTreeMap, BTreeSet}; -use futures::{stream, Stream, Future, IntoFuture}; -use polkadot_primitives::parachain::{self, CandidateSignature, ConsolidatedIngress, Message, Id as ParaId}; +use futures::{stream, Future, IntoFuture, Stream}; +use polkadot_primitives::parachain::{ + self, CandidateSignature, ConsolidatedIngress, Id as ParaId, Message, +}; /// Parachain context needed for collation. /// /// This can be implemented through an externally attached service or a stub. pub trait ParachainContext { /// Produce a candidate, given the latest ingress queue information. - fn produce_candidate>( + fn produce_candidate>( &self, ingress: I, - ) -> (parachain::BlockData, polkadot_primitives::AccountId, CandidateSignature); + ) -> ( + parachain::BlockData, + polkadot_primitives::AccountId, + CandidateSignature, + ); } /// Relay chain context needed to collate. @@ -74,7 +80,7 @@ pub trait RelayChainContext { /// Future that resolves to the un-routed egress queues of a parachain. /// The first item is the oldest. - type FutureEgress: IntoFuture>, Error=Self::Error>; + type FutureEgress: IntoFuture>, Error = Self::Error>; /// Provide a set of all parachains meant to be routed to at a block. fn routing_parachains(&self) -> BTreeSet; @@ -84,12 +90,13 @@ pub trait RelayChainContext { } /// Collate the necessary ingress queue using the given context. -pub fn collate_ingress<'a, R>(relay_context: R) - -> Box + 'a> - where - R: RelayChainContext, - R::Error: 'a, - R::FutureEgress: 'a, +pub fn collate_ingress<'a, R>( + relay_context: R, +) -> Box + 'a> +where + R: RelayChainContext, + R::Error: 'a, + R::FutureEgress: 'a, { let mut egress_fetch = Vec::new(); @@ -106,32 +113,40 @@ pub fn collate_ingress<'a, R>(relay_context: R) // and then by the parachain ID. // // then transform that into the consolidated egress queue. 
- Box::new(stream::futures_unordered(egress_fetch) - .fold(BTreeMap::new(), |mut map, (routing_id, egresses)| { - for (depth, egress) in egresses.into_iter().rev().enumerate() { - let depth = -(depth as i64); - map.insert((depth, routing_id), egress); - } - - Ok(map) - }) - .map(|ordered| ordered.into_iter().map(|((_, id), egress)| (id, egress))) - .map(|i| i.collect::>()) - .map(ConsolidatedIngress)) + Box::new( + stream::futures_unordered(egress_fetch) + .fold(BTreeMap::new(), |mut map, (routing_id, egresses)| { + for (depth, egress) in egresses.into_iter().rev().enumerate() { + let depth = -(depth as i64); + map.insert((depth, routing_id), egress); + } + + Ok(map) + }) + .map(|ordered| ordered.into_iter().map(|((_, id), egress)| (id, egress))) + .map(|i| i.collect::>()) + .map(ConsolidatedIngress), + ) } /// Produce a candidate for the parachain. -pub fn collate<'a, R, P>(local_id: ParaId, relay_context: R, para_context: P) - -> Box + 'a> - where - R: RelayChainContext, - R::Error: 'a, - R::FutureEgress: 'a, - P: ParachainContext + 'a, +pub fn collate<'a, R, P>( + local_id: ParaId, + relay_context: R, + para_context: P, +) -> Box + 'a> +where + R: RelayChainContext, + R::Error: 'a, + R::FutureEgress: 'a, + P: ParachainContext + 'a, { Box::new(collate_ingress(relay_context).map(move |ingress| { let (block_data, _, signature) = para_context.produce_candidate( - ingress.0.iter().flat_map(|&(id, ref msgs)| msgs.iter().cloned().map(move |msg| (id, msg))) + ingress + .0 + .iter() + .flat_map(|&(id, ref msgs)| msgs.iter().cloned().map(move |msg| (id, msg))), ); parachain::Candidate { @@ -147,10 +162,10 @@ pub fn collate<'a, R, P>(local_id: ParaId, relay_context: R, para_context: P) mod tests { use super::*; - use std::collections::{HashMap, BTreeSet}; + use std::collections::{BTreeSet, HashMap}; use futures::Future; - use polkadot_primitives::parachain::{Message, Id as ParaId}; + use polkadot_primitives::parachain::{Id as ParaId, Message}; pub struct DummyRelayChainCtx { egresses: HashMap>>, @@ -170,12 +185,12 @@ mod tests { } } - #[test] + #[test] fn collates_ingress() { let route_from = |x: &[ParaId]| { - let mut set = BTreeSet::new(); - set.extend(x.iter().cloned()); - set + let mut set = BTreeSet::new(); + set.extend(x.iter().cloned()); + set }; let message = |x: Vec| vec![Message(x)]; @@ -184,21 +199,23 @@ mod tests { currently_routing: route_from(&[2.into(), 3.into()]), egresses: vec![ // egresses for `2`: last routed successfully 5 blocks ago. - (2.into(), vec![ - message(vec![1, 2, 3]), - message(vec![4, 5, 6]), - message(vec![7, 8]), - message(vec![10]), - message(vec![12]), - ]), - + ( + 2.into(), + vec![ + message(vec![1, 2, 3]), + message(vec![4, 5, 6]), + message(vec![7, 8]), + message(vec![10]), + message(vec![12]), + ], + ), // egresses for `3`: last routed successfully 3 blocks ago. 
- (3.into(), vec![ - message(vec![9]), - message(vec![11]), - message(vec![13]), - ]), - ].into_iter().collect(), + ( + 3.into(), + vec![message(vec![9]), message(vec![11]), message(vec![13])], + ), + ].into_iter() + .collect(), }; assert_eq!( @@ -212,7 +229,7 @@ mod tests { (3.into(), message(vec![11])), (2.into(), message(vec![12])), (3.into(), message(vec![13])), - ] - )) + ]) + ) } } diff --git a/polkadot/consensus/src/collation.rs b/polkadot/consensus/src/collation.rs index f0b1f626c68e1..7e841f885c936 100644 --- a/polkadot/consensus/src/collation.rs +++ b/polkadot/consensus/src/collation.rs @@ -22,8 +22,8 @@ use std::sync::Arc; use polkadot_api::PolkadotApi; -use polkadot_primitives::{Hash, AccountId, BlockId}; -use polkadot_primitives::parachain::{Id as ParaId, Chain, BlockData, Extrinsic, CandidateReceipt}; +use polkadot_primitives::parachain::{BlockData, CandidateReceipt, Chain, Extrinsic, Id as ParaId}; +use polkadot_primitives::{AccountId, BlockId, Hash}; use futures::prelude::*; @@ -42,7 +42,7 @@ pub trait Collators: Clone { /// Errors when producing collations. type Error; /// A full collation. - type Collation: IntoFuture; + type Collation: IntoFuture; /// Collate on a specific parachain, building on a given relay chain parent hash. fn collate(&self, parachain: ParaId, relay_parent: Hash) -> Self::Collation; @@ -65,7 +65,13 @@ pub struct CollationFetch { impl CollationFetch { /// Create a new collation fetcher for the given chain. - pub fn new(parachain: Chain, relay_parent: BlockId, relay_parent_hash: Hash, collators: C, client: Arc
<P>
) -> Self { + pub fn new( + parachain: Chain, + relay_parent: BlockId, + relay_parent_hash: Hash, + collators: C, + client: Arc
<P>
, + ) -> Self { CollationFetch { relay_parent_hash, relay_parent, @@ -92,12 +98,15 @@ impl Future for CollationFetch { loop { let x = { - let (r, c) = (self.relay_parent_hash, &self.collators); - let poll = self.live_fetch + let (r, c) = (self.relay_parent_hash, &self.collators); + let poll = self + .live_fetch .get_or_insert_with(move || c.collate(parachain, r).into_future()) .poll(); - if let Err(_) = poll { self.parachain = None } + if let Err(_) = poll { + self.parachain = None + } try_ready!(poll) }; @@ -106,15 +115,15 @@ impl Future for CollationFetch { self.parachain = None; // TODO: generate extrinsic while verifying. - return Ok(Async::Ready((x, Extrinsic))); - } + return Ok(Async::Ready((x, Extrinsic))) + }, Err(e) => { debug!("Failed to validate parachain due to API error: {}", e); // just continue if we got a bad collation or failed to validate self.live_fetch = None; self.collators.note_bad_collator(x.receipt.collator) - } + }, } } } @@ -145,14 +154,20 @@ error_chain! { } /// Check whether a given collation is valid. Returns `Ok` on success, error otherwise. -pub fn validate_collation(client: &P, relay_parent: &BlockId, collation: &Collation) -> Result<(), Error> { +pub fn validate_collation( + client: &P, + relay_parent: &BlockId, + collation: &Collation, +) -> Result<(), Error> { use parachain::{self, ValidationParams}; let para_id = collation.receipt.parachain_index; - let validation_code = client.parachain_code(relay_parent, para_id)? + let validation_code = client + .parachain_code(relay_parent, para_id)? .ok_or_else(|| ErrorKind::InactiveParachain(para_id))?; - let chain_head = client.parachain_head(relay_parent, para_id)? + let chain_head = client + .parachain_head(relay_parent, para_id)? .ok_or_else(|| ErrorKind::InactiveParachain(para_id))?; let params = ValidationParams { @@ -161,16 +176,14 @@ pub fn validate_collation(client: &P, relay_parent: &BlockId, co }; match parachain::wasm::validate_candidate(&validation_code, params) { - Ok(result) => { - if result.head_data == collation.receipt.head_data.0 { - Ok(()) - } else { - Err(ErrorKind::WrongHeadData( - collation.receipt.head_data.0.clone(), - result.head_data - ).into()) - } - } - Err(_) => Err(ErrorKind::ValidationFailure.into()) + Ok(result) => if result.head_data == collation.receipt.head_data.0 { + Ok(()) + } else { + Err( + ErrorKind::WrongHeadData(collation.receipt.head_data.0.clone(), result.head_data) + .into(), + ) + }, + Err(_) => Err(ErrorKind::ValidationFailure.into()), } } diff --git a/polkadot/consensus/src/dynamic_inclusion.rs b/polkadot/consensus/src/dynamic_inclusion.rs index d48e486274598..74176d19d34e5 100644 --- a/polkadot/consensus/src/dynamic_inclusion.rs +++ b/polkadot/consensus/src/dynamic_inclusion.rs @@ -50,11 +50,7 @@ impl DynamicInclusion { (0, 0) }; - DynamicInclusion { - start, - y, - m, - } + DynamicInclusion { start, y, m } } /// Returns the duration from `now` after which the amount of included parachain candidates @@ -83,42 +79,49 @@ mod tests { fn full_immediately_allowed() { let now = Instant::now(); - let dynamic = DynamicInclusion::new( - 10, - now, - Duration::from_millis(4000), - ); + let dynamic = DynamicInclusion::new(10, now, Duration::from_millis(4000)); assert!(dynamic.acceptable_in(now, 10).is_none()); assert!(dynamic.acceptable_in(now, 11).is_none()); - assert!(dynamic.acceptable_in(now + Duration::from_millis(2000), 10).is_none()); + assert!( + dynamic + .acceptable_in(now + Duration::from_millis(2000), 10) + .is_none() + ); } #[test] fn half_allowed_halfway() { let 
now = Instant::now(); - let dynamic = DynamicInclusion::new( - 10, - now, - Duration::from_millis(4000), - ); + let dynamic = DynamicInclusion::new(10, now, Duration::from_millis(4000)); - assert_eq!(dynamic.acceptable_in(now, 5), Some(Duration::from_millis(2000))); - assert!(dynamic.acceptable_in(now + Duration::from_millis(2000), 5).is_none()); - assert!(dynamic.acceptable_in(now + Duration::from_millis(3000), 5).is_none()); - assert!(dynamic.acceptable_in(now + Duration::from_millis(4000), 5).is_none()); + assert_eq!( + dynamic.acceptable_in(now, 5), + Some(Duration::from_millis(2000)) + ); + assert!( + dynamic + .acceptable_in(now + Duration::from_millis(2000), 5) + .is_none() + ); + assert!( + dynamic + .acceptable_in(now + Duration::from_millis(3000), 5) + .is_none() + ); + assert!( + dynamic + .acceptable_in(now + Duration::from_millis(4000), 5) + .is_none() + ); } #[test] fn zero_initial_is_flat() { let now = Instant::now(); - let dynamic = DynamicInclusion::new( - 0, - now, - Duration::from_secs(10_000), - ); + let dynamic = DynamicInclusion::new(0, now, Duration::from_secs(10_000)); for i in 0..10_001 { let now = now + Duration::from_secs(i); diff --git a/polkadot/consensus/src/evaluation.rs b/polkadot/consensus/src/evaluation.rs index c7c4fe2c25dcc..3452fe83e4520 100644 --- a/polkadot/consensus/src/evaluation.rs +++ b/polkadot/consensus/src/evaluation.rs @@ -19,9 +19,9 @@ use super::MAX_TRANSACTIONS_SIZE; use codec::Slicable; -use polkadot_runtime::{Block as PolkadotGenericBlock, CheckedBlock}; -use polkadot_primitives::{Block, Hash, BlockNumber, Timestamp}; use polkadot_primitives::parachain::Id as ParaId; +use polkadot_primitives::{Block, BlockNumber, Hash, Timestamp}; +use polkadot_runtime::{Block as PolkadotGenericBlock, CheckedBlock}; error_chain! { links { @@ -83,20 +83,27 @@ pub fn evaluate_initial( .and_then(|b| CheckedBlock::new(b).ok()) .ok_or_else(|| ErrorKind::ProposalNotForPolkadot)?; - let transactions_size = proposal.extrinsics.iter().fold(0, |a, tx| { - a + Slicable::encode(tx).len() - }); + let transactions_size = proposal + .extrinsics + .iter() + .fold(0, |a, tx| a + Slicable::encode(tx).len()); if transactions_size > MAX_TRANSACTIONS_SIZE { bail!(ErrorKind::ProposalTooLarge(transactions_size)) } if proposal.header.parent_hash != *parent_hash { - bail!(ErrorKind::WrongParentHash(*parent_hash, proposal.header.parent_hash)); + bail!(ErrorKind::WrongParentHash( + *parent_hash, + proposal.header.parent_hash + )); } if proposal.header.number != parent_number + 1 { - bail!(ErrorKind::WrongNumber(parent_number + 1, proposal.header.number)); + bail!(ErrorKind::WrongNumber( + parent_number + 1, + proposal.header.number + )); } let block_timestamp = proposal.timestamp(); @@ -109,14 +116,20 @@ pub fn evaluate_initial( { let n_parachains = active_parachains.len(); if proposal.parachain_heads().len() > n_parachains { - bail!(ErrorKind::TooManyCandidates(n_parachains, proposal.parachain_heads().len())); + bail!(ErrorKind::TooManyCandidates( + n_parachains, + proposal.parachain_heads().len() + )); } let mut last_id = None; let mut iter = active_parachains.iter(); for head in proposal.parachain_heads() { // proposed heads must be ascending order by parachain ID without duplicate. 
- if last_id.as_ref().map_or(false, |x| x >= &head.parachain_index) { + if last_id + .as_ref() + .map_or(false, |x| x >= &head.parachain_index) + { bail!(ErrorKind::ParachainOutOfOrder); } diff --git a/polkadot/consensus/src/lib.rs b/polkadot/consensus/src/lib.rs index c75d8ac6ee6b7..dc480dfb2d451 100644 --- a/polkadot/consensus/src/lib.rs +++ b/polkadot/consensus/src/lib.rs @@ -33,22 +33,22 @@ extern crate ed25519; extern crate parking_lot; extern crate polkadot_api; extern crate polkadot_collator as collator; -extern crate polkadot_statement_table as table; extern crate polkadot_parachain as parachain; -extern crate polkadot_transaction_pool as transaction_pool; -extern crate polkadot_runtime; extern crate polkadot_primitives; +extern crate polkadot_runtime; +extern crate polkadot_statement_table as table; +extern crate polkadot_transaction_pool as transaction_pool; extern crate substrate_bft as bft; extern crate substrate_codec as codec; +extern crate substrate_network; extern crate substrate_primitives as primitives; -extern crate substrate_runtime_support as runtime_support; extern crate substrate_runtime_primitives as runtime_primitives; -extern crate substrate_network; +extern crate substrate_runtime_support as runtime_support; extern crate exit_future; -extern crate tokio_core; extern crate substrate_client as client; +extern crate tokio_core; #[macro_use] extern crate error_chain; @@ -67,24 +67,26 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use codec::Slicable; -use table::generic::Statement as GenericStatement; -use runtime_support::Hashable; use polkadot_api::PolkadotApi; -use polkadot_primitives::{Hash, Block, BlockId, BlockNumber, Header, Timestamp}; -use polkadot_primitives::parachain::{Id as ParaId, Chain, DutyRoster, BlockData, Extrinsic as ParachainExtrinsic, CandidateReceipt}; +use polkadot_primitives::parachain::{ + BlockData, CandidateReceipt, Chain, DutyRoster, Extrinsic as ParachainExtrinsic, Id as ParaId, +}; +use polkadot_primitives::{Block, BlockId, BlockNumber, Hash, Header, Timestamp}; use polkadot_runtime::BareExtrinsic; use primitives::AuthorityId; -use transaction_pool::{TransactionPool}; -use tokio_core::reactor::{Handle, Timeout, Interval}; +use runtime_support::Hashable; +use table::generic::Statement as GenericStatement; +use tokio_core::reactor::{Handle, Interval, Timeout}; +use transaction_pool::TransactionPool; -use futures::prelude::*; -use futures::future::{self, Shared}; use collation::CollationFetch; use dynamic_inclusion::DynamicInclusion; +use futures::future::{self, Shared}; +use futures::prelude::*; -pub use self::collation::{Collators, Collation}; -pub use self::error::{ErrorKind, Error}; -pub use self::shared_table::{SharedTable, StatementSource, StatementProducer, ProducedStatements}; +pub use self::collation::{Collation, Collators}; +pub use self::error::{Error, ErrorKind}; +pub use self::shared_table::{ProducedStatements, SharedTable, StatementProducer, StatementSource}; pub use service::Service; mod collation; @@ -104,12 +106,17 @@ pub trait TableRouter: Clone { /// Errors when fetching data from the network. type Error; /// Future that resolves when candidate data is fetched. - type FetchCandidate: IntoFuture; + type FetchCandidate: IntoFuture; /// Future that resolves when extrinsic candidate data is fetched. - type FetchExtrinsic: IntoFuture; + type FetchExtrinsic: IntoFuture; /// Note local candidate data, making it available on the network to other validators. 
- fn local_candidate_data(&self, hash: Hash, block_data: BlockData, extrinsic: ParachainExtrinsic); + fn local_candidate_data( + &self, + hash: Hash, + block_data: BlockData, + extrinsic: ParachainExtrinsic, + ); /// Fetch block data for a specific candidate. fn fetch_block_data(&self, candidate: &CandidateReceipt) -> Self::FetchCandidate; @@ -144,7 +151,11 @@ pub struct GroupInfo { /// Sign a table statement against a parent hash. /// The actual message signed is the encoded statement concatenated with the /// parent hash. -pub fn sign_table_statement(statement: &table::Statement, key: &ed25519::Pair, parent_hash: &Hash) -> ed25519::Signature { +pub fn sign_table_statement( + statement: &table::Statement, + key: &ed25519::Pair, + parent_hash: &Hash, +) -> ed25519::Signature { use polkadot_primitives::parachain::Statement as RawStatement; let raw = match *statement { @@ -160,19 +171,32 @@ pub fn sign_table_statement(statement: &table::Statement, key: &ed25519::Pair, p key.sign(&encoded) } -fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId], local_id: AuthorityId) -> Result<(HashMap, LocalDuty), Error> { +fn make_group_info( + roster: DutyRoster, + authorities: &[AuthorityId], + local_id: AuthorityId, +) -> Result<(HashMap, LocalDuty), Error> { if roster.validator_duty.len() != authorities.len() { - bail!(ErrorKind::InvalidDutyRosterLength(authorities.len(), roster.validator_duty.len())) + bail!(ErrorKind::InvalidDutyRosterLength( + authorities.len(), + roster.validator_duty.len() + )) } if roster.guarantor_duty.len() != authorities.len() { - bail!(ErrorKind::InvalidDutyRosterLength(authorities.len(), roster.guarantor_duty.len())) + bail!(ErrorKind::InvalidDutyRosterLength( + authorities.len(), + roster.guarantor_duty.len() + )) } let mut local_validation = None; let mut map = HashMap::new(); - let duty_iter = authorities.iter().zip(&roster.validator_duty).zip(&roster.guarantor_duty); + let duty_iter = authorities + .iter() + .zip(&roster.validator_duty) + .zip(&roster.guarantor_duty); for ((authority, v_duty), a_duty) in duty_iter { if authority == &local_id { local_validation = Some(v_duty.clone()); @@ -181,19 +205,21 @@ fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId], local_id: Au match *v_duty { Chain::Relay => {}, // does nothing for now. Chain::Parachain(ref id) => { - map.entry(id.clone()).or_insert_with(GroupInfo::default) + map.entry(id.clone()) + .or_insert_with(GroupInfo::default) .validity_guarantors .insert(authority.clone()); - } + }, } match *a_duty { Chain::Relay => {}, // does nothing for now. 
Chain::Parachain(ref id) => { - map.entry(id.clone()).or_insert_with(GroupInfo::default) + map.entry(id.clone()) + .or_insert_with(GroupInfo::default) .availability_guarantors .insert(authority.clone()); - } + }, } } @@ -212,7 +238,7 @@ fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId], local_id: Au }; Ok((map, local_duty)) - } + }, None => bail!(ErrorKind::NotValidator(local_id)), } } @@ -238,15 +264,20 @@ pub struct ProposerFactory { } impl bft::ProposerFactory for ProposerFactory - where - C: PolkadotApi + Send + Sync, - N: Network, - P: Collators, +where + C: PolkadotApi + Send + Sync, + N: Network, + P: Collators, { type Proposer = Proposer; type Error = Error; - fn init(&self, parent_header: &Header, authorities: &[AuthorityId], sign_with: Arc) -> Result { + fn init( + &self, + parent_header: &Header, + authorities: &[AuthorityId], + sign_with: Arc, + ) -> Result { use std::time::Duration; const DELAY_UNTIL: Duration = Duration::from_millis(5000); @@ -257,11 +288,8 @@ impl bft::ProposerFactory for ProposerFactory let duty_roster = self.client.duty_roster(&id)?; let random_seed = self.client.random_seed(&id)?; - let (group_info, local_duty) = make_group_info( - duty_roster, - authorities, - sign_with.public().into(), - )?; + let (group_info, local_duty) = + make_group_info(duty_roster, authorities, sign_with.public().into())?; let active_parachains = self.client.active_parachains(&id)?; @@ -274,8 +302,7 @@ impl bft::ProposerFactory for ProposerFactory self.parachain_empty_duration.clone(), ); - let timeout = Timeout::new(DELAY_UNTIL, &self.handle) - .map_err(|e| timer_error(&e))?; + let timeout = Timeout::new(DELAY_UNTIL, &self.handle).map_err(|e| timer_error(&e))?; debug!(target: "bft", "Initialising consensus proposer. Refusing to evaluate for {:?} from now.", DELAY_UNTIL); @@ -323,26 +350,23 @@ pub struct Proposer { } impl bft::Proposer for Proposer - where - C: PolkadotApi + Send + Sync, - R: TableRouter, - P: Collators, +where + C: PolkadotApi + Send + Sync, + R: TableRouter, + P: Collators, { type Error = Error; - type Create = future::Either< - CreateProposal, - future::FutureResult, - >; - type Evaluate = Box>; + type Create = future::Either, future::FutureResult>; + type Evaluate = Box>; fn propose(&self) -> Self::Create { const ATTEMPT_PROPOSE_EVERY: Duration = Duration::from_millis(100); let initial_included = self.table.includable_count(); - let enough_candidates = self.dynamic_inclusion.acceptable_in( - Instant::now(), - initial_included, - ).unwrap_or_default(); + let enough_candidates = self + .dynamic_inclusion + .acceptable_in(Instant::now(), initial_included) + .unwrap_or_default(); let timing = { let delay = self.delay.clone(); @@ -361,9 +385,7 @@ impl bft::Proposer for Proposer match make_timing(&self.handle) { Ok(timing) => timing, - Err(e) => { - return future::Either::B(future::err(timer_error(&e))); - } + Err(e) => return future::Either::B(future::err(timer_error(&e))), } }; @@ -378,7 +400,7 @@ impl bft::Proposer for Proposer self.parent_id.clone(), self.parent_hash.clone(), self.collators.clone(), - self.client.clone() + self.client.clone(), ), table: self.table.clone(), router: self.router.clone(), @@ -410,14 +432,13 @@ impl bft::Proposer for Proposer Err(e) => { // TODO: these errors are easily re-checked in runtime. 
debug!(target: "bft", "Invalid proposal: {:?}", e); - return Box::new(future::ok(false)); - } + return Box::new(future::ok(false)) + }, }; let vote_delays = { // delay casting vote until able (according to minimum block time) - let minimum_delay = self.delay.clone() - .map_err(|e| timer_error(&*e)); + let minimum_delay = self.delay.clone().map_err(|e| timer_error(&*e)); let included_candidate_hashes = proposal .parachain_heads() @@ -426,14 +447,15 @@ impl bft::Proposer for Proposer // delay casting vote until we have proof that all candidates are // includable. - let includability_tracker = self.table.track_includability(included_candidate_hashes) + let includability_tracker = self + .table + .track_includability(included_candidate_hashes) .map_err(|_| ErrorKind::PrematureDestruction.into()); // the duration at which the given number of parachains is acceptable. - let count_delay = self.dynamic_inclusion.acceptable_in( - Instant::now(), - proposal.parachain_heads().len(), - ); + let count_delay = self + .dynamic_inclusion + .acceptable_in(Instant::now(), proposal.parachain_heads().len()); // the duration until the given timestamp is current let proposed_timestamp = proposal.timestamp(); @@ -453,7 +475,7 @@ impl bft::Proposer for Proposer .map_err(|e| timer_error(&e)); future::Either::A(f) - } + }, None => future::Either::B(future::ok(())), }; @@ -462,7 +484,8 @@ impl bft::Proposer for Proposer // evaluate whether the block is actually valid. // TODO: is it better to delay this until the delays are finished? - let evaluated = self.client + let evaluated = self + .client .evaluate_block(&self.parent_id, unchecked_proposal.clone()) .map_err(Into::into); @@ -495,29 +518,33 @@ impl bft::Proposer for Proposer fn import_misbehavior(&self, misbehavior: Vec<(AuthorityId, bft::Misbehavior)>) { use bft::generic::Misbehavior as GenericMisbehavior; + use polkadot_runtime::{Call, ConsensusCall, Extrinsic, UncheckedExtrinsic}; use runtime_primitives::bft::{MisbehaviorKind, MisbehaviorReport}; use runtime_primitives::MaybeUnsigned; - use polkadot_runtime::{Call, Extrinsic, UncheckedExtrinsic, ConsensusCall}; let local_id = self.local_key.public().0.into(); let mut next_index = { - let cur_index = self.transaction_pool.cull_and_get_pending(BlockId::hash(self.parent_hash), |pending| pending - .filter(|tx| tx.sender().map(|s| s == local_id).unwrap_or(false)) - .last() - .map(|tx| Ok(tx.index())) - .unwrap_or_else(|| self.client.index(&self.parent_id, local_id)) + let cur_index = self.transaction_pool.cull_and_get_pending( + BlockId::hash(self.parent_hash), + |pending| { + pending + .filter(|tx| tx.sender().map(|s| s == local_id).unwrap_or(false)) + .last() + .map(|tx| Ok(tx.index())) + .unwrap_or_else(|| self.client.index(&self.parent_id, local_id)) + }, ); match cur_index { Ok(Ok(cur_index)) => cur_index + 1, Ok(Err(e)) => { warn!(target: "consensus", "Error computing next transaction index: {}", e); - return; - } + return + }, Err(e) => { warn!(target: "consensus", "Error computing next transaction index: {}", e); - return; - } + return + }, } }; @@ -529,11 +556,19 @@ impl bft::Proposer for Proposer misbehavior: match misbehavior { GenericMisbehavior::ProposeOutOfTurn(_, _, _) => continue, GenericMisbehavior::DoublePropose(_, _, _) => continue, - GenericMisbehavior::DoublePrepare(round, (h1, s1), (h2, s2)) - => MisbehaviorKind::BftDoublePrepare(round as u32, (h1, s1.signature), (h2, s2.signature)), - GenericMisbehavior::DoubleCommit(round, (h1, s1), (h2, s2)) - => MisbehaviorKind::BftDoubleCommit(round as 
u32, (h1, s1.signature), (h2, s2.signature)), - } + GenericMisbehavior::DoublePrepare(round, (h1, s1), (h2, s2)) => + MisbehaviorKind::BftDoublePrepare( + round as u32, + (h1, s1.signature), + (h2, s2.signature), + ), + GenericMisbehavior::DoubleCommit(round, (h1, s1), (h2, s2)) => + MisbehaviorKind::BftDoubleCommit( + round as u32, + (h1, s1.signature), + (h2, s2.signature), + ), + }, }; let extrinsic = BareExtrinsic { signed: local_id, @@ -552,7 +587,8 @@ impl bft::Proposer for Proposer }; let uxt = UncheckedExtrinsic::new(extrinsic, signature); - self.transaction_pool.import_unchecked_extrinsic(BlockId::hash(self.parent_hash), uxt) + self.transaction_pool + .import_unchecked_extrinsic(BlockId::hash(self.parent_hash), uxt) .expect("locally signed extrinsic is valid; qed"); } } @@ -561,7 +597,8 @@ impl bft::Proposer for Proposer fn current_timestamp() -> Timestamp { use std::time; - time::SystemTime::now().duration_since(time::UNIX_EPOCH) + time::SystemTime::now() + .duration_since(time::UNIX_EPOCH) .expect("now always later than unix epoch; qed") .as_secs() } @@ -583,9 +620,7 @@ impl ProposalTiming { // // this interval is just meant to produce periodic task wakeups // that lead to the `dynamic_inclusion` getting updated as necessary. - if let Async::Ready(x) = self.attempt_propose.poll() - .map_err(|e| timer_error(&e))? - { + if let Async::Ready(x) = self.attempt_propose.poll().map_err(|e| timer_error(&e))? { x.expect("timer still alive; intervals never end; qed"); } @@ -596,7 +631,7 @@ impl ProposalTiming { self.minimum_delay = None; // after this point, the future must have completed. if included == self.last_included { - return self.enough_candidates.poll().map_err(|e| timer_error(&e)); + return self.enough_candidates.poll().map_err(|e| timer_error(&e)) } // the amount of includable candidates has changed. schedule a wakeup @@ -607,16 +642,14 @@ impl ProposalTiming { self.last_included = included; self.enough_candidates.reset(now + duration); self.enough_candidates.poll().map_err(|e| timer_error(&e)) - } - None => { - Ok(Async::Ready(())) - } + }, + None => Ok(Async::Ready(())), } } } /// Future which resolves upon the creation of a proposal. -pub struct CreateProposal { +pub struct CreateProposal { parent_hash: Hash, parent_number: BlockNumber, parent_id: BlockId, @@ -629,43 +662,50 @@ pub struct CreateProposal { } impl CreateProposal - where - C: PolkadotApi, - R: TableRouter, - P: Collators, +where + C: PolkadotApi, + R: TableRouter, + P: Collators, { fn propose_with(&self, candidates: Vec) -> Result { use polkadot_api::BlockBuilder; - use runtime_primitives::traits::{Hashing, BlakeTwo256}; + use runtime_primitives::traits::{BlakeTwo256, Hashing}; // TODO: handle case when current timestamp behind that in state. let timestamp = current_timestamp(); - let mut block_builder = self.client.build_block(&self.parent_id, timestamp, candidates)?; + let mut block_builder = self + .client + .build_block(&self.parent_id, timestamp, candidates)?; { let mut unqueue_invalid = Vec::new(); - let result = self.transaction_pool.cull_and_get_pending(BlockId::hash(self.parent_hash), |pending_iterator| { - let mut pending_size = 0; - for pending in pending_iterator { - // skip and cull transactions which are too large. 
- if pending.encoded_size() > MAX_TRANSACTIONS_SIZE { - unqueue_invalid.push(pending.hash().clone()); - continue - } - - if pending_size + pending.encoded_size() >= MAX_TRANSACTIONS_SIZE { break } + let result = self.transaction_pool.cull_and_get_pending( + BlockId::hash(self.parent_hash), + |pending_iterator| { + let mut pending_size = 0; + for pending in pending_iterator { + // skip and cull transactions which are too large. + if pending.encoded_size() > MAX_TRANSACTIONS_SIZE { + unqueue_invalid.push(pending.hash().clone()); + continue + } - match block_builder.push_extrinsic(pending.primitive_extrinsic()) { - Ok(()) => { - pending_size += pending.encoded_size(); + if pending_size + pending.encoded_size() >= MAX_TRANSACTIONS_SIZE { + break } - Err(e) => { - trace!(target: "transaction-pool", "Invalid transaction: {}", e); - unqueue_invalid.push(pending.hash().clone()); + + match block_builder.push_extrinsic(pending.primitive_extrinsic()) { + Ok(()) => { + pending_size += pending.encoded_size(); + }, + Err(e) => { + trace!(target: "transaction-pool", "Invalid transaction: {}", e); + unqueue_invalid.push(pending.hash().clone()); + }, } } - } - }); + }, + ); if let Err(e) = result { warn!("Unable to get the pending set: {:?}", e); } @@ -675,11 +715,14 @@ impl CreateProposal let polkadot_block = block_builder.bake()?; - info!("Proposing block [number: {}; hash: {}; parent_hash: {}; extrinsics: [{}]]", + info!( + "Proposing block [number: {}; hash: {}; parent_hash: {}; extrinsics: [{}]]", polkadot_block.header.number, Hash::from(polkadot_block.header.hash()), polkadot_block.header.parent_hash, - polkadot_block.extrinsics.iter() + polkadot_block + .extrinsics + .iter() .map(|xt| format!("{}", BlakeTwo256::hash_of(xt))) .collect::>() .join(", ") @@ -690,23 +733,25 @@ impl CreateProposal // TODO: full re-evaluation let active_parachains = self.client.active_parachains(&self.parent_id)?; - assert!(evaluation::evaluate_initial( - &substrate_block, - timestamp, - &self.parent_hash, - self.parent_number, - &active_parachains, - ).is_ok()); + assert!( + evaluation::evaluate_initial( + &substrate_block, + timestamp, + &self.parent_hash, + self.parent_number, + &active_parachains, + ).is_ok() + ); Ok(substrate_block) } } impl Future for CreateProposal - where - C: PolkadotApi, - R: TableRouter, - P: Collators, +where + C: PolkadotApi, + R: TableRouter, + P: Collators, { type Item = Block; type Error = Error; @@ -716,11 +761,14 @@ impl Future for CreateProposal match self.collation.poll() { Ok(Async::Ready((collation, extrinsic))) => { let hash = collation.receipt.hash(); - self.router.local_candidate_data(hash, collation.block_data, extrinsic); - - // TODO: if we are an availability guarantor also, we should produce an availability statement. - self.table.sign_and_import(&self.router, GenericStatement::Candidate(collation.receipt)); - } + self.router + .local_candidate_data(hash, collation.block_data, extrinsic); + + // TODO: if we are an availability guarantor also, we should produce an + // availability statement. + self.table + .sign_and_import(&self.router, GenericStatement::Candidate(collation.receipt)); + }, Ok(Async::NotReady) => {}, Err(_) => {}, // TODO: handle this failure to collate. } @@ -731,9 +779,9 @@ impl Future for CreateProposal try_ready!(self.timing.poll(included)); // 3. 
propose - let proposed_candidates = self.table.with_proposal(|proposed_set| { - proposed_set.into_iter().cloned().collect() - }); + let proposed_candidates = self + .table + .with_proposal(|proposed_set| proposed_set.into_iter().cloned().collect()); self.propose_with(proposed_candidates).map(Async::Ready) } diff --git a/polkadot/consensus/src/service.rs b/polkadot/consensus/src/service.rs index 494e7858096e8..ebbef74fcac02 100644 --- a/polkadot/consensus/src/service.rs +++ b/polkadot/consensus/src/service.rs @@ -16,12 +16,11 @@ //! Consensus service. +use std::sync::Arc; /// Consensus service. A long runnung service that manages BFT agreement and parachain /// candidate agreement over the network. - use std::thread; use std::time::{Duration, Instant}; -use std::sync::Arc; use bft::{self, BftService}; use client::{BlockchainEvents, ChainHead}; @@ -29,15 +28,15 @@ use ed25519; use futures::prelude::*; use futures::{future, Canceled}; use polkadot_api::LocalPolkadotApi; -use polkadot_primitives::{BlockId, Block, Header, Hash, AccountId}; -use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt}; +use polkadot_primitives::parachain::{BlockData, CandidateReceipt, Extrinsic, Id as ParaId}; +use polkadot_primitives::{AccountId, Block, BlockId, Hash, Header}; use primitives::AuthorityId; use runtime_support::Hashable; use substrate_network as net; use tokio_core::reactor; use transaction_pool::TransactionPool; -use super::{TableRouter, SharedTable, ProposerFactory}; +use super::{ProposerFactory, SharedTable, TableRouter}; use error; const TIMER_DELAY_MS: u64 = 5000; @@ -65,70 +64,90 @@ impl Stream for Messages { match self.network_stream.poll() { Err(_) => return Err(bft::InputStreamConcluded.into()), Ok(Async::NotReady) => return Ok(Async::NotReady), - Ok(Async::Ready(None)) => return Ok(Async::NotReady), // the input stream for agreements is never meant to logically end. + Ok(Async::Ready(None)) => return Ok(Async::NotReady), /* the input stream for + * agreements is never + * meant to logically end. */ Ok(Async::Ready(Some(message))) => { match process_message(message, &self.local_id, &self.authorities) { Ok(Some(message)) => return Ok(Async::Ready(Some(message))), - Ok(None) => {} // ignored local message. + Ok(None) => {}, // ignored local message. 
Err(e) => { debug!("Message validation failed: {:?}", e); - } + }, } - } + }, } } } } -fn process_message(msg: net::LocalizedBftMessage, local_id: &AuthorityId, authorities: &[AuthorityId]) -> Result>, bft::Error> { +fn process_message( + msg: net::LocalizedBftMessage, + local_id: &AuthorityId, + authorities: &[AuthorityId], +) -> Result>, bft::Error> { Ok(Some(match msg.message { - net::generic_message::BftMessage::Consensus(c) => bft::generic::Communication::Consensus(match c { - net::generic_message::SignedConsensusMessage::Propose(proposal) => bft::generic::LocalizedMessage::Propose({ - if &proposal.sender == local_id { return Ok(None) } - let proposal = bft::generic::LocalizedProposal { - round_number: proposal.round_number as usize, - proposal: proposal.proposal, - digest: proposal.digest, - sender: proposal.sender, - digest_signature: ed25519::LocalizedSignature { - signature: proposal.digest_signature, - signer: proposal.sender.into(), - }, - full_signature: ed25519::LocalizedSignature { - signature: proposal.full_signature, - signer: proposal.sender.into(), - } - }; - bft::check_proposal(authorities, &msg.parent_hash, &proposal)?; - - trace!(target: "bft", "importing proposal message for round {} from {}", proposal.round_number, proposal.sender); - proposal - }), - net::generic_message::SignedConsensusMessage::Vote(vote) => bft::generic::LocalizedMessage::Vote({ - if &vote.sender == local_id { return Ok(None) } - let vote = bft::generic::LocalizedVote { - sender: vote.sender, - signature: ed25519::LocalizedSignature { - signature: vote.signature, - signer: vote.sender.into(), - }, - vote: match vote.vote { - net::generic_message::ConsensusVote::Prepare(r, h) => bft::generic::Vote::Prepare(r as usize, h), - net::generic_message::ConsensusVote::Commit(r, h) => bft::generic::Vote::Commit(r as usize, h), - net::generic_message::ConsensusVote::AdvanceRound(r) => bft::generic::Vote::AdvanceRound(r as usize), - } - }; - bft::check_vote::(authorities, &msg.parent_hash, &vote)?; - - trace!(target: "bft", "importing vote {:?} from {}", vote.vote, vote.sender); - vote + net::generic_message::BftMessage::Consensus(c) => + bft::generic::Communication::Consensus(match c { + net::generic_message::SignedConsensusMessage::Propose(proposal) => + bft::generic::LocalizedMessage::Propose({ + if &proposal.sender == local_id { + return Ok(None) + } + let proposal = bft::generic::LocalizedProposal { + round_number: proposal.round_number as usize, + proposal: proposal.proposal, + digest: proposal.digest, + sender: proposal.sender, + digest_signature: ed25519::LocalizedSignature { + signature: proposal.digest_signature, + signer: proposal.sender.into(), + }, + full_signature: ed25519::LocalizedSignature { + signature: proposal.full_signature, + signer: proposal.sender.into(), + }, + }; + bft::check_proposal(authorities, &msg.parent_hash, &proposal)?; + + trace!(target: "bft", "importing proposal message for round {} from {}", proposal.round_number, proposal.sender); + proposal + }), + net::generic_message::SignedConsensusMessage::Vote(vote) => + bft::generic::LocalizedMessage::Vote({ + if &vote.sender == local_id { + return Ok(None) + } + let vote = bft::generic::LocalizedVote { + sender: vote.sender, + signature: ed25519::LocalizedSignature { + signature: vote.signature, + signer: vote.sender.into(), + }, + vote: match vote.vote { + net::generic_message::ConsensusVote::Prepare(r, h) => + bft::generic::Vote::Prepare(r as usize, h), + net::generic_message::ConsensusVote::Commit(r, h) => + 
bft::generic::Vote::Commit(r as usize, h), + net::generic_message::ConsensusVote::AdvanceRound(r) => + bft::generic::Vote::AdvanceRound(r as usize), + }, + }; + bft::check_vote::(authorities, &msg.parent_hash, &vote)?; + + trace!(target: "bft", "importing vote {:?} from {}", vote.vote, vote.sender); + vote + }), }), - }), net::generic_message::BftMessage::Auxiliary(a) => { let justification = bft::UncheckedJustification::::from(a); // TODO: get proper error - let justification: Result<_, bft::Error> = bft::check_prepare_justification::(authorities, msg.parent_hash, justification) - .map_err(|_| bft::ErrorKind::InvalidJustification.into()); + let justification: Result<_, bft::Error> = + bft::check_prepare_justification::( + authorities, + msg.parent_hash, + justification, + ).map_err(|_| bft::ErrorKind::InvalidJustification.into()); bft::generic::Communication::Auxiliary(justification?) }, })) @@ -139,29 +158,47 @@ impl Sink for BftSink { // TODO: replace this with the ! type when that's stabilized type SinkError = E; - fn start_send(&mut self, message: bft::Communication) -> ::futures::StartSend, E> { + fn start_send( + &mut self, + message: bft::Communication, + ) -> ::futures::StartSend, E> { let network_message = net::generic_message::LocalizedBftMessage { message: match message { - bft::generic::Communication::Consensus(c) => net::generic_message::BftMessage::Consensus(match c { - bft::generic::LocalizedMessage::Propose(proposal) => net::generic_message::SignedConsensusMessage::Propose(net::generic_message::SignedConsensusProposal { - round_number: proposal.round_number as u32, - proposal: proposal.proposal, - digest: proposal.digest, - sender: proposal.sender, - digest_signature: proposal.digest_signature.signature, - full_signature: proposal.full_signature.signature, - }), - bft::generic::LocalizedMessage::Vote(vote) => net::generic_message::SignedConsensusMessage::Vote(net::generic_message::SignedConsensusVote { - sender: vote.sender, - signature: vote.signature.signature, - vote: match vote.vote { - bft::generic::Vote::Prepare(r, h) => net::generic_message::ConsensusVote::Prepare(r as u32, h), - bft::generic::Vote::Commit(r, h) => net::generic_message::ConsensusVote::Commit(r as u32, h), - bft::generic::Vote::AdvanceRound(r) => net::generic_message::ConsensusVote::AdvanceRound(r as u32), - } + bft::generic::Communication::Consensus(c) => + net::generic_message::BftMessage::Consensus(match c { + bft::generic::LocalizedMessage::Propose(proposal) => + net::generic_message::SignedConsensusMessage::Propose( + net::generic_message::SignedConsensusProposal { + round_number: proposal.round_number as u32, + proposal: proposal.proposal, + digest: proposal.digest, + sender: proposal.sender, + digest_signature: proposal.digest_signature.signature, + full_signature: proposal.full_signature.signature, + }, + ), + bft::generic::LocalizedMessage::Vote(vote) => + net::generic_message::SignedConsensusMessage::Vote( + net::generic_message::SignedConsensusVote { + sender: vote.sender, + signature: vote.signature.signature, + vote: match vote.vote { + bft::generic::Vote::Prepare(r, h) => + net::generic_message::ConsensusVote::Prepare( + r as u32, h, + ), + bft::generic::Vote::Commit(r, h) => + net::generic_message::ConsensusVote::Commit(r as u32, h), + bft::generic::Vote::AdvanceRound(r) => + net::generic_message::ConsensusVote::AdvanceRound( + r as u32, + ), + }, + }, + ), }), - }), - bft::generic::Communication::Auxiliary(justification) => 
net::generic_message::BftMessage::Auxiliary(justification.uncheck().into()), + bft::generic::Communication::Auxiliary(justification) => + net::generic_message::BftMessage::Auxiliary(justification.uncheck().into()), }, parent_hash: self.parent_hash, }; @@ -180,7 +217,7 @@ impl super::Network for Network { type TableRouter = Router; fn table_router(&self, _table: Arc) -> Self::TableRouter { Router { - network: self.0.clone() + network: self.0.clone(), } } } @@ -198,15 +235,18 @@ fn start_bft( >::Error: ::std::fmt::Display + Into, { let parent_hash = header.hash(); - if bft_service.live_agreement().map_or(false, |h| h == parent_hash) { - return; + if bft_service + .live_agreement() + .map_or(false, |h| h == parent_hash) + { + return } let authorities = match client.authorities(&BlockId::hash(parent_hash)) { Ok(authorities) => authorities, Err(e) => { debug!("Error reading authorities: {:?}", e); - return; - } + return + }, }; let input = Messages { @@ -215,7 +255,11 @@ fn start_bft( authorities, }; - let output = BftSink { network: network, parent_hash: parent_hash, _e: Default::default() }; + let output = BftSink { + network, + parent_hash, + _e: Default::default(), + }; match bft_service.build_upon(&header, input.map_err(Into::into), output) { Ok(Some(bft)) => handle.spawn(bft), Ok(None) => {}, @@ -239,9 +283,15 @@ impl Service { parachain_empty_duration: Duration, key: ed25519::Pair, ) -> Service - where - A: LocalPolkadotApi + Send + Sync + 'static, - C: BlockchainEvents + ChainHead + bft::BlockImport + bft::Authorities + Send + Sync + 'static, + where + A: LocalPolkadotApi + Send + Sync + 'static, + C: BlockchainEvents + + ChainHead + + bft::BlockImport + + bft::Authorities + + Send + + Sync + + 'static, { let (signal, exit) = ::exit_future::signal(); let thread = thread::spawn(move || { @@ -264,25 +314,37 @@ impl Service { let client = client.clone(); let bft_service = bft_service.clone(); - client.import_notification_stream().for_each(move |notification| { - if notification.is_new_best { - start_bft(¬ification.header, handle.clone(), &*client, network.clone(), &*bft_service); - } - Ok(()) - }) + client + .import_notification_stream() + .for_each(move |notification| { + if notification.is_new_best { + start_bft( + ¬ification.header, + handle.clone(), + &*client, + network.clone(), + &*bft_service, + ); + } + Ok(()) + }) }; - let interval = reactor::Interval::new_at( - Instant::now() + Duration::from_millis(TIMER_DELAY_MS), - Duration::from_millis(TIMER_INTERVAL_MS), - &core.handle(), - ).expect("it is always possible to create an interval with valid params"); + let interval = + reactor::Interval::new_at( + Instant::now() + Duration::from_millis(TIMER_DELAY_MS), + Duration::from_millis(TIMER_INTERVAL_MS), + &core.handle(), + ).expect("it is always possible to create an interval with valid params"); let mut prev_best = match client.best_block_header() { Ok(header) => header.blake2_256(), Err(e) => { - warn!("Cant's start consensus service. Error reading best block header: {:?}", e); - return; - } + warn!( + "Cant's start consensus service. 
Error reading best block header: {:?}", + e + ); + return + }, }; let timed = { @@ -291,17 +353,19 @@ impl Service { let n = network.clone(); let handle = core.handle(); - interval.map_err(|e| debug!("Timer error: {:?}", e)).for_each(move |_| { - if let Ok(best_block) = c.best_block_header() { - let hash = best_block.blake2_256(); - if hash == prev_best { - debug!("Starting consensus round after a timeout"); - start_bft(&best_block, handle.clone(), &*c, n.clone(), &*s); + interval + .map_err(|e| debug!("Timer error: {:?}", e)) + .for_each(move |_| { + if let Ok(best_block) = c.best_block_header() { + let hash = best_block.blake2_256(); + if hash == prev_best { + debug!("Starting consensus round after a timeout"); + start_bft(&best_block, handle.clone(), &*c, n.clone(), &*s); + } + prev_best = hash; } - prev_best = hash; - } - Ok(()) - }) + Ok(()) + }) }; core.handle().spawn(notifications); @@ -342,7 +406,7 @@ impl ::collation::Collators for NoCollators { future::empty() } - fn note_bad_collator(&self, _collator: AccountId) { } + fn note_bad_collator(&self, _collator: AccountId) {} } #[derive(Clone)] @@ -352,7 +416,7 @@ struct Router { impl TableRouter for Router { type Error = Canceled; - type FetchCandidate = future::Empty; + type FetchCandidate = future::Empty; type FetchExtrinsic = future::FutureResult; fn local_candidate_data(&self, _hash: Hash, _block_data: BlockData, _extrinsic: Extrinsic) { diff --git a/polkadot/consensus/src/shared_table/includable.rs b/polkadot/consensus/src/shared_table/includable.rs index 873c3af94c403..9b77b2177cfde 100644 --- a/polkadot/consensus/src/shared_table/includable.rs +++ b/polkadot/consensus/src/shared_table/includable.rs @@ -24,7 +24,9 @@ use futures::sync::oneshot; use polkadot_primitives::Hash; /// Track includability of a set of candidates, -pub(super) fn track>(candidates: I) -> (IncludabilitySender, Includable) { +pub(super) fn track>( + candidates: I, +) -> (IncludabilitySender, Includable) { let (tx, rx) = oneshot::channel(); let tracking: HashMap<_, _> = candidates.into_iter().collect(); let includable_count = tracking.values().filter(|x| **x).count(); @@ -37,10 +39,7 @@ pub(super) fn track>(candidates: I) -> (Inclu sender.try_complete(); - ( - sender, - Includable(rx), - ) + (sender, Includable(rx)) } /// The sending end of the includability sender. @@ -59,7 +58,7 @@ impl IncludabilitySender { use std::collections::hash_map::Entry; match self.tracking.entry(candidate) { - Entry::Vacant(_) => {} + Entry::Vacant(_) => {}, Entry::Occupied(mut entry) => { let old = entry.insert(includable); if !old && includable { @@ -67,7 +66,7 @@ impl IncludabilitySender { } else if old && !includable { self.includable_count -= 1; } - } + }, } self.try_complete() @@ -113,12 +112,15 @@ mod tests { let hash2 = [2; 32].into(); let hash3 = [3; 32].into(); - let (mut sender, recv) = track([ - (hash1, true), - (hash2, true), - (hash2, false), // overwrite should favor latter. - (hash3, true), - ].iter().cloned()); + let (mut sender, recv) = track( + [ + (hash1, true), + (hash2, true), + (hash2, false), // overwrite should favor latter. 
+ (hash3, true), + ].iter() + .cloned(), + ); assert!(!sender.is_complete()); diff --git a/polkadot/consensus/src/shared_table/mod.rs b/polkadot/consensus/src/shared_table/mod.rs index 7b22c6c0b330a..c5551c11c0674 100644 --- a/polkadot/consensus/src/shared_table/mod.rs +++ b/polkadot/consensus/src/shared_table/mod.rs @@ -20,18 +20,18 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use table::{self, Table, Context as TableContextTrait}; -use table::generic::Statement as GenericStatement; use collation::Collation; +use polkadot_primitives::parachain::{BlockData, CandidateReceipt, Extrinsic, Id as ParaId}; use polkadot_primitives::Hash; -use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt}; use primitives::AuthorityId; +use table::generic::Statement as GenericStatement; +use table::{self, Context as TableContextTrait, Table}; -use parking_lot::Mutex; use futures::{future, prelude::*}; +use parking_lot::Mutex; -use super::{GroupInfo, TableRouter}; use self::includable::IncludabilitySender; +use super::{GroupInfo, TableRouter}; mod includable; @@ -45,18 +45,23 @@ struct TableContext { impl table::Context for TableContext { fn is_member_of(&self, authority: &AuthorityId, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.validity_guarantors.contains(authority)) + self.groups + .get(group) + .map_or(false, |g| g.validity_guarantors.contains(authority)) } fn is_availability_guarantor_of(&self, authority: &AuthorityId, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.availability_guarantors.contains(authority)) + self.groups + .get(group) + .map_or(false, |g| g.availability_guarantors.contains(authority)) } fn requisite_votes(&self, group: &ParaId) -> (usize, usize) { - self.groups.get(group).map_or( - (usize::max_value(), usize::max_value()), - |g| (g.needed_validity, g.needed_availability), - ) + self.groups + .get(group) + .map_or((usize::max_value(), usize::max_value()), |g| { + (g.needed_validity, g.needed_availability) + }) } } @@ -115,7 +120,10 @@ impl SharedTableInner { StatementSource::Remote(from) => from, }; - let summary = match self.table.import_statement(context, statement, received_from) { + let summary = match self + .table + .import_statement(context, statement, received_from) + { Some(summary) => summary, None => return Default::default(), }; @@ -136,20 +144,17 @@ impl SharedTableInner { && self.proposed_digest.as_ref().map_or(true, |d| d != digest) && self.checked_validity.insert(digest.clone()); - let checking_availability = is_availability_member - && self.checked_availability.insert(digest.clone()); + let checking_availability = + is_availability_member && self.checked_availability.insert(digest.clone()); let work = if checking_validity || checking_availability { match self.table.get_candidate(&digest) { None => None, // TODO: handle table inconsistency somehow? 
Some(candidate) => { - let fetch_block_data = - router.fetch_block_data(candidate).into_future().fuse(); + let fetch_block_data = router.fetch_block_data(candidate).into_future().fuse(); let fetch_extrinsic = if checking_availability { - Some( - router.fetch_extrinsic_data(candidate).into_future().fuse() - ) + Some(router.fetch_extrinsic_data(candidate).into_future().fuse()) } else { None }; @@ -161,7 +166,7 @@ impl SharedTableInner { evaluate: checking_validity, check_candidate, }) - } + }, } } else { None @@ -209,7 +214,7 @@ struct Work { fetch_block_data: future::Fuse, fetch_extrinsic: Option>, evaluate: bool, - check_candidate: C + check_candidate: C, } impl Default for StatementProducer { @@ -222,10 +227,10 @@ impl Default for StatementProducer { } impl Future for StatementProducer - where - D: Future, - E: Future, - C: FnMut(Collation) -> bool, +where + D: Future, + E: Future, + C: FnMut(Collation) -> bool, { type Item = ProducedStatements; type Error = Err; @@ -233,7 +238,11 @@ impl Future for StatementProducer fn poll(&mut self) -> Poll { let work = match self.work { Some(ref mut work) => work, - None => return Ok(Async::Ready(::std::mem::replace(&mut self.produced_statements, Default::default()))), + None => + return Ok(Async::Ready(::std::mem::replace( + &mut self.produced_statements, + Default::default(), + ))), }; if let Async::Ready(block_data) = work.fetch_block_data.poll()? { @@ -273,7 +282,10 @@ impl Future for StatementProducer }; if done { - Ok(Async::Ready(::std::mem::replace(&mut self.produced_statements, Default::default()))) + Ok(Async::Ready(::std::mem::replace( + &mut self.produced_statements, + Default::default(), + ))) } else { Ok(Async::NotReady) } @@ -300,16 +312,24 @@ impl SharedTable { /// /// Provide the key to sign with, and the parent hash of the relay chain /// block being built. - pub fn new(groups: HashMap, key: Arc<::ed25519::Pair>, parent_hash: Hash) -> Self { + pub fn new( + groups: HashMap, + key: Arc<::ed25519::Pair>, + parent_hash: Hash, + ) -> Self { SharedTable { - context: Arc::new(TableContext { groups, key, parent_hash }), + context: Arc::new(TableContext { + groups, + key, + parent_hash, + }), inner: Arc::new(Mutex::new(SharedTableInner { table: Table::default(), proposed_digest: None, checked_validity: HashSet::new(), checked_availability: HashSet::new(), trackers: Vec::new(), - })) + })), } } @@ -326,16 +346,22 @@ impl SharedTable { statement: table::SignedStatement, received_from: StatementSource, check_candidate: C, - ) -> StatementProducer<::Future, ::Future, C> { - self.inner.lock().import_statement(&*self.context, router, statement, received_from, check_candidate) + ) -> StatementProducer< + ::Future, + ::Future, + C, + > { + self.inner.lock().import_statement( + &*self.context, + router, + statement, + received_from, + check_candidate, + ) } /// Sign and import a local statement. - pub fn sign_and_import( - &self, - router: &R, - statement: table::Statement, - ) { + pub fn sign_and_import(&self, router: &R, statement: table::Statement) { let proposed_digest = match statement { GenericStatement::Candidate(ref c) => Some(c.hash()), _ => None, @@ -356,35 +382,50 @@ impl SharedTable { |_| true, ); - assert!(producer.work.is_none(), "local statement import never leads to additional work; qed"); + assert!( + producer.work.is_none(), + "local statement import never leads to additional work; qed" + ); } /// Import many statements at once. /// /// Provide an iterator yielding pairs of (statement, statement_source). 
pub fn import_statements(&self, router: &R, iterable: I) -> U - where - R: TableRouter, - I: IntoIterator, - C: FnMut(Collation) -> bool, - U: ::std::iter::FromIterator, + C: FnMut(Collation) -> bool, + U: ::std::iter::FromIterator< + StatementProducer< ::Future, ::Future, C, - >>, + >, + >, { let mut inner = self.inner.lock(); - iterable.into_iter().map(move |(statement, statement_source, check_candidate)| { - inner.import_statement(&*self.context, router, statement, statement_source, check_candidate) - }).collect() + iterable + .into_iter() + .map(move |(statement, statement_source, check_candidate)| { + inner.import_statement( + &*self.context, + router, + statement, + statement_source, + check_candidate, + ) + }) + .collect() } /// Execute a closure using a specific candidate. /// /// Deadlocks if called recursively. pub fn with_candidate(&self, digest: &Hash, f: F) -> U - where F: FnOnce(Option<&CandidateReceipt>) -> U + where + F: FnOnce(Option<&CandidateReceipt>) -> U, { let inner = self.inner.lock(); f(inner.table.get_candidate(digest)) @@ -394,7 +435,8 @@ impl SharedTable { /// /// Deadlocks if called recursively. pub fn with_proposal(&self, f: F) -> U - where F: FnOnce(Vec<&CandidateReceipt>) -> U + where + F: FnOnce(Vec<&CandidateReceipt>) -> U, { let inner = self.inner.lock(); f(inner.table.proposed_candidates(&*self.context)) @@ -417,7 +459,8 @@ impl SharedTable { /// Track includability of a given set of candidate hashes. pub fn track_includability(&self, iterable: I) -> Includable - where I: IntoIterator + where + I: IntoIterator, { let mut inner = self.inner.lock(); @@ -443,12 +486,11 @@ mod tests { struct DummyRouter; impl TableRouter for DummyRouter { type Error = (); - type FetchCandidate = ::futures::future::Empty; - type FetchExtrinsic = ::futures::future::Empty; + type FetchCandidate = ::futures::future::Empty; + type FetchExtrinsic = ::futures::future::Empty; /// Note local candidate data, making it available on the network to other validators. fn local_candidate_data(&self, _hash: Hash, _block_data: BlockData, _extrinsic: Extrinsic) { - } /// Fetch block data for a specific candidate. 
@@ -474,12 +516,15 @@ mod tests { let validity_other_key = Keyring::Bob.pair(); let parent_hash = Default::default(); - groups.insert(para_id, GroupInfo { - validity_guarantors: [local_id, validity_other].iter().cloned().collect(), - availability_guarantors: Default::default(), - needed_validity: 2, - needed_availability: 0, - }); + groups.insert( + para_id, + GroupInfo { + validity_guarantors: [local_id, validity_other].iter().cloned().collect(), + availability_guarantors: Default::default(), + needed_validity: 2, + needed_availability: 0, + }, + ); let shared_table = SharedTable::new(groups, local_key.clone(), parent_hash); @@ -494,7 +539,8 @@ mod tests { let candidate_statement = GenericStatement::Candidate(candidate); - let signature = ::sign_table_statement(&candidate_statement, &validity_other_key, &parent_hash); + let signature = + ::sign_table_statement(&candidate_statement, &validity_other_key, &parent_hash); let signed_statement = ::table::generic::SignedStatement { statement: candidate_statement, signature: signature.into(), @@ -508,8 +554,14 @@ mod tests { |_| true, ); - assert!(producer.work.is_some(), "candidate and local validity group are same"); - assert!(producer.work.as_ref().unwrap().evaluate, "should evaluate validity"); + assert!( + producer.work.is_some(), + "candidate and local validity group are same" + ); + assert!( + producer.work.as_ref().unwrap().evaluate, + "should evaluate validity" + ); } #[test] @@ -524,12 +576,15 @@ mod tests { let validity_other_key = Keyring::Bob.pair(); let parent_hash = Default::default(); - groups.insert(para_id, GroupInfo { - validity_guarantors: [validity_other].iter().cloned().collect(), - availability_guarantors: [local_id].iter().cloned().collect(), - needed_validity: 1, - needed_availability: 1, - }); + groups.insert( + para_id, + GroupInfo { + validity_guarantors: [validity_other].iter().cloned().collect(), + availability_guarantors: [local_id].iter().cloned().collect(), + needed_validity: 1, + needed_availability: 1, + }, + ); let shared_table = SharedTable::new(groups, local_key.clone(), parent_hash); @@ -544,7 +599,8 @@ mod tests { let candidate_statement = GenericStatement::Candidate(candidate); - let signature = ::sign_table_statement(&candidate_statement, &validity_other_key, &parent_hash); + let signature = + ::sign_table_statement(&candidate_statement, &validity_other_key, &parent_hash); let signed_statement = ::table::generic::SignedStatement { statement: candidate_statement, signature: signature.into(), @@ -558,8 +614,17 @@ mod tests { |_| true, ); - assert!(producer.work.is_some(), "candidate and local availability group are same"); - assert!(producer.work.as_ref().unwrap().fetch_extrinsic.is_some(), "should fetch extrinsic when guaranteeing availability"); - assert!(!producer.work.as_ref().unwrap().evaluate, "should not evaluate validity"); + assert!( + producer.work.is_some(), + "candidate and local availability group are same" + ); + assert!( + producer.work.as_ref().unwrap().fetch_extrinsic.is_some(), + "should fetch extrinsic when guaranteeing availability" + ); + assert!( + !producer.work.as_ref().unwrap().evaluate, + "should not evaluate validity" + ); } } diff --git a/polkadot/executor/src/lib.rs b/polkadot/executor/src/lib.rs index 82cd5cd47851c..257dba68f486b 100644 --- a/polkadot/executor/src/lib.rs +++ b/polkadot/executor/src/lib.rs @@ -18,6 +18,7 @@ //! executed is equivalent to the natively compiled code. 
extern crate polkadot_runtime; -#[macro_use] extern crate substrate_executor; +#[macro_use] +extern crate substrate_executor; native_executor_instance!(pub Executor, polkadot_runtime::api::dispatch, polkadot_runtime::VERSION, include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.compact.wasm")); diff --git a/polkadot/parachain/src/lib.rs b/polkadot/parachain/src/lib.rs index 2f3c064f33f36..897941fca6028 100644 --- a/polkadot/parachain/src/lib.rs +++ b/polkadot/parachain/src/lib.rs @@ -27,8 +27,9 @@ //! `validate` accepts as input two `i32` values, representing a pointer/length pair //! respectively, that encodes `ValidationParams`. //! -//! `validate` returns an `i32` which is a pointer to a little-endian 32-bit integer denoting a length. -//! Subtracting the length from the initial pointer will give a new pointer to the actual return data, +//! `validate` returns an `i32` which is a pointer to a little-endian 32-bit integer denoting a +//! length. Subtracting the length from the initial pointer will give a new pointer to the actual +//! return data, //! //! ASCII-diagram demonstrating the return data format: //! @@ -101,7 +102,7 @@ impl Slicable for ValidationParams { #[cfg_attr(feature = "std", derive(Debug))] pub struct ValidationResult { /// New head data that should be included in the relay chain state. - pub head_data: Vec + pub head_data: Vec, } impl Slicable for ValidationResult { @@ -134,7 +135,10 @@ pub fn write_result(result: ValidationResult) -> usize { let mut encoded = result.encode(); let len = encoded.len(); - assert!(len <= u32::max_value() as usize, "Len too large for parachain-WASM abi"); + assert!( + len <= u32::max_value() as usize, + "Len too large for parachain-WASM abi" + ); (len as u32).using_encoded(|s| encoded.extend(s)); // do not alter `encoded` beyond this point. may reallocate. diff --git a/polkadot/parachain/src/wasm.rs b/polkadot/parachain/src/wasm.rs index a16383acb908b..bed7f36c86447 100644 --- a/polkadot/parachain/src/wasm.rs +++ b/polkadot/parachain/src/wasm.rs @@ -22,9 +22,11 @@ use codec::Slicable; -use wasmi::{self, Module, ModuleInstance, MemoryInstance, MemoryDescriptor, MemoryRef, ModuleImportResolver}; -use wasmi::{memory_units, RuntimeValue}; use wasmi::Error as WasmError; +use wasmi::{ + self, MemoryDescriptor, MemoryInstance, MemoryRef, Module, ModuleImportResolver, ModuleInstance, +}; +use wasmi::{memory_units, RuntimeValue}; use super::{ValidationParams, ValidationResult}; @@ -63,17 +65,23 @@ impl ModuleImportResolver for Resolver { if field_name == "memory" { let effective_max = descriptor.maximum().unwrap_or(self.max_memory); if descriptor.initial() > self.max_memory || effective_max > self.max_memory { - Err(WasmError::Instantiation("Module requested too much memory".to_owned())) + Err(WasmError::Instantiation( + "Module requested too much memory".to_owned(), + )) } else { let mem = MemoryInstance::alloc( memory_units::Pages(descriptor.initial() as usize), - descriptor.maximum().map(|x| memory_units::Pages(x as usize)), + descriptor + .maximum() + .map(|x| memory_units::Pages(x as usize)), )?; *self.memory.borrow_mut() = Some(mem.clone()); Ok(mem) } } else { - Err(WasmError::Instantiation("Memory imported under unknown name".to_owned())) + Err(WasmError::Instantiation( + "Memory imported under unknown name".to_owned(), + )) } } } @@ -81,7 +89,10 @@ impl ModuleImportResolver for Resolver { /// Validate a candidate under the given validation code. 
/// /// This will fail if the validation code is not a proper parachain validation module. -pub fn validate_candidate(validation_code: &[u8], params: ValidationParams) -> Result { +pub fn validate_candidate( + validation_code: &[u8], + params: ValidationParams, +) -> Result { use wasmi::LINEAR_MEMORY_PAGE_SIZE; // maximum memory in bytes @@ -99,9 +110,12 @@ pub fn validate_candidate(validation_code: &[u8], params: ValidationParams) -> R let module = ModuleInstance::new( &module, &wasmi::ImportsBuilder::new().with_resolver("env", &module_resolver), - )?.run_start(&mut wasmi::NopExternals).map_err(WasmError::Trap)?; + )?.run_start(&mut wasmi::NopExternals) + .map_err(WasmError::Trap)?; - let memory = module_resolver.memory.borrow_mut() + let memory = module_resolver + .memory + .borrow_mut() .as_ref() .ok_or_else(|| WasmError::Instantiation("No imported memory instance".to_owned()))? .clone(); @@ -118,8 +132,8 @@ pub fn validate_candidate(validation_code: &[u8], params: ValidationParams) -> R bail!(ErrorKind::ParamsTooLarge(encoded_call_data.len())); } - let call_data_pages = (encoded_call_data.len() / LINEAR_MEMORY_PAGE_SIZE.0) + - (encoded_call_data.len() % LINEAR_MEMORY_PAGE_SIZE.0); + let call_data_pages = (encoded_call_data.len() / LINEAR_MEMORY_PAGE_SIZE.0) + + (encoded_call_data.len() % LINEAR_MEMORY_PAGE_SIZE.0); let call_data_pages = wasmi::memory_units::Pages(call_data_pages); @@ -127,8 +141,10 @@ pub fn validate_candidate(validation_code: &[u8], params: ValidationParams) -> R memory.grow(call_data_pages - memory.current_size())?; } - memory.set(0, &encoded_call_data).expect("enough memory allocated just before this; \ - copying never fails if memory is large enough; qed"); + memory.set(0, &encoded_call_data).expect( + "enough memory allocated just before this; \ + copying never fails if memory is large enough; qed", + ); (0, encoded_call_data.len() as i32) }; @@ -146,8 +162,7 @@ pub fn validate_candidate(validation_code: &[u8], params: ValidationParams) -> R let mut len_bytes = [0u8; 4]; memory.get_into(len_offset, &mut len_bytes)?; - let len = u32::decode(&mut &len_bytes[..]) - .ok_or_else(|| ErrorKind::BadReturn)?; + let len = u32::decode(&mut &len_bytes[..]).ok_or_else(|| ErrorKind::BadReturn)?; let return_offset = if len > len_offset { bail!(ErrorKind::BadReturn); @@ -160,7 +175,7 @@ pub fn validate_candidate(validation_code: &[u8], params: ValidationParams) -> R ValidationResult::decode(&mut &raw_return[..]) .ok_or_else(|| ErrorKind::BadReturn) .map_err(Into::into) - } + }, _ => bail!(ErrorKind::BadReturn), } } diff --git a/polkadot/parachain/tests/basic_add.rs b/polkadot/parachain/tests/basic_add.rs index c5fbe467a433d..adb60e5287eac 100644 --- a/polkadot/parachain/tests/basic_add.rs +++ b/polkadot/parachain/tests/basic_add.rs @@ -19,8 +19,8 @@ extern crate polkadot_parachain as parachain; extern crate tiny_keccak; +use parachain::codec::{Input, Slicable}; use parachain::ValidationParams; -use parachain::codec::{Slicable, Input}; // Head data for this parachain. 
#[derive(Default, Clone)] @@ -98,15 +98,15 @@ fn execute_good_on_parent() { post_state: hash_state(0), }; - let block_data = BlockData { - state: 0, - add: 512, - }; + let block_data = BlockData { state: 0, add: 512 }; - let ret = parachain::wasm::validate_candidate(TEST_CODE, ValidationParams { - parent_head: parent_head.encode(), - block_data: block_data.encode(), - }).unwrap(); + let ret = parachain::wasm::validate_candidate( + TEST_CODE, + ValidationParams { + parent_head: parent_head.encode(), + block_data: block_data.encode(), + }, + ).unwrap(); let new_head = HeadData::decode(&mut &ret.head_data[..]).unwrap(); @@ -133,10 +133,13 @@ fn execute_good_chain_on_parent() { add, }; - let ret = parachain::wasm::validate_candidate(TEST_CODE, ValidationParams { - parent_head: parent_head.encode(), - block_data: block_data.encode(), - }).unwrap(); + let ret = parachain::wasm::validate_candidate( + TEST_CODE, + ValidationParams { + parent_head: parent_head.encode(), + block_data: block_data.encode(), + }, + ).unwrap(); let new_head = HeadData::decode(&mut &ret.head_data[..]).unwrap(); @@ -152,7 +155,7 @@ fn execute_good_chain_on_parent() { #[test] fn execute_bad_on_parent() { - let parent_head = HeadData { + let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0), @@ -163,8 +166,11 @@ fn execute_bad_on_parent() { add: 256, }; - let _ret = parachain::wasm::validate_candidate(TEST_CODE, ValidationParams { - parent_head: parent_head.encode(), - block_data: block_data.encode(), - }).unwrap_err(); + let _ret = parachain::wasm::validate_candidate( + TEST_CODE, + ValidationParams { + parent_head: parent_head.encode(), + block_data: block_data.encode(), + }, + ).unwrap_err(); } diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index f7861a8337fbe..3a5fb293f16e5 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -17,13 +17,12 @@ //! Shareable Polkadot types. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc))] -extern crate substrate_runtime_std as rstd; extern crate substrate_primitives as primitives; extern crate substrate_runtime_primitives as runtime_primitives; +extern crate substrate_runtime_std as rstd; #[cfg(test)] extern crate substrate_serializer; @@ -39,10 +38,10 @@ extern crate serde; #[cfg(feature = "std")] use primitives::bytes; +use codec::{Input, Slicable}; use rstd::prelude::*; -use runtime_primitives::traits::BlakeTwo256; use runtime_primitives::generic; -use codec::{Input, Slicable}; +use runtime_primitives::traits::BlakeTwo256; pub mod parachain; @@ -107,7 +106,7 @@ pub type BlockId = generic::BlockId; /// A log entry in the block. #[derive(PartialEq, Eq, Clone, Default)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] -pub struct Log(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Log(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl Slicable for Log { fn decode(input: &mut I) -> Option { diff --git a/polkadot/primitives/src/parachain.rs b/polkadot/primitives/src/parachain.rs index 9f803810e4538..8fedac55dfb92 100644 --- a/polkadot/primitives/src/parachain.rs +++ b/polkadot/primitives/src/parachain.rs @@ -16,10 +16,10 @@ //! Polkadot parachain types. 
-use codec::{Slicable, Input}; -use rstd::prelude::*; -use rstd::cmp::Ordering; use super::Hash; +use codec::{Input, Slicable}; +use rstd::cmp::Ordering; +use rstd::prelude::*; #[cfg(feature = "std")] use primitives::bytes; @@ -33,11 +33,15 @@ pub type CandidateSignature = ::runtime_primitives::Ed25519Signature; pub struct Id(u32); impl From for u32 { - fn from(x: Id) -> Self { x.0 } + fn from(x: Id) -> Self { + x.0 + } } impl From for Id { - fn from(x: u32) -> Self { Id(x) } + fn from(x: u32) -> Self { + Id(x) + } } impl Id { @@ -80,11 +84,13 @@ impl Slicable for Chain { fn encode(&self) -> Vec { let mut v = Vec::new(); match *self { - Chain::Relay => { v.push(0); } + Chain::Relay => { + v.push(0); + }, Chain::Parachain(id) => { v.push(1u8); id.using_encoded(|s| v.extend(s)); - } + }, } v } @@ -218,7 +224,8 @@ impl PartialOrd for CandidateReceipt { impl Ord for CandidateReceipt { fn cmp(&self, other: &Self) -> Ordering { // TODO: compare signatures or something more sane - self.parachain_index.cmp(&other.parachain_index) + self.parachain_index + .cmp(&other.parachain_index) .then_with(|| self.head_data.cmp(&other.head_data)) } } @@ -226,7 +233,7 @@ impl Ord for CandidateReceipt { /// Parachain ingress queue message. #[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] -pub struct Message(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Message(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Consolidated ingress queue data. /// @@ -241,27 +248,27 @@ pub struct ConsolidatedIngress(pub Vec<(Id, Vec)>); /// contains everything required to validate para-block, may contain block and witness data #[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] -pub struct BlockData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct BlockData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Parachain header raw bytes wrapper type. #[derive(PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] -pub struct Header(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Header(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Parachain head data included in the chain. #[derive(PartialEq, Eq, Clone, PartialOrd, Ord)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] -pub struct HeadData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct HeadData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Parachain validation code. 
#[derive(PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] -pub struct ValidationCode(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct ValidationCode(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Activitiy bit field #[derive(PartialEq, Eq, Clone, Default)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] -pub struct Activity(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Activity(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl Slicable for Activity { fn decode(input: &mut I) -> Option { @@ -304,19 +311,19 @@ impl Slicable for Statement { Statement::Candidate(ref candidate) => { v.push(StatementKind::Candidate as u8); candidate.using_encoded(|s| v.extend(s)); - } + }, Statement::Valid(ref hash) => { v.push(StatementKind::Valid as u8); hash.using_encoded(|s| v.extend(s)); - } + }, Statement::Invalid(ref hash) => { v.push(StatementKind::Invalid as u8); hash.using_encoded(|s| v.extend(s)); - } + }, Statement::Available(ref hash) => { v.push(StatementKind::Available as u8); hash.using_encoded(|s| v.extend(s)); - } + }, } v @@ -324,18 +331,14 @@ impl Slicable for Statement { fn decode(value: &mut I) -> Option { match value.read_byte() { - Some(x) if x == StatementKind::Candidate as u8 => { - Slicable::decode(value).map(Statement::Candidate) - } - Some(x) if x == StatementKind::Valid as u8 => { - Slicable::decode(value).map(Statement::Valid) - } - Some(x) if x == StatementKind::Invalid as u8 => { - Slicable::decode(value).map(Statement::Invalid) - } - Some(x) if x == StatementKind::Available as u8 => { - Slicable::decode(value).map(Statement::Available) - } + Some(x) if x == StatementKind::Candidate as u8 => + Slicable::decode(value).map(Statement::Candidate), + Some(x) if x == StatementKind::Valid as u8 => + Slicable::decode(value).map(Statement::Valid), + Some(x) if x == StatementKind::Invalid as u8 => + Slicable::decode(value).map(Statement::Invalid), + Some(x) if x == StatementKind::Available as u8 => + Slicable::decode(value).map(Statement::Available), _ => None, } } diff --git a/polkadot/runtime/src/checked_block.rs b/polkadot/runtime/src/checked_block.rs index 4ea7bd0e88163..e751f3c21ac11 100644 --- a/polkadot/runtime/src/checked_block.rs +++ b/polkadot/runtime/src/checked_block.rs @@ -16,10 +16,10 @@ //! Typesafe block interaction. -use super::{Call, Block, TIMESTAMP_SET_POSITION, PARACHAINS_SET_POSITION}; -use timestamp::Call as TimestampCall; +use super::{Block, Call, PARACHAINS_SET_POSITION, TIMESTAMP_SET_POSITION}; use parachains::Call as ParachainsCall; use primitives::parachain::CandidateReceipt; +use timestamp::Call as TimestampCall; /// Provides a type-safe wrapper around a structurally valid block. pub struct CheckedBlock { @@ -30,23 +30,33 @@ pub struct CheckedBlock { impl CheckedBlock { /// Create a new checked block. Fails if the block is not structurally valid. 
pub fn new(block: Block) -> Result { - let has_timestamp = block.extrinsics.get(TIMESTAMP_SET_POSITION as usize).map_or(false, |xt| { - !xt.is_signed() && match xt.extrinsic.function { - Call::Timestamp(TimestampCall::set(_)) => true, - _ => false, - } - }); - - if !has_timestamp { return Err(block) } - - let has_heads = block.extrinsics.get(PARACHAINS_SET_POSITION as usize).map_or(false, |xt| { - !xt.is_signed() && match xt.extrinsic.function { - Call::Parachains(ParachainsCall::set_heads(_)) => true, - _ => false, - } - }); - - if !has_heads { return Err(block) } + let has_timestamp = block + .extrinsics + .get(TIMESTAMP_SET_POSITION as usize) + .map_or(false, |xt| { + !xt.is_signed() && match xt.extrinsic.function { + Call::Timestamp(TimestampCall::set(_)) => true, + _ => false, + } + }); + + if !has_timestamp { + return Err(block) + } + + let has_heads = block + .extrinsics + .get(PARACHAINS_SET_POSITION as usize) + .map_or(false, |xt| { + !xt.is_signed() && match xt.extrinsic.function { + Call::Parachains(ParachainsCall::set_heads(_)) => true, + _ => false, + } + }); + + if !has_heads { + return Err(block) + } Ok(CheckedBlock { inner: block, file_line: None, @@ -64,10 +74,14 @@ impl CheckedBlock { /// Extract the timestamp from the block. pub fn timestamp(&self) -> ::primitives::Timestamp { - let x = self.inner.extrinsics.get(TIMESTAMP_SET_POSITION as usize).and_then(|xt| match xt.extrinsic.function { - Call::Timestamp(TimestampCall::set(x)) => Some(x), - _ => None - }); + let x = self + .inner + .extrinsics + .get(TIMESTAMP_SET_POSITION as usize) + .and_then(|xt| match xt.extrinsic.function { + Call::Timestamp(TimestampCall::set(x)) => Some(x), + _ => None, + }); match x { Some(x) => x, @@ -77,10 +91,14 @@ impl CheckedBlock { /// Extract the parachain heads from the block. pub fn parachain_heads(&self) -> &[CandidateReceipt] { - let x = self.inner.extrinsics.get(PARACHAINS_SET_POSITION as usize).and_then(|xt| match xt.extrinsic.function { - Call::Parachains(ParachainsCall::set_heads(ref x)) => Some(&x[..]), - _ => None - }); + let x = self + .inner + .extrinsics + .get(PARACHAINS_SET_POSITION as usize) + .and_then(|xt| match xt.extrinsic.function { + Call::Parachains(ParachainsCall::set_heads(ref x)) => Some(&x[..]), + _ => None, + }); match x { Some(x) => x, @@ -89,20 +107,24 @@ impl CheckedBlock { } /// Convert into inner block. - pub fn into_inner(self) -> Block { self.inner } + pub fn into_inner(self) -> Block { + self.inner + } } impl ::std::ops::Deref for CheckedBlock { type Target = Block; - fn deref(&self) -> &Block { &self.inner } + fn deref(&self) -> &Block { + &self.inner + } } /// Assert that a block is structurally valid. May lead to panic in the future /// in case it isn't. #[macro_export] macro_rules! 
assert_polkadot_block { - ($block: expr) => { + ($block:expr) => { $crate::CheckedBlock::new_unchecked($block, file!(), line!()) - } + }; } diff --git a/polkadot/runtime/src/lib.rs b/polkadot/runtime/src/lib.rs index c6a7cfdc8365f..5ead385f02bd3 100644 --- a/polkadot/runtime/src/lib.rs +++ b/polkadot/runtime/src/lib.rs @@ -66,20 +66,25 @@ mod utils; #[cfg(feature = "std")] pub use checked_block::CheckedBlock; -pub use utils::{inherent_extrinsics, check_extrinsic}; pub use staking::address::Address as RawAddress; +pub use utils::{check_extrinsic, inherent_extrinsics}; -use primitives::{AccountId, AccountIndex, Balance, BlockNumber, Hash, Index, Log, SessionKey, Signature}; -use runtime_primitives::{generic, traits::{HasPublicAux, BlakeTwo256, Convert}}; +use primitives::{ + AccountId, AccountIndex, Balance, BlockNumber, Hash, Index, Log, SessionKey, Signature, +}; +use runtime_primitives::{ + generic, + traits::{BlakeTwo256, Convert, HasPublicAux}, +}; use version::RuntimeVersion; #[cfg(feature = "std")] pub use runtime_primitives::BuildStorage; pub use consensus::Call as ConsensusCall; -pub use timestamp::Call as TimestampCall; pub use parachains::Call as ParachainsCall; pub use primitives::Header; +pub use timestamp::Call as TimestampCall; /// The position of the timestamp set extrinsic. pub const TIMESTAMP_SET_POSITION: u32 = 0; @@ -122,7 +127,7 @@ impl version::Trait for Concrete { pub type Version = version::Module; impl HasPublicAux for Concrete { - type PublicAux = AccountId; // TODO: Option + type PublicAux = AccountId; // TODO: Option } impl system::Trait for Concrete { @@ -222,8 +227,16 @@ impl_outer_dispatch! { } /// Executive: handles dispatch to the various modules. -pub type Executive = executive::Executive; +pub type Executive = executive::Executive< + Concrete, + Block, + Staking, + Staking, + ( + ((((((), Parachains), Council), Democracy), Staking), Session), + Timestamp, + ), +>; impl_outer_config! 
{ pub struct GenesisConfig for Concrete { @@ -255,11 +268,11 @@ pub mod api { #[cfg(test)] mod tests { use super::*; + use codec::Slicable; + use runtime_primitives::traits::{Digest as DigestT, Header as HeaderT}; use substrate_primitives as primitives; - use ::codec::Slicable; use substrate_primitives::hexdisplay::HexDisplay; use substrate_serializer as ser; - use runtime_primitives::traits::{Digest as DigestT, Header as HeaderT}; type Digest = generic::Digest; #[test] @@ -269,10 +282,16 @@ mod tests { number: 67, state_root: 3.into(), extrinsics_root: 6.into(), - digest: { let mut d = Digest::default(); d.push(Log(vec![1])); d }, + digest: { + let mut d = Digest::default(); + d.push(Log(vec![1])); + d + }, }; - assert_eq!(ser::to_string_pretty(&header), r#"{ + assert_eq!( + ser::to_string_pretty(&header), + r#"{ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000005", "number": 67, "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000003", @@ -282,7 +301,8 @@ mod tests { "0x01" ] } -}"#); +}"# + ); let v = header.encode(); assert_eq!(Header::decode(&mut &v[..]).unwrap(), header); @@ -291,17 +311,21 @@ mod tests { #[test] fn block_encoding_round_trip() { let mut block = Block { - header: Header::new(1, Default::default(), Default::default(), Default::default(), Default::default()), - extrinsics: vec![ - UncheckedExtrinsic::new( - generic::Extrinsic { - function: Call::Timestamp(timestamp::Call::set(100_000_000)), - signed: Default::default(), - index: Default::default(), - }, - Default::default(), - ) - ], + header: Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + extrinsics: vec![UncheckedExtrinsic::new( + generic::Extrinsic { + function: Call::Timestamp(timestamp::Call::set(100_000_000)), + signed: Default::default(), + index: Default::default(), + }, + Default::default(), + )], }; let raw = block.encode(); @@ -327,17 +351,21 @@ mod tests { #[test] fn block_encoding_substrate_round_trip() { let mut block = Block { - header: Header::new(1, Default::default(), Default::default(), Default::default(), Default::default()), - extrinsics: vec![ - UncheckedExtrinsic::new( - generic::Extrinsic { - function: Call::Timestamp(timestamp::Call::set(100_000_000)), - signed: Default::default(), - index: Default::default(), - }, - Default::default(), - ) - ], + header: Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + extrinsics: vec![UncheckedExtrinsic::new( + generic::Extrinsic { + function: Call::Timestamp(timestamp::Call::set(100_000_000)), + signed: Default::default(), + index: Default::default(), + }, + Default::default(), + )], }; block.extrinsics.push(UncheckedExtrinsic::new( @@ -346,7 +374,7 @@ mod tests { signed: Default::default(), index: 10101, }, - Default::default() + Default::default(), )); let raw = block.encode(); @@ -365,7 +393,7 @@ mod tests { index: 999, function: Call::Timestamp(TimestampCall::set(135135)), }, - runtime_primitives::Ed25519Signature(primitives::hash::H512([0; 64])).into() + runtime_primitives::Ed25519Signature(primitives::hash::H512([0; 64])).into(), ); // 6f000000 @@ -384,12 +412,12 @@ mod tests { #[test] fn serialize_checked() { let xt = Extrinsic { - signed: AccountId::from(hex!["0d71d1a9cad6f2ab773435a7dec1bac019994d05d1dd5eb3108211dcf25c9d1e"]).into(), + signed: AccountId::from(hex![ + "0d71d1a9cad6f2ab773435a7dec1bac019994d05d1dd5eb3108211dcf25c9d1e" + ]).into(), index: 0, function: 
Call::CouncilVoting(council::voting::Call::propose(Box::new( - PrivCall::Consensus(consensus::PrivCall::set_code( - vec![] - )) + PrivCall::Consensus(consensus::PrivCall::set_code(vec![])), ))), }; let v = Slicable::encode(&xt); diff --git a/polkadot/runtime/src/parachains.rs b/polkadot/runtime/src/parachains.rs index 3625633adf392..e4753fa67d9f4 100644 --- a/polkadot/runtime/src/parachains.rs +++ b/polkadot/runtime/src/parachains.rs @@ -16,16 +16,16 @@ //! Main parachains logic. For now this is just the determination of which validators do what. +use codec::{Joiner, Slicable}; use primitives; use rstd::prelude::*; -use codec::{Slicable, Joiner}; -use runtime_primitives::traits::{Executable, RefInto, MaybeEmpty}; -use primitives::parachain::{Id, Chain, DutyRoster, CandidateReceipt}; -use {system, session}; +use primitives::parachain::{CandidateReceipt, Chain, DutyRoster, Id}; +use runtime_primitives::traits::{Executable, MaybeEmpty, RefInto}; +use {session, system}; -use substrate_runtime_support::{Hashable, StorageValue, StorageMap}; use substrate_runtime_support::dispatch::Result; +use substrate_runtime_support::{Hashable, StorageMap, StorageValue}; #[cfg(any(feature = "std", test))] use rstd::marker::PhantomData; @@ -71,20 +71,29 @@ impl Module { let parachains = Self::active_parachains(); let parachain_count = parachains.len(); let validator_count = >::validator_count() as usize; - let validators_per_parachain = if parachain_count != 0 { (validator_count - 1) / parachain_count } else { 0 }; - - let mut roles_val = (0..validator_count).map(|i| match i { - i if i < parachain_count * validators_per_parachain => { - let idx = i / validators_per_parachain; - Chain::Parachain(parachains[idx].clone()) - } - _ => Chain::Relay, - }).collect::>(); + let validators_per_parachain = if parachain_count != 0 { + (validator_count - 1) / parachain_count + } else { + 0 + }; + + let mut roles_val = (0..validator_count) + .map(|i| match i { + i if i < parachain_count * validators_per_parachain => { + let idx = i / validators_per_parachain; + Chain::Parachain(parachains[idx].clone()) + }, + _ => Chain::Relay, + }) + .collect::>(); let mut roles_gua = roles_val.clone(); let random_seed = system::Module::::random_seed(); - let mut seed = random_seed.to_vec().and(b"validator_role_pairs").blake2_256(); + let mut seed = random_seed + .to_vec() + .and(b"validator_role_pairs") + .blake2_256(); // shuffle for i in 0..(validator_count - 1) { @@ -95,8 +104,10 @@ impl Module { let remaining = (validator_count - i) as usize; // 4 * 2 32-bit ints per 256-bit seed. 
- let val_index = u32::decode(&mut &seed[offset..offset + 4]).expect("using 4 bytes for a 32-bit quantity") as usize % remaining; - let gua_index = u32::decode(&mut &seed[offset + 4..offset + 8]).expect("using 4 bytes for a 32-bit quantity") as usize % remaining; + let val_index = u32::decode(&mut &seed[offset..offset + 4]) + .expect("using 4 bytes for a 32-bit quantity") as usize % remaining; + let gua_index = u32::decode(&mut &seed[offset + 4..offset + 8]) + .expect("using 4 bytes for a 32-bit quantity") as usize % remaining; if offset == 24 { // into the last 8 bytes - rehash to gather new entropy @@ -132,8 +143,10 @@ impl Module { pub fn deregister_parachain(id: Id) { let mut parachains = Self::active_parachains(); match parachains.binary_search(&id) { - Ok(idx) => { parachains.remove(idx); } - Err(_) => {} + Ok(idx) => { + parachains.remove(idx); + }, + Err(_) => {}, } >::remove(id); @@ -143,11 +156,13 @@ impl Module { fn set_heads(aux: &::PublicAux, heads: Vec) -> Result { ensure!(aux.is_empty(), "set_heads must not be signed"); - ensure!(!>::exists(), "Parachain heads must be updated only once in the block"); + ensure!( + !>::exists(), + "Parachain heads must be updated only once in the block" + ); ensure!( >::extrinsic_index() == T::SET_POSITION, - "Parachain heads update extrinsic must be at position {} in the block" -// , T::SET_POSITION + "Parachain heads update extrinsic must be at position {} in the block" /* , T::SET_POSITION */ ); let active_parachains = Self::active_parachains(); @@ -157,8 +172,9 @@ impl Module { for head in &heads { ensure!( iter.find(|&p| p == &head.parachain_index).is_some(), - "Submitted candidate for unregistered or out-of-order parachain {}" -// , head.parachain_index.into_inner() + "Submitted candidate for unregistered or out-of-order parachain {}" /* , head. + * parachain_index. 
+ * into_inner() */ ); } @@ -175,7 +191,10 @@ impl Module { impl Executable for Module { fn execute() { - assert!(::DidUpdate::take(), "Parachain heads must be updated once in the block"); + assert!( + ::DidUpdate::take(), + "Parachain heads must be updated once in the block" + ); } } @@ -203,17 +222,22 @@ impl Default for GenesisConfig { } #[cfg(any(feature = "std", test))] -impl runtime_primitives::BuildStorage for GenesisConfig -{ +impl runtime_primitives::BuildStorage for GenesisConfig { fn build_storage(mut self) -> ::std::result::Result { - use std::collections::HashMap; - use runtime_io::twox_128; use codec::Slicable; + use runtime_io::twox_128; + use std::collections::HashMap; - self.parachains.sort_unstable_by_key(|&(ref id, _)| id.clone()); + self.parachains + .sort_unstable_by_key(|&(ref id, _)| id.clone()); self.parachains.dedup_by_key(|&mut (ref id, _)| id.clone()); - let only_ids: Vec<_> = self.parachains.iter().map(|&(ref id, _)| id).cloned().collect(); + let only_ids: Vec<_> = self + .parachains + .iter() + .map(|&(ref id, _)| id) + .cloned() + .collect(); let mut map: HashMap<_, _> = map![ twox_128(>::key()).to_vec() => only_ids.encode() @@ -232,10 +256,10 @@ impl runtime_primitives::BuildStorage for GenesisConfig mod tests { use super::*; use runtime_io::with_externalities; - use substrate_primitives::H256; - use runtime_primitives::BuildStorage; - use runtime_primitives::traits::{HasPublicAux, Identity, BlakeTwo256}; use runtime_primitives::testing::{Digest, Header}; + use runtime_primitives::traits::{BlakeTwo256, HasPublicAux, Identity}; + use runtime_primitives::BuildStorage; + use substrate_primitives::H256; use {consensus, timestamp}; #[derive(Clone, Eq, PartialEq)] @@ -273,79 +297,136 @@ mod tests { type Parachains = Module; fn new_test_ext(parachains: Vec<(Id, Vec)>) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); - t.extend(consensus::GenesisConfig::{ - code: vec![], - authorities: vec![1, 2, 3], - }.build_storage().unwrap()); - t.extend(session::GenesisConfig::{ - session_length: 1000, - validators: vec![1, 2, 3, 4, 5, 6, 7, 8], - broken_percent_late: 100, - }.build_storage().unwrap()); - t.extend(GenesisConfig::{ - parachains: parachains, - phantom: PhantomData, - }.build_storage().unwrap()); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + t.extend( + consensus::GenesisConfig:: { + code: vec![], + authorities: vec![1, 2, 3], + }.build_storage() + .unwrap(), + ); + t.extend( + session::GenesisConfig:: { + session_length: 1000, + validators: vec![1, 2, 3, 4, 5, 6, 7, 8], + broken_percent_late: 100, + }.build_storage() + .unwrap(), + ); + t.extend( + GenesisConfig:: { + parachains, + phantom: PhantomData, + }.build_storage() + .unwrap(), + ); t } #[test] fn active_parachains_should_work() { - let parachains = vec![ - (5u32.into(), vec![1,2,3]), - (100u32.into(), vec![4,5,6]), - ]; + let parachains = vec![(5u32.into(), vec![1, 2, 3]), (100u32.into(), vec![4, 5, 6])]; with_externalities(&mut new_test_ext(parachains), || { - assert_eq!(Parachains::active_parachains(), vec![5u32.into(), 100u32.into()]); - assert_eq!(Parachains::parachain_code(&5u32.into()), Some(vec![1,2,3])); - assert_eq!(Parachains::parachain_code(&100u32.into()), Some(vec![4,5,6])); + assert_eq!( + Parachains::active_parachains(), + vec![5u32.into(), 100u32.into()] + ); + assert_eq!( + Parachains::parachain_code(&5u32.into()), + Some(vec![1, 2, 3]) + ); + assert_eq!( + 
Parachains::parachain_code(&100u32.into()), + Some(vec![4, 5, 6]) + ); }); } #[test] fn register_deregister() { - let parachains = vec![ - (5u32.into(), vec![1,2,3]), - (100u32.into(), vec![4,5,6]), - ]; + let parachains = vec![(5u32.into(), vec![1, 2, 3]), (100u32.into(), vec![4, 5, 6])]; with_externalities(&mut new_test_ext(parachains), || { - assert_eq!(Parachains::active_parachains(), vec![5u32.into(), 100u32.into()]); + assert_eq!( + Parachains::active_parachains(), + vec![5u32.into(), 100u32.into()] + ); - assert_eq!(Parachains::parachain_code(&5u32.into()), Some(vec![1,2,3])); - assert_eq!(Parachains::parachain_code(&100u32.into()), Some(vec![4,5,6])); + assert_eq!( + Parachains::parachain_code(&5u32.into()), + Some(vec![1, 2, 3]) + ); + assert_eq!( + Parachains::parachain_code(&100u32.into()), + Some(vec![4, 5, 6]) + ); - Parachains::register_parachain(99u32.into(), vec![7,8,9], vec![1, 1, 1]); + Parachains::register_parachain(99u32.into(), vec![7, 8, 9], vec![1, 1, 1]); - assert_eq!(Parachains::active_parachains(), vec![5u32.into(), 99u32.into(), 100u32.into()]); - assert_eq!(Parachains::parachain_code(&99u32.into()), Some(vec![7,8,9])); + assert_eq!( + Parachains::active_parachains(), + vec![5u32.into(), 99u32.into(), 100u32.into()] + ); + assert_eq!( + Parachains::parachain_code(&99u32.into()), + Some(vec![7, 8, 9]) + ); Parachains::deregister_parachain(5u32.into()); - assert_eq!(Parachains::active_parachains(), vec![99u32.into(), 100u32.into()]); + assert_eq!( + Parachains::active_parachains(), + vec![99u32.into(), 100u32.into()] + ); assert_eq!(Parachains::parachain_code(&5u32.into()), None); }); } #[test] fn duty_roster_works() { - let parachains = vec![ - (0u32.into(), vec![]), - (1u32.into(), vec![]), - ]; + let parachains = vec![(0u32.into(), vec![]), (1u32.into(), vec![])]; with_externalities(&mut new_test_ext(parachains), || { let check_roster = |duty_roster: &DutyRoster| { assert_eq!(duty_roster.validator_duty.len(), 8); assert_eq!(duty_roster.guarantor_duty.len(), 8); for i in (0..2).map(Id::from) { - assert_eq!(duty_roster.validator_duty.iter().filter(|&&j| j == Chain::Parachain(i)).count(), 3); - assert_eq!(duty_roster.guarantor_duty.iter().filter(|&&j| j == Chain::Parachain(i)).count(), 3); + assert_eq!( + duty_roster + .validator_duty + .iter() + .filter(|&&j| j == Chain::Parachain(i)) + .count(), + 3 + ); + assert_eq!( + duty_roster + .guarantor_duty + .iter() + .filter(|&&j| j == Chain::Parachain(i)) + .count(), + 3 + ); } - assert_eq!(duty_roster.validator_duty.iter().filter(|&&j| j == Chain::Relay).count(), 2); - assert_eq!(duty_roster.guarantor_duty.iter().filter(|&&j| j == Chain::Relay).count(), 2); + assert_eq!( + duty_roster + .validator_duty + .iter() + .filter(|&&j| j == Chain::Relay) + .count(), + 2 + ); + assert_eq!( + duty_roster + .guarantor_duty + .iter() + .filter(|&&j| j == Chain::Relay) + .count(), + 2 + ); }; system::Module::::set_random_seed([0u8; 32].into()); @@ -357,7 +438,6 @@ mod tests { check_roster(&duty_roster_1); assert!(duty_roster_0 != duty_roster_1); - system::Module::::set_random_seed([2u8; 32].into()); let duty_roster_2 = Parachains::calculate_duty_roster(); check_roster(&duty_roster_2); diff --git a/polkadot/runtime/src/utils.rs b/polkadot/runtime/src/utils.rs index 4c16e215bab7e..4e75cfa3d196b 100644 --- a/polkadot/runtime/src/utils.rs +++ b/polkadot/runtime/src/utils.rs @@ -16,15 +16,18 @@ //! Utils for block interaction. 
-use rstd::prelude::*; -use super::{Call, UncheckedExtrinsic, Extrinsic, Staking}; -use runtime_primitives::traits::{Checkable, AuxLookup}; +use super::{Call, Extrinsic, Staking, UncheckedExtrinsic}; +use parachains::Call as ParachainsCall; use primitives::parachain::CandidateReceipt; +use rstd::prelude::*; +use runtime_primitives::traits::{AuxLookup, Checkable}; use timestamp::Call as TimestampCall; -use parachains::Call as ParachainsCall; /// Produces the list of inherent extrinsics. -pub fn inherent_extrinsics(timestamp: ::primitives::Timestamp, parachain_heads: Vec) -> Vec { +pub fn inherent_extrinsics( + timestamp: ::primitives::Timestamp, + parachain_heads: Vec, +) -> Vec { vec![ UncheckedExtrinsic::new( Extrinsic { @@ -32,7 +35,7 @@ pub fn inherent_extrinsics(timestamp: ::primitives::Timestamp, parachain_heads: function: Call::Timestamp(TimestampCall::set(timestamp)), index: 0, }, - Default::default() + Default::default(), ), UncheckedExtrinsic::new( Extrinsic { @@ -40,8 +43,8 @@ pub fn inherent_extrinsics(timestamp: ::primitives::Timestamp, parachain_heads: function: Call::Parachains(ParachainsCall::set_heads(parachain_heads)), index: 0, }, - Default::default() - ) + Default::default(), + ), ] } diff --git a/polkadot/service/src/chain_spec.rs b/polkadot/service/src/chain_spec.rs index 3c323f22f8ade..b34a03dbe079a 100644 --- a/polkadot/service/src/chain_spec.rs +++ b/polkadot/service/src/chain_spec.rs @@ -17,14 +17,19 @@ //! Polkadot chain configurations. use ed25519; +use polkadot_runtime::{ + ConsensusConfig, CouncilConfig, DemocracyConfig, GenesisConfig, SessionConfig, StakingConfig, + TimestampConfig, +}; +use primitives::{ + storage::{StorageData, StorageKey}, + AuthorityId, +}; +use runtime_primitives::{BuildStorage, StorageMap}; +use serde_json as json; use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; -use primitives::{AuthorityId, storage::{StorageKey, StorageData}}; -use runtime_primitives::{BuildStorage, StorageMap}; -use polkadot_runtime::{GenesisConfig, ConsensusConfig, CouncilConfig, DemocracyConfig, - SessionConfig, StakingConfig, TimestampConfig}; -use serde_json as json; enum GenesisSource { File(PathBuf), @@ -42,11 +47,13 @@ impl GenesisSource { match *self { GenesisSource::File(ref path) => { let file = File::open(path).map_err(|e| format!("Error opening spec file: {}", e))?; - let genesis: GenesisContainer = json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; + let genesis: GenesisContainer = + json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(genesis.genesis) }, GenesisSource::Embedded(buf) => { - let genesis: GenesisContainer = json::from_reader(buf).map_err(|e| format!("Error parsing embedded file: {}", e))?; + let genesis: GenesisContainer = json::from_reader(buf) + .map_err(|e| format!("Error parsing embedded file: {}", e))?; Ok(genesis.genesis) }, GenesisSource::Factory(f) => Ok(f()), @@ -116,7 +123,9 @@ impl ChainSpec { pub fn to_json(self, raw: bool) -> Result { let genesis = match (raw, self.genesis.resolve()?) { (true, Genesis::Runtime(g)) => { - let storage = g.build_storage()?.into_iter() + let storage = g + .build_storage()? 
+ .into_iter() .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(); @@ -124,10 +133,15 @@ impl ChainSpec { }, (_, genesis) => genesis, }; - let mut spec = json::to_value(self.spec).map_err(|e| format!("Error generating spec json: {}", e))?; + let mut spec = + json::to_value(self.spec).map_err(|e| format!("Error generating spec json: {}", e))?; { let map = spec.as_object_mut().expect("spec is an object"); - map.insert("genesis".to_owned(), json::to_value(genesis).map_err(|e| format!("Error generating genesis json: {}", e))?); + map.insert( + "genesis".to_owned(), + json::to_value(genesis) + .map_err(|e| format!("Error generating genesis json: {}", e))?, + ); } json::to_string_pretty(&spec).map_err(|e| format!("Error generating spec json: {}", e)) } @@ -143,23 +157,30 @@ impl ChainSpec { hex!["063d7787ebca768b7445dfebe7d62cbb1625ff4dba288ea34488da266dd6dca5"].into(), hex!["8101764f45778d4980dadaceee6e8af2517d3ab91ac9bec9cd1714fa5994081c"].into(), ]; - let endowed_accounts = vec![ - hex!["f295940fa750df68a686fcf4abd4111c8a9c5a5a5a83c4c8639c451a94a7adfd"].into(), - ]; + let endowed_accounts = + vec![hex!["f295940fa750df68a686fcf4abd4111c8a9c5a5a5a83c4c8639c451a94a7adfd"].into()]; Genesis::Runtime(GenesisConfig { consensus: Some(ConsensusConfig { - code: include_bytes!("../../runtime/wasm/genesis.wasm").to_vec(), // TODO change + code: include_bytes!("../../runtime/wasm/genesis.wasm").to_vec(), // TODO change authorities: initial_authorities.clone(), }), system: None, session: Some(SessionConfig { - validators: initial_authorities.iter().cloned().map(Into::into).collect(), - session_length: 60, // that's 5 minutes per session. + validators: initial_authorities + .iter() + .cloned() + .map(Into::into) + .collect(), + session_length: 60, // that's 5 minutes per session. broken_percent_late: 50, }), staking: Some(StakingConfig { current_era: 0, - intentions: initial_authorities.iter().cloned().map(Into::into).collect(), + intentions: initial_authorities + .iter() + .cloned() + .map(Into::into) + .collect(), transaction_base_fee: 100, transaction_byte_fee: 1, existential_deposit: 500, @@ -169,34 +190,39 @@ impl ChainSpec { reclaim_rebate: 0, early_era_slash: 10000, session_reward: 100, - balances: endowed_accounts.iter().map(|&k|(k, 1u128 << 60)).collect(), + balances: endowed_accounts.iter().map(|&k| (k, 1u128 << 60)).collect(), validator_count: 12, - sessions_per_era: 12, // 1 hour per era - bonding_duration: 24, // 1 day per bond. + sessions_per_era: 12, // 1 hour per era + bonding_duration: 24, // 1 day per bond. }), democracy: Some(DemocracyConfig { - launch_period: 12 * 60 * 24, // 1 day per public referendum - voting_period: 12 * 60 * 24 * 3, // 3 days to discuss & vote on an active referendum - minimum_deposit: 5000, // 12000 as the minimum deposit for a referendum + launch_period: 12 * 60 * 24, // 1 day per public referendum + voting_period: 12 * 60 * 24 * 3, // 3 days to discuss & vote on an active referendum + minimum_deposit: 5000, // 12000 as the minimum deposit for a referendum }), council: Some(CouncilConfig { active_council: vec![], - candidacy_bond: 5000, // 5000 to become a council candidate - voter_bond: 1000, // 1000 down to vote for a candidate - present_slash_per_voter: 1, // slash by 1 per voter for an invalid presentation. - carry_count: 6, // carry over the 6 runners-up to the next council election - presentation_duration: 12 * 60 * 24, // one day for presenting winners. - approval_voting_period: 12 * 60 * 24 * 2, // two days period between possible council elections. 
- term_duration: 12 * 60 * 24 * 24, // 24 day term duration for the council. - desired_seats: 0, // start with no council: we'll raise this once the stake has been dispersed a bit. - inactive_grace_period: 1, // one addition vote should go by before an inactive voter can be reaped. + candidacy_bond: 5000, // 5000 to become a council candidate + voter_bond: 1000, // 1000 down to vote for a candidate + present_slash_per_voter: 1, /* slash by 1 per voter for an invalid + * presentation. */ + carry_count: 6, // carry over the 6 runners-up to the next council election + presentation_duration: 12 * 60 * 24, // one day for presenting winners. + approval_voting_period: 12 * 60 * 24 * 2, // two days period between possible council elections. + term_duration: 12 * 60 * 24 * 24, // 24 day term duration for the council. + desired_seats: 0, /* start with no council: we'll raise this once the stake + * has + * been dispersed a bit. */ + inactive_grace_period: 1, /* one addition vote should go by before an inactive + * voter can be reaped. */ - cooloff_period: 12 * 60 * 24 * 4, // 4 day cooling off period if council member vetoes a proposal. + cooloff_period: 12 * 60 * 24 * 4, /* 4 day cooling off period if council member + * vetoes a proposal. */ voting_period: 12 * 60 * 24, // 1 day voting period for council members. }), parachains: Some(Default::default()), timestamp: Some(TimestampConfig { - period: 5, // 5 second block time. + period: 5, // 5 second block time. }), }) } @@ -208,34 +234,65 @@ impl ChainSpec { "enode://c831ec9011d2c02d2c4620fc88db6d897a40d2f88fd75f47b9e4cf3b243999acb6f01b7b7343474650b34eeb1363041a422a91f1fc3850e43482983ee15aa582@104.211.48.247:30333".into(), ]; ChainSpec { - spec: ChainSpecFile { name: "PoC-2 Testnet".to_owned(), boot_nodes }, + spec: ChainSpecFile { + name: "PoC-2 Testnet".to_owned(), + boot_nodes, + }, genesis: GenesisSource::Factory(Self::poc_2_testnet_config_genesis), } } fn testnet_genesis(initial_authorities: Vec) -> Genesis { let endowed_accounts = vec![ - ed25519::Pair::from_seed(b"Alice ").public().0.into(), - ed25519::Pair::from_seed(b"Bob ").public().0.into(), - ed25519::Pair::from_seed(b"Charlie ").public().0.into(), - ed25519::Pair::from_seed(b"Dave ").public().0.into(), - ed25519::Pair::from_seed(b"Eve ").public().0.into(), - ed25519::Pair::from_seed(b"Ferdie ").public().0.into(), + ed25519::Pair::from_seed(b"Alice ") + .public() + .0 + .into(), + ed25519::Pair::from_seed(b"Bob ") + .public() + .0 + .into(), + ed25519::Pair::from_seed(b"Charlie ") + .public() + .0 + .into(), + ed25519::Pair::from_seed(b"Dave ") + .public() + .0 + .into(), + ed25519::Pair::from_seed(b"Eve ") + .public() + .0 + .into(), + ed25519::Pair::from_seed(b"Ferdie ") + .public() + .0 + .into(), ]; Genesis::Runtime(GenesisConfig { consensus: Some(ConsensusConfig { - code: include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.compact.wasm").to_vec(), + code: include_bytes!( + "../../runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.compact.wasm" + ).to_vec(), authorities: initial_authorities.clone(), }), system: None, session: Some(SessionConfig { - validators: initial_authorities.iter().cloned().map(Into::into).collect(), + validators: initial_authorities + .iter() + .cloned() + .map(Into::into) + .collect(), session_length: 10, broken_percent_late: 30, }), staking: Some(StakingConfig { current_era: 0, - intentions: initial_authorities.iter().cloned().map(Into::into).collect(), + intentions: initial_authorities + .iter() + 
.cloned() + .map(Into::into) + .collect(), transaction_base_fee: 1, transaction_byte_fee: 0, existential_deposit: 500, @@ -243,7 +300,10 @@ impl ChainSpec { creation_fee: 0, contract_fee: 0, reclaim_rebate: 0, - balances: endowed_accounts.iter().map(|&k|(k, (1u128 << 60))).collect(), + balances: endowed_accounts + .iter() + .map(|&k| (k, (1u128 << 60))) + .collect(), validator_count: 2, sessions_per_era: 5, bonding_duration: 2, @@ -256,7 +316,11 @@ impl ChainSpec { minimum_deposit: 10, }), council: Some(CouncilConfig { - active_council: endowed_accounts.iter().filter(|a| initial_authorities.iter().find(|&b| a.0 == b.0).is_none()).map(|a| (a.clone(), 1000000)).collect(), + active_council: endowed_accounts + .iter() + .filter(|a| initial_authorities.iter().find(|&b| a.0 == b.0).is_none()) + .map(|a| (a.clone(), 1000000)) + .collect(), candidacy_bond: 10, voter_bond: 2, present_slash_per_voter: 1, @@ -272,36 +336,48 @@ impl ChainSpec { }), parachains: Some(Default::default()), timestamp: Some(TimestampConfig { - period: 5, // 5 second block time. + period: 5, // 5 second block time. }), }) } fn development_config_genesis() -> Genesis { Self::testnet_genesis(vec![ - ed25519::Pair::from_seed(b"Alice ").public().into(), + ed25519::Pair::from_seed(b"Alice ") + .public() + .into(), ]) } /// Development config (single validator Alice) pub fn development_config() -> Self { ChainSpec { - spec: ChainSpecFile { name: "Development".to_owned(), boot_nodes: vec![] }, + spec: ChainSpecFile { + name: "Development".to_owned(), + boot_nodes: vec![], + }, genesis: GenesisSource::Factory(Self::development_config_genesis), } } fn local_testnet_genesis() -> Genesis { Self::testnet_genesis(vec![ - ed25519::Pair::from_seed(b"Alice ").public().into(), - ed25519::Pair::from_seed(b"Bob ").public().into(), + ed25519::Pair::from_seed(b"Alice ") + .public() + .into(), + ed25519::Pair::from_seed(b"Bob ") + .public() + .into(), ]) } /// Local testnet config (multivalidator Alice + Bob) pub fn local_testnet_config() -> Self { ChainSpec { - spec: ChainSpecFile { name: "Local Testnet".to_owned(), boot_nodes: vec![] }, + spec: ChainSpecFile { + name: "Local Testnet".to_owned(), + boot_nodes: vec![], + }, genesis: GenesisSource::Factory(Self::local_testnet_genesis), } } diff --git a/polkadot/service/src/components.rs b/polkadot/service/src/components.rs index b6709b111fac4..726bc5285e60a 100644 --- a/polkadot/service/src/components.rs +++ b/polkadot/service/src/components.rs @@ -16,22 +16,22 @@ //! Polkadot service components. -use std::collections::HashMap; -use std::sync::Arc; +use chain_spec::ChainSpec; use client::{self, Client}; use client_db; use codec::{self, Slicable}; use consensus; +use error; use keystore::Store as Keystore; use network; use polkadot_api; use polkadot_executor::Executor as LocalDispatch; use polkadot_primitives::{Block, BlockId, Hash}; use state_machine; +use std::collections::HashMap; +use std::sync::Arc; use substrate_executor::NativeExecutor; use transaction_pool::{self, TransactionPool}; -use error; -use chain_spec::ChainSpec; /// Code executor. pub type CodeExecutor = NativeExecutor; @@ -48,19 +48,40 @@ pub trait Components { type Executor: 'static + client::CallExecutor + Send + Sync; /// Create client. 
- fn build_client(&self, settings: client_db::DatabaseSettings, executor: CodeExecutor, chain_spec: &ChainSpec) - -> Result<(Arc>, Option>>>), error::Error>; + fn build_client( + &self, + settings: client_db::DatabaseSettings, + executor: CodeExecutor, + chain_spec: &ChainSpec, + ) -> Result< + ( + Arc>, + Option>>>, + ), + error::Error, + >; /// Create api. - fn build_api(&self, client: Arc>) -> Arc; + fn build_api( + &self, + client: Arc>, + ) -> Arc; /// Create network transaction pool adapter. - fn build_network_tx_pool(&self, client: Arc>, tx_pool: Arc>) - -> Arc>; + fn build_network_tx_pool( + &self, + client: Arc>, + tx_pool: Arc>, + ) -> Arc>; /// Create consensus service. - fn build_consensus(&self, client: Arc>, network: Arc>, tx_pool: Arc>, keystore: &Keystore) - -> Result, error::Error>; + fn build_consensus( + &self, + client: Arc>, + network: Arc>, + tx_pool: Arc>, + keystore: &Keystore, + ) -> Result, error::Error>; } /// Components for full Polkadot service. @@ -72,19 +93,39 @@ pub struct FullComponents { impl Components for FullComponents { type Backend = client_db::Backend; type Api = Client; - type Executor = client::LocalCallExecutor, NativeExecutor>; - - fn build_client(&self, db_settings: client_db::DatabaseSettings, executor: CodeExecutor, chain_spec: &ChainSpec) - -> Result<(Arc>, Option>>>), error::Error> { - Ok((Arc::new(client_db::new_client(db_settings, executor, chain_spec)?), None)) + type Executor = + client::LocalCallExecutor, NativeExecutor>; + + fn build_client( + &self, + db_settings: client_db::DatabaseSettings, + executor: CodeExecutor, + chain_spec: &ChainSpec, + ) -> Result< + ( + Arc>, + Option>>>, + ), + error::Error, + > { + Ok(( + Arc::new(client_db::new_client(db_settings, executor, chain_spec)?), + None, + )) } - fn build_api(&self, client: Arc>) -> Arc { + fn build_api( + &self, + client: Arc>, + ) -> Arc { client } - fn build_network_tx_pool(&self, client: Arc>, pool: Arc>) - -> Arc> { + fn build_network_tx_pool( + &self, + client: Arc>, + pool: Arc>, + ) -> Arc> { Arc::new(TransactionPoolAdapter { imports_external_transactions: true, pool, @@ -92,10 +133,15 @@ impl Components for FullComponents { }) } - fn build_consensus(&self, client: Arc>, network: Arc>, tx_pool: Arc>, keystore: &Keystore) - -> Result, error::Error> { + fn build_consensus( + &self, + client: Arc>, + network: Arc>, + tx_pool: Arc>, + keystore: &Keystore, + ) -> Result, error::Error> { if !self.is_validator { - return Ok(None); + return Ok(None) } // Load the first available key @@ -116,29 +162,57 @@ impl Components for FullComponents { pub struct LightComponents; impl Components for LightComponents { - type Backend = client::light::backend::Backend, network::OnDemand>>; + type Backend = client::light::backend::Backend< + client_db::light::LightStorage, + network::OnDemand>, + >; type Api = polkadot_api::light::RemotePolkadotApiWrapper; type Executor = client::light::call_executor::RemoteCallExecutor< - client::light::blockchain::Blockchain, network::OnDemand>>, - network::OnDemand>>; - - fn build_client(&self, db_settings: client_db::DatabaseSettings, executor: CodeExecutor, spec: &ChainSpec) - -> Result<(Arc>, Option>>>), error::Error> { + client::light::blockchain::Blockchain< + client_db::light::LightStorage, + network::OnDemand>, + >, + network::OnDemand>, + >; + + fn build_client( + &self, + db_settings: client_db::DatabaseSettings, + executor: CodeExecutor, + spec: &ChainSpec, + ) -> Result< + ( + Arc>, + Option>>>, + ), + error::Error, + > { let db_storage = 
client_db::light::LightStorage::new(db_settings)?; let light_blockchain = client::light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new(client::light::new_fetch_checker(light_blockchain.clone(), executor)); + let fetch_checker = Arc::new(client::light::new_fetch_checker( + light_blockchain.clone(), + executor, + )); let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); let client = client::light::new_light(client_backend, fetcher.clone(), spec)?; Ok((Arc::new(client), Some(fetcher))) } - fn build_api(&self, client: Arc>) -> Arc { - Arc::new(polkadot_api::light::RemotePolkadotApiWrapper(client.clone())) + fn build_api( + &self, + client: Arc>, + ) -> Arc { + Arc::new(polkadot_api::light::RemotePolkadotApiWrapper( + client.clone(), + )) } - fn build_network_tx_pool(&self, client: Arc>, pool: Arc>) - -> Arc> { + fn build_network_tx_pool( + &self, + client: Arc>, + pool: Arc>, + ) -> Arc> { Arc::new(TransactionPoolAdapter { imports_external_transactions: false, pool, @@ -146,28 +220,40 @@ impl Components for LightComponents { }) } - fn build_consensus(&self, _client: Arc>, _network: Arc>, _tx_pool: Arc>, _keystore: &Keystore) - -> Result, error::Error> { + fn build_consensus( + &self, + _client: Arc>, + _network: Arc>, + _tx_pool: Arc>, + _keystore: &Keystore, + ) -> Result, error::Error> { Ok(None) } } /// Transaction pool adapter. -pub struct TransactionPoolAdapter where A: Send + Sync, E: Send + Sync { +pub struct TransactionPoolAdapter +where + A: Send + Sync, + E: Send + Sync, +{ imports_external_transactions: bool, pool: Arc>, client: Arc>, } impl TransactionPoolAdapter - where - A: Send + Sync, - B: client::backend::Backend + Send + Sync, - E: client::CallExecutor + Send + Sync, - client::error::Error: From<<>::State as state_machine::backend::Backend>::Error>, +where + A: Send + Sync, + B: client::backend::Backend + Send + Sync, + E: client::CallExecutor + Send + Sync, + client::error::Error: From< + <>::State as state_machine::backend::Backend>::Error, + >, { fn best_block_id(&self) -> Option { - self.client.info() + self.client + .info() .map(|info| BlockId::hash(info.chain.best_hash)) .map_err(|e| { debug!("Error getting best block: {:?}", e); @@ -177,32 +263,37 @@ impl TransactionPoolAdapter } impl network::TransactionPool for TransactionPoolAdapter - where - B: client::backend::Backend + Send + Sync, - E: client::CallExecutor + Send + Sync, - client::error::Error: From<<>::State as state_machine::backend::Backend>::Error>, - A: polkadot_api::PolkadotApi + Send + Sync, +where + B: client::backend::Backend + Send + Sync, + E: client::CallExecutor + Send + Sync, + client::error::Error: From< + <>::State as state_machine::backend::Backend>::Error, + >, + A: polkadot_api::PolkadotApi + Send + Sync, { fn transactions(&self) -> Vec<(Hash, Vec)> { let best_block_id = match self.best_block_id() { Some(id) => id, None => return vec![], }; - self.pool.cull_and_get_pending(best_block_id, |pending| pending - .map(|t| { - let hash = t.hash().clone(); - (hash, t.primitive_extrinsic()) + self.pool + .cull_and_get_pending(best_block_id, |pending| { + pending + .map(|t| { + let hash = t.hash().clone(); + (hash, t.primitive_extrinsic()) + }) + .collect() + }) + .unwrap_or_else(|e| { + warn!("Error retrieving pending set: {}", e); + vec![] }) - .collect() - ).unwrap_or_else(|e| { - warn!("Error retrieving pending set: {}", e); - vec![] - }) } fn import(&self, transaction: &Vec) -> 
Option { if !self.imports_external_transactions { - return None; + return None } let encoded = transaction.encode(); @@ -216,7 +307,7 @@ impl network::TransactionPool for TransactionPoolAdapter { @@ -89,23 +89,33 @@ pub struct Service { } /// Creates light client and register protocol with the network service -pub fn new_light(config: Configuration) -> Result, error::Error> { +pub fn new_light( + config: Configuration, +) -> Result, error::Error> { Service::new(components::LightComponents, config) } /// Creates full client and register protocol with the network service -pub fn new_full(config: Configuration) -> Result, error::Error> { +pub fn new_full( + config: Configuration, +) -> Result, error::Error> { let is_validator = (config.roles & Role::VALIDATOR) == Role::VALIDATOR; Service::new(components::FullComponents { is_validator }, config) } /// Creates bare client without any networking. -pub fn new_client(config: Configuration) -> Result::Backend, - ::Executor, - Block>>, - error::Error> -{ +pub fn new_client( + config: Configuration, +) -> Result< + Arc< + Client< + ::Backend, + ::Executor, + Block, + >, + >, + error::Error, +> { let db_settings = client_db::DatabaseSettings { cache_size: None, path: config.database_path.into(), @@ -246,7 +256,8 @@ impl Service /// Produce a task which prunes any finalized transactions from the pool. pub fn prune_imported(pool: &TransactionPool, hash: Hash) - where A: PolkadotApi, +where + A: PolkadotApi, { let block = BlockId::hash(hash); if let Err(e) = pool.cull(block) { @@ -258,7 +269,10 @@ pub fn prune_imported(pool: &TransactionPool, hash: Hash) } } -impl Drop for Service where Components: components::Components { +impl Drop for Service +where + Components: components::Components, +{ fn drop(&mut self) { self.network.stop_network(); diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 4436730b3efce..af5a04bf30078 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -27,10 +27,10 @@ //! propose and attest to validity of candidates, and those who can only attest //! to availability. +use std::collections::hash_map::{Entry, HashMap}; use std::collections::HashSet; -use std::collections::hash_map::{HashMap, Entry}; -use std::hash::Hash; use std::fmt::Debug; +use std::hash::Hash; /// A batch of statements to send out. pub trait StatementBatch { @@ -290,8 +290,8 @@ impl Table { /// /// The vector is sorted in ascending order by group id. 
pub fn proposed_candidates<'a>(&'a self, context: &C) -> Vec<&'a C::Candidate> { - use std::collections::BTreeMap; use std::collections::btree_map::Entry as BTreeEntry; + use std::collections::BTreeMap; let mut best_candidates = BTreeMap::new(); for candidate_data in self.candidate_votes.values() { @@ -303,7 +303,9 @@ impl Table { let (validity_t, availability_t) = context.requisite_votes(group_id); - if !candidate_data.can_be_included(validity_t, availability_t) { continue } + if !candidate_data.can_be_included(validity_t, availability_t) { + continue + } let candidate = &candidate_data.candidate; match best_candidates.entry(group_id.clone()) { BTreeEntry::Occupied(mut occ) => { @@ -311,8 +313,10 @@ impl Table { if *candidate_ref > candidate { *candidate_ref = candidate; } - } - BTreeEntry::Vacant(vacant) => { vacant.insert(candidate); }, + }, + BTreeEntry::Vacant(vacant) => { + vacant.insert(candidate); + }, } } @@ -336,9 +340,13 @@ impl Table { &mut self, context: &C, statement: SignedStatement, - from: Option + from: Option, ) -> Option> { - let SignedStatement { statement, signature, sender: signer } = statement; + let SignedStatement { + statement, + signature, + sender: signer, + } = statement; let trace = match statement { Statement::Candidate(_) => StatementTrace::Candidate(signer.clone()), @@ -348,12 +356,8 @@ impl Table { }; let (maybe_misbehavior, maybe_summary) = match statement { - Statement::Candidate(candidate) => self.import_candidate( - context, - signer.clone(), - candidate, - signature - ), + Statement::Candidate(candidate) => + self.import_candidate(context, signer.clone(), candidate, signature), Statement::Valid(digest) => self.validity_vote( context, signer.clone(), @@ -366,12 +370,8 @@ impl Table { digest, ValidityVote::Invalid(signature), ), - Statement::Available(digest) => self.availability_vote( - context, - signer.clone(), - digest, - signature, - ), + Statement::Available(digest) => + self.availability_vote(context, signer.clone(), digest, signature), }; if let Some(misbehavior) = maybe_misbehavior { @@ -395,9 +395,9 @@ impl Table { } /// Access all witnessed misbehavior. - pub fn get_misbehavior(&self) - -> &HashMap::Misbehavior> - { + pub fn get_misbehavior( + &self, + ) -> &HashMap::Misbehavior> { &self.detected_misbehavior } @@ -408,10 +408,11 @@ impl Table { /// Fill a statement batch and note messages as seen by the targets. pub fn fill_batch(&mut self, batch: &mut B) - where B: StatementBatch< + where + B: StatementBatch< C::AuthorityId, SignedStatement, - > + >, { // naively iterate all statements so far, taking any that // at least one of the targets has not seen. @@ -447,16 +448,17 @@ impl Table { SwappedTargetData { authority_data, - target_data + target_data, } }; let target_data = &mut target_data.target_data; macro_rules! attempt_send { - ($trace:expr, sender=$sender:expr, sig=$sig:expr, statement=$statement:expr) => {{ + ($trace:expr,sender = $sender:expr,sig = $sig:expr,statement = $statement:expr) => {{ let trace = $trace; - let can_send = target_data.iter() + let can_send = target_data + .iter() .any(|t| !t.1.known_statements.contains(&trace)); if can_send { @@ -471,50 +473,53 @@ impl Table { target.1.known_statements.insert(trace.clone()); } } else { - return; + return + } } - } - }} + }}; } // reconstruct statements for anything whose trace passes the filter. 
for (digest, candidate) in self.candidate_votes.iter() { - let issuance_iter = candidate.validity_votes.iter() - .filter(|&(_, x)| if let ValidityVote::Issued(_) = *x { true } else { false }); + let issuance_iter = candidate.validity_votes.iter().filter(|&(_, x)| { + if let ValidityVote::Issued(_) = *x { + true + } else { + false + } + }); - let validity_iter = candidate.validity_votes.iter() - .filter(|&(_, x)| if let ValidityVote::Issued(_) = *x { false } else { true }); + let validity_iter = candidate.validity_votes.iter().filter(|&(_, x)| { + if let ValidityVote::Issued(_) = *x { + false + } else { + true + } + }); // send issuance statements before votes. for (sender, vote) in issuance_iter.chain(validity_iter) { match *vote { - ValidityVote::Issued(ref sig) => { - attempt_send!( - StatementTrace::Candidate(sender.clone()), - sender = sender.clone(), - sig = sig.clone(), - statement = Statement::Candidate(candidate.candidate.clone()) - ) - } - ValidityVote::Valid(ref sig) => { - attempt_send!( - StatementTrace::Valid(sender.clone(), digest.clone()), - sender = sender.clone(), - sig = sig.clone(), - statement = Statement::Valid(digest.clone()) - ) - } - ValidityVote::Invalid(ref sig) => { - attempt_send!( - StatementTrace::Invalid(sender.clone(), digest.clone()), - sender = sender.clone(), - sig = sig.clone(), - statement = Statement::Invalid(digest.clone()) - ) - } + ValidityVote::Issued(ref sig) => attempt_send!( + StatementTrace::Candidate(sender.clone()), + sender = sender.clone(), + sig = sig.clone(), + statement = Statement::Candidate(candidate.candidate.clone()) + ), + ValidityVote::Valid(ref sig) => attempt_send!( + StatementTrace::Valid(sender.clone(), digest.clone()), + sender = sender.clone(), + sig = sig.clone(), + statement = Statement::Valid(digest.clone()) + ), + ValidityVote::Invalid(ref sig) => attempt_send!( + StatementTrace::Invalid(sender.clone(), digest.clone()), + sender = sender.clone(), + sig = sig.clone(), + statement = Statement::Invalid(digest.clone()) + ), } - }; - + } // and lastly send availability. for (sender, sig) in candidate.availability_votes.iter() { @@ -526,14 +531,21 @@ impl Table { ) } } - } - fn note_trace_seen(&mut self, trace: StatementTrace, known_by: C::AuthorityId) { - self.authority_data.entry(known_by).or_insert_with(|| AuthorityData { - proposal: None, - known_statements: HashSet::default(), - }).known_statements.insert(trace); + fn note_trace_seen( + &mut self, + trace: StatementTrace, + known_by: C::AuthorityId, + ) { + self.authority_data + .entry(known_by) + .or_insert_with(|| AuthorityData { + proposal: None, + known_statements: HashSet::default(), + }) + .known_statements + .insert(trace); } fn import_candidate( @@ -542,7 +554,10 @@ impl Table { from: C::AuthorityId, candidate: C::Candidate, signature: C::Signature, - ) -> (Option<::Misbehavior>, Option>) { + ) -> ( + Option<::Misbehavior>, + Option>, + ) { let group = C::candidate_group(&candidate); if !context.is_member_of(&from, &group) { return ( @@ -554,7 +569,7 @@ impl Table { }, })), None, - ); + ) } // check that authority hasn't already specified another candidate. @@ -570,10 +585,12 @@ impl Table { if old_digest != &digest { const EXISTENCE_PROOF: &str = "when proposal first received from authority, candidate \ - votes entry is created. proposal here is `Some`, therefore \ - candidate votes entry exists; qed"; + votes entry is created. 
proposal here is `Some`, therefore \ + candidate votes entry exists; qed"; - let old_candidate = self.candidate_votes.get(old_digest) + let old_candidate = self + .candidate_votes + .get(old_digest) .expect(EXISTENCE_PROOF) .candidate .clone(); @@ -584,7 +601,7 @@ impl Table { second: (candidate, signature.clone()), })), None, - ); + ) } false @@ -592,34 +609,31 @@ impl Table { existing.proposal = Some((digest.clone(), signature.clone())); true } - } + }, Entry::Vacant(vacant) => { vacant.insert(AuthorityData { proposal: Some((digest.clone(), signature.clone())), known_statements: HashSet::new(), }); true - } + }, }; // NOTE: altering this code may affect the existence proof above. ensure it remains // valid. if new_proposal { - self.candidate_votes.entry(digest.clone()).or_insert_with(move || CandidateData { - group_id: group, - candidate: candidate, - validity_votes: HashMap::new(), - availability_votes: HashMap::new(), - indicated_bad_by: Vec::new(), - }); + self.candidate_votes + .entry(digest.clone()) + .or_insert_with(move || CandidateData { + group_id: group, + candidate, + validity_votes: HashMap::new(), + availability_votes: HashMap::new(), + indicated_bad_by: Vec::new(), + }); } - self.validity_vote( - context, - from, - digest, - ValidityVote::Issued(signature), - ) + self.validity_vote(context, from, digest, ValidityVote::Issued(signature)) } fn validity_vote( @@ -628,7 +642,10 @@ impl Table { from: C::AuthorityId, digest: C::Digest, vote: ValidityVote, - ) -> (Option<::Misbehavior>, Option>) { + ) -> ( + Option<::Misbehavior>, + Option>, + ) { let votes = match self.candidate_votes.get_mut(&digest) { None => return (None, None), // TODO: queue up but don't get DoS'ed Some(votes) => votes, @@ -642,9 +659,10 @@ impl Table { let (sig, valid) = match vote { ValidityVote::Valid(s) => (s, true), ValidityVote::Invalid(s) => (s, false), - ValidityVote::Issued(_) => - panic!("implicit issuance vote only cast from `import_candidate` after \ - checking group membership of issuer; qed"), + ValidityVote::Issued(_) => panic!( + "implicit issuance vote only cast from `import_candidate` after \ + checking group membership of issuer; qed" + ), }; return ( @@ -656,11 +674,11 @@ impl Table { Statement::Valid(digest) } else { Statement::Invalid(digest) - } - } + }, + }, })), None, - ); + ) } // check for double votes. 
@@ -668,20 +686,26 @@ impl Table { Entry::Occupied(occ) => { if occ.get() != &vote { let double_vote_proof = match (occ.get().clone(), vote) { - (ValidityVote::Issued(iss), ValidityVote::Valid(good)) | - (ValidityVote::Valid(good), ValidityVote::Issued(iss)) => - ValidityDoubleVote::IssuedAndValidity((votes.candidate.clone(), iss), (digest, good)), - (ValidityVote::Issued(iss), ValidityVote::Invalid(bad)) | - (ValidityVote::Invalid(bad), ValidityVote::Issued(iss)) => - ValidityDoubleVote::IssuedAndInvalidity((votes.candidate.clone(), iss), (digest, bad)), - (ValidityVote::Valid(good), ValidityVote::Invalid(bad)) | - (ValidityVote::Invalid(bad), ValidityVote::Valid(good)) => + (ValidityVote::Issued(iss), ValidityVote::Valid(good)) + | (ValidityVote::Valid(good), ValidityVote::Issued(iss)) => + ValidityDoubleVote::IssuedAndValidity( + (votes.candidate.clone(), iss), + (digest, good), + ), + (ValidityVote::Issued(iss), ValidityVote::Invalid(bad)) + | (ValidityVote::Invalid(bad), ValidityVote::Issued(iss)) => + ValidityDoubleVote::IssuedAndInvalidity( + (votes.candidate.clone(), iss), + (digest, bad), + ), + (ValidityVote::Valid(good), ValidityVote::Invalid(bad)) + | (ValidityVote::Invalid(bad), ValidityVote::Valid(good)) => ValidityDoubleVote::ValidityAndInvalidity(digest, good, bad), _ => { // this would occur if two different but valid signatures // on the same kind of vote occurred. - return (None, None); - } + return (None, None) + }, }; return ( @@ -690,19 +714,24 @@ impl Table { ) } - return (None, None); - } + return (None, None) + }, Entry::Vacant(vacant) => { if let ValidityVote::Invalid(_) = vote { votes.indicated_bad_by.push(from); } vacant.insert(vote); - } + }, } let is_includable = votes.can_be_included(v_threshold, a_threshold); - update_includable_count(&mut self.includable_count, &votes.group_id, was_includable, is_includable); + update_includable_count( + &mut self.includable_count, + &votes.group_id, + was_includable, + is_includable, + ); (None, Some(votes.summary(digest))) } @@ -713,7 +742,10 @@ impl Table { from: C::AuthorityId, digest: C::Digest, signature: C::Signature, - ) -> (Option<::Misbehavior>, Option>) { + ) -> ( + Option<::Misbehavior>, + Option>, + ) { let votes = match self.candidate_votes.get_mut(&digest) { None => return (None, None), // TODO: queue up but don't get DoS'ed Some(votes) => votes, @@ -730,22 +762,32 @@ impl Table { signature: signature.clone(), statement: Statement::Available(digest), sender: from, - } + }, })), - None - ); + None, + ) } votes.availability_votes.insert(from, signature); let is_includable = votes.can_be_included(v_threshold, a_threshold); - update_includable_count(&mut self.includable_count, &votes.group_id, was_includable, is_includable); + update_includable_count( + &mut self.includable_count, + &votes.group_id, + was_includable, + is_includable, + ); (None, Some(votes.summary(digest))) } } -fn update_includable_count(map: &mut HashMap, group_id: &G, was_includable: bool, is_includable: bool) { +fn update_includable_count( + map: &mut HashMap, + group_id: &G, + was_includable: bool, + is_includable: bool, +) { if was_includable && !is_includable { if let Entry::Occupied(mut entry) = map.entry(group_id.clone()) { *entry.get_mut() -= 1; @@ -773,8 +815,12 @@ mod tests { } impl ::generic::StatementBatch for VecBatch { - fn targets(&self) -> &[V] { &self.targets } - fn is_empty(&self) -> bool { self.items.is_empty() } + fn targets(&self) -> &[V] { + &self.targets + } + fn is_empty(&self) -> bool { + self.items.is_empty() + } fn 
push(&mut self, item: T) -> bool { if self.items.len() == self.max_len { false @@ -808,7 +854,7 @@ mod tests { #[derive(Debug, PartialEq, Eq)] struct TestContext { // v -> (validity, availability) - authorities: HashMap + authorities: HashMap, } impl Context for TestContext { @@ -826,20 +872,18 @@ mod tests { GroupId(candidate.0) } - fn is_member_of( - &self, - authority: &AuthorityId, - group: &GroupId - ) -> bool { - self.authorities.get(authority).map(|v| &v.0 == group).unwrap_or(false) + fn is_member_of(&self, authority: &AuthorityId, group: &GroupId) -> bool { + self.authorities + .get(authority) + .map(|v| &v.0 == group) + .unwrap_or(false) } - fn is_availability_guarantor_of( - &self, - authority: &AuthorityId, - group: &GroupId - ) -> bool { - self.authorities.get(authority).map(|v| &v.1 == group).unwrap_or(false) + fn is_availability_guarantor_of(&self, authority: &AuthorityId, group: &GroupId) -> bool { + self.authorities + .get(authority) + .map(|v| &v.1 == group) + .unwrap_or(false) } fn requisite_votes(&self, id: &GroupId) -> (usize, usize) { @@ -847,8 +891,12 @@ mod tests { let mut total_availability = 0; for &(ref validity, ref availability) in self.authorities.values() { - if validity == id { total_validity += 1 } - if availability == id { total_availability += 1 } + if validity == id { + total_validity += 1 + } + if availability == id { + total_availability += 1 + } } (total_validity / 2 + 1, total_availability / 2 + 1) @@ -862,7 +910,7 @@ mod tests { let mut map = HashMap::new(); map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); map - } + }, }; let mut table = create(); @@ -898,7 +946,7 @@ mod tests { let mut map = HashMap::new(); map.insert(AuthorityId(1), (GroupId(3), GroupId(455))); map - } + }, }; let mut table = create(); @@ -930,7 +978,7 @@ mod tests { map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); map.insert(AuthorityId(2), (GroupId(3), GroupId(222))); map - } + }, }; let mut table = create(); @@ -1001,7 +1049,7 @@ mod tests { map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); map.insert(AuthorityId(2), (GroupId(2), GroupId(246))); map - } + }, }; let mut table = create(); @@ -1049,7 +1097,7 @@ mod tests { let mut map = HashMap::new(); map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); map - } + }, }; let mut table = create(); @@ -1095,13 +1143,18 @@ mod tests { assert!(!candidate.can_be_included(validity_threshold, availability_threshold)); for i in 0..validity_threshold { - candidate.validity_votes.insert(AuthorityId(i + 100), ValidityVote::Valid(Signature(i + 100))); + candidate.validity_votes.insert( + AuthorityId(i + 100), + ValidityVote::Valid(Signature(i + 100)), + ); } assert!(!candidate.can_be_included(validity_threshold, availability_threshold)); for i in 0..availability_threshold { - candidate.availability_votes.insert(AuthorityId(i + 255), Signature(i + 255)); + candidate + .availability_votes + .insert(AuthorityId(i + 255), Signature(i + 255)); } assert!(candidate.can_be_included(validity_threshold, availability_threshold)); @@ -1121,7 +1174,7 @@ mod tests { map.insert(AuthorityId(3), (GroupId(2), GroupId(455))); map.insert(AuthorityId(4), (GroupId(455), GroupId(2))); map - } + }, }; // have 2/3 validity guarantors note validity. 
@@ -1181,7 +1234,7 @@ mod tests { let mut map = HashMap::new(); map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); map - } + }, }; let mut table = create(); @@ -1191,7 +1244,8 @@ mod tests { sender: AuthorityId(1), }; - let summary = table.import_statement(&context, statement, None) + let summary = table + .import_statement(&context, statement, None) .expect("candidate import to give summary"); assert_eq!(summary.candidate, Digest(100)); @@ -1208,7 +1262,7 @@ mod tests { map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); map.insert(AuthorityId(2), (GroupId(2), GroupId(455))); map - } + }, }; let mut table = create(); @@ -1228,7 +1282,8 @@ mod tests { sender: AuthorityId(2), }; - let summary = table.import_statement(&context, vote, None) + let summary = table + .import_statement(&context, vote, None) .expect("candidate vote to give summary"); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); @@ -1247,7 +1302,7 @@ mod tests { map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); map.insert(AuthorityId(2), (GroupId(5), GroupId(2))); map - } + }, }; let mut table = create(); @@ -1267,7 +1322,8 @@ mod tests { sender: AuthorityId(2), }; - let summary = table.import_statement(&context, vote, None) + let summary = table + .import_statement(&context, vote, None) .expect("candidate vote to give summary"); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); @@ -1287,7 +1343,7 @@ mod tests { map.insert(AuthorityId(i), (GroupId(2), GroupId(400 + i))); } map - } + }, }; let mut table = create(); diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index 86b95b0d90ff1..5a143131e9971 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -14,15 +14,15 @@ //! propose and attest to validity of candidates, and those who can only attest //! to availability. -extern crate substrate_primitives; extern crate polkadot_primitives as primitives; +extern crate substrate_primitives; pub mod generic; pub use generic::Table; -use primitives::parachain::{Id, CandidateReceipt, CandidateSignature as Signature}; -use primitives::{SessionKey, Hash}; +use primitives::parachain::{CandidateReceipt, CandidateSignature as Signature, Id}; +use primitives::{Hash, SessionKey}; /// Statements about candidates on the network. pub type Statement = generic::Statement; @@ -45,11 +45,7 @@ pub trait Context { /// Whether a authority is an availability guarantor of a group. /// Guarantors are meant to vote on availability for candidates submitted /// in a group. - fn is_availability_guarantor_of( - &self, - authority: &SessionKey, - group: &Id, - ) -> bool; + fn is_availability_guarantor_of(&self, authority: &SessionKey, group: &Id) -> bool; // requisite number of votes for validity and availability respectively from a group. 
fn requisite_votes(&self, group: &Id) -> (usize, usize); @@ -100,8 +96,12 @@ pub trait StatementBatch { } impl generic::StatementBatch for T { - fn targets(&self) -> &[SessionKey] { StatementBatch::targets(self ) } - fn is_empty(&self) -> bool { StatementBatch::is_empty(self) } + fn targets(&self) -> &[SessionKey] { + StatementBatch::targets(self) + } + fn is_empty(&self) -> bool { + StatementBatch::is_empty(self) + } fn push(&mut self, statement: SignedStatement) -> bool { StatementBatch::push(self, statement) } diff --git a/polkadot/transaction-pool/src/lib.rs b/polkadot/transaction-pool/src/lib.rs index 5f344035cec0a..45d5caf2b38c3 100644 --- a/polkadot/transaction-pool/src/lib.rs +++ b/polkadot/transaction-pool/src/lib.rs @@ -15,15 +15,15 @@ // along with Polkadot. If not, see . extern crate ed25519; +extern crate parking_lot; +extern crate polkadot_api; +extern crate polkadot_primitives as primitives; +extern crate polkadot_runtime as runtime; extern crate substrate_client as client; extern crate substrate_codec as codec; extern crate substrate_extrinsic_pool as extrinsic_pool; extern crate substrate_primitives as substrate_primitives; extern crate substrate_runtime_primitives; -extern crate polkadot_runtime as runtime; -extern crate polkadot_primitives as primitives; -extern crate polkadot_api; -extern crate parking_lot; #[cfg(test)] extern crate substrate_keyring; @@ -36,23 +36,29 @@ extern crate log; mod error; -use std::{ - cmp::Ordering, - collections::HashMap, - ops::Deref, - sync::Arc, -}; +use std::{cmp::Ordering, collections::HashMap, ops::Deref, sync::Arc}; use codec::Slicable; -use extrinsic_pool::{Pool, Listener, txpool::{self, Readiness, scoring::{Change, Choice}}}; use extrinsic_pool::api::ExtrinsicPool; +use extrinsic_pool::{ + txpool::{ + self, + scoring::{Change, Choice}, + Readiness, + }, + Listener, Pool, +}; use polkadot_api::PolkadotApi; -use primitives::{AccountId, BlockId, Hash, Index, UncheckedExtrinsic as FutureProofUncheckedExtrinsic}; +use primitives::{ + AccountId, BlockId, Hash, Index, UncheckedExtrinsic as FutureProofUncheckedExtrinsic, +}; use runtime::{Address, UncheckedExtrinsic}; -use substrate_runtime_primitives::traits::{Bounded, Checkable, Hashing, BlakeTwo256}; +use substrate_runtime_primitives::traits::{BlakeTwo256, Bounded, Checkable, Hashing}; -pub use extrinsic_pool::txpool::{Options, Status, LightStatus, VerifiedTransaction as VerifiedTransactionOps}; pub use error::{Error, ErrorKind, Result}; +pub use extrinsic_pool::txpool::{ + LightStatus, Options, Status, VerifiedTransaction as VerifiedTransactionOps, +}; /// Type alias for convenience. pub type CheckedExtrinsic = ::Checked; @@ -141,10 +147,13 @@ impl txpool::Scoring for Scoring { fn choose(&self, old: &VerifiedTransaction, new: &VerifiedTransaction) -> Choice { if old.is_fully_verified() { - assert!(new.is_fully_verified(), "Scoring::choose called with transactions from different senders"); + assert!( + new.is_fully_verified(), + "Scoring::choose called with transactions from different senders" + ); if old.index() == new.index() { // TODO [ToDr] Do we allow replacement? 
If yes then it should be Choice::ReplaceOld - return Choice::RejectNew; + return Choice::RejectNew } } @@ -159,7 +168,7 @@ impl txpool::Scoring for Scoring { &self, xts: &[txpool::Transaction], scores: &mut [Self::Score], - _change: Change<()> + _change: Change<()>, ) { for i in 0..xts.len() { if !xts[i].is_fully_verified() { @@ -207,12 +216,11 @@ impl<'a, T: 'a + PolkadotApi> Clone for Ready<'a, T> { } } -impl<'a, A: 'a + PolkadotApi> txpool::Ready for Ready<'a, A> -{ +impl<'a, A: 'a + PolkadotApi> txpool::Ready for Ready<'a, A> { fn is_ready(&mut self, xt: &VerifiedTransaction) -> Readiness { let sender = match xt.sender() { Some(sender) => sender, - None => return Readiness::Future + None => return Readiness::Future, }; trace!(target: "transaction-pool", "Checking readiness of {} (from {})", xt.hash, Hash::from(sender)); @@ -220,20 +228,24 @@ impl<'a, A: 'a + PolkadotApi> txpool::Ready for Ready<'a, A // TODO: find a way to handle index error properly -- will need changes to // transaction-pool trait. let (api, at_block) = (&self.api, &self.at_block); - let next_index = self.known_nonces.entry(sender) - .or_insert_with(|| api.index(at_block, sender).ok().unwrap_or_else(Bounded::max_value)); + let next_index = self.known_nonces.entry(sender).or_insert_with(|| { + api.index(at_block, sender) + .ok() + .unwrap_or_else(Bounded::max_value) + }); trace!(target: "transaction-pool", "Next index for sender is {}; xt index is {}", next_index, xt.original.extrinsic.index); let result = match xt.original.extrinsic.index.cmp(&next_index) { // TODO: this won't work perfectly since accounts can now be killed, returning the nonce // to zero. - // We should detect if the index was reset and mark all transactions as `Stale` for cull to work correctly. - // Otherwise those transactions will keep occupying the queue. - // Perhaps we could mark as stale if `index - state_index` > X? + // We should detect if the index was reset and mark all transactions as `Stale` for + // cull to work correctly. Otherwise those transactions will keep occupying the + // queue. Perhaps we could mark as stale if `index - state_index` > X? Ordering::Greater => Readiness::Future, Ordering::Equal => Readiness::Ready, - // TODO [ToDr] Should mark transactions referrencing too old blockhash as `Stale` as well. + // TODO [ToDr] Should mark transactions referrencing too old blockhash as `Stale` as + // well. Ordering::Less => Readiness::Stale, }; @@ -249,7 +261,8 @@ pub struct Verifier<'a, A: 'a> { at_block: BlockId, } -impl<'a, A> Verifier<'a, A> where +impl<'a, A> Verifier<'a, A> +where A: 'a + PolkadotApi, { const NO_ACCOUNT: &'static str = "Account not found."; @@ -267,7 +280,8 @@ impl<'a, A> Verifier<'a, A> where } } -impl<'a, A> txpool::Verifier for Verifier<'a, A> where +impl<'a, A> txpool::Verifier for Verifier<'a, A> +where A: 'a + PolkadotApi, { type VerifiedTransaction = VerifiedTransaction; @@ -294,7 +308,7 @@ impl<'a, A> txpool::Verifier for Verifier<'a, A> where inner, sender, hash, - encoded_size + encoded_size, }) } } @@ -307,7 +321,8 @@ pub struct TransactionPool { api: Arc, } -impl TransactionPool where +impl TransactionPool +where A: PolkadotApi, { /// Create a new transaction pool. @@ -319,12 +334,18 @@ impl TransactionPool where } /// Attempt to directly import `UncheckedExtrinsic` without going through serialization. 
- pub fn import_unchecked_extrinsic(&self, block: BlockId, uxt: UncheckedExtrinsic) -> Result> { + pub fn import_unchecked_extrinsic( + &self, + block: BlockId, + uxt: UncheckedExtrinsic, + ) -> Result> { let verifier = Verifier { api: &*self.api, at_block: block, }; - self.inner.submit(verifier, vec![uxt]).map(|mut v| v.swap_remove(0)) + self.inner + .submit(verifier, vec![uxt]) + .map(|mut v| v.swap_remove(0)) } /// Retry to import all semi-verified transactions (unknown account indices) @@ -335,22 +356,34 @@ impl TransactionPool where at_block: block, }; - self.inner.submit(verifier, to_reverify.into_iter().map(|tx| tx.original.clone()))?; + self.inner.submit( + verifier, + to_reverify.into_iter().map(|tx| tx.original.clone()), + )?; Ok(()) } /// Reverify transaction that has been reported incorrect. /// - /// Returns `Ok(None)` in case the hash is missing, `Err(e)` in case of verification error and new transaction - /// reference otherwise. + /// Returns `Ok(None)` in case the hash is missing, `Err(e)` in case of verification error and + /// new transaction reference otherwise. /// /// TODO [ToDr] That method is currently unused, should be used together with BlockBuilder /// when we detect that particular transaction has failed. /// In such case we will attempt to remove or re-verify it. - pub fn reverify_transaction(&self, block: BlockId, hash: Hash) -> Result>> { - let result = self.inner.remove(&[hash], false).pop().expect("One hash passed; one result received; qed"); + pub fn reverify_transaction( + &self, + block: BlockId, + hash: Hash, + ) -> Result>> { + let result = self + .inner + .remove(&[hash], false) + .pop() + .expect("One hash passed; one result received; qed"); if let Some(tx) = result { - self.import_unchecked_extrinsic(block, tx.original.clone()).map(Some) + self.import_unchecked_extrinsic(block, tx.original.clone()) + .map(Some) } else { Ok(None) } @@ -363,8 +396,10 @@ impl TransactionPool where } /// Cull transactions from the queue and then compute the pending set. - pub fn cull_and_get_pending(&self, block: BlockId, f: F) -> Result where - F: FnOnce(txpool::PendingIterator, Scoring, Listener>) -> T, + pub fn cull_and_get_pending(&self, block: BlockId, f: F) -> Result + where + F: FnOnce(txpool::PendingIterator, Scoring, Listener>) + -> T, { let ready = Ready::create(block, &*self.api); self.inner.cull(None, ready.clone()); @@ -385,19 +420,21 @@ impl Deref for TransactionPool { } } -impl ExtrinsicPool for TransactionPool where +impl ExtrinsicPool for TransactionPool +where A: Send + Sync + 'static, A: PolkadotApi, { type Error = Error; fn submit(&self, block: BlockId, xts: Vec) -> Result> { - // TODO: more general transaction pool, which can handle more kinds of vec-encoded transactions, - // even when runtime is out of date. + // TODO: more general transaction pool, which can handle more kinds of vec-encoded + // transactions, even when runtime is out of date. 
xts.into_iter() .map(|xt| xt.encode()) .map(|encoded| { - let decoded = UncheckedExtrinsic::decode(&mut &encoded[..]).ok_or(ErrorKind::InvalidExtrinsicFormat)?; + let decoded = UncheckedExtrinsic::decode(&mut &encoded[..]) + .ok_or(ErrorKind::InvalidExtrinsicFormat)?; let tx = self.import_unchecked_extrinsic(block, decoded)?; Ok(*tx.hash()) }) @@ -407,21 +444,30 @@ impl ExtrinsicPool for Transact #[cfg(test)] mod tests { - use std::sync::{atomic::{self, AtomicBool}, Arc}; use super::TransactionPool; - use substrate_keyring::Keyring::{self, *}; use codec::Slicable; - use polkadot_api::{PolkadotApi, BlockBuilder, Result}; - use primitives::{AccountId, AccountIndex, Block, BlockId, Hash, Index, SessionKey, Timestamp, - UncheckedExtrinsic as FutureProofUncheckedExtrinsic}; - use runtime::{RawAddress, Call, TimestampCall, BareExtrinsic, Extrinsic, UncheckedExtrinsic}; + use polkadot_api::{BlockBuilder, PolkadotApi, Result}; use primitives::parachain::{CandidateReceipt, DutyRoster, Id as ParaId}; - use substrate_runtime_primitives::{MaybeUnsigned, generic}; + use primitives::{ + AccountId, AccountIndex, Block, BlockId, Hash, Index, SessionKey, Timestamp, + UncheckedExtrinsic as FutureProofUncheckedExtrinsic, + }; + use runtime::{BareExtrinsic, Call, Extrinsic, RawAddress, TimestampCall, UncheckedExtrinsic}; + use std::sync::{ + atomic::{self, AtomicBool}, + Arc, + }; + use substrate_keyring::Keyring::{self, *}; + use substrate_runtime_primitives::{generic, MaybeUnsigned}; struct TestBlockBuilder; impl BlockBuilder for TestBlockBuilder { - fn push_extrinsic(&mut self, _extrinsic: FutureProofUncheckedExtrinsic) -> Result<()> { unimplemented!() } - fn bake(self) -> Result { unimplemented!() } + fn push_extrinsic(&mut self, _extrinsic: FutureProofUncheckedExtrinsic) -> Result<()> { + unimplemented!() + } + fn bake(self) -> Result { + unimplemented!() + } } fn number_of(at: &BlockId) -> u32 { @@ -451,22 +497,58 @@ mod tests { impl PolkadotApi for TestPolkadotApi { type BlockBuilder = TestBlockBuilder; - fn session_keys(&self, _at: &BlockId) -> Result> { unimplemented!() } - fn validators(&self, _at: &BlockId) -> Result> { unimplemented!() } - fn random_seed(&self, _at: &BlockId) -> Result { unimplemented!() } - fn duty_roster(&self, _at: &BlockId) -> Result { unimplemented!() } - fn timestamp(&self, _at: &BlockId) -> Result { unimplemented!() } - fn evaluate_block(&self, _at: &BlockId, _block: Block) -> Result { unimplemented!() } - fn active_parachains(&self, _at: &BlockId) -> Result> { unimplemented!() } - fn parachain_code(&self, _at: &BlockId, _parachain: ParaId) -> Result>> { unimplemented!() } - fn parachain_head(&self, _at: &BlockId, _parachain: ParaId) -> Result>> { unimplemented!() } - fn build_block(&self, _at: &BlockId, _timestamp: Timestamp, _new_heads: Vec) -> Result { unimplemented!() } - fn inherent_extrinsics(&self, _at: &BlockId, _timestamp: Timestamp, _new_heads: Vec) -> Result>> { unimplemented!() } + fn session_keys(&self, _at: &BlockId) -> Result> { + unimplemented!() + } + fn validators(&self, _at: &BlockId) -> Result> { + unimplemented!() + } + fn random_seed(&self, _at: &BlockId) -> Result { + unimplemented!() + } + fn duty_roster(&self, _at: &BlockId) -> Result { + unimplemented!() + } + fn timestamp(&self, _at: &BlockId) -> Result { + unimplemented!() + } + fn evaluate_block(&self, _at: &BlockId, _block: Block) -> Result { + unimplemented!() + } + fn active_parachains(&self, _at: &BlockId) -> Result> { + unimplemented!() + } + fn parachain_code(&self, _at: &BlockId, 
_parachain: ParaId) -> Result>> { + unimplemented!() + } + fn parachain_head(&self, _at: &BlockId, _parachain: ParaId) -> Result>> { + unimplemented!() + } + fn build_block( + &self, + _at: &BlockId, + _timestamp: Timestamp, + _new_heads: Vec, + ) -> Result { + unimplemented!() + } + fn inherent_extrinsics( + &self, + _at: &BlockId, + _timestamp: Timestamp, + _new_heads: Vec, + ) -> Result>> { + unimplemented!() + } fn index(&self, _at: &BlockId, _account: AccountId) -> Result { Ok((_account[0] as u32) + number_of(_at)) } - fn lookup(&self, _at: &BlockId, _address: RawAddress) -> Result> { + fn lookup( + &self, + _at: &BlockId, + _address: RawAddress, + ) -> Result> { match _address { RawAddress::Id(i) => Ok(Some(i)), RawAddress::Index(_) if self.no_lookup.load(atomic::Ordering::SeqCst) => Ok(None), @@ -493,22 +575,28 @@ mod tests { function: Call::Timestamp(TimestampCall::set(0)), }; let sig = sxt.using_encoded(|e| who.sign(e)); - UncheckedExtrinsic::new(Extrinsic { - signed: if use_id { RawAddress::Id(sxt.signed) } else { RawAddress::Index( - match who { - Alice => 0, - Bob => 1, - Charlie => 2, - Dave => 3, - Eve => 4, - Ferdie => 5, - One => 6, - Two => 7, - } - )}, - index: sxt.index, - function: sxt.function, - }, MaybeUnsigned(sig.into())).using_encoded(|e| UncheckedExtrinsic::decode(&mut &e[..])).unwrap() + UncheckedExtrinsic::new( + Extrinsic { + signed: if use_id { + RawAddress::Id(sxt.signed) + } else { + RawAddress::Index(match who { + Alice => 0, + Bob => 1, + Charlie => 2, + Dave => 3, + Eve => 4, + Ferdie => 5, + One => 6, + Two => 7, + }) + }, + index: sxt.index, + function: sxt.function, + }, + MaybeUnsigned(sig.into()), + ).using_encoded(|e| UncheckedExtrinsic::decode(&mut &e[..])) + .unwrap() } fn pool(api: &TestPolkadotApi) -> TransactionPool { @@ -519,9 +607,13 @@ mod tests { fn id_submission_should_work() { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)) + .unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![(Some(Alice.to_raw_public().into()), 209)]); } @@ -529,9 +621,13 @@ mod tests { fn index_submission_should_work() { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)) + .unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![(Some(Alice.to_raw_public().into()), 209)]); } @@ -539,31 +635,57 @@ mod tests { fn multiple_id_submission_should_work() { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)).unwrap(); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)).unwrap(); - - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); - 
assert_eq!(pending, vec![(Some(Alice.to_raw_public().into()), 209), (Some(Alice.to_raw_public().into()), 210)]); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)) + .unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)) + .unwrap(); + + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); + assert_eq!( + pending, + vec![ + (Some(Alice.to_raw_public().into()), 209), + (Some(Alice.to_raw_public().into()), 210), + ] + ); } #[test] fn multiple_index_submission_should_work() { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)).unwrap(); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, false)).unwrap(); - - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); - assert_eq!(pending, vec![(Some(Alice.to_raw_public().into()), 209), (Some(Alice.to_raw_public().into()), 210)]); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)) + .unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, false)) + .unwrap(); + + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); + assert_eq!( + pending, + vec![ + (Some(Alice.to_raw_public().into()), 209), + (Some(Alice.to_raw_public().into()), 210), + ] + ); } #[test] fn id_based_early_nonce_should_be_culled() { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 208, true)).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 208, true)) + .unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); } @@ -571,9 +693,13 @@ mod tests { fn index_based_early_nonce_should_be_culled() { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 208, false)).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 208, false)) + .unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); } @@ -582,13 +708,27 @@ mod tests { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)).unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)) + .unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)).unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), 
a.index())).collect()).unwrap(); - assert_eq!(pending, vec![(Some(Alice.to_raw_public().into()), 209), (Some(Alice.to_raw_public().into()), 210)]); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)) + .unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); + assert_eq!( + pending, + vec![ + (Some(Alice.to_raw_public().into()), 209), + (Some(Alice.to_raw_public().into()), 210), + ] + ); } #[test] @@ -596,48 +736,81 @@ mod tests { let api = TestPolkadotApi::default(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, false)).unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, false)) + .unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)).unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); - assert_eq!(pending, vec![(Some(Alice.to_raw_public().into()), 209), (Some(Alice.to_raw_public().into()), 210)]); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)) + .unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); + assert_eq!( + pending, + vec![ + (Some(Alice.to_raw_public().into()), 209), + (Some(Alice.to_raw_public().into()), 210), + ] + ); } #[test] fn index_then_id_submission_should_make_progress() { let api = TestPolkadotApi::without_lookup(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)).unwrap(); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)).unwrap(); - - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)) + .unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)) + .unwrap(); + + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); api.enable_lookup(); pool.retry_verification(BlockId::number(0)).unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); - assert_eq!(pending, vec![ - (Some(Alice.to_raw_public().into()), 209), - (Some(Alice.to_raw_public().into()), 210) - ]); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); + assert_eq!( + pending, + vec![ + (Some(Alice.to_raw_public().into()), 209), + (Some(Alice.to_raw_public().into()), 210), + ] + ); } #[test] fn retrying_verification_might_not_change_anything() { let api = TestPolkadotApi::without_lookup(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)).unwrap(); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)).unwrap(); - - let pending: Vec<_> = 
pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, false)) + .unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, true)) + .unwrap(); + + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); pool.retry_verification(BlockId::number(1)).unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); } @@ -645,23 +818,32 @@ mod tests { fn id_then_index_submission_should_make_progress() { let api = TestPolkadotApi::without_lookup(); let pool = pool(&api); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)).unwrap(); - pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, false)).unwrap(); - - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); - assert_eq!(pending, vec![ - (Some(Alice.to_raw_public().into()), 209) - ]); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 209, true)) + .unwrap(); + pool.import_unchecked_extrinsic(BlockId::number(0), uxt(Alice, 210, false)) + .unwrap(); + + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); + assert_eq!(pending, vec![(Some(Alice.to_raw_public().into()), 209)]); // when api.enable_lookup(); pool.retry_verification(BlockId::number(0)).unwrap(); - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(0), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); - assert_eq!(pending, vec![ - (Some(Alice.to_raw_public().into()), 209), - (Some(Alice.to_raw_public().into()), 210) - ]); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(0), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); + assert_eq!( + pending, + vec![ + (Some(Alice.to_raw_public().into()), 209), + (Some(Alice.to_raw_public().into()), 210), + ] + ); } #[test] @@ -669,14 +851,23 @@ mod tests { let api = TestPolkadotApi::default(); let pool = pool(&api); let block = BlockId::number(0); - pool.import_unchecked_extrinsic(block, uxt(Alice, 209, false)).unwrap(); - let hash = *pool.import_unchecked_extrinsic(block, uxt(Alice, 210, false)).unwrap().hash(); - - let pending: Vec<_> = pool.cull_and_get_pending(block, |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); - assert_eq!(pending, vec![ - (Some(Alice.to_raw_public().into()), 209), - (Some(Alice.to_raw_public().into()), 210) - ]); + pool.import_unchecked_extrinsic(block, uxt(Alice, 209, false)) + .unwrap(); + let hash = *pool + .import_unchecked_extrinsic(block, uxt(Alice, 210, false)) + .unwrap() + .hash(); + + let pending: Vec<_> = pool + .cull_and_get_pending(block, |p| p.map(|a| (a.sender(), a.index())).collect()) + .unwrap(); + assert_eq!( + pending, + vec![ + (Some(Alice.to_raw_public().into()), 209), + (Some(Alice.to_raw_public().into()), 210), + ] + ); // first xt is mined, but that has a side-effect of switching index 0 from Alice to Bob. // second xt now invalid signature, so it fails. 
@@ -686,14 +877,22 @@ mod tests { // after this, a re-evaluation of the second's readiness should result in it being thrown // out (or maybe placed in future queue). - let err = pool.reverify_transaction(BlockId::number(1), hash).unwrap_err(); + let err = pool + .reverify_transaction(BlockId::number(1), hash) + .unwrap_err(); match *err.kind() { ::error::ErrorKind::Msg(ref m) if m == "bad signature in extrinsic" => {}, - ref e => assert!(false, "The transaction should be rejected with BadSignature error, got: {:?}", e), + ref e => assert!( + false, + "The transaction should be rejected with BadSignature error, got: {:?}", + e + ), } - let pending: Vec<_> = pool.cull_and_get_pending(BlockId::number(1), |p| p.map(|a| (a.sender(), a.index())).collect()).unwrap(); + let pending: Vec<_> = + pool.cull_and_get_pending(BlockId::number(1), |p| { + p.map(|a| (a.sender(), a.index())).collect() + }).unwrap(); assert_eq!(pending, vec![]); - } } diff --git a/safe-mix/src/lib.rs b/safe-mix/src/lib.rs index 05e8bad95a7aa..53fc4ea8859bc 100644 --- a/safe-mix/src/lib.rs +++ b/safe-mix/src/lib.rs @@ -29,16 +29,18 @@ use core::ops::{BitAnd, BitOr}; pub const MAX_DEPTH: usize = 17; -fn sub_mix(seeds: &[T]) -> T where - T: BitAnd + BitOr + Copy +fn sub_mix(seeds: &[T]) -> T +where + T: BitAnd + BitOr + Copy, { (seeds[0] & seeds[1]) | (seeds[1] & seeds[2]) | (seeds[0] & seeds[2]) } /// Mix a slice. -pub fn triplet_mix(seeds: &[T]) -> Result where +pub fn triplet_mix(seeds: &[T]) -> Result +where T: BitAnd + BitOr, - T: Default + Copy + T: Default + Copy, { Ok(seeds.iter().cloned().triplet_mix()) } @@ -53,9 +55,10 @@ pub trait TripletMix { fn triplet_mix(self) -> Self::Item; } -impl TripletMix for I where +impl TripletMix for I +where I: Iterator, - T: BitAnd + BitOr + Default + Copy + T: BitAnd + BitOr + Default + Copy, { type Item = T; fn triplet_mix(self) -> Self::Item { @@ -66,7 +69,7 @@ impl TripletMix for I where let mut index_at_depth = i; for depth in 0..MAX_DEPTH { if index_at_depth % 3 != 2 { - break; + break } index_at_depth /= 3; result = sub_mix(&accum[depth]); @@ -74,7 +77,7 @@ impl TripletMix for I where // end of the threesome at depth. if depth == MAX_DEPTH - 1 { // end of our stack - bail with result. - break; + break } else { // save in the stack for parent computation accum[depth + 1][index_at_depth % 3] = result; @@ -140,6 +143,12 @@ mod tests { #[test] fn triplet_mix_works_on_third_level() { - assert_eq!(triplet_mix(&[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0][..]).unwrap(), 1); + assert_eq!( + triplet_mix( + &[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0] + [..] + ).unwrap(), + 1 + ); } } diff --git a/subkey/src/main.rs b/subkey/src/main.rs index 1186ad8a51c10..202a6ec3e05ca 100644 --- a/subkey/src/main.rs +++ b/subkey/src/main.rs @@ -1,17 +1,17 @@ extern crate ed25519; -extern crate substrate_primitives; extern crate rand; +extern crate substrate_primitives; +use ed25519::Pair; use rand::{OsRng, Rng}; use std::env::args; -use ed25519::Pair; use substrate_primitives::hexdisplay::HexDisplay; fn good_waypoint(done: u64) -> u64 { match done { - 0 ... 1_000_000 => 100_000, - 0 ... 10_000_000 => 1_000_000, - 0 ... 
100_000_000 => 10_000_000, + 0...1_000_000 => 100_000, + 0...10_000_000 => 1_000_000, + 0...100_000_000 => 10_000_000, _ => 100_000_000, } } @@ -19,17 +19,22 @@ fn good_waypoint(done: u64) -> u64 { fn next_seed(mut seed: [u8; 32]) -> [u8; 32] { for i in 0..32 { match seed[i] { - 255 => { seed[i] = 0; } - _ => { seed[i] += 1; break; } + 255 => { + seed[i] = 0; + }, + _ => { + seed[i] += 1; + break + }, } } - return seed; + return seed } fn main() { if args().len() != 2 { println!("Usage: subkey "); - return; + return } let desired = args().last().unwrap(); let score = |s: &str| { @@ -37,7 +42,7 @@ fn main() { let snip_size = desired.len() - truncate; let truncated = &desired[0..snip_size]; if let Some(pos) = s.find(truncated) { - return (31 - pos) + (snip_size * 32); + return (31 - pos) + (snip_size * 32) } } 0 @@ -56,10 +61,15 @@ fn main() { let ss58 = p.public().to_ss58check(); let s = score(&ss58); if s > best { - println!("{}: {} ({}% complete)", ss58, HexDisplay::from(&seed), s * 100 / top); + println!( + "{}: {} ({}% complete)", + ss58, + HexDisplay::from(&seed), + s * 100 / top + ); best = s; if best == top { - break; + break } } seed = next_seed(seed); diff --git a/substrate/bft/src/generic/accumulator.rs b/substrate/bft/src/generic/accumulator.rs index 811826b7d68a4..7e81ef9ddded8 100644 --- a/substrate/bft/src/generic/accumulator.rs +++ b/substrate/bft/src/generic/accumulator.rs @@ -16,11 +16,11 @@ //! Vote accumulator for each round of BFT consensus. -use std::collections::{HashMap, HashSet}; use std::collections::hash_map::Entry; +use std::collections::{HashMap, HashSet}; use std::hash::Hash; -use generic::{Vote, LocalizedMessage, LocalizedProposal}; +use generic::{LocalizedMessage, LocalizedProposal, Vote}; /// Justification for some state at a given round. #[derive(Debug, Clone, PartialEq, Eq)] @@ -39,16 +39,19 @@ impl UncheckedJustification { /// Provide a closure for checking whether the signature is valid on a /// digest. /// - /// The closure should returns a checked justification iff the round number, digest, and signature - /// represent a valid message and the signer was authorized to issue + /// The closure should returns a checked justification iff the round number, digest, and + /// signature represent a valid message and the signer was authorized to issue /// it. /// /// The `check_message` closure may vary based on context. - pub fn check(self, threshold: usize, mut check_message: F) - -> Result, Self> - where - F: FnMut(usize, &D, &S) -> Option, - V: Hash + Eq, + pub fn check( + self, + threshold: usize, + mut check_message: F, + ) -> Result, Self> + where + F: FnMut(usize, &D, &S) -> Option, + V: Hash + Eq, { let checks_out = { let mut checks_out = || { @@ -57,11 +60,9 @@ impl UncheckedJustification { for signature in &self.signatures { match check_message(self.round_number, &self.digest, signature) { None => return false, - Some(v) => { - if !voted.insert(v) { - return false; - } - } + Some(v) => if !voted.insert(v) { + return false + }, } } @@ -81,7 +82,7 @@ impl UncheckedJustification { /// A checked justification. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct Justification(UncheckedJustification); +pub struct Justification(UncheckedJustification); impl Justification { /// Convert this justification back to unchecked. @@ -149,7 +150,7 @@ pub enum Misbehavior { /// on all messages imported. 
#[derive(Debug)] pub struct Accumulator - where +where Candidate: Eq + Clone, Digest: Hash + Eq + Clone, AuthorityId: Hash + Eq + Clone, @@ -166,8 +167,9 @@ pub struct Accumulator state: State, } -impl Accumulator - where +impl + Accumulator +where Candidate: Eq + Clone, Digest: Hash + Eq + Clone, AuthorityId: Hash + Eq + Clone, @@ -215,7 +217,7 @@ impl Accumulator Result<(), Misbehavior> { // message from different round. if message.round_number() != self.round_number { - return Ok(()); + return Ok(()) } match message { @@ -227,7 +229,7 @@ impl Accumulator self.import_commit(d, sender, signature), Vote::AdvanceRound(_) => self.import_advance_round(sender), } - } + }, } } @@ -241,21 +243,23 @@ impl Accumulator { + Some(ref p) if &p.digest != &proposal.digest => return Err(Misbehavior::DoublePropose( self.round_number, { - let old = self.proposal.as_ref().expect("just checked to be Some; qed"); + let old = self + .proposal + .as_ref() + .expect("just checked to be Some; qed"); (old.digest.clone(), old.digest_signature.clone()) }, - (proposal.digest.clone(), proposal.digest_signature.clone()) - )) - } + (proposal.digest.clone(), proposal.digest_signature.clone()), + )), _ => {}, } @@ -284,7 +288,10 @@ impl Accumulator { vacant.insert((digest.clone(), signature)); - let count = self.vote_counts.entry(digest.clone()).or_insert_with(Default::default); + let count = self + .vote_counts + .entry(digest.clone()) + .or_insert_with(Default::default); count.prepared += 1; if count.prepared >= self.threshold { @@ -292,19 +299,19 @@ impl Accumulator { // if digest is different, that's misbehavior. if occupied.get().0 != digest { return Err(Misbehavior::DoublePrepare( self.round_number, occupied.get().clone(), - (digest, signature) - )); + (digest, signature), + )) } None - } + }, }; // only allow transition to prepare from begin or proposed state. @@ -314,7 +321,8 @@ impl Accumulator Accumulator Accumulator { vacant.insert((digest.clone(), signature)); - let count = self.vote_counts.entry(digest.clone()).or_insert_with(Default::default); + let count = self + .vote_counts + .entry(digest.clone()) + .or_insert_with(Default::default); count.committed += 1; if count.committed >= self.threshold { @@ -349,19 +360,19 @@ impl Accumulator { // if digest is different, that's misbehavior. if occupied.get().0 != digest { return Err(Misbehavior::DoubleCommit( self.round_number, occupied.get().clone(), - (digest, signature) - )); + (digest, signature), + )) } None - } + }, }; // transition to concluded state always valid. @@ -369,7 +380,8 @@ impl Accumulator Accumulator Accumulator Result<(), Misbehavior> { self.advance_round.insert(sender); - if self.advance_round.len() < self.threshold { return Ok(()) } + if self.advance_round.len() < self.threshold { + return Ok(()) + } trace!(target: "bft", "Witnessed threshold advance-round messages for round {}", self.round_number); // allow transition to new round only if we haven't produced a justification @@ -452,7 +466,9 @@ mod tests { justification.signatures.pop(); } // duplicates not allowed. 
- justification.signatures.extend((0..10).map(|i| Signature(600, i))); + justification + .signatures + .extend((0..10).map(|i| Signature(600, i))); assert!(justification.clone().check(11, &check_message).is_err()); } @@ -474,14 +490,16 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - proposal: Candidate(999), - digest: Digest(999), - round_number: 1, - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + proposal: Candidate(999), + digest: Digest(999), + round_number: 1, + })) + .unwrap(); assert_eq!(accumulator.state(), &State::Proposed(Candidate(999))); } @@ -491,32 +509,42 @@ mod tests { let mut accumulator = Accumulator::new(1, 7, AuthorityId(8)); assert_eq!(accumulator.state(), &State::Begin); - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - round_number: 1, - proposal: Candidate(999), - digest: Digest(999), - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + round_number: 1, + proposal: Candidate(999), + digest: Digest(999), + })) + .unwrap(); assert_eq!(accumulator.state(), &State::Proposed(Candidate(999))); for i in 0..6 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); assert_eq!(accumulator.state(), &State::Proposed(Candidate(999))); } - accumulator.import_message(LocalizedVote { - sender: AuthorityId(7), - signature: Signature(999, 7), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(7), + signature: Signature(999, 7), + vote: Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); match accumulator.state() { &State::Prepared(ref j) => assert_eq!(j.digest, Digest(999)), @@ -529,32 +557,42 @@ mod tests { let mut accumulator = Accumulator::new(1, 7, AuthorityId(8)); assert_eq!(accumulator.state(), &State::Begin); - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - round_number: 1, - proposal: Candidate(999), - digest: Digest(999), - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + round_number: 1, + proposal: Candidate(999), + digest: Digest(999), + })) + .unwrap(); assert_eq!(accumulator.state(), &State::Proposed(Candidate(999))); for i in 0..6 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: 
Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); assert_eq!(accumulator.state(), &State::Proposed(Candidate(999))); } - accumulator.import_message(LocalizedVote { - sender: AuthorityId(7), - signature: Signature(999, 7), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(7), + signature: Signature(999, 7), + vote: Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); match accumulator.state() { &State::Prepared(ref j) => assert_eq!(j.digest, Digest(999)), @@ -562,11 +600,15 @@ mod tests { } for i in 0..6 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Commit(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Commit(1, Digest(999)), + }.into(), + ) + .unwrap(); match accumulator.state() { &State::Prepared(_) => {}, @@ -574,11 +616,15 @@ mod tests { } } - accumulator.import_message(LocalizedVote { - sender: AuthorityId(7), - signature: Signature(999, 7), - vote: Vote::Commit(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(7), + signature: Signature(999, 7), + vote: Vote::Commit(1, Digest(999)), + }.into(), + ) + .unwrap(); match accumulator.state() { &State::Committed(ref j) => assert_eq!(j.digest, Digest(999)), @@ -591,23 +637,29 @@ mod tests { let mut accumulator = Accumulator::new(1, 7, AuthorityId(8)); assert_eq!(accumulator.state(), &State::Begin); - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - round_number: 1, - proposal: Candidate(999), - digest: Digest(999), - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + round_number: 1, + proposal: Candidate(999), + digest: Digest(999), + })) + .unwrap(); assert_eq!(accumulator.state(), &State::Proposed(Candidate(999))); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); } match accumulator.state() { @@ -616,11 +668,15 @@ mod tests { } for i in 0..6 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::AdvanceRound(1), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::AdvanceRound(1), + }.into(), + ) + .unwrap(); match accumulator.state() { &State::Prepared(_) => {}, @@ -628,11 +684,15 @@ mod tests { } } - accumulator.import_message(LocalizedVote { - sender: AuthorityId(7), - signature: Signature(999, 7), - vote: Vote::AdvanceRound(1), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(7), + signature: Signature(999, 7), + vote: Vote::AdvanceRound(1), + }.into(), + ) + .unwrap(); match accumulator.state() { &State::Advanced(Some(_)) => {}, @@ -646,11 +706,15 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 
0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); } match accumulator.state() { @@ -659,11 +723,15 @@ mod tests { } for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Commit(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Commit(1, Digest(999)), + }.into(), + ) + .unwrap(); } match accumulator.state() { @@ -678,11 +746,15 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); } match accumulator.state() { @@ -690,14 +762,16 @@ mod tests { s => panic!("wrong state: {:?}", s), } - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - round_number: 1, - proposal: Candidate(999), - digest: Digest(999), - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + round_number: 1, + proposal: Candidate(999), + digest: Digest(999), + })) + .unwrap(); match accumulator.state() { &State::Prepared(ref j) => assert_eq!(j.digest, Digest(999)), @@ -711,11 +785,15 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Commit(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Commit(1, Digest(999)), + }.into(), + ) + .unwrap(); } match accumulator.state() { @@ -723,14 +801,16 @@ mod tests { s => panic!("wrong state: {:?}", s), } - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - round_number: 1, - proposal: Candidate(999), - digest: Digest(999), - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + round_number: 1, + proposal: Candidate(999), + digest: Digest(999), + })) + .unwrap(); match accumulator.state() { &State::Committed(ref j) => assert_eq!(j.digest, Digest(999)), @@ -744,29 +824,35 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(1, i), - vote: Vote::AdvanceRound(1), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(1, i), + vote: Vote::AdvanceRound(1), + }.into(), + ) + .unwrap(); } match 
accumulator.state() { - &State::Advanced(_) => {} + &State::Advanced(_) => {}, s => panic!("wrong state: {:?}", s), } - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - round_number: 1, - proposal: Candidate(999), - digest: Digest(999), - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + round_number: 1, + proposal: Candidate(999), + digest: Digest(999), + })) + .unwrap(); match accumulator.state() { - &State::Advanced(_) => {} + &State::Advanced(_) => {}, s => panic!("wrong state: {:?}", s), } } @@ -777,11 +863,15 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(1, i), - vote: Vote::AdvanceRound(1), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(1, i), + vote: Vote::AdvanceRound(1), + }.into(), + ) + .unwrap(); } match accumulator.state() { @@ -796,11 +886,15 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Commit(1, Digest(999)), - }.into()).unwrap(); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Commit(1, Digest(999)), + }.into(), + ) + .unwrap(); } match accumulator.state() { @@ -815,20 +909,25 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Prepare(1, Digest(999)), - }.into()).unwrap(); - - let res = accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(123, i), - vote: Vote::Prepare(1, Digest(123)), - }.into()); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Prepare(1, Digest(999)), + }.into(), + ) + .unwrap(); + + let res = accumulator.import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(123, i), + vote: Vote::Prepare(1, Digest(123)), + }.into(), + ); assert!(res.is_err()); - } } @@ -838,20 +937,25 @@ mod tests { assert_eq!(accumulator.state(), &State::Begin); for i in 0..7 { - accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(999, i), - vote: Vote::Commit(1, Digest(999)), - }.into()).unwrap(); - - let res = accumulator.import_message(LocalizedVote { - sender: AuthorityId(i), - signature: Signature(123, i), - vote: Vote::Commit(1, Digest(123)), - }.into()); + accumulator + .import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(999, i), + vote: Vote::Commit(1, Digest(999)), + }.into(), + ) + .unwrap(); + + let res = accumulator.import_message( + LocalizedVote { + sender: AuthorityId(i), + signature: Signature(123, i), + vote: Vote::Commit(1, Digest(123)), + }.into(), + ); assert!(res.is_err()); - } } @@ -860,14 +964,16 @@ mod tests { let mut accumulator = Accumulator::::new(1, 7, AuthorityId(8)); assert_eq!(accumulator.state(), &State::Begin); - accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { - sender: AuthorityId(8), - 
full_signature: Signature(999, 8), - digest_signature: Signature(999, 8), - round_number: 1, - proposal: Candidate(999), - digest: Digest(999), - })).unwrap(); + accumulator + .import_message(LocalizedMessage::Propose(LocalizedProposal { + sender: AuthorityId(8), + full_signature: Signature(999, 8), + digest_signature: Signature(999, 8), + round_number: 1, + proposal: Candidate(999), + digest: Digest(999), + })) + .unwrap(); let res = accumulator.import_message(LocalizedMessage::Propose(LocalizedProposal { sender: AuthorityId(8), diff --git a/substrate/bft/src/generic/mod.rs b/substrate/bft/src/generic/mod.rs index 6d4715e80612f..a2f8efa3e65f2 100644 --- a/substrate/bft/src/generic/mod.rs +++ b/substrate/bft/src/generic/mod.rs @@ -17,16 +17,18 @@ //! BFT Agreement based on a rotating proposer in different rounds. //! Very general implementation. -use std::collections::{HashMap, BTreeMap, VecDeque}; use std::collections::hash_map; +use std::collections::{BTreeMap, HashMap, VecDeque}; use std::fmt::Debug; use std::hash::Hash; -use futures::{future, Future, Stream, Sink, Poll, Async, AsyncSink}; +use futures::{future, Async, AsyncSink, Future, Poll, Sink, Stream}; use self::accumulator::State; -pub use self::accumulator::{Accumulator, Justification, PrepareJustification, UncheckedJustification, Misbehavior}; +pub use self::accumulator::{ + Accumulator, Justification, Misbehavior, PrepareJustification, UncheckedJustification, +}; mod accumulator; @@ -148,11 +150,11 @@ pub trait Context { /// Signature. type Signature: Debug + Eq + Clone; /// A future that resolves when a round timeout is concluded. - type RoundTimeout: Future; + type RoundTimeout: Future; /// A future that resolves when a proposal is ready. - type CreateProposal: Future; + type CreateProposal: Future; /// A future that resolves when a proposal has been evaluated. - type EvaluateProposal: Future; + type EvaluateProposal: Future; /// Get the local authority ID. fn local_id(&self) -> Self::AuthorityId; @@ -166,8 +168,10 @@ pub trait Context { /// Sign a message using the local authority ID. /// In the case of a proposal message, it should sign on the hash and /// the bytes of the proposal. - fn sign_local(&self, message: Message) - -> LocalizedMessage; + fn sign_local( + &self, + message: Message, + ) -> LocalizedMessage; /// Get the proposer for a given round of consensus. fn round_proposer(&self, round: usize) -> Self::AuthorityId; @@ -220,15 +224,17 @@ impl Sending { } // process all the sends into the sink. 
- fn process_all>(&mut self, sink: &mut S) -> Poll<(), S::SinkError> { + fn process_all>(&mut self, sink: &mut S) -> Poll<(), S::SinkError> { while let Some(item) = self.items.pop_front() { match sink.start_send(item) { Err(e) => return Err(e), Ok(AsyncSink::NotReady(item)) => { self.items.push_front(item); - return Ok(Async::NotReady); - } - Ok(AsyncSink::Ready) => { self.flushing = true; } + return Ok(Async::NotReady) + }, + Ok(AsyncSink::Ready) => { + self.flushing = true; + }, } } @@ -236,7 +242,9 @@ impl Sending { match sink.poll_complete() { Err(e) => return Err(e), Ok(Async::NotReady) => return Ok(Async::NotReady), - Ok(Async::Ready(())) => { self.flushing = false; } + Ok(Async::Ready(())) => { + self.flushing = false; + }, } } @@ -321,7 +329,8 @@ struct Strategy { locked: Option>, notable_candidates: HashMap, current_accumulator: Accumulator, - future_accumulators: BTreeMap>, + future_accumulators: + BTreeMap>, local_id: C::AuthorityId, misbehavior: HashMap>, } @@ -331,11 +340,7 @@ impl Strategy { let timeout = context.begin_round_timeout(0); let threshold = bft_threshold(nodes, max_faulty); - let current_accumulator = Accumulator::new( - 0, - threshold, - context.round_proposer(0), - ); + let current_accumulator = Accumulator::new(0, threshold, context.round_proposer(0)); Strategy { nodes, @@ -360,7 +365,7 @@ impl Strategy { fn import_message( &mut self, context: &C, - msg: LocalizedMessage + msg: LocalizedMessage, ) { let round_number = msg.round_number(); @@ -371,13 +376,15 @@ impl Strategy { } else if round_number > current_round { let threshold = bft_threshold(self.nodes, self.max_faulty); - let mut future_acc = self.future_accumulators.entry(round_number).or_insert_with(|| { - Accumulator::new( - round_number, - threshold, - context.round_proposer(round_number), - ) - }); + let mut future_acc = self.future_accumulators.entry(round_number).or_insert_with( + || { + Accumulator::new( + round_number, + threshold, + context.round_proposer(round_number), + ) + }, + ); future_acc.import_message(msg) } else { @@ -402,8 +409,9 @@ impl Strategy { self.advance_to_round(context, justification.round_number); } - let lock_to_new = self.locked.as_ref() - .map_or(true, |l| l.justification.round_number < justification.round_number); + let lock_to_new = self.locked.as_ref().map_or(true, |l| { + l.justification.round_number < justification.round_number + }); if lock_to_new { self.locked = Some(Locked { justification }) @@ -417,10 +425,8 @@ impl Strategy { fn poll( &mut self, context: &C, - sending: &mut Sending<::Communication> - ) - -> Poll, C::Error> - { + sending: &mut Sending<::Communication>, + ) -> Poll, C::Error> { let mut last_watermark = (self.current_round(), self.local_state); // poll until either completion or state doesn't change. @@ -436,7 +442,7 @@ impl Strategy { } else { last_watermark = new_watermark; } - } + }, } } } @@ -446,10 +452,8 @@ impl Strategy { fn poll_once( &mut self, context: &C, - sending: &mut Sending<::Communication> - ) - -> Poll, C::Error> - { + sending: &mut Sending<::Communication>, + ) -> Poll, C::Error> { self.propose(context, sending)?; self.prepare(context, sending)?; self.commit(context, sending); @@ -459,34 +463,39 @@ impl Strategy { &State::Advanced(ref p_just) => { // lock to any witnessed prepare justification. 
if let Some(p_just) = p_just.as_ref() { - self.locked = Some(Locked { justification: p_just.clone() }); + self.locked = Some(Locked { + justification: p_just.clone(), + }); } let round_number = self.current_round(); Some(round_number + 1) - } + }, &State::Committed(ref just) => { // fetch the agreed-upon candidate: // - we may not have received the proposal in the first place // - there is no guarantee that the proposal we got was agreed upon // (can happen if faulty primary) // - look in the candidates of prior rounds just in case. - let candidate = self.current_accumulator + let candidate = self + .current_accumulator .proposal() - .and_then(|c| if context.candidate_digest(c) == just.digest { - Some(c.clone()) - } else { - None + .and_then(|c| { + if context.candidate_digest(c) == just.digest { + Some(c.clone()) + } else { + None + } }) .or_else(|| self.notable_candidates.get(&just.digest).cloned()); let committed = Committed { candidate, - justification: just.clone() + justification: just.clone(), }; return Ok(Async::Ready(committed)) - } + }, _ => None, }; @@ -500,10 +509,8 @@ impl Strategy { fn propose( &mut self, context: &C, - sending: &mut Sending<::Communication> - ) - -> Result<(), C::Error> - { + sending: &mut Sending<::Communication>, + ) -> Result<(), C::Error> { if let LocalState::Start = self.local_state { let mut propose = false; if let &State::Begin = self.current_accumulator.state() { @@ -512,7 +519,9 @@ impl Strategy { propose = self.local_id == primary; }; - if !propose { return Ok(()) } + if !propose { + return Ok(()) + } // obtain the proposal to broadcast. let proposal = match self.locked { @@ -524,9 +533,10 @@ impl Strategy { // to eat the round timeout for now, but it can be optimized by // broadcasting an advance vote. self.notable_candidates.get(locked.digest()).cloned() - } + }, None => { - let res = self.fetching_proposal + let res = self + .fetching_proposal .get_or_insert_with(|| context.proposal()) .poll()?; @@ -534,24 +544,19 @@ impl Strategy { Async::Ready(p) => Some(p), Async::NotReady => None, } - } + }, }; if let Some(proposal) = proposal { self.fetching_proposal = None; - let message = Message::Propose( - self.current_round(), - proposal - ); + let message = Message::Propose(self.current_round(), proposal); self.import_and_send_message(message, context, sending); // broadcast the justification along with the proposal if we are locked. if let Some(ref locked) = self.locked { - sending.push( - Communication::Auxiliary(locked.justification.clone()) - ); + sending.push(Communication::Auxiliary(locked.justification.clone())); } self.local_state = LocalState::Proposed; @@ -564,14 +569,12 @@ impl Strategy { fn prepare( &mut self, context: &C, - sending: &mut Sending<::Communication> - ) - -> Result<(), C::Error> - { + sending: &mut Sending<::Communication>, + ) -> Result<(), C::Error> { // prepare only upon start or having proposed. match self.local_state { LocalState::Start | LocalState::Proposed => {}, - _ => return Ok(()) + _ => return Ok(()), }; let mut prepare_for = None; @@ -583,15 +586,16 @@ impl Strategy { // vote to prepare only if we believe the candidate to be valid and // we are not locked on some other candidate. match self.locked { - Some(ref locked) if locked.digest() != &digest => {} + Some(ref locked) if locked.digest() != &digest => {}, Some(_) => { // don't check validity if we are locked. // this is necessary to preserve the liveness property. 
self.local_state = LocalState::Prepared(true); prepare_for = Some(digest); - } + }, None => { - let res = self.evaluating_proposal + let res = self + .evaluating_proposal .get_or_insert_with(|| context.proposal_valid(candidate)) .poll()?; @@ -603,15 +607,12 @@ impl Strategy { prepare_for = Some(digest); } } - } + }, } } if let Some(digest) = prepare_for { - let message = Vote::Prepare( - self.current_round(), - digest - ).into(); + let message = Vote::Prepare(self.current_round(), digest).into(); self.import_and_send_message(message, context, sending); } @@ -619,15 +620,11 @@ impl Strategy { Ok(()) } - fn commit( - &mut self, - context: &C, - sending: &mut Sending<::Communication> - ) { + fn commit(&mut self, context: &C, sending: &mut Sending<::Communication>) { // commit only if we haven't voted to advance or committed already match self.local_state { LocalState::Committed | LocalState::VoteAdvance => return, - _ => {} + _ => {}, } let mut commit_for = None; @@ -635,15 +632,14 @@ impl Strategy { if let &State::Prepared(ref p_just) = self.current_accumulator.state() { // we are now locked to this prepare justification. let digest = p_just.digest.clone(); - self.locked = Some(Locked { justification: p_just.clone() }); + self.locked = Some(Locked { + justification: p_just.clone(), + }); commit_for = Some(digest); } if let Some(digest) = commit_for { - let message = Vote::Commit( - self.current_round(), - digest - ).into(); + let message = Vote::Commit(self.current_round(), digest).into(); self.import_and_send_message(message, context, sending); self.local_state = LocalState::Committed; @@ -653,12 +649,12 @@ impl Strategy { fn vote_advance( &mut self, context: &C, - sending: &mut Sending<::Communication> - ) - -> Result<(), C::Error> - { + sending: &mut Sending<::Communication>, + ) -> Result<(), C::Error> { // we can vote for advancement under all circumstances unless we have already. - if let LocalState::VoteAdvance = self.local_state { return Ok(()) } + if let LocalState::VoteAdvance = self.local_state { + return Ok(()) + } // if we got f + 1 advance votes, or the timeout has fired, and we haven't // sent an AdvanceRound message yet, do so. @@ -675,9 +671,7 @@ impl Strategy { } if attempt_advance { - let message = Vote::AdvanceRound( - self.current_round(), - ).into(); + let message = Vote::AdvanceRound(self.current_round()).into(); self.import_and_send_message(message, context, sending); self.local_state = LocalState::VoteAdvance; @@ -701,7 +695,9 @@ impl Strategy { // we will have it. if let Some(proposal) = self.current_accumulator.proposal() { let digest = context.candidate_digest(proposal); - self.notable_candidates.entry(digest).or_insert_with(|| proposal.clone()); + self.notable_candidates + .entry(digest) + .or_insert_with(|| proposal.clone()); } // if we jump ahead more than one round, get rid of the ones in between. 
@@ -724,7 +720,7 @@ impl Strategy { &mut self, message: Message, context: &C, - sending: &mut Sending<::Communication> + sending: &mut Sending<::Communication>, ) { let signed_message = context.sign_local(message); self.import_message(context, signed_message.clone()); @@ -744,10 +740,10 @@ pub struct Agreement { } impl Future for Agreement - where - C: Context, - I: Stream::Communication,Error=C::Error>, - O: Sink::Communication,SinkError=C::Error>, +where + C: Context, + I: Stream::Communication, Error = C::Error>, + O: Sink::Communication, SinkError = C::Error>, { type Item = Committed; type Error = C::Error; @@ -761,7 +757,7 @@ impl Future for Agreement Async::NotReady => { self.concluded = Some(just); Async::NotReady - } + }, }) } @@ -771,20 +767,21 @@ impl Future for Agreement driving = match self.input.poll()? { Async::Ready(msg) => { match msg.ok_or(InputStreamConcluded)? { - Communication::Consensus(message) => self.strategy.import_message(&self.context, message), - Communication::Auxiliary(lock_proof) - => self.strategy.import_lock_proof(&self.context, lock_proof), + Communication::Consensus(message) => + self.strategy.import_message(&self.context, message), + Communication::Auxiliary(lock_proof) => + self.strategy.import_lock_proof(&self.context, lock_proof), } true - } + }, Async::NotReady => false, }; // drive state machine after handling new input. if let Async::Ready(just) = self.strategy.poll(&self.context, &mut self.sending)? { self.concluded = Some(just); - return self.poll(); + return self.poll() } } @@ -801,7 +798,9 @@ impl Agreement { } /// Drain the misbehavior vector. - pub fn drain_misbehavior(&mut self) -> hash_map::Drain> { + pub fn drain_misbehavior( + &mut self, + ) -> hash_map::Drain> { self.strategy.misbehavior.drain() } } @@ -823,12 +822,17 @@ impl Agreement { /// conclude without having witnessed the conclusion. /// In general, this future should be pre-empted by the import of a justification /// set for this block height. 
-pub fn agree(context: C, nodes: usize, max_faulty: usize, input: I, output: O) - -> Agreement - where - C: Context, - I: Stream::Communication,Error=C::Error>, - O: Sink::Communication,SinkError=C::Error>, +pub fn agree( + context: C, + nodes: usize, + max_faulty: usize, + input: I, + output: O, +) -> Agreement +where + C: Context, + I: Stream::Communication, Error = C::Error>, + O: Sink::Communication, SinkError = C::Error>, { let strategy = Strategy::create(&context, nodes, max_faulty); Agreement { @@ -837,6 +841,6 @@ pub fn agree(context: C, nodes: usize, max_faulty: usize, inpu output, concluded: None, sending: Sending::with_capacity(4), - strategy: strategy, + strategy, } } diff --git a/substrate/bft/src/generic/tests.rs b/substrate/bft/src/generic/tests.rs index b683d751e6ed5..d50e148befe2b 100644 --- a/substrate/bft/src/generic/tests.rs +++ b/substrate/bft/src/generic/tests.rs @@ -19,13 +19,13 @@ use super::*; use std::collections::BTreeSet; -use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::{Arc, Mutex}; use std::time::Duration; -use futures::prelude::*; -use futures::sync::{oneshot, mpsc}; use futures::future::FutureResult; +use futures::prelude::*; +use futures::sync::{mpsc, oneshot}; use tokio_timer::{self, Timer}; @@ -37,9 +37,13 @@ struct Network { } impl Network { - fn new(nodes: usize) - -> (Self, Vec>, Vec>) - { + fn new( + nodes: usize, + ) -> ( + Self, + Vec>, + Vec>, + ) { let mut inputs = Vec::with_capacity(nodes); let mut outputs = Vec::with_capacity(nodes); let mut endpoints = Vec::with_capacity(nodes); @@ -61,7 +65,9 @@ impl Network { } fn route_on_thread(self) { - ::std::thread::spawn(move || { let _ = self.wait(); }); + ::std::thread::spawn(move || { + let _ = self.wait(); + }); } } @@ -74,7 +80,8 @@ impl Future for Network { None => Ok(Async::Ready(())), Some((sender, item)) => { { - let receiving_endpoints = self.endpoints + let receiving_endpoints = self + .endpoints .iter() .enumerate() .filter(|&(i, _)| i != sender) @@ -86,7 +93,7 @@ impl Future for Network { } self.poll() - } + }, } } } @@ -127,7 +134,7 @@ impl Context for TestContext { type Digest = Digest; type AuthorityId = AuthorityId; type Signature = Signature; - type RoundTimeout = Box>; + type RoundTimeout = Box>; type CreateProposal = FutureResult; type EvaluateProposal = FutureResult; @@ -150,9 +157,10 @@ impl Context for TestContext { Digest(candidate.0) } - fn sign_local(&self, message: Message) - -> LocalizedMessage - { + fn sign_local( + &self, + message: Message, + ) -> LocalizedMessage { let signature = Signature(message.clone(), self.local_id.clone()); match message { @@ -194,7 +202,9 @@ impl Context for TestContext { } let current_round = self.current_round.clone(); - let timeout = self.timer.sleep(round_duration) + let timeout = self + .timer + .sleep(round_duration) .map(move |_| { current_round.compare_and_swap(round, round + 1, Ordering::SeqCst); }) @@ -261,7 +271,10 @@ fn consensus_completes_with_minimum_good() { .expect("to not time out"); for result in &results { - assert_eq!(&result.justification.digest, &results[0].justification.digest); + assert_eq!( + &result.justification.digest, + &results[0].justification.digest + ); } } @@ -318,7 +331,10 @@ fn consensus_completes_with_minimum_good_all_initial_proposals_bad() { .expect("to not time out"); for result in &results { - assert_eq!(&result.justification.digest, &results[0].justification.digest); + assert_eq!( + &result.justification.digest, + &results[0].justification.digest + ); } } @@ 
-381,9 +397,15 @@ fn threshold_plus_one_locked_on_proposal_only_one_with_candidate() { round_number: locked_round, digest: locked_digest.clone(), signatures: (0..7) - .map(|i| Signature(Message::Vote(Vote::Prepare(locked_round, locked_digest.clone())), AuthorityId(i))) - .collect() - }.check(7, |_, _, s| Some(s.1.clone())).unwrap(); + .map(|i| { + Signature( + Message::Vote(Vote::Prepare(locked_round, locked_digest.clone())), + AuthorityId(i), + ) + }) + .collect(), + }.check(7, |_, _, s| Some(s.1.clone())) + .unwrap(); let timer = tokio_timer::wheel().tick_duration(ROUND_DURATION).build(); @@ -411,10 +433,9 @@ fn threshold_plus_one_locked_on_proposal_only_one_with_candidate() { tx.sink_map_err(|_| Error).with(move |t| Ok((i, t))), ); - agreement.strategy.advance_to_round( - &agreement.context, - locked_round + 1 - ); + agreement + .strategy + .advance_to_round(&agreement.context, locked_round + 1); if i <= max_faulty { agreement.strategy.locked = Some(Locked { @@ -423,10 +444,10 @@ fn threshold_plus_one_locked_on_proposal_only_one_with_candidate() { } if i == max_faulty { - agreement.strategy.notable_candidates.insert( - locked_digest.clone(), - locked_proposal.clone(), - ); + agreement + .strategy + .notable_candidates + .insert(locked_digest.clone(), locked_proposal.clone()); } agreement @@ -476,15 +497,18 @@ fn consensus_completes_even_when_nodes_start_with_a_delay() { let sleep_duration = base_sleep * i as u32; - timer.sleep(sleep_duration).map_err(|_| Error).and_then(move |_| { - agree( - ctx, - node_count, - max_faulty, - rx.map_err(|_| Error), - tx.sink_map_err(|_| Error).with(move |t| Ok((i, t))), - ) - }) + timer + .sleep(sleep_duration) + .map_err(|_| Error) + .and_then(move |_| { + agree( + ctx, + node_count, + max_faulty, + rx.map_err(|_| Error), + tx.sink_map_err(|_| Error).with(move |t| Ok((i, t))), + ) + }) }) .collect::>(); @@ -499,6 +523,9 @@ fn consensus_completes_even_when_nodes_start_with_a_delay() { .expect("to not time out"); for result in &results { - assert_eq!(&result.justification.digest, &results[0].justification.digest); + assert_eq!( + &result.justification.digest, + &results[0].justification.digest + ); } } diff --git a/substrate/bft/src/lib.rs b/substrate/bft/src/lib.rs index 07bf64ea0c3e0..9eb32d52c138a 100644 --- a/substrate/bft/src/lib.rs +++ b/substrate/bft/src/lib.rs @@ -19,13 +19,13 @@ pub mod error; pub mod generic; +extern crate ed25519; +extern crate parking_lot; extern crate substrate_codec as codec; extern crate substrate_primitives as primitives; -extern crate substrate_runtime_support as runtime_support; extern crate substrate_runtime_primitives as runtime_primitives; -extern crate ed25519; +extern crate substrate_runtime_support as runtime_support; extern crate tokio_timer; -extern crate parking_lot; #[macro_use] extern crate log; @@ -37,35 +37,33 @@ extern crate futures; extern crate error_chain; use std::mem; -use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; use codec::Slicable; use ed25519::LocalizedSignature; +use primitives::AuthorityId; +use runtime_primitives::bft::{ + Action as PrimitiveAction, Justification as PrimitiveJustification, Message as PrimitiveMessage, +}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block, Header}; -use runtime_primitives::bft::{Message as PrimitiveMessage, Action as PrimitiveAction, Justification as PrimitiveJustification}; -use primitives::AuthorityId; -use futures::{task, Async, Stream, Sink, Future, IntoFuture}; use 
futures::sync::oneshot; -use tokio_timer::Timer; +use futures::{task, Async, Future, IntoFuture, Sink, Stream}; use parking_lot::Mutex; +use tokio_timer::Timer; -pub use generic::InputStreamConcluded; pub use error::{Error, ErrorKind}; +pub use generic::InputStreamConcluded; /// Messages over the proposal. /// Each message carries an associated round number. pub type Message = generic::Message::Hash>; /// Localized message type. -pub type LocalizedMessage = generic::LocalizedMessage< - B, - ::Hash, - AuthorityId, - LocalizedSignature ->; +pub type LocalizedMessage = + generic::LocalizedMessage::Hash, AuthorityId, LocalizedSignature>; /// Justification of some hash. pub type Justification = generic::Justification; @@ -81,10 +79,14 @@ impl From> for UncheckedJustification { UncheckedJustification { round_number: just.round_number as usize, digest: just.hash, - signatures: just.signatures.into_iter().map(|(from, sig)| LocalizedSignature { - signer: from.into(), - signature: sig, - }).collect(), + signatures: just + .signatures + .into_iter() + .map(|(from, sig)| LocalizedSignature { + signer: from.into(), + signature: sig, + }) + .collect(), } } } @@ -94,7 +96,11 @@ impl Into> for UncheckedJustification { PrimitiveJustification { round_number: self.round_number as u32, hash: self.digest, - signatures: self.signatures.into_iter().map(|s| (s.signer.into(), s.signature)).collect(), + signatures: self + .signatures + .into_iter() + .map(|s| (s.signer.into(), s.signature)) + .collect(), } } } @@ -103,7 +109,8 @@ impl Into> for UncheckedJustification { pub type Committed = generic::Committed::Hash, LocalizedSignature>; /// Communication between BFT participants. -pub type Communication = generic::Communication::Hash, AuthorityId, LocalizedSignature>; +pub type Communication = + generic::Communication::Hash, AuthorityId, LocalizedSignature>; /// Misbehavior observed from BFT participants. pub type Misbehavior = generic::Misbehavior; @@ -117,7 +124,12 @@ pub trait ProposerFactory { /// Initialize the proposal logic on top of a specific header. // TODO: provide state context explicitly? - fn init(&self, parent_header: &B::Header, authorities: &[AuthorityId], sign_with: Arc) -> Result; + fn init( + &self, + parent_header: &B::Header, + authorities: &[AuthorityId], + sign_with: Arc, + ) -> Result; } /// Logic for a proposer. @@ -128,9 +140,9 @@ pub trait Proposer { /// Error type which can occur when proposing or evaluating. type Error: From + From + 'static; /// Future that resolves to a committed proposal. - type Create: IntoFuture; + type Create: IntoFuture; /// Future that resolves when a proposal is evaluated. - type Evaluate: IntoFuture; + type Evaluate: IntoFuture; /// Create a proposal. 
fn propose(&self) -> Self::Create; @@ -169,16 +181,16 @@ struct BftInstance { } impl> generic::Context for BftInstance - where - B: Clone + Eq, - B::Hash: ::std::hash::Hash, +where + B: Clone + Eq, + B::Hash: ::std::hash::Hash, { type Error = P::Error; type AuthorityId = AuthorityId; type Digest = B::Hash; type Signature = LocalizedSignature; type Candidate = B; - type RoundTimeout = Box + Send>; + type RoundTimeout = Box + Send>; type CreateProposal = ::Future; type EvaluateProposal = ::Future; @@ -210,24 +222,29 @@ impl> generic::Context for BftInstance use std::time::Duration; let round = ::std::cmp::min(63, round) as u32; - let timeout = 1u64.checked_shl(round) + let timeout = 1u64 + .checked_shl(round) .unwrap_or_else(u64::max_value) .saturating_mul(self.round_timeout_multiplier); - Box::new(self.timer.sleep(Duration::from_secs(timeout)) - .map_err(|_| Error::from(ErrorKind::FaultyTimer)) - .map_err(Into::into)) + Box::new( + self.timer + .sleep(Duration::from_secs(timeout)) + .map_err(|_| Error::from(ErrorKind::FaultyTimer)) + .map_err(Into::into), + ) } } /// A future that resolves either when canceled (witnessing a block from the network at same height) /// or when agreement completes. -pub struct BftFuture where +pub struct BftFuture +where B: Block + Clone + Eq, B::Hash: ::std::hash::Hash, P: Proposer, - InStream: Stream, Error=P::Error>, - OutSink: Sink, SinkError=P::Error>, + InStream: Stream, Error = P::Error>, + OutSink: Sink, SinkError = P::Error>, { inner: generic::Agreement, InStream, OutSink>, cancel: Arc, @@ -235,14 +252,15 @@ pub struct BftFuture where import: Arc, } -impl Future for BftFuture where +impl Future for BftFuture +where B: Block + Clone + Eq, B::Hash: ::std::hash::Hash, P: Proposer, P::Error: ::std::fmt::Display, I: BlockImport, - InStream: Stream, Error=P::Error>, - OutSink: Sink, SinkError=P::Error>, + InStream: Stream, Error = P::Error>, + OutSink: Sink, SinkError = P::Error>, { type Item = (); type Error = (); @@ -269,24 +287,29 @@ impl Future for BftFuture Drop for BftFuture where +impl Drop for BftFuture +where B: Block + Clone + Eq, B::Hash: ::std::hash::Hash, P: Proposer, - InStream: Stream, Error=P::Error>, - OutSink: Sink, SinkError=P::Error>, + InStream: Stream, Error = P::Error>, + OutSink: Sink, SinkError = P::Error>, { fn drop(&mut self) { // TODO: have a trait member to pass misbehavior reports into. let misbehavior = self.inner.drain_misbehavior().collect::>(); - self.inner.context().proposer.import_misbehavior(misbehavior); + self.inner + .context() + .proposer + .import_misbehavior(misbehavior); } } @@ -322,23 +345,22 @@ pub struct BftService { } impl BftService - where - B: Block + Clone + Eq, - B::Hash: ::std::hash::Hash, - P: ProposerFactory, - >::Error: ::std::fmt::Display, - I: BlockImport + Authorities, +where + B: Block + Clone + Eq, + B::Hash: ::std::hash::Hash, + P: ProposerFactory, + >::Error: ::std::fmt::Display, + I: BlockImport + Authorities, { - /// Create a new service instance. pub fn new(client: Arc, key: Arc, factory: P) -> BftService { BftService { - client: client, + client, live_agreement: Mutex::new(None), timer: Timer::default(), round_timeout_multiplier: 4, - key: key, // TODO: key changing over time. - factory: factory, + key, // TODO: key changing over time. + factory, } } @@ -353,14 +375,33 @@ impl BftService /// If the local signing key is an authority, this will begin the consensus process to build a /// block on top of it. If the executor fails to run the future, an error will be returned. 
/// Returns `None` if the agreement on the block with given parent is already in progress. - pub fn build_upon<InStream, OutSink>(&self, header: &B::Header, input: InStream, output: OutSink) - -> Result<Option<BftFuture<B, <P as ProposerFactory<B>>::Proposer, I, InStream, OutSink>>, P::Error> where - InStream: Stream<Item=Communication<B>, Error=<<P as ProposerFactory<B>>::Proposer as Proposer<B>>::Error>, - OutSink: Sink<SinkItem=Communication<B>, SinkError=<<P as ProposerFactory<B>>::Proposer as Proposer<B>>::Error>, + pub fn build_upon<InStream, OutSink>( + &self, + header: &B::Header, + input: InStream, + output: OutSink, + ) -> Result< + Option<BftFuture<B, <P as ProposerFactory<B>>::Proposer, I, InStream, OutSink>>, + P::Error, + > + where + InStream: Stream< + Item = Communication<B>, + Error = <<P as ProposerFactory<B>>::Proposer as Proposer<B>>::Error, + >, + OutSink: Sink< + SinkItem = Communication<B>, + SinkError = <<P as ProposerFactory<B>
>::Proposer as Proposer>::Error, + >, { let hash = header.hash(); - if self.live_agreement.lock().as_ref().map_or(false, |&(ref h, _)| h == &hash) { - return Ok(None); + if self + .live_agreement + .lock() + .as_ref() + .map_or(false, |&(ref h, _)| h == &hash) + { + return Ok(None) } let authorities = self.client.authorities(&BlockId::Hash(hash.clone()))?; @@ -385,16 +426,10 @@ impl BftService round_timeout_multiplier: self.round_timeout_multiplier, timer: self.timer.clone(), key: self.key.clone(), - authorities: authorities, + authorities, }; - let agreement = generic::agree( - bft_instance, - n, - max_faulty, - input, - output, - ); + let agreement = generic::agree(bft_instance, n, max_faulty, input, output); let cancel = Arc::new(AtomicBool::new(false)); let (tx, rx) = oneshot::channel(); @@ -402,15 +437,21 @@ impl BftService // cancel current agreement. // defers drop of live to the end. let _preempted_consensus = { - mem::replace(&mut *self.live_agreement.lock(), Some((hash, AgreementHandle { - task: Some(rx), - cancel: cancel.clone(), - }))) + mem::replace( + &mut *self.live_agreement.lock(), + Some(( + hash, + AgreementHandle { + task: Some(rx), + cancel: cancel.clone(), + }, + )), + ) }; Ok(Some(BftFuture { inner: agreement, - cancel: cancel, + cancel, send_task: Some(tx), import: self.client.clone(), })) @@ -423,9 +464,11 @@ impl BftService /// Get current agreement parent hash if any. pub fn live_agreement(&self) -> Option { - self.live_agreement.lock().as_ref().map(|&(ref h, _)| h.clone()) + self.live_agreement + .lock() + .as_ref() + .map(|&(ref h, _)| h.clone()) } - } /// Given a total number of authorities, yield the maximum faulty that would be allowed. @@ -440,29 +483,38 @@ pub fn bft_threshold(n: usize) -> usize { n - max_faulty_of(n) } -fn check_justification_signed_message(authorities: &[AuthorityId], message: &[u8], just: UncheckedJustification) - -> Result, UncheckedJustification> -{ +fn check_justification_signed_message( + authorities: &[AuthorityId], + message: &[u8], + just: UncheckedJustification, +) -> Result, UncheckedJustification> { // TODO: return additional error information. - just.check(authorities.len() - max_faulty_of(authorities.len()), |_, _, sig| { - let auth_id = sig.signer.clone().into(); - if !authorities.contains(&auth_id) { return None } - - if ed25519::verify_strong(&sig.signature, message, &sig.signer) { - Some(sig.signer.0) - } else { - None - } - }) + just.check( + authorities.len() - max_faulty_of(authorities.len()), + |_, _, sig| { + let auth_id = sig.signer.clone().into(); + if !authorities.contains(&auth_id) { + return None + } + + if ed25519::verify_strong(&sig.signature, message, &sig.signer) { + Some(sig.signer.0) + } else { + None + } + }, + ) } /// Check a full justification for a header hash. /// Provide all valid authorities. /// /// On failure, returns the justification back. -pub fn check_justification(authorities: &[AuthorityId], parent: B::Hash, just: UncheckedJustification) - -> Result, UncheckedJustification> -{ +pub fn check_justification( + authorities: &[AuthorityId], + parent: B::Hash, + just: UncheckedJustification, +) -> Result, UncheckedJustification> { let message = Slicable::encode(&PrimitiveMessage:: { parent, action: PrimitiveAction::Commit(just.round_number as u32, just.digest.clone()), @@ -475,9 +527,11 @@ pub fn check_justification(authorities: &[AuthorityId], parent: B::Has /// Provide all valid authorities. /// /// On failure, returns the justification back. 
-pub fn check_prepare_justification(authorities: &[AuthorityId], parent: B::Hash, just: UncheckedJustification) - -> Result, UncheckedJustification> -{ +pub fn check_prepare_justification( + authorities: &[AuthorityId], + parent: B::Hash, + just: UncheckedJustification, +) -> Result, UncheckedJustification> { let message = Slicable::encode(&PrimitiveMessage:: { parent, action: PrimitiveAction::Prepare(just.round_number as u32, just.digest.clone()), @@ -491,15 +545,16 @@ pub fn check_prepare_justification(authorities: &[AuthorityId], parent pub fn check_proposal( authorities: &[AuthorityId], parent_hash: &B::Hash, - propose: &::generic::LocalizedProposal) - -> Result<(), Error> -{ + propose: &::generic::LocalizedProposal, +) -> Result<(), Error> { if !authorities.contains(&propose.sender) { - return Err(ErrorKind::InvalidAuthority(propose.sender.into()).into()); + return Err(ErrorKind::InvalidAuthority(propose.sender.into()).into()) } - let action_header = PrimitiveAction::ProposeHeader(propose.round_number as u32, propose.digest.clone()); - let action_propose = PrimitiveAction::Propose(propose.round_number as u32, propose.proposal.clone()); + let action_header = + PrimitiveAction::ProposeHeader(propose.round_number as u32, propose.digest.clone()); + let action_propose = + PrimitiveAction::Propose(propose.round_number as u32, propose.proposal.clone()); check_action::(action_header, parent_hash, &propose.digest_signature)?; check_action::(action_propose, parent_hash, &propose.full_signature) } @@ -509,11 +564,10 @@ pub fn check_proposal( pub fn check_vote( authorities: &[AuthorityId], parent_hash: &B::Hash, - vote: &::generic::LocalizedVote) - -> Result<(), Error> -{ + vote: &::generic::LocalizedVote, +) -> Result<(), Error> { if !authorities.contains(&vote.sender) { - return Err(ErrorKind::InvalidAuthority(vote.sender.into()).into()); + return Err(ErrorKind::InvalidAuthority(vote.sender.into()).into()) } let action = match vote.vote { @@ -524,7 +578,11 @@ pub fn check_vote( check_action::(action, parent_hash, &vote.signature) } -fn check_action(action: PrimitiveAction, parent_hash: &B::Hash, sig: &LocalizedSignature) -> Result<(), Error> { +fn check_action( + action: PrimitiveAction, + parent_hash: &B::Hash, + sig: &LocalizedSignature, +) -> Result<(), Error> { let primitive = PrimitiveMessage { parent: parent_hash.clone(), action, @@ -539,7 +597,11 @@ fn check_action(action: PrimitiveAction, parent_hash: &B:: } /// Sign a BFT message with the given key. 
-pub fn sign_message(message: Message, key: &ed25519::Pair, parent_hash: B::Hash) -> LocalizedMessage { +pub fn sign_message( + message: Message, + key: &ed25519::Pair, + parent_hash: B::Hash, +) -> LocalizedMessage { let signer = key.public(); let sign_action = |action: PrimitiveAction| { @@ -569,7 +631,7 @@ pub fn sign_message(message: Message, key: &ed25519::Pair, digest_signature: sign_action(action_header), full_signature: sign_action(action_propose), }) - } + }, ::generic::Message::Vote(vote) => { let action = match vote { ::generic::Vote::Prepare(r, ref h) => PrimitiveAction::Prepare(r as u32, h.clone()), @@ -578,24 +640,24 @@ pub fn sign_message(message: Message, key: &ed25519::Pair, }; ::generic::LocalizedMessage::Vote(::generic::LocalizedVote { - vote: vote, + vote, sender: signer.clone().into(), signature: sign_action(action), }) - } + }, } } #[cfg(test)] mod tests { - use super::*; - use std::collections::HashSet; - use runtime_primitives::testing::{Block as GenericTestBlock, Header as TestHeader}; - use primitives::H256; - use self::tokio_core::reactor::{Core}; use self::keyring::Keyring; - use futures::stream; + use self::tokio_core::reactor::Core; + use super::*; use futures::future::Executor; + use futures::stream; + use primitives::H256; + use runtime_primitives::testing::{Block as GenericTestBlock, Header as TestHeader}; + use std::collections::HashSet; extern crate substrate_keyring as keyring; extern crate tokio_core; @@ -604,7 +666,7 @@ mod tests { struct FakeClient { authorities: Vec, - imported_heights: Mutex> + imported_heights: Mutex>, } impl BlockImport for FakeClient { @@ -626,7 +688,10 @@ mod tests { type SinkItem = Communication; type SinkError = E; - fn start_send(&mut self, _item: Communication) -> ::futures::StartSend, E> { + fn start_send( + &mut self, + _item: Communication, + ) -> ::futures::StartSend, E> { Ok(::futures::AsyncSink::Ready) } @@ -642,7 +707,12 @@ mod tests { type Proposer = DummyProposer; type Error = Error; - fn init(&self, parent_header: &TestHeader, _authorities: &[AuthorityId], _sign_with: Arc) -> Result { + fn init( + &self, + parent_header: &TestHeader, + _authorities: &[AuthorityId], + _sign_with: Arc, + ) -> Result { Ok(DummyProposer(parent_header.number + 1)) } } @@ -653,10 +723,9 @@ mod tests { type Evaluate = Result; fn propose(&self) -> Result { - Ok(TestBlock { header: from_block_number(self.0), - extrinsics: Default::default() + extrinsics: Default::default(), }) } @@ -671,20 +740,22 @@ mod tests { } } - fn make_service(client: FakeClient) - -> BftService - { + fn make_service(client: FakeClient) -> BftService { BftService { client: Arc::new(client), live_agreement: Mutex::new(None), timer: Timer::default(), round_timeout_multiplier: 4, key: Arc::new(Keyring::One.into()), - factory: DummyFactory + factory: DummyFactory, } } - fn sign_vote(vote: ::generic::Vote, key: &ed25519::Pair, parent_hash: H256) -> LocalizedSignature { + fn sign_vote( + vote: ::generic::Vote, + key: &ed25519::Pair, + parent_hash: H256, + ) -> LocalizedSignature { match sign_message::(vote.into(), key, parent_hash) { ::generic::LocalizedMessage::Vote(vote) => vote.signature, _ => panic!("signing vote leads to signed vote"), @@ -724,14 +795,18 @@ mod tests { second.parent_hash = first_hash; let second_hash = second.hash(); - let bft = service.build_upon(&first, stream::empty(), Output(Default::default())).unwrap(); + let bft = service + .build_upon(&first, stream::empty(), Output(Default::default())) + .unwrap(); 
assert!(service.live_agreement.lock().as_ref().unwrap().0 == first_hash); // turn the core so the future gets polled and sends its task to the // service. otherwise it deadlocks. core.handle().execute(bft.unwrap()).unwrap(); core.turn(Some(::std::time::Duration::from_millis(100))); - let bft = service.build_upon(&second, stream::empty(), Output(Default::default())).unwrap(); + let bft = service + .build_upon(&second, stream::empty(), Output(Default::default())) + .unwrap(); assert!(service.live_agreement.lock().as_ref().unwrap().0 != first_hash); assert!(service.live_agreement.lock().as_ref().unwrap().0 == second_hash); @@ -771,9 +846,11 @@ mod tests { let unchecked = UncheckedJustification { digest: hash, round_number: 1, - signatures: authorities_keys.iter().take(3).map(|key| { - sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), + signatures: authorities_keys + .iter() + .take(3) + .map(|key| sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), }; assert!(check_justification::(&authorities, parent_hash, unchecked).is_ok()); @@ -781,9 +858,11 @@ mod tests { let unchecked = UncheckedJustification { digest: hash, round_number: 0, // wrong round number (vs. the signatures) - signatures: authorities_keys.iter().take(3).map(|key| { - sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), + signatures: authorities_keys + .iter() + .take(3) + .map(|key| sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), }; assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); @@ -792,9 +871,11 @@ mod tests { let unchecked = UncheckedJustification { digest: hash, round_number: 1, - signatures: authorities_keys.iter().take(2).map(|key| { - sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), + signatures: authorities_keys + .iter() + .take(2) + .map(|key| sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), }; assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); @@ -803,9 +884,11 @@ mod tests { let unchecked = UncheckedJustification { digest: [0xfe; 32].into(), round_number: 1, - signatures: authorities_keys.iter().take(3).map(|key| { - sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), + signatures: authorities_keys + .iter() + .take(3) + .map(|key| sign_vote(generic::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), }; assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); @@ -822,10 +905,14 @@ mod tests { let block = TestBlock { header: from_block_number(1), - extrinsics: Default::default() + extrinsics: Default::default(), }; - let proposal = sign_message(::generic::Message::Propose(1, block.clone()), &Keyring::Alice.pair(), parent_hash);; + let proposal = sign_message( + ::generic::Message::Propose(1, block.clone()), + &Keyring::Alice.pair(), + parent_hash, + ); if let ::generic::LocalizedMessage::Propose(proposal) = proposal { assert!(check_proposal(&authorities, &parent_hash, &proposal).is_ok()); let mut invalid_round = proposal.clone(); @@ -839,7 +926,11 @@ mod tests { } // Not an authority - let proposal = sign_message::(::generic::Message::Propose(1, block), &Keyring::Bob.pair(), parent_hash);; + let proposal = sign_message::( + ::generic::Message::Propose(1, block), + &Keyring::Bob.pair(), + parent_hash, + ); if let ::generic::LocalizedMessage::Propose(proposal) = proposal { 
assert!(check_proposal(&authorities, &parent_hash, &proposal).is_err()); } else { @@ -857,7 +948,11 @@ mod tests { Keyring::Eve.to_raw_public().into(), ]; - let vote = sign_message::(::generic::Message::Vote(::generic::Vote::Prepare(1, hash)), &Keyring::Alice.pair(), parent_hash);; + let vote = sign_message::( + ::generic::Message::Vote(::generic::Vote::Prepare(1, hash)), + &Keyring::Alice.pair(), + parent_hash, + ); if let ::generic::LocalizedMessage::Vote(vote) = vote { assert!(check_vote::(&authorities, &parent_hash, &vote).is_ok()); let mut invalid_sender = vote.clone(); @@ -868,7 +963,11 @@ mod tests { } // Not an authority - let vote = sign_message::(::generic::Message::Vote(::generic::Vote::Prepare(1, hash)), &Keyring::Bob.pair(), parent_hash);; + let vote = sign_message::( + ::generic::Message::Vote(::generic::Vote::Prepare(1, hash)), + &Keyring::Bob.pair(), + parent_hash, + ); if let ::generic::LocalizedMessage::Vote(vote) = vote { assert!(check_vote::(&authorities, &parent_hash, &vote).is_err()); } else { diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 9dd7c1d3b2354..d3f681a1a387f 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -16,19 +16,19 @@ //! Client backend that uses RocksDB database as storage. -extern crate substrate_client as client; -extern crate kvdb_rocksdb; -extern crate kvdb; extern crate hashdb; +extern crate kvdb; +extern crate kvdb_rocksdb; extern crate memorydb; extern crate parking_lot; -extern crate substrate_state_machine as state_machine; -extern crate substrate_primitives as primitives; -extern crate substrate_runtime_support as runtime_support; -extern crate substrate_runtime_primitives as runtime_primitives; +extern crate substrate_client as client; extern crate substrate_codec as codec; extern crate substrate_executor as executor; +extern crate substrate_primitives as primitives; +extern crate substrate_runtime_primitives as runtime_primitives; +extern crate substrate_runtime_support as runtime_support; extern crate substrate_state_db as state_db; +extern crate substrate_state_machine as state_machine; #[macro_use] extern crate log; @@ -40,24 +40,28 @@ pub mod light; mod utils; -use std::sync::Arc; use std::path::PathBuf; +use std::sync::Arc; use codec::Slicable; -use kvdb::{KeyValueDB, DBTransaction}; +use executor::RuntimeInfo; +use kvdb::{DBTransaction, KeyValueDB}; use memorydb::MemoryDB; use parking_lot::RwLock; use primitives::H256; -use runtime_primitives::generic::BlockId; use runtime_primitives::bft::Justification; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, Hashing, HashingFor, Zero}; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{ + As, Block as BlockT, Hashing, HashingFor, Header as HeaderT, Zero, +}; use runtime_primitives::BuildStorage; -use state_machine::backend::Backend as StateBackend; -use executor::RuntimeInfo; -use state_machine::{CodeExecutor, TrieH256, DBValue}; -use utils::{Meta, db_err, meta_keys, number_to_db_key, open_database, read_db, read_id, read_meta}; -use state_db::StateDb; pub use state_db::PruningMode; +use state_db::StateDb; +use state_machine::backend::Backend as StateBackend; +use state_machine::{CodeExecutor, DBValue, TrieH256}; +use utils::{ + db_err, meta_keys, number_to_db_key, open_database, read_db, read_id, read_meta, Meta, +}; const FINALIZATION_WINDOW: u64 = 32; @@ -79,13 +83,16 @@ pub fn new_client( settings: DatabaseSettings, executor: E, genesis_storage: S, -) -> Result, 
client::LocalCallExecutor, E>, Block>, client::error::Error> - where - Block: BlockT, - ::Number: As, - Block::Hash: Into<[u8; 32]>, // TODO: remove when patricia_trie generic. - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, +) -> Result< + client::Client, client::LocalCallExecutor, E>, Block>, + client::error::Error, +> +where + Block: BlockT, + ::Number: As, + Block::Hash: Into<[u8; 32]>, // TODO: remove when patricia_trie generic. + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, { let backend = Arc::new(Backend::new(settings, FINALIZATION_WINDOW)?); let executor = client::LocalCallExecutor::new(backend.clone(), executor); @@ -116,7 +123,9 @@ impl<'a> state_db::MetaDb for StateMetaDb<'a> { type Error = kvdb::Error; fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.get(columns::STATE_META, key).map(|r| r.map(|v| v.to_vec())) + self.0 + .get(columns::STATE_META, key) + .map(|r| r.map(|v| v.to_vec())) } } @@ -126,16 +135,24 @@ pub struct BlockchainDb { meta: RwLock::Number, Block::Hash>>, } -impl BlockchainDb where ::Number: As { +impl BlockchainDb +where + ::Number: As, +{ fn new(db: Arc) -> Result { let meta = read_meta::(&*db, columns::HEADER)?; Ok(BlockchainDb { db, - meta: RwLock::new(meta) + meta: RwLock::new(meta), }) } - fn update_meta(&self, hash: Block::Hash, number: ::Number, is_best: bool) { + fn update_meta( + &self, + hash: Block::Hash, + number: ::Number, + is_best: bool, + ) { if is_best { let mut meta = self.meta.write(); if number == Zero::zero() { @@ -147,13 +164,19 @@ impl BlockchainDb where ::Number } } -impl client::blockchain::HeaderBackend for BlockchainDb where ::Number: As { +impl client::blockchain::HeaderBackend for BlockchainDb +where + ::Number: As, +{ fn header(&self, id: BlockId) -> Result, client::error::Error> { match read_db(&*self.db, columns::BLOCK_INDEX, columns::HEADER, id)? { Some(header) => match Block::Header::decode(&mut &header[..]) { Some(header) => Ok(Some(header)), - None => return Err(client::error::ErrorKind::Backend("Error decoding header".into()).into()), - } + None => + return Err( + client::error::ErrorKind::Backend("Error decoding header".into()).into(), + ), + }, None => Ok(None), } } @@ -167,7 +190,10 @@ impl client::blockchain::HeaderBackend for BlockchainDb) -> Result { + fn status( + &self, + id: BlockId, + ) -> Result { let exists = match id { BlockId::Hash(_) => read_id(&*self.db, columns::BLOCK_INDEX, id)?.is_some(), BlockId::Number(n) => n <= self.meta.read().best_number, @@ -178,30 +204,54 @@ impl client::blockchain::HeaderBackend for BlockchainDb::Number) -> Result, client::error::Error> { - read_db::(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(number)).map(|x| - x.map(|raw| HashingFor::::hash(&raw[..])).map(Into::into) - ) + fn hash( + &self, + number: ::Number, + ) -> Result, client::error::Error> { + read_db::( + &*self.db, + columns::BLOCK_INDEX, + columns::HEADER, + BlockId::Number(number), + ).map(|x| { + x.map(|raw| HashingFor::::hash(&raw[..])) + .map(Into::into) + }) } } -impl client::blockchain::Backend for BlockchainDb where ::Number: As { - fn body(&self, id: BlockId) -> Result>, client::error::Error> { +impl client::blockchain::Backend for BlockchainDb +where + ::Number: As, +{ + fn body( + &self, + id: BlockId, + ) -> Result>, client::error::Error> { match read_db(&*self.db, columns::BLOCK_INDEX, columns::BODY, id)? 
{ Some(body) => match Slicable::decode(&mut &body[..]) { Some(body) => Ok(Some(body)), - None => return Err(client::error::ErrorKind::Backend("Error decoding body".into()).into()), - } + None => + return Err( + client::error::ErrorKind::Backend("Error decoding body".into()).into(), + ), + }, None => Ok(None), } } - fn justification(&self, id: BlockId) -> Result>, client::error::Error> { + fn justification( + &self, + id: BlockId, + ) -> Result>, client::error::Error> { match read_db(&*self.db, columns::BLOCK_INDEX, columns::JUSTIFICATION, id)? { Some(justification) => match Slicable::decode(&mut &justification[..]) { Some(justification) => Ok(Some(justification)), - None => return Err(client::error::ErrorKind::Backend("Error decoding justification".into()).into()), - } + None => + return Err(client::error::ErrorKind::Backend( + "Error decoding justification".into(), + ).into()), + }, None => Ok(None), } } @@ -221,8 +271,17 @@ impl client::backend::BlockImportOperation for BlockImport Ok(Some(&self.old_state)) } - fn set_block_data(&mut self, header: Block::Header, body: Option>, justification: Option>, is_best: bool) -> Result<(), client::error::Error> { - assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); + fn set_block_data( + &mut self, + header: Block::Header, + body: Option>, + justification: Option>, + is_best: bool, + ) -> Result<(), client::error::Error> { + assert!( + self.pending_block.is_none(), + "Only one block per operation is allowed" + ); self.pending_block = Some(PendingBlock { header, body, @@ -237,9 +296,14 @@ impl client::backend::BlockImportOperation for BlockImport Ok(()) } - fn reset_storage, Vec)>>(&mut self, iter: I) -> Result<(), client::error::Error> { + fn reset_storage, Vec)>>( + &mut self, + iter: I, + ) -> Result<(), client::error::Error> { // TODO: wipe out existing trie. - let (_, update) = self.old_state.storage_root(iter.into_iter().map(|(k, v)| (k, Some(v)))); + let (_, update) = self + .old_state + .storage_root(iter.into_iter().map(|(k, v)| (k, Some(v)))); self.updates = update; Ok(()) } @@ -252,7 +316,9 @@ struct StorageDb { impl state_machine::Storage for StorageDb { fn get(&self, key: &TrieH256) -> Result, String> { - self.state_db.get(&key.0.into(), self).map(|r| r.map(|v| DBValue::from_slice(&v))) + self.state_db + .get(&key.0.into(), self) + .map(|r| r.map(|v| DBValue::from_slice(&v))) .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -262,22 +328,29 @@ impl state_db::HashDb for StorageDb { type Hash = H256; fn get(&self, key: &H256) -> Result>, Self::Error> { - self.db.get(columns::STATE, &key[..]).map(|r| r.map(|v| v.to_vec())) + self.db + .get(columns::STATE, &key[..]) + .map(|r| r.map(|v| v.to_vec())) } } - -/// Disk backend. Keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks. -/// Otherwise, trie nodes are kept only from the most recent block. +/// Disk backend. Keeps data in a key-value store. In archive mode, trie nodes are kept from all +/// blocks. Otherwise, trie nodes are kept only from the most recent block. pub struct Backend { storage: Arc>, blockchain: BlockchainDb, finalization_window: u64, } -impl Backend where ::Number: As { +impl Backend +where + ::Number: As, +{ /// Create a new instance of database backend. 
- pub fn new(config: DatabaseSettings, finalization_window: u64) -> Result { + pub fn new( + config: DatabaseSettings, + finalization_window: u64, + ) -> Result { let db = open_database(&config, "full")?; Backend::from_kvdb(db as Arc<_>, config.pruning, finalization_window) @@ -289,17 +362,22 @@ impl Backend where ::Number: As< let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS)); - Backend::from_kvdb(db as Arc<_>, PruningMode::keep_blocks(0), 0).expect("failed to create test-db") + Backend::from_kvdb(db as Arc<_>, PruningMode::keep_blocks(0), 0) + .expect("failed to create test-db") } - fn from_kvdb(db: Arc, pruning: PruningMode, finalization_window: u64) -> Result { + fn from_kvdb( + db: Arc, + pruning: PruningMode, + finalization_window: u64, + ) -> Result { let blockchain = BlockchainDb::new(db.clone())?; - let map_e = |e: state_db::Error| ::client::error::Error::from(format!("State database error: {:?}", e)); - let state_db: StateDb = StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?; - let storage_db = StorageDb { - db, - state_db, + let map_e = |e: state_db::Error| { + ::client::error::Error::from(format!("State database error: {:?}", e)) }; + let state_db: StateDb = + StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?; + let storage_db = StorageDb { db, state_db }; Ok(Backend { storage: Arc::new(storage_db), @@ -324,7 +402,8 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitS } } -impl client::backend::Backend for Backend where +impl client::backend::Backend for Backend +where ::Number: As, Block::Hash: Into<[u8; 32]>, // TODO: remove when patricia_trie generic. { @@ -332,7 +411,10 @@ impl client::backend::Backend for Backend where type Blockchain = BlockchainDb; type State = DbState; - fn begin_operation(&self, block: BlockId) -> Result { + fn begin_operation( + &self, + block: BlockId, + ) -> Result { let state = self.state_at(block)?; Ok(BlockImportOperation { pending_block: None, @@ -341,7 +423,10 @@ impl client::backend::Backend for Backend where }) } - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> Result<(), client::error::Error> { + fn commit_operation( + &self, + mut operation: Self::BlockImportOperation, + ) -> Result<(), client::error::Error> { use client::blockchain::HeaderBackend; let mut transaction = DBTransaction::new(); if let Some(pending_block) = operation.pending_block { @@ -368,18 +453,28 @@ impl client::backend::Backend for Backend where } } let number_u64 = number.as_().into(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset); + let commit = self.storage.state_db.insert_block( + &hash, + number_u64, + &pending_block.header.parent_hash(), + changeset, + ); apply_state_commit(&mut transaction, commit); - //finalize an older block + // finalize an older block if number_u64 > self.finalization_window { let finalizing_hash = if self.finalization_window == 0 { Some(hash) } else { - self.blockchain.hash(As::sa((number_u64 - self.finalization_window) as u32))? + self.blockchain + .hash(As::sa((number_u64 - self.finalization_window) as u32))? 
}; if let Some(finalizing_hash) = finalizing_hash { - trace!("Finalizing block #{} ({:?})", number_u64 - self.finalization_window, finalizing_hash); + trace!( + "Finalizing block #{} ({:?})", + number_u64 - self.finalization_window, + finalizing_hash + ); let commit = self.storage.state_db.finalize_block(&finalizing_hash); apply_state_commit(&mut transaction, commit); } @@ -387,7 +482,8 @@ impl client::backend::Backend for Backend where debug!("DB Commit {:?} ({})", hash, number); self.storage.db.write(transaction).map_err(db_err)?; - self.blockchain.update_meta(hash, number, pending_block.is_best); + self.blockchain + .update_meta(hash, number, pending_block.is_best); } Ok(()) } @@ -403,29 +499,36 @@ impl client::backend::Backend for Backend where match block { BlockId::Hash(h) if h == Default::default() => return Ok(DbState::with_storage_for_genesis(self.storage.clone())), - _ => {} + _ => {}, } - self.blockchain.header(block).and_then(|maybe_hdr| maybe_hdr.map(|hdr| { - let root: [u8; 32] = hdr.state_root().clone().into(); - DbState::with_storage(self.storage.clone(), root.into()) - }).ok_or_else(|| client::error::ErrorKind::UnknownBlock(format!("{:?}", block)).into())) + self.blockchain.header(block).and_then(|maybe_hdr| { + maybe_hdr + .map(|hdr| { + let root: [u8; 32] = hdr.state_root().clone().into(); + DbState::with_storage(self.storage.clone(), root.into()) + }) + .ok_or_else(|| { + client::error::ErrorKind::UnknownBlock(format!("{:?}", block)).into() + }) + }) } } -impl client::backend::LocalBackend for Backend where +impl client::backend::LocalBackend for Backend +where ::Number: As, Block::Hash: Into<[u8; 32]>, // TODO: remove when patricia_trie generic. {} #[cfg(test)] mod tests { - use hashdb::HashDB; use super::*; use client::backend::Backend as BTrait; use client::backend::BlockImportOperation as Op; use client::blockchain::HeaderBackend as BlockchainHeaderBackend; - use runtime_primitives::testing::{Header, Block as RawBlock}; + use hashdb::HashDB; + use runtime_primitives::testing::{Block as RawBlock, Header}; type Block = RawBlock; @@ -455,12 +558,7 @@ mod tests { extrinsics_root: Default::default(), }; - op.set_block_data( - header, - Some(vec![]), - None, - true, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, true).unwrap(); db.commit_operation(op).unwrap(); } @@ -472,7 +570,9 @@ mod tests { fn set_state_data() { let db = Backend::::new_test(); { - let mut op = db.begin_operation(BlockId::Hash(Default::default())).unwrap(); + let mut op = db + .begin_operation(BlockId::Hash(Default::default())) + .unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -486,19 +586,14 @@ mod tests { (vec![1, 2, 3], vec![9, 9, 9]), ]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); op.reset_storage(storage.iter().cloned()).unwrap(); - op.set_block_data( - header, - Some(vec![]), - None, - true - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, true).unwrap(); db.commit_operation(op).unwrap(); @@ -507,7 +602,6 @@ mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); - } { @@ -520,21 +614,13 @@ mod tests { extrinsics_root: Default::default(), }; - let storage = vec![ - (vec![1, 3, 5], None), - 
(vec![5, 5, 5], Some(vec![4, 5, 6])), - ]; + let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; let (root, overlay) = op.old_state.storage_root(storage.iter().cloned()); op.update_storage(overlay).unwrap(); header.state_root = root.into(); - op.set_block_data( - header, - Some(vec![]), - None, - true - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, true).unwrap(); db.commit_operation(op).unwrap(); @@ -552,7 +638,9 @@ mod tests { let backend = Backend::::new_test(); let hash = { - let mut op = backend.begin_operation(BlockId::Hash(Default::default())).unwrap(); + let mut op = backend + .begin_operation(BlockId::Hash(Default::default())) + .unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -563,26 +651,29 @@ mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.reset_storage(storage.iter().cloned()).unwrap(); key = op.updates.insert(b"hello"); - op.set_block_data( - header, - Some(vec![]), - None, - true - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, true).unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().unwrap(), &b"hello"[..]); + assert_eq!( + backend + .storage + .db + .get(::columns::STATE, &key.0[..]) + .unwrap() + .unwrap(), + &b"hello"[..] + ); hash }; @@ -598,25 +689,28 @@ mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.updates.insert(b"hello"); op.updates.remove(&key); - op.set_block_data( - header, - Some(vec![]), - None, - true - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, true).unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().unwrap(), &b"hello"[..]); + assert_eq!( + backend + .storage + .db + .get(::columns::STATE, &key.0[..]) + .unwrap() + .unwrap(), + &b"hello"[..] + ); hash }; @@ -632,23 +726,25 @@ mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); op.updates.remove(&key); - op.set_block_data( - header, - Some(vec![]), - None, - true - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, true).unwrap(); backend.commit_operation(op).unwrap(); - assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_none()); + assert!( + backend + .storage + .db + .get(::columns::STATE, &key.0[..]) + .unwrap() + .is_none() + ); } } } diff --git a/substrate/client/db/src/light.rs b/substrate/client/db/src/light.rs index 969eade171894..8a9e685a3547d 100644 --- a/substrate/client/db/src/light.rs +++ b/substrate/client/db/src/light.rs @@ -16,20 +16,25 @@ //! RocksDB-based light client blockchain storage. 
-use std::sync::Arc; use parking_lot::RwLock; +use std::sync::Arc; -use kvdb::{KeyValueDB, DBTransaction}; +use kvdb::{DBTransaction, KeyValueDB}; -use client::blockchain::{BlockStatus, HeaderBackend as BlockchainHeaderBackend, - Info as BlockchainInfo}; +use client::blockchain::{ + BlockStatus, HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, +}; use client::error::{ErrorKind as ClientErrorKind, Result as ClientResult}; use client::light::blockchain::Storage as LightBlockchainStorage; use codec::Slicable; use primitives::AuthorityId; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, Hashing, HashingFor, Zero}; -use utils::{meta_keys, Meta, db_err, number_to_db_key, open_database, read_db, read_id, read_meta}; +use runtime_primitives::traits::{ + As, Block as BlockT, Hashing, HashingFor, Header as HeaderT, Zero, +}; +use utils::{ + db_err, meta_keys, number_to_db_key, open_database, read_db, read_id, read_meta, Meta, +}; use DatabaseSettings; pub(crate) mod columns { @@ -53,9 +58,9 @@ struct BestAuthorities { } impl LightStorage - where - Block: BlockT, - <::Header as HeaderT>::Number: As, +where + Block: BlockT, + <::Header as HeaderT>::Number: As, { /// Create new storage with given settings. pub fn new(config: DatabaseSettings) -> ClientResult { @@ -76,13 +81,15 @@ impl LightStorage fn from_kvdb(db: Arc) -> ClientResult { let meta = RwLock::new(read_meta::(&*db, columns::HEADER)?); - Ok(LightStorage { - db, - meta, - }) + Ok(LightStorage { db, meta }) } - fn update_meta(&self, hash: Block::Hash, number: <::Header as HeaderT>::Number, is_best: bool) { + fn update_meta( + &self, + hash: Block::Hash, + number: <::Header as HeaderT>::Number, + is_best: bool, + ) { if is_best { let mut meta = self.meta.write(); if number == <::Header as HeaderT>::Number::zero() { @@ -96,16 +103,16 @@ impl LightStorage } impl BlockchainHeaderBackend for LightStorage - where - Block: BlockT, - <::Header as HeaderT>::Number: As, +where + Block: BlockT, + <::Header as HeaderT>::Number: As, { fn header(&self, id: BlockId) -> ClientResult> { match read_db(&*self.db, columns::BLOCK_INDEX, columns::HEADER, id)? 
{ Some(header) => match Block::Header::decode(&mut &header[..]) { Some(header) => Ok(Some(header)), None => return Err(ClientErrorKind::Backend("Error decoding header".into()).into()), - } + }, None => Ok(None), } } @@ -130,18 +137,27 @@ impl BlockchainHeaderBackend for LightStorage } } - fn hash(&self, number: <::Header as HeaderT>::Number) -> ClientResult> { - read_db::(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(number)).map(|x| - x.map(|raw| HashingFor::::hash(&raw[..])).map(Into::into) - ) + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> ClientResult> { + read_db::( + &*self.db, + columns::BLOCK_INDEX, + columns::HEADER, + BlockId::Number(number), + ).map(|x| { + x.map(|raw| HashingFor::::hash(&raw[..])) + .map(Into::into) + }) } } impl LightBlockchainStorage for LightStorage - where - Block: BlockT, - <::Header as HeaderT>::Number: As, - ::Hash: From<[u8; 32]> + Into<[u8; 32]>, +where + Block: BlockT, + <::Header as HeaderT>::Number: As, + ::Hash: From<[u8; 32]> + Into<[u8; 32]>, { fn import_header(&self, is_new_best: bool, header: Block::Header) -> ClientResult<()> { let mut transaction = DBTransaction::new(); @@ -167,8 +183,8 @@ impl LightBlockchainStorage for LightStorage #[cfg(test)] pub(crate) mod tests { - use runtime_primitives::testing::{H256 as Hash, Header, Block as RawBlock}; use super::*; + use runtime_primitives::testing::{Block as RawBlock, H256 as Hash, Header}; type Block = RawBlock; @@ -221,9 +237,15 @@ pub(crate) mod tests { fn returns_block_status() { let db = LightStorage::new_test(); let genesis_hash = insert_block(&db, &Default::default(), 0); - assert_eq!(db.status(BlockId::Hash(genesis_hash)).unwrap(), BlockStatus::InChain); + assert_eq!( + db.status(BlockId::Hash(genesis_hash)).unwrap(), + BlockStatus::InChain + ); assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Hash(1.into())).unwrap(), BlockStatus::Unknown); + assert_eq!( + db.status(BlockId::Hash(1.into())).unwrap(), + BlockStatus::Unknown + ); assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); } diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index 96892dcf952b0..31b7bf967c82a 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -19,14 +19,16 @@ use std::sync::Arc; -use kvdb::{self, KeyValueDB, DBTransaction}; +use kvdb::{self, DBTransaction, KeyValueDB}; use kvdb_rocksdb::{Database, DatabaseConfig}; use client; use codec::Slicable; use hashdb::DBValue; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, Hashing, HashingFor, Zero}; +use runtime_primitives::traits::{ + As, Block as BlockT, Hashing, HashingFor, Header as HeaderT, Zero, +}; use DatabaseSettings; /// Number of columns in the db. Must be the same for both full && light dbs. @@ -57,14 +59,17 @@ pub struct Meta { pub type BlockKey = [u8; 4]; /// Convert block number into key (LE representation). 
-pub fn number_to_db_key(n: N) -> BlockKey where N: As { +pub fn number_to_db_key(n: N) -> BlockKey +where + N: As, +{ let n: u32 = n.as_(); [ (n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, - (n & 0xff) as u8 + (n & 0xff) as u8, ] } @@ -72,27 +77,34 @@ pub fn number_to_db_key(n: N) -> BlockKey where N: As { pub fn db_err(err: kvdb::Error) -> client::error::Error { use std::error::Error; match err.kind() { - &kvdb::ErrorKind::Io(ref err) => client::error::ErrorKind::Backend(err.description().into()).into(), + &kvdb::ErrorKind::Io(ref err) => + client::error::ErrorKind::Backend(err.description().into()).into(), &kvdb::ErrorKind::Msg(ref m) => client::error::ErrorKind::Backend(m.clone()).into(), _ => client::error::ErrorKind::Backend("Unknown backend error".into()).into(), } } /// Open RocksDB database. -pub fn open_database(config: &DatabaseSettings, db_type: &str) -> client::error::Result> { +pub fn open_database( + config: &DatabaseSettings, + db_type: &str, +) -> client::error::Result> { let mut db_config = DatabaseConfig::with_columns(Some(NUM_COLUMNS)); db_config.memory_budget = config.cache_size; db_config.wal = true; - let path = config.path.to_str().ok_or_else(|| client::error::ErrorKind::Backend("Invalid database path".into()))?; + let path = config + .path + .to_str() + .ok_or_else(|| client::error::ErrorKind::Backend("Invalid database path".into()))?; let db = Database::open(&db_config, &path).map_err(db_err)?; // check database type match db.get(COLUMN_META, meta_keys::TYPE).map_err(db_err)? { - Some(stored_type) => { - if db_type.as_bytes() != &*stored_type { - return Err(client::error::ErrorKind::Backend( - format!("Unexpected database type. Expected: {}", db_type)).into()); - } + Some(stored_type) => if db_type.as_bytes() != &*stored_type { + return Err(client::error::ErrorKind::Backend(format!( + "Unexpected database type. Expected: {}", + db_type + )).into()) }, None => { let mut transaction = DBTransaction::new(); @@ -105,27 +117,40 @@ pub fn open_database(config: &DatabaseSettings, db_type: &str) -> client::error: } /// Convert block id to block key, reading number from db if required. -pub fn read_id(db: &KeyValueDB, col_index: Option, id: BlockId) -> Result, client::error::Error> - where - Block: BlockT, - <::Header as HeaderT>::Number: As, +pub fn read_id( + db: &KeyValueDB, + col_index: Option, + id: BlockId, +) -> Result, client::error::Error> +where + Block: BlockT, + <::Header as HeaderT>::Number: As, { match id { - BlockId::Hash(h) => db.get(col_index, h.as_ref()) - .map(|v| v.map(|v| { - let mut key: [u8; 4] = [0; 4]; - key.copy_from_slice(&v); - key - })).map_err(db_err), + BlockId::Hash(h) => db + .get(col_index, h.as_ref()) + .map(|v| { + v.map(|v| { + let mut key: [u8; 4] = [0; 4]; + key.copy_from_slice(&v); + key + }) + }) + .map_err(db_err), BlockId::Number(n) => Ok(Some(number_to_db_key(n))), } } /// Read database column entry for the given block. -pub fn read_db(db: &KeyValueDB, col_index: Option, col: Option, id: BlockId) -> client::error::Result> - where - Block: BlockT, - <::Header as HeaderT>::Number: As, +pub fn read_db( + db: &KeyValueDB, + col_index: Option, + col: Option, + id: BlockId, +) -> client::error::Result> +where + Block: BlockT, + <::Header as HeaderT>::Number: As, { read_id(db, col_index, id).and_then(|key| match key { Some(key) => db.get(col, &key).map_err(db_err), @@ -134,26 +159,38 @@ pub fn read_db(db: &KeyValueDB, col_index: Option, col: Option, } /// Read meta from the database. 
-pub fn read_meta(db: &KeyValueDB, col_header: Option) -> Result::Header as HeaderT>::Number, Block::Hash>, client::error::Error> - where - Block: BlockT, - <::Header as HeaderT>::Number: As, +pub fn read_meta( + db: &KeyValueDB, + col_header: Option, +) -> Result::Header as HeaderT>::Number, Block::Hash>, client::error::Error> +where + Block: BlockT, + <::Header as HeaderT>::Number: As, { let genesis_number = <::Header as HeaderT>::Number::zero(); - let (best_hash, best_number) = if let Some(Some(header)) = db.get(COLUMN_META, meta_keys::BEST_BLOCK).and_then(|id| - match id { - Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))), + let (best_hash, best_number) = if let Some(Some(header)) = db + .get(COLUMN_META, meta_keys::BEST_BLOCK) + .and_then(|id| match id { + Some(id) => db + .get(col_header, &id) + .map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))), None => Ok(None), - }).map_err(db_err)? + }) + .map_err(db_err)? { let hash = header.hash(); - debug!("DB Opened blockchain db, best {:?} ({})", hash, header.number()); + debug!( + "DB Opened blockchain db, best {:?} ({})", + hash, + header.number() + ); (hash, *header.number()) } else { (Default::default(), genesis_number) }; - let genesis_hash = db.get(col_header, &number_to_db_key(genesis_number)) + let genesis_hash = db + .get(col_header, &number_to_db_key(genesis_number)) .map_err(db_err)? .map(|raw| HashingFor::::hash(&raw[..])) .unwrap_or_default() diff --git a/substrate/client/src/backend.rs b/substrate/client/src/backend.rs index 64f4a1d57f59e..04b0a9ad4542f 100644 --- a/substrate/client/src/backend.rs +++ b/substrate/client/src/backend.rs @@ -16,11 +16,11 @@ //! Polkadot Client data backend -use state_machine::backend::Backend as StateBackend; use error; use runtime_primitives::bft::Justification; -use runtime_primitives::traits::Block as BlockT; use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::Block as BlockT; +use state_machine::backend::Backend as StateBackend; /// Block insertion operation. Keeps hold if the inserted block state and data. pub trait BlockImportOperation { @@ -35,13 +35,19 @@ pub trait BlockImportOperation { header: Block::Header, body: Option>, justification: Option>, - is_new_best: bool + is_new_best: bool, ) -> error::Result<()>; /// Inject storage data into the database. - fn update_storage(&mut self, update: ::Transaction) -> error::Result<()>; + fn update_storage( + &mut self, + update: ::Transaction, + ) -> error::Result<()>; /// Inject storage data into the database replacing any existing data. - fn reset_storage, Vec)>>(&mut self, iter: I) -> error::Result<()>; + fn reset_storage, Vec)>>( + &mut self, + iter: I, + ) -> error::Result<()>; } /// Client backend. Manages the data layer. @@ -50,8 +56,8 @@ pub trait BlockImportOperation { /// should not be pruned. The backend should internally reference-count /// its state objects. /// -/// The same applies for live `BlockImportOperation`s: while an import operation building on a parent `P` -/// is alive, the state for `P` should not be pruned. +/// The same applies for live `BlockImportOperation`s: while an import operation building on a +/// parent `P` is alive, the state for `P` should not be pruned. pub trait Backend: Send + Sync { /// Associated block insertion operation type. 
type BlockImportOperation: BlockImportOperation; diff --git a/substrate/client/src/block_builder.rs b/substrate/client/src/block_builder.rs index 5443759f170b2..2e0eae3b9b30f 100644 --- a/substrate/client/src/block_builder.rs +++ b/substrate/client/src/block_builder.rs @@ -16,19 +16,23 @@ //! Utility struct to build a block. -use std::vec::Vec; use codec::Slicable; -use state_machine; -use runtime_primitives::traits::{Header as HeaderT, Hashing as HashingT, Block as BlockT, One, HashingFor}; use runtime_primitives::generic::BlockId; -use {backend, error, Client, CallExecutor}; +use runtime_primitives::traits::{ + Block as BlockT, Hashing as HashingT, HashingFor, Header as HeaderT, One, +}; +use state_machine; +use std::vec::Vec; +use {backend, error, CallExecutor, Client}; /// Utility for building new (valid) blocks from a stream of extrinsics. -pub struct BlockBuilder where +pub struct BlockBuilder +where B: backend::Backend, E: CallExecutor + Clone, Block: BlockT, - error::Error: From<<>::State as state_machine::backend::Backend>::Error>, + error::Error: + From<<>::State as state_machine::backend::Backend>::Error>, { header: ::Header, extrinsics: Vec<::Extrinsic>, @@ -37,25 +41,34 @@ pub struct BlockBuilder where changes: state_machine::OverlayedChanges, } -impl BlockBuilder where +impl BlockBuilder +where B: backend::Backend, E: CallExecutor + Clone, Block: BlockT, - error::Error: From<<>::State as state_machine::backend::Backend>::Error>, + error::Error: + From<<>::State as state_machine::backend::Backend>::Error>, { /// Create a new instance of builder from the given client, building on the latest block. pub fn new(client: &Client) -> error::Result { - client.info().and_then(|i| Self::at_block(&BlockId::Hash(i.chain.best_hash), client)) + client + .info() + .and_then(|i| Self::at_block(&BlockId::Hash(i.chain.best_hash), client)) } /// Create a new instance of builder from the given client using a particular block's ID to /// build upon. - pub fn at_block(block_id: &BlockId, client: &Client) -> error::Result { - let number = client.block_number_from_id(block_id)? + pub fn at_block( + block_id: &BlockId, + client: &Client, + ) -> error::Result { + let number = client + .block_number_from_id(block_id)? .ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{}", block_id)))? + One::one(); - let parent_hash = client.block_hash_from_id(block_id)? + let parent_hash = client + .block_hash_from_id(block_id)? .ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{}", block_id)))?; let executor = client.executor().clone(); @@ -66,7 +79,7 @@ impl BlockBuilder where Default::default(), Default::default(), parent_hash, - Default::default() + Default::default(), ); executor.call_at_state(&state, &mut changes, "initialise_block", &header.encode())?; @@ -84,26 +97,28 @@ impl BlockBuilder where /// can be validly executed (by executing it); if it is invalid, it'll be returned along with /// the error. Otherwise, it will return a mutable reference to self (in order to chain). pub fn push(&mut self, xt: ::Extrinsic) -> error::Result<()> { - match self.executor.call_at_state(&self.state, &mut self.changes, "apply_extrinsic", &xt.encode()) { + match self.executor.call_at_state( + &self.state, + &mut self.changes, + "apply_extrinsic", + &xt.encode(), + ) { Ok(_) => { self.extrinsics.push(xt); Ok(()) - } + }, Err(e) => { self.changes.discard_prospective(); Err(e) - } + }, } } /// Consume the builder to return a valid `Block` containing all pushed extrinsics. 
pub fn bake(mut self) -> error::Result { - let (output, _) = self.executor.call_at_state( - &self.state, - &mut self.changes, - "finalise_block", - &[], - )?; + let (output, _) = + self.executor + .call_at_state(&self.state, &mut self.changes, "finalise_block", &[])?; self.header = <::Header as Slicable>::decode(&mut &output[..]) .expect("Header came straight out of runtime so must be valid"); diff --git a/substrate/client/src/blockchain.rs b/substrate/client/src/blockchain.rs index 18aed482b8c4d..bc0c5f740b90c 100644 --- a/substrate/client/src/blockchain.rs +++ b/substrate/client/src/blockchain.rs @@ -16,9 +16,9 @@ //! Polkadot blockchain trait -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; -use runtime_primitives::generic::BlockId; use runtime_primitives::bft::Justification; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; use error::Result; @@ -31,7 +31,10 @@ pub trait HeaderBackend: Send + Sync { /// Get block status. fn status(&self, id: BlockId) -> Result; /// Get block hash by number. Returns `None` if the header is not in the chain. - fn hash(&self, number: <::Header as HeaderT>::Number) -> Result::Header as HeaderT>::Hash>>; + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> Result::Header as HeaderT>::Hash>>; } /// Blockchain database backend. Does not perform any validation. diff --git a/substrate/client/src/call_executor.rs b/substrate/client/src/call_executor.rs index 36d6ced1e5b6d..3e542c121f612 100644 --- a/substrate/client/src/call_executor.rs +++ b/substrate/client/src/call_executor.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::sync::Arc; +use executor::{RuntimeInfo, RuntimeVersion}; +use runtime_io::Externalities; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::Block as BlockT; -use state_machine::{self, OverlayedChanges, Ext, Backend as StateBackend, CodeExecutor}; -use runtime_io::Externalities; -use executor::{RuntimeVersion, RuntimeInfo}; +use state_machine::{self, Backend as StateBackend, CodeExecutor, Ext, OverlayedChanges}; +use std::sync::Arc; use backend; use error; @@ -41,7 +41,12 @@ pub trait CallExecutor { /// Execute a call to a contract on top of state in a block of given hash. /// /// No changes are made. - fn call(&self, id: &BlockId, method: &str, call_data: &[u8]) -> Result; + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> Result; /// Extract RuntimeVersion of given block /// @@ -51,12 +56,24 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given state. /// /// No changes are made. - fn call_at_state(&self, state: &S, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8]) -> Result<(Vec, S::Transaction), error::Error>; + fn call_at_state( + &self, + state: &S, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, S::Transaction), error::Error>; /// Execute a call to a contract on top of given state, gathering execution proof. /// /// No changes are made. - fn prove_at_state(&self, state: S, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8]) -> Result<(Vec, Vec>), error::Error>; + fn prove_at_state( + &self, + state: S, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, Vec>), error::Error>; /// Get runtime version if supported. 
fn native_runtime_version(&self) -> Option; @@ -76,7 +93,10 @@ impl LocalCallExecutor { } } -impl Clone for LocalCallExecutor where E: Clone { +impl Clone for LocalCallExecutor +where + E: Clone, +{ fn clone(&self) -> Self { LocalCallExecutor { backend: self.backend.clone(), @@ -86,51 +106,68 @@ impl Clone for LocalCallExecutor where E: Clone { } impl CallExecutor for LocalCallExecutor - where - B: backend::LocalBackend, - E: CodeExecutor + RuntimeInfo, - Block: BlockT, - error::Error: From<<>::State as StateBackend>::Error>, +where + B: backend::LocalBackend, + E: CodeExecutor + RuntimeInfo, + Block: BlockT, + error::Error: From<<>::State as StateBackend>::Error>, { type Error = E::Error; - fn call(&self, id: &BlockId, method: &str, call_data: &[u8]) -> error::Result { + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> error::Result { let mut changes = OverlayedChanges::default(); - let (return_data, _) = self.call_at_state(&self.backend.state_at(*id)?, &mut changes, method, call_data)?; - Ok(CallResult{ return_data, changes }) + let (return_data, _) = self.call_at_state( + &self.backend.state_at(*id)?, + &mut changes, + method, + call_data, + )?; + Ok(CallResult { + return_data, + changes, + }) } fn runtime_version(&self, id: &BlockId) -> error::Result { let mut overlay = OverlayedChanges::default(); let state = self.backend.state_at(*id)?; let mut externalities = Ext::new(&mut overlay, &state); - let code = externalities.storage(b":code").ok_or(error::ErrorKind::VersionInvalid)? + let code = externalities + .storage(b":code") + .ok_or(error::ErrorKind::VersionInvalid)? .to_vec(); - self.executor.runtime_version(&mut externalities, &code) + self.executor + .runtime_version(&mut externalities, &code) .ok_or(error::ErrorKind::VersionInvalid.into()) } - fn call_at_state(&self, state: &S, changes: &mut OverlayedChanges, method: &str, call_data: &[u8]) -> error::Result<(Vec, S::Transaction)> { - state_machine::execute( - state, - changes, - &self.executor, - method, - call_data, - ).map_err(Into::into) + fn call_at_state( + &self, + state: &S, + changes: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> error::Result<(Vec, S::Transaction)> { + state_machine::execute(state, changes, &self.executor, method, call_data) + .map_err(Into::into) } - fn prove_at_state(&self, state: S, changes: &mut OverlayedChanges, method: &str, call_data: &[u8]) -> Result<(Vec, Vec>), error::Error> { - state_machine::prove_execution( - state, - changes, - &self.executor, - method, - call_data, - ) - .map(|(result, proof, _)| (result, proof)) - .map_err(Into::into) + fn prove_at_state( + &self, + state: S, + changes: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, Vec>), error::Error> { + state_machine::prove_execution(state, changes, &self.executor, method, call_data) + .map(|(result, proof, _)| (result, proof)) + .map_err(Into::into) } fn native_runtime_version(&self) -> Option { diff --git a/substrate/client/src/client.rs b/substrate/client/src/client.rs index 1438e66ead415..3adaa203bed41 100644 --- a/substrate/client/src/client.rs +++ b/substrate/client/src/client.rs @@ -16,39 +16,50 @@ //! 
Substrate Client -use std::sync::Arc; +use codec::Slicable; use futures::sync::mpsc; use parking_lot::{Mutex, RwLock}; +use primitives::storage::{StorageData, StorageKey}; use primitives::AuthorityId; -use runtime_primitives::{bft::Justification, generic::{BlockId, SignedBlock, Block as RuntimeBlock}}; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, One}; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, One, Zero}; use runtime_primitives::BuildStorage; -use primitives::storage::{StorageKey, StorageData}; -use codec::Slicable; -use state_machine::{self, Ext, OverlayedChanges, Backend as StateBackend, CodeExecutor}; +use runtime_primitives::{ + bft::Justification, + generic::{Block as RuntimeBlock, BlockId, SignedBlock}, +}; +use state_machine::{self, Backend as StateBackend, CodeExecutor, Ext, OverlayedChanges}; +use std::sync::Arc; use backend::{self, BlockImportOperation}; -use blockchain::{self, Info as ChainInfo, Backend as ChainBackend, HeaderBackend as ChainHeaderBackend}; +use blockchain::{ + self, Backend as ChainBackend, HeaderBackend as ChainHeaderBackend, Info as ChainInfo, +}; use call_executor::{CallExecutor, LocalCallExecutor}; -use executor::{RuntimeVersion, RuntimeInfo}; -use {error, in_mem, block_builder, runtime_io, bft, genesis}; +use executor::{RuntimeInfo, RuntimeVersion}; +use {bft, block_builder, error, genesis, in_mem, runtime_io}; /// Type that implements `futures::Stream` of block import events. pub type BlockchainEventStream = mpsc::UnboundedReceiver>; /// Substrate Client -pub struct Client where Block: BlockT { +pub struct Client +where + Block: BlockT, +{ backend: Arc, executor: E, import_notification_sinks: Mutex>>>, import_lock: Mutex<()>, - importing_block: RwLock>, // holds the block hash currently being imported. TODO: replace this with block queue + importing_block: RwLock>, /* holds the block hash currently being + * imported. TODO: replace this with block + * queue */ } /// A source of blockchain evenets. pub trait BlockchainEvents { /// Get block import event stream. - fn import_notification_stream(&self) -> mpsc::UnboundedReceiver>; + fn import_notification_stream(&self) + -> mpsc::UnboundedReceiver>; } /// Chain head information. @@ -144,19 +155,22 @@ impl JustifiedHeader { /// Create an instance of in-memory client. pub fn new_in_mem( executor: E, - genesis_storage: S -) -> error::Result, LocalCallExecutor, E>, Block>> - where - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, - Block: BlockT, + genesis_storage: S, +) -> error::Result< + Client, LocalCallExecutor, E>, Block>, +> +where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, { let backend = Arc::new(in_mem::Backend::new()); let executor = LocalCallExecutor::new(backend.clone(), executor); Client::new(backend, executor, genesis_storage) } -impl Client where +impl Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -168,10 +182,18 @@ impl Client where executor: E, build_genesis_storage: S, ) -> error::Result { - if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { + if backend + .blockchain() + .header(BlockId::Number(Zero::zero()))? 
+ .is_none() + { let genesis_storage = build_genesis_storage.build_storage()?; let genesis_block = genesis::construct_genesis_block::(&genesis_storage); - info!("Initialising Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash()); + info!( + "Initialising Genesis block/state (state: {}, header-hash: {})", + genesis_block.header().state_root(), + genesis_block.header().hash() + ); let mut op = backend.begin_operation(BlockId::Hash(Default::default()))?; op.reset_storage(genesis_storage.into_iter())?; op.set_block_data(genesis_block.deconstruct().0, Some(vec![]), None, true)?; @@ -196,24 +218,29 @@ impl Client where &self.backend } - /// Return single storage entry of contract under given address in state in a block of given hash. + /// Return single storage entry of contract under given address in state in a block of given + /// hash. pub fn storage(&self, id: &BlockId, key: &StorageKey) -> error::Result { - Ok(StorageData(self.state_at(id)? - .storage(&key.0)? - .ok_or_else(|| error::ErrorKind::NoValueForKey(key.0.clone()))? - .to_vec())) + Ok(StorageData( + self.state_at(id)? + .storage(&key.0)? + .ok_or_else(|| error::ErrorKind::NoValueForKey(key.0.clone()))? + .to_vec(), + )) } /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> error::Result> { - self.storage(id, &StorageKey(b":code".to_vec())).map(|data| data.0) + self.storage(id, &StorageKey(b":code".to_vec())) + .map(|data| data.0) } /// Get the set of authorities at a given block. pub fn authorities_at(&self, id: &BlockId) -> error::Result> { - self.executor.call(id, "authorities", &[]) - .and_then(|r| Vec::::decode(&mut &r.return_data[..]) - .ok_or(error::ErrorKind::AuthLenInvalid.into())) + self.executor.call(id, "authorities", &[]).and_then(|r| { + Vec::::decode(&mut &r.return_data[..]) + .ok_or(error::ErrorKind::AuthLenInvalid.into()) + }) } /// Get the RuntimeVersion at a given block. @@ -231,15 +258,25 @@ impl Client where /// AND returning execution proof. /// /// No changes are made. - pub fn execution_proof(&self, id: &BlockId, method: &str, call_data: &[u8]) -> error::Result<(Vec, Vec>)> { - self.state_at(id).and_then(|state| self.executor.prove_at_state(state, &mut Default::default(), method, call_data)) + pub fn execution_proof( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> error::Result<(Vec, Vec>)> { + self.state_at(id).and_then(|state| { + self.executor + .prove_at_state(state, &mut Default::default(), method, call_data) + }) } /// Set up the native execution environment to call into a native runtime code. - pub fn using_environment T, T>( - &self, f: F - ) -> error::Result { - self.using_environment_at(&BlockId::Number(self.info()?.chain.best_number), &mut Default::default(), f) + pub fn using_environment T, T>(&self, f: F) -> error::Result { + self.using_environment_at( + &BlockId::Number(self.info()?.chain.best_number), + &mut Default::default(), + f, + ) } /// Set up the native execution environment to call into a native runtime code. @@ -247,18 +284,30 @@ impl Client where &self, id: &BlockId, overlay: &mut OverlayedChanges, - f: F + f: F, ) -> error::Result { - Ok(runtime_io::with_externalities(&mut Ext::new(overlay, &self.state_at(id)?), f)) + Ok(runtime_io::with_externalities( + &mut Ext::new(overlay, &self.state_at(id)?), + f, + )) } /// Create a new block, built on the head of the chain. 
- pub fn new_block(&self) -> error::Result> where E: Clone { + pub fn new_block(&self) -> error::Result> + where + E: Clone, + { block_builder::BlockBuilder::new(self) } /// Create a new block, built on top of `parent`. - pub fn new_block_at(&self, parent: &BlockId) -> error::Result> where E: Clone { + pub fn new_block_at( + &self, + parent: &BlockId, + ) -> error::Result> + where + E: Clone, + { block_builder::BlockBuilder::at_block(parent, &self) } @@ -270,12 +319,9 @@ impl Client where ) -> error::Result> { let parent_hash = header.parent_hash().clone(); let authorities = self.authorities_at(&BlockId::Hash(parent_hash))?; - let just = ::bft::check_justification::(&authorities[..], parent_hash, justification) - .map_err(|_| - error::ErrorKind::BadJustification( - format!("{}", header.hash()) - ) - )?; + let just = + ::bft::check_justification::(&authorities[..], parent_hash, justification) + .map_err(|_| error::ErrorKind::BadJustification(format!("{}", header.hash())))?; Ok(JustifiedHeader { header, justification: just, @@ -291,7 +337,11 @@ impl Client where ) -> error::Result { let (header, justification) = header.into_inner(); let parent_hash = header.parent_hash().clone(); - match self.backend.blockchain().status(BlockId::Hash(parent_hash))? { + match self + .backend + .blockchain() + .status(BlockId::Hash(parent_hash))? + { blockchain::BlockStatus::InChain => {}, blockchain::BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), } @@ -325,7 +375,8 @@ impl Client where transaction_state, &mut overlay, "execute_block", - &::new(header.clone(), body.clone().unwrap_or_default()).encode() + &::new(header.clone(), body.clone().unwrap_or_default()) + .encode(), )?; Some(storage_update) @@ -333,21 +384,37 @@ impl Client where None => None, }; - let is_new_best = header.number() == &(self.backend.blockchain().info()?.best_number + One::one()); - trace!("Imported {}, (#{}), best={}, origin={:?}", hash, header.number(), is_new_best, origin); - transaction.set_block_data(header.clone(), body, Some(justification.uncheck().into()), is_new_best)?; + let is_new_best = + header.number() == &(self.backend.blockchain().info()?.best_number + One::one()); + trace!( + "Imported {}, (#{}), best={}, origin={:?}", + hash, + header.number(), + is_new_best, + origin + ); + transaction.set_block_data( + header.clone(), + body, + Some(justification.uncheck().into()), + is_new_best, + )?; if let Some(storage_update) = storage_update { transaction.update_storage(storage_update)?; } self.backend.commit_operation(transaction)?; - if origin == BlockOrigin::NetworkBroadcast || origin == BlockOrigin::Own || origin == BlockOrigin::ConsensusBroadcast { + if origin == BlockOrigin::NetworkBroadcast + || origin == BlockOrigin::Own + || origin == BlockOrigin::ConsensusBroadcast + { let notification = BlockImportNotification:: { - hash: hash, - origin: origin, - header: header, - is_new_best: is_new_best, + hash, + origin, + header, + is_new_best, }; - self.import_notification_sinks.lock() + self.import_notification_sinks + .lock() .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); } Ok(ImportResult::Queued) @@ -355,7 +422,11 @@ impl Client where /// Get blockchain info. 
pub fn info(&self) -> error::Result> { - let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?; + let info = self + .backend + .blockchain() + .info() + .map_err(|e| error::Error::from_blockchain(Box::new(e)))?; Ok(ClientInfo { chain: info, best_queued_hash: None, @@ -367,18 +438,32 @@ impl Client where pub fn block_status(&self, id: &BlockId) -> error::Result { // TODO: more efficient implementation if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); + if self + .importing_block + .read() + .as_ref() + .map_or(false, |importing| h == importing) + { + return Ok(BlockStatus::Queued) } } - match self.backend.blockchain().header(*id).map_err(|e| error::Error::from_blockchain(Box::new(e)))?.is_some() { + match self + .backend + .blockchain() + .header(*id) + .map_err(|e| error::Error::from_blockchain(Box::new(e)))? + .is_some() + { true => Ok(BlockStatus::InChain), false => Ok(BlockStatus::Unknown), } } /// Get block hash by number. - pub fn block_hash(&self, block_number: <::Header as HeaderT>::Number) -> error::Result> { + pub fn block_hash( + &self, + block_number: <::Header as HeaderT>::Number, + ) -> error::Result> { self.backend.blockchain().hash(block_number) } @@ -391,7 +476,10 @@ impl Client where } /// Convert an arbitrary block ID into a block hash. - pub fn block_number_from_id(&self, id: &BlockId) -> error::Result::Header as HeaderT>::Number>> { + pub fn block_number_from_id( + &self, + id: &BlockId, + ) -> error::Result::Header as HeaderT>::Number>> { match *id { BlockId::Hash(_) => Ok(self.header(id)?.map(|h| h.number().clone())), BlockId::Number(n) => Ok(Some(n)), @@ -404,60 +492,89 @@ impl Client where } /// Get block body by id. - pub fn body(&self, id: &BlockId) -> error::Result::Extrinsic>>> { + pub fn body( + &self, + id: &BlockId, + ) -> error::Result::Extrinsic>>> { self.backend.blockchain().body(*id) } /// Get block justification set by id. - pub fn justification(&self, id: &BlockId) -> error::Result>> { + pub fn justification( + &self, + id: &BlockId, + ) -> error::Result>> { self.backend.blockchain().justification(*id) } /// Get full block by id. - pub fn block(&self, id: &BlockId) -> error::Result>> { - Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { - (Some(header), Some(extrinsics), Some(justification)) => - Some(SignedBlock { block: RuntimeBlock { header, extrinsics }, justification }), - _ => None, - }) + pub fn block( + &self, + id: &BlockId, + ) -> error::Result>> { + Ok( + match (self.header(id)?, self.body(id)?, self.justification(id)?) { + (Some(header), Some(extrinsics), Some(justification)) => Some(SignedBlock { + block: RuntimeBlock { header, extrinsics }, + justification, + }), + _ => None, + }, + ) } /// Get best block header. pub fn best_block_header(&self) -> error::Result<::Header> { - let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?; - Ok(self.header(&BlockId::Hash(info.best_hash))?.expect("Best block header must always exist")) + let info = self + .backend + .blockchain() + .info() + .map_err(|e| error::Error::from_blockchain(Box::new(e)))?; + Ok(self + .header(&BlockId::Hash(info.best_hash))? 
+ .expect("Best block header must always exist")) } } impl bft::BlockImport for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - error::Error: From<::Error> +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + error::Error: From<::Error>, { fn import_block(&self, block: Block, justification: ::bft::Justification) { let (header, extrinsics) = block.deconstruct(); let justified_header = JustifiedHeader { - header: header, + header, justification, }; - let _ = self.import_block(BlockOrigin::ConsensusBroadcast, justified_header, Some(extrinsics)); + let _ = self.import_block( + BlockOrigin::ConsensusBroadcast, + justified_header, + Some(extrinsics), + ); } } impl bft::Authorities for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - error::Error: From<::Error>, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + error::Error: From<::Error>, { fn authorities(&self, at: &BlockId) -> Result, bft::Error> { - let version: Result<_, bft::Error> = self.runtime_version_at(at).map_err(|_| bft::ErrorKind::InvalidRuntime.into()); + let version: Result<_, bft::Error> = self + .runtime_version_at(at) + .map_err(|_| bft::ErrorKind::InvalidRuntime.into()); let version = version?; - if !self.executor.native_runtime_version().map_or(true, |v| v.can_author_with(&version)) { + if !self + .executor + .native_runtime_version() + .map_or(true, |v| v.can_author_with(&version)) + { return Err(bft::ErrorKind::InvalidRuntime.into()) } self.authorities_at(at).map_err(|_| { @@ -468,14 +585,16 @@ impl bft::Authorities for Client } impl BlockchainEvents for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - error::Error: From<::Error> +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + error::Error: From<::Error>, { /// Get block import event stream. 
- fn import_notification_stream(&self) -> mpsc::UnboundedReceiver> { + fn import_notification_stream( + &self, + ) -> mpsc::UnboundedReceiver> { let (sink, stream) = mpsc::unbounded(); self.import_notification_sinks.lock().push(sink); stream @@ -483,11 +602,11 @@ impl BlockchainEvents for Client } impl ChainHead for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - error::Error: From<::Error> +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + error::Error: From<::Error>, { fn best_block_header(&self) -> error::Result<::Header> { Client::best_block_header(self) @@ -499,17 +618,31 @@ mod tests { use super::*; use codec::Slicable; use keyring::Keyring; - use test_client::{self, TestClient}; use test_client::client::BlockOrigin; use test_client::runtime as test_runtime; - use test_client::runtime::{Transfer, Extrinsic}; + use test_client::runtime::{Extrinsic, Transfer}; + use test_client::{self, TestClient}; #[test] fn client_initialises_from_genesis_ok() { let client = test_client::new(); - assert_eq!(client.using_environment(|| test_runtime::system::balance_of(Keyring::Alice.to_raw_public().into())).unwrap(), 1000); - assert_eq!(client.using_environment(|| test_runtime::system::balance_of(Keyring::Ferdie.to_raw_public().into())).unwrap(), 0); + assert_eq!( + client + .using_environment(|| test_runtime::system::balance_of( + Keyring::Alice.to_raw_public().into() + )) + .unwrap(), + 1000 + ); + assert_eq!( + client + .using_environment(|| test_runtime::system::balance_of( + Keyring::Ferdie.to_raw_public().into() + )) + .unwrap(), + 0 + ); } #[test] @@ -517,11 +650,14 @@ mod tests { let client = test_client::new(); assert_eq!(client.info().unwrap().chain.best_number, 0); - assert_eq!(client.authorities_at(&BlockId::Number(0)).unwrap(), vec![ - Keyring::Alice.to_raw_public().into(), - Keyring::Bob.to_raw_public().into(), - Keyring::Charlie.to_raw_public().into() - ]); + assert_eq!( + client.authorities_at(&BlockId::Number(0)).unwrap(), + vec![ + Keyring::Alice.to_raw_public().into(), + Keyring::Bob.to_raw_public().into(), + Keyring::Charlie.to_raw_public().into(), + ] + ); } #[test] @@ -530,14 +666,22 @@ mod tests { let builder = client.new_block().unwrap(); - client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); + client + .justify_and_import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); assert_eq!(client.info().unwrap().chain.best_number, 1); } fn sign_tx(tx: Transfer) -> Extrinsic { - let signature = Keyring::from_raw_public(tx.from.0.clone()).unwrap().sign(&tx.encode()).into(); - Extrinsic { transfer: tx, signature } + let signature = Keyring::from_raw_public(tx.from.0.clone()) + .unwrap() + .sign(&tx.encode()) + .into(); + Extrinsic { + transfer: tx, + signature, + } } #[test] @@ -546,18 +690,39 @@ mod tests { let mut builder = client.new_block().unwrap(); - builder.push(sign_tx(Transfer { - from: Keyring::Alice.to_raw_public().into(), - to: Keyring::Ferdie.to_raw_public().into(), - amount: 42, - nonce: 0, - })).unwrap(); + builder + .push(sign_tx(Transfer { + from: Keyring::Alice.to_raw_public().into(), + to: Keyring::Ferdie.to_raw_public().into(), + amount: 42, + nonce: 0, + })) + .unwrap(); - client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); + client + .justify_and_import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); assert_eq!(client.info().unwrap().chain.best_number, 1); - assert!(client.state_at(&BlockId::Number(1)).unwrap() != 
client.state_at(&BlockId::Number(0)).unwrap()); - assert_eq!(client.using_environment(|| test_runtime::system::balance_of(Keyring::Alice.to_raw_public().into())).unwrap(), 958); - assert_eq!(client.using_environment(|| test_runtime::system::balance_of(Keyring::Ferdie.to_raw_public().into())).unwrap(), 42); + assert!( + client.state_at(&BlockId::Number(1)).unwrap() + != client.state_at(&BlockId::Number(0)).unwrap() + ); + assert_eq!( + client + .using_environment(|| test_runtime::system::balance_of( + Keyring::Alice.to_raw_public().into() + )) + .unwrap(), + 958 + ); + assert_eq!( + client + .using_environment(|| test_runtime::system::balance_of( + Keyring::Ferdie.to_raw_public().into() + )) + .unwrap(), + 42 + ); } } diff --git a/substrate/client/src/error.rs b/substrate/client/src/error.rs index b9447d38a30b4..36e9f47d88841 100644 --- a/substrate/client/src/error.rs +++ b/substrate/client/src/error.rs @@ -16,9 +16,9 @@ //! Polkadot client possible errors. -use std; -use state_machine; use primitives::hexdisplay::HexDisplay; +use state_machine; +use std; error_chain! { errors { diff --git a/substrate/client/src/genesis.rs b/substrate/client/src/genesis.rs index a4e9f9cf87d8f..80f4b5df66d49 100644 --- a/substrate/client/src/genesis.rs +++ b/substrate/client/src/genesis.rs @@ -16,55 +16,78 @@ //! Tool for creating the genesis block. -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hashing as HashingT, Zero}; +use runtime_primitives::traits::{Block as BlockT, Hashing as HashingT, Header as HeaderT, Zero}; use runtime_primitives::StorageMap; /// Create a genesis block, given the initial storage. -pub fn construct_genesis_block< - Block: BlockT -> ( - storage: &StorageMap -) -> Block { - let state_root = <<::Header as HeaderT>::Hashing as HashingT>::trie_root(storage.clone().into_iter()); - let extrinsics_root = <<::Header as HeaderT>::Hashing as HashingT>::trie_root(::std::iter::empty::<(&[u8], &[u8])>()); +pub fn construct_genesis_block(storage: &StorageMap) -> Block { + let state_root = <<::Header as HeaderT>::Hashing as HashingT>::trie_root( + storage.clone().into_iter(), + ); + let extrinsics_root = <<::Header as HeaderT>::Hashing as HashingT>::trie_root( + ::std::iter::empty::<(&[u8], &[u8])>(), + ); Block::new( <::Header as HeaderT>::new( Zero::zero(), extrinsics_root, state_root, Default::default(), - Default::default() + Default::default(), ), - Default::default() + Default::default(), ) } #[cfg(test)] mod tests { use super::*; - use codec::{Slicable, Joiner}; - use keyring::Keyring; + use codec::{Joiner, Slicable}; + use ed25519::{Pair, Public}; use executor::WasmExecutor; - use state_machine::{execute, OverlayedChanges}; + use keyring::Keyring; use state_machine::backend::InMemory; + use state_machine::{execute, OverlayedChanges}; use test_client; - use test_client::runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; - use test_client::runtime::{Hash, Transfer, Block, BlockNumber, Header, Digest, Extrinsic}; - use ed25519::{Public, Pair}; - - native_executor_instance!(Executor, test_client::runtime::api::dispatch, test_client::runtime::VERSION, include_bytes!("../../test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm")); - - fn construct_block(backend: &InMemory, number: BlockNumber, parent_hash: Hash, state_root: Hash, txs: Vec) -> (Vec, Hash) { + use test_client::runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; + use test_client::runtime::{Block, BlockNumber, Digest, Extrinsic, 
Hash, Header, Transfer}; + + native_executor_instance!( + Executor, + test_client::runtime::api::dispatch, + test_client::runtime::VERSION, + include_bytes!( + "../../test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm" + ) + ); + + fn construct_block( + backend: &InMemory, + number: BlockNumber, + parent_hash: Hash, + state_root: Hash, + txs: Vec, + ) -> (Vec, Hash) { use triehash::ordered_trie_root; - let transactions = txs.into_iter().map(|tx| { - let signature = Pair::from(Keyring::from_public(Public::from_raw(tx.from.0)).unwrap()) - .sign(&tx.encode()).into(); - - Extrinsic { transfer: tx, signature } - }).collect::>(); - - let extrinsics_root = ordered_trie_root(transactions.iter().map(Slicable::encode)).0.into(); + let transactions = txs + .into_iter() + .map(|tx| { + let signature = Pair::from( + Keyring::from_public(Public::from_raw(tx.from.0)).unwrap(), + ).sign(&tx.encode()) + .into(); + + Extrinsic { + transfer: tx, + signature, + } + }) + .collect::>(); + + let extrinsics_root = ordered_trie_root(transactions.iter().map(Slicable::encode)) + .0 + .into(); println!("root before: {:?}", extrinsics_root); let mut header = Header { @@ -72,7 +95,7 @@ mod tests { number, state_root, extrinsics_root, - digest: Digest { logs: vec![], }, + digest: Digest { logs: vec![] }, }; let hash = header.hash(); let mut overlay = OverlayedChanges::default(); @@ -100,12 +123,18 @@ mod tests { &mut overlay, &Executor::new(), "finalise_block", - &[] + &[], ).unwrap(); header = Header::decode(&mut &ret_data[..]).unwrap(); println!("root after: {:?}", header.extrinsics_root); - (vec![].and(&Block { header, extrinsics: transactions }), hash) + ( + vec![].and(&Block { + header, + extrinsics: transactions, + }), + hash, + ) } fn block1(genesis_hash: Hash, backend: &InMemory) -> (Vec, Hash) { @@ -119,14 +148,18 @@ mod tests { to: Keyring::Two.to_raw_public().into(), amount: 69, nonce: 0, - }] + }], ) } #[test] fn construct_genesis_should_work_with_native() { let mut storage = GenesisConfig::new_simple( - vec![Keyring::One.to_raw_public().into(), Keyring::Two.to_raw_public().into()], 1000 + vec![ + Keyring::One.to_raw_public().into(), + Keyring::Two.to_raw_public().into(), + ], + 1000, ).genesis_map(); let block = construct_genesis_block::(&storage); let genesis_hash = block.header.hash(); @@ -141,14 +174,18 @@ mod tests { &mut overlay, &Executor::new(), "execute_block", - &b1data + &b1data, ).unwrap(); } #[test] fn construct_genesis_should_work_with_wasm() { let mut storage = GenesisConfig::new_simple( - vec![Keyring::One.to_raw_public().into(), Keyring::Two.to_raw_public().into()], 1000 + vec![ + Keyring::One.to_raw_public().into(), + Keyring::Two.to_raw_public().into(), + ], + 1000, ).genesis_map(); let block = construct_genesis_block::(&storage); let genesis_hash = block.header.hash(); @@ -163,7 +200,7 @@ mod tests { &mut overlay, &WasmExecutor, "execute_block", - &b1data + &b1data, ).unwrap(); } @@ -171,7 +208,11 @@ mod tests { #[should_panic] fn construct_genesis_with_bad_transaction_should_panic() { let mut storage = GenesisConfig::new_simple( - vec![Keyring::One.to_raw_public().into(), Keyring::Two.to_raw_public().into()], 68 + vec![ + Keyring::One.to_raw_public().into(), + Keyring::Two.to_raw_public().into(), + ], + 68, ).genesis_map(); let block = construct_genesis_block::(&storage); let genesis_hash = block.header.hash(); @@ -186,7 +227,7 @@ mod tests { &mut overlay, &Executor::new(), "execute_block", - &b1data + &b1data, ).unwrap(); } } diff --git 
a/substrate/client/src/in_mem.rs b/substrate/client/src/in_mem.rs index 0473c1fa6ae07..156fb63cfb5b4 100644 --- a/substrate/client/src/in_mem.rs +++ b/substrate/client/src/in_mem.rs @@ -16,17 +16,17 @@ //! In memory client backend -use std::collections::HashMap; -use std::sync::Arc; -use parking_lot::RwLock; -use error; use backend; +use blockchain::{self, BlockStatus}; +use error; use light; +use parking_lot::RwLock; +use runtime_primitives::bft::Justification; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero}; -use runtime_primitives::bft::Justification; -use blockchain::{self, BlockStatus}; use state_machine::backend::{Backend as StateBackend, InMemory}; +use std::collections::HashMap; +use std::sync::Arc; struct PendingBlock { block: StoredBlock, @@ -40,7 +40,11 @@ enum StoredBlock { } impl StoredBlock { - fn new(header: B::Header, body: Option>, just: Option>) -> Self { + fn new( + header: B::Header, + body: Option>, + just: Option>, + ) -> Self { match body { Some(body) => StoredBlock::Full(B::new(header, body), just), None => StoredBlock::Header(header, just), @@ -56,24 +60,30 @@ impl StoredBlock { fn justification(&self) -> Option<&Justification> { match *self { - StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref() + StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref(), } } fn extrinsics(&self) -> Option<&[B::Extrinsic]> { match *self { StoredBlock::Header(_, _) => None, - StoredBlock::Full(ref b, _) => Some(b.extrinsics()) + StoredBlock::Full(ref b, _) => Some(b.extrinsics()), } } - fn into_inner(self) -> (B::Header, Option>, Option>) { + fn into_inner( + self, + ) -> ( + B::Header, + Option>, + Option>, + ) { match self { StoredBlock::Header(header, just) => (header, None, just), StoredBlock::Full(block, just) => { let (header, body) = block.deconstruct(); (header, Some(body), just) - } + }, } } } @@ -104,17 +114,14 @@ impl Blockchain { /// Create new in-memory blockchain storage. pub fn new() -> Blockchain { - let storage = Arc::new(RwLock::new( - BlockchainStorage { - blocks: HashMap::new(), - hashes: HashMap::new(), - best_hash: Default::default(), - best_number: Zero::zero(), - genesis_hash: Default::default(), - })); - Blockchain { - storage: storage, - } + let storage = Arc::new(RwLock::new(BlockchainStorage { + blocks: HashMap::new(), + hashes: HashMap::new(), + best_hash: Default::default(), + best_number: Zero::zero(), + genesis_hash: Default::default(), + })); + Blockchain { storage } } /// Insert a block header and associated data. 
@@ -124,11 +131,13 @@ impl Blockchain { header: ::Header, justification: Option>, body: Option::Extrinsic>>, - is_new_best: bool + is_new_best: bool, ) { let number = header.number().clone(); let mut storage = self.storage.write(); - storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification)); + storage + .blocks + .insert(hash.clone(), StoredBlock::new(header, body, justification)); storage.hashes.insert(number, hash.clone()); if is_new_best { storage.best_hash = hash.clone(); @@ -148,7 +157,7 @@ impl Blockchain { pub fn canon_equals_to(&self, other: &Self) -> bool { let this = self.storage.read(); let other = other.storage.read(); - this.hashes == other.hashes + this.hashes == other.hashes && this.best_hash == other.best_hash && this.best_number == other.best_number && this.genesis_hash == other.genesis_hash @@ -158,7 +167,11 @@ impl Blockchain { impl blockchain::HeaderBackend for Blockchain { fn header(&self, id: BlockId) -> error::Result::Header>> { Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash).map(|b| b.header().clone()) + self.storage + .read() + .blocks + .get(&hash) + .map(|b| b.header().clone()) })) } @@ -172,30 +185,45 @@ impl blockchain::HeaderBackend for Blockchain { } fn status(&self, id: BlockId) -> error::Result { - match self.id(id).map_or(false, |hash| self.storage.read().blocks.contains_key(&hash)) { + match self + .id(id) + .map_or(false, |hash| self.storage.read().blocks.contains_key(&hash)) + { true => Ok(BlockStatus::InChain), false => Ok(BlockStatus::Unknown), } } - fn hash(&self, number: <::Header as HeaderT>::Number) -> error::Result> { + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> error::Result> { Ok(self.id(BlockId::Number(number))) } } - impl blockchain::Backend for Blockchain { fn body(&self, id: BlockId) -> error::Result::Extrinsic>>> { Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash) + self.storage + .read() + .blocks + .get(&hash) .and_then(|b| b.extrinsics().map(|x| x.to_vec())) })) } - fn justification(&self, id: BlockId) -> error::Result>> { - Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b| - b.justification().map(|x| x.clone())) - )) + fn justification( + &self, + id: BlockId, + ) -> error::Result>> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.justification().map(|x| x.clone())) + })) } } @@ -226,9 +254,12 @@ impl backend::BlockImportOperation for BlockImportOperatio header: ::Header, body: Option::Extrinsic>>, justification: Option>, - is_new_best: bool + is_new_best: bool, ) -> error::Result<()> { - assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); + assert!( + self.pending_block.is_none(), + "Only one block per operation is allowed" + ); self.pending_block = Some(PendingBlock { block: StoredBlock::new(header, body, justification), is_best: is_new_best, @@ -236,26 +267,34 @@ impl backend::BlockImportOperation for BlockImportOperatio Ok(()) } - fn update_storage(&mut self, update: ::Transaction) -> error::Result<()> { + fn update_storage( + &mut self, + update: ::Transaction, + ) -> error::Result<()> { self.new_state = Some(self.old_state.update(update)); Ok(()) } - fn reset_storage, Vec)>>(&mut self, iter: I) -> error::Result<()> { + fn reset_storage, Vec)>>( + &mut self, + iter: I, + ) -> error::Result<()> { self.new_state = Some(InMemory::from(iter.collect::>())); Ok(()) } } /// In-memory backend. 
Keeps all states and blocks in memory. Useful for testing. -pub struct Backend where +pub struct Backend +where Block: BlockT, { states: RwLock>, blockchain: Blockchain, } -impl Backend where +impl Backend +where Block: BlockT, { /// Create a new instance of in-mem backend. @@ -267,7 +306,8 @@ impl Backend where } } -impl backend::Backend for Backend where +impl backend::Backend for Backend +where Block: BlockT, { type BlockImportOperation = BlockImportOperation; @@ -293,8 +333,12 @@ impl backend::Backend for Backend where let (header, body, justification) = pending_block.block.into_inner(); let hash = header.hash(); - self.states.write().insert(hash, operation.new_state.unwrap_or_else(|| old_state.clone())); - self.blockchain.insert(hash, header, justification, body, pending_block.is_best); + self.states.write().insert( + hash, + operation.new_state.unwrap_or_else(|| old_state.clone()), + ); + self.blockchain + .insert(hash, header, justification, body, pending_block.is_best); } Ok(()) } @@ -304,7 +348,11 @@ impl backend::Backend for Backend where } fn state_at(&self, block: BlockId) -> error::Result { - match self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) { + match self + .blockchain + .id(block) + .and_then(|id| self.states.read().get(&id).cloned()) + { Some(state) => Ok(state), None => Err(error::ErrorKind::UnknownBlock(format!("{}", block)).into()), } diff --git a/substrate/client/src/lib.rs b/substrate/client/src/lib.rs index 187a43636b496..0e077c4b8e3af 100644 --- a/substrate/client/src/lib.rs +++ b/substrate/client/src/lib.rs @@ -17,27 +17,34 @@ //! Substrate Client and associated logic. #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "128"] extern crate substrate_bft as bft; extern crate substrate_codec as codec; +#[cfg(test)] +extern crate substrate_keyring as keyring; extern crate substrate_primitives as primitives; extern crate substrate_runtime_io as runtime_io; -extern crate substrate_runtime_support as runtime_support; extern crate substrate_runtime_primitives as runtime_primitives; +extern crate substrate_runtime_support as runtime_support; extern crate substrate_state_machine as state_machine; -#[cfg(test)] extern crate substrate_keyring as keyring; -#[cfg(test)] extern crate substrate_test_client as test_client; +#[cfg(test)] +extern crate substrate_test_client as test_client; extern crate ed25519; extern crate futures; extern crate parking_lot; extern crate triehash; -#[macro_use] extern crate error_chain; -#[macro_use] extern crate log; -#[cfg_attr(test, macro_use)] extern crate substrate_executor as executor; -#[cfg(test)] #[macro_use] extern crate hex_literal; +#[macro_use] +extern crate error_chain; +#[macro_use] +extern crate log; +#[cfg_attr(test, macro_use)] +extern crate substrate_executor as executor; +#[cfg(test)] +#[macro_use] +extern crate hex_literal; pub mod error; pub mod blockchain; @@ -49,11 +56,9 @@ pub mod light; mod call_executor; mod client; +pub use blockchain::Info as ChainInfo; +pub use call_executor::{CallExecutor, CallResult, LocalCallExecutor}; pub use client::{ - new_in_mem, - BlockStatus, BlockOrigin, BlockchainEventStream, BlockchainEvents, - Client, ClientInfo, ChainHead, - ImportResult, JustifiedHeader, + new_in_mem, BlockOrigin, BlockStatus, BlockchainEventStream, BlockchainEvents, ChainHead, + Client, ClientInfo, ImportResult, JustifiedHeader, }; -pub use blockchain::Info as ChainInfo; -pub use call_executor::{CallResult, CallExecutor, LocalCallExecutor}; diff --git 
a/substrate/client/src/light/backend.rs b/substrate/client/src/light/backend.rs index 1311618e994ec..4a9336b5caf6b 100644 --- a/substrate/client/src/light/backend.rs +++ b/substrate/client/src/light/backend.rs @@ -19,10 +19,12 @@ use std::sync::{Arc, Weak}; -use runtime_primitives::{bft::Justification, generic::BlockId}; use runtime_primitives::traits::Block as BlockT; -use state_machine::{Backend as StateBackend, TrieBackend as StateTrieBackend, - TryIntoTrieBackend as TryIntoStateTrieBackend}; +use runtime_primitives::{bft::Justification, generic::BlockId}; +use state_machine::{ + Backend as StateBackend, TrieBackend as StateTrieBackend, + TryIntoTrieBackend as TryIntoStateTrieBackend, +}; use backend::{Backend as ClientBackend, BlockImportOperation, RemoteBackend}; use blockchain::HeaderBackend as BlockchainHeaderBackend; @@ -60,7 +62,12 @@ impl Backend { } } -impl ClientBackend for Backend where Block: BlockT, S: BlockchainStorage, F: Fetcher { +impl ClientBackend for Backend +where + Block: BlockT, + S: BlockchainStorage, + F: Fetcher, +{ type BlockImportOperation = ImportOperation; type Blockchain = Blockchain; type State = OnDemandState; @@ -74,8 +81,12 @@ impl ClientBackend for Backend where Block: BlockT, S: } fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { - let header = operation.header.expect("commit is called after set_block_data; set_block_data sets header; qed"); - self.blockchain.storage().import_header(operation.is_new_best, header) + let header = operation + .header + .expect("commit is called after set_block_data; set_block_data sets header; qed"); + self.blockchain + .storage() + .import_header(operation.is_new_best, header) } fn blockchain(&self) -> &Blockchain { @@ -95,9 +106,18 @@ impl ClientBackend for Backend where Block: BlockT, S: } } -impl RemoteBackend for Backend where Block: BlockT, S: BlockchainStorage, F: Fetcher {} - -impl BlockImportOperation for ImportOperation where Block: BlockT, F: Fetcher { +impl RemoteBackend for Backend +where + Block: BlockT, + S: BlockchainStorage, + F: Fetcher, +{} + +impl BlockImportOperation for ImportOperation +where + Block: BlockT, + F: Fetcher, +{ type State = OnDemandState; fn state(&self) -> ClientResult> { @@ -110,19 +130,25 @@ impl BlockImportOperation for ImportOperation where B header: Block::Header, _body: Option>, _justification: Option>, - is_new_best: bool + is_new_best: bool, ) -> ClientResult<()> { self.is_new_best = is_new_best; self.header = Some(header); Ok(()) } - fn update_storage(&mut self, _update: ::Transaction) -> ClientResult<()> { + fn update_storage( + &mut self, + _update: ::Transaction, + ) -> ClientResult<()> { // we're not storing anything locally => ignore changes Ok(()) } - fn reset_storage, Vec)>>(&mut self, _iter: I) -> ClientResult<()> { + fn reset_storage, Vec)>>( + &mut self, + _iter: I, + ) -> ClientResult<()> { // we're not storing anything locally => ignore changes Ok(()) } @@ -137,7 +163,11 @@ impl Clone for OnDemandState { } } -impl StateBackend for OnDemandState where Block: BlockT, F: Fetcher { +impl StateBackend for OnDemandState +where + Block: BlockT, + F: Fetcher, +{ type Error = ClientError; type Transaction = (); @@ -150,7 +180,9 @@ impl StateBackend for OnDemandState where Block: BlockT, F: } fn storage_root(&self, _delta: I) -> ([u8; 32], Self::Transaction) - where I: IntoIterator, Option>)> { + where + I: IntoIterator, Option>)>, + { ([0; 32], ()) } @@ -160,7 +192,11 @@ impl StateBackend for OnDemandState where Block: BlockT, F: } } 
-impl TryIntoStateTrieBackend for OnDemandState where Block: BlockT, F: Fetcher { +impl TryIntoStateTrieBackend for OnDemandState +where + Block: BlockT, + F: Fetcher, +{ fn try_into_trie_backend(self) -> Option { None } @@ -168,12 +204,12 @@ impl TryIntoStateTrieBackend for OnDemandState where Block: #[cfg(test)] pub mod tests { - use futures::future::{ok, FutureResult}; - use parking_lot::Mutex; use call_executor::CallResult; use error::Error as ClientError; - use test_client::runtime::{Hash, Block}; + use futures::future::{ok, FutureResult}; use light::fetcher::{Fetcher, RemoteCallRequest}; + use parking_lot::Mutex; + use test_client::runtime::{Block, Hash}; pub type OkCallFetcher = Mutex; diff --git a/substrate/client/src/light/blockchain.rs b/substrate/client/src/light/blockchain.rs index 9655f91baa616..09af9701614cd 100644 --- a/substrate/client/src/light/blockchain.rs +++ b/substrate/client/src/light/blockchain.rs @@ -17,14 +17,16 @@ //! Light client blockchin backend. Only stores headers and justifications of recent //! blocks. CHT roots are stored for headers of ancient blocks. -use std::sync::Weak; use parking_lot::Mutex; +use std::sync::Weak; -use runtime_primitives::{bft::Justification, generic::BlockId}; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; +use runtime_primitives::{bft::Justification, generic::BlockId}; -use blockchain::{Backend as BlockchainBackend, BlockStatus, - HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo}; +use blockchain::{ + Backend as BlockchainBackend, BlockStatus, HeaderBackend as BlockchainHeaderBackend, + Info as BlockchainInfo, +}; use error::Result as ClientResult; use light::fetcher::Fetcher; @@ -65,7 +67,12 @@ impl Blockchain { } } -impl BlockchainHeaderBackend for Blockchain where Block: BlockT, S: Storage, F: Fetcher { +impl BlockchainHeaderBackend for Blockchain +where + Block: BlockT, + S: Storage, + F: Fetcher, +{ fn header(&self, id: BlockId) -> ClientResult> { self.storage.header(id) } @@ -78,18 +85,29 @@ impl BlockchainHeaderBackend for Blockchain where Bloc self.storage.status(id) } - fn hash(&self, number: <::Header as HeaderT>::Number) -> ClientResult> { + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> ClientResult> { self.storage.hash(number) } } -impl BlockchainBackend for Blockchain where Block: BlockT, S: Storage, F: Fetcher { +impl BlockchainBackend for Blockchain +where + Block: BlockT, + S: Storage, + F: Fetcher, +{ fn body(&self, _id: BlockId) -> ClientResult>> { // TODO [light]: fetch from remote node Ok(None) } - fn justification(&self, _id: BlockId) -> ClientResult>> { + fn justification( + &self, + _id: BlockId, + ) -> ClientResult>> { Ok(None) } } diff --git a/substrate/client/src/light/call_executor.rs b/substrate/client/src/light/call_executor.rs index 104e5e81cc894..f86247934111c 100644 --- a/substrate/client/src/light/call_executor.rs +++ b/substrate/client/src/light/call_executor.rs @@ -17,19 +17,21 @@ //! Light client call exector. Executes methods on remote full nodes, fetching //! execution proof and checking it locally. 
+use futures::{Future, IntoFuture}; use std::sync::Arc; -use futures::{IntoFuture, Future}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; -use state_machine::{Backend as StateBackend, CodeExecutor, OverlayedChanges, execution_proof_check}; +use state_machine::{ + execution_proof_check, Backend as StateBackend, CodeExecutor, OverlayedChanges, +}; use blockchain::Backend as ChainBackend; use call_executor::{CallExecutor, CallResult}; +use codec::Slicable; use error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; -use light::fetcher::{Fetcher, RemoteCallRequest}; use executor::RuntimeVersion; -use codec::Slicable; +use light::fetcher::{Fetcher, RemoteCallRequest}; /// Call executor that executes methods on remote node, querying execution proof /// and checking proof by re-executing locally. @@ -41,30 +43,43 @@ pub struct RemoteCallExecutor { impl RemoteCallExecutor { /// Creates new instance of remote call executor. pub fn new(blockchain: Arc, fetcher: Arc) -> Self { - RemoteCallExecutor { blockchain, fetcher } + RemoteCallExecutor { + blockchain, + fetcher, + } } } impl CallExecutor for RemoteCallExecutor - where - Block: BlockT, - B: ChainBackend, - F: Fetcher, +where + Block: BlockT, + B: ChainBackend, + F: Fetcher, { type Error = ClientError; - fn call(&self, id: &BlockId, method: &str, call_data: &[u8]) -> ClientResult { + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> ClientResult { let block_hash = match *id { BlockId::Hash(hash) => hash, - BlockId::Number(number) => self.blockchain.hash(number)? + BlockId::Number(number) => self + .blockchain + .hash(number)? .ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", number)))?, }; - self.fetcher.remote_call(RemoteCallRequest { - block: block_hash.clone(), - method: method.into(), - call_data: call_data.to_vec(), - }).into_future().wait() + self.fetcher + .remote_call(RemoteCallRequest { + block: block_hash.clone(), + method: method.into(), + call_data: call_data.to_vec(), + }) + .into_future() + .wait() } fn runtime_version(&self, id: &BlockId) -> ClientResult { @@ -73,11 +88,23 @@ impl CallExecutor for RemoteCallExecutor .ok_or_else(|| ClientErrorKind::VersionInvalid.into()) } - fn call_at_state(&self, _state: &S, _changes: &mut OverlayedChanges, _method: &str, _call_data: &[u8]) -> ClientResult<(Vec, S::Transaction)> { + fn call_at_state( + &self, + _state: &S, + _changes: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8], + ) -> ClientResult<(Vec, S::Transaction)> { Err(ClientErrorKind::NotAvailableOnLightClient.into()) } - fn prove_at_state(&self, _state: S, _changes: &mut OverlayedChanges, _method: &str, _call_data: &[u8]) -> ClientResult<(Vec, Vec>)> { + fn prove_at_state( + &self, + _state: S, + _changes: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8], + ) -> ClientResult<(Vec, Vec>)> { Err(ClientErrorKind::NotAvailableOnLightClient.into()) } @@ -91,16 +118,17 @@ pub fn check_execution_proof( blockchain: &B, executor: &E, request: &RemoteCallRequest, - remote_proof: Vec> + remote_proof: Vec>, ) -> ClientResult - where - Block: BlockT, - ::Hash: Into<[u8; 32]>, // TODO: remove when patricia_trie generic. - B: ChainBackend, - E: CodeExecutor, +where + Block: BlockT, + ::Hash: Into<[u8; 32]>, // TODO: remove when patricia_trie generic. 
+ B: ChainBackend, + E: CodeExecutor, { let local_header = blockchain.header(BlockId::Hash(request.block))?; - let local_header = local_header.ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", request.block)))?; + let local_header = + local_header.ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", request.block)))?; let local_state_root = *local_header.state_root(); do_check_execution_proof(local_state_root.into(), executor, request, remote_proof) } @@ -112,9 +140,9 @@ fn do_check_execution_proof( request: &RemoteCallRequest, remote_proof: Vec>, ) -> ClientResult - where - Hash: ::std::fmt::Display, - E: CodeExecutor, +where + Hash: ::std::fmt::Display, + E: CodeExecutor, { let mut changes = OverlayedChanges::default(); let (local_result, _) = execution_proof_check( @@ -123,33 +151,48 @@ fn do_check_execution_proof( &mut changes, executor, &request.method, - &request.call_data)?; + &request.call_data, + )?; - Ok(CallResult { return_data: local_result, changes }) + Ok(CallResult { + return_data: local_result, + changes, + }) } #[cfg(test)] mod tests { - use test_client; use super::*; + use test_client; #[test] fn execution_proof_is_generated_and_checked() { // prepare remote client let remote_client = test_client::new(); let remote_block_id = BlockId::Number(0); - let remote_block_storage_root = remote_client.state_at(&remote_block_id) - .unwrap().storage_root(::std::iter::empty()).0; + let remote_block_storage_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(::std::iter::empty()) + .0; // 'fetch' execution proof from remote node - let remote_execution_proof = remote_client.execution_proof(&remote_block_id, "authorities", &[]).unwrap().1; + let remote_execution_proof = remote_client + .execution_proof(&remote_block_id, "authorities", &[]) + .unwrap() + .1; // check remote execution proof locally let local_executor = test_client::NativeExecutor::new(); - do_check_execution_proof(remote_block_storage_root.into(), &local_executor, &RemoteCallRequest { - block: test_client::runtime::Hash::default(), - method: "authorities".into(), - call_data: vec![], - }, remote_execution_proof).unwrap(); + do_check_execution_proof( + remote_block_storage_root.into(), + &local_executor, + &RemoteCallRequest { + block: test_client::runtime::Hash::default(), + method: "authorities".into(), + call_data: vec![], + }, + remote_execution_proof, + ).unwrap(); } } diff --git a/substrate/client/src/light/fetcher.rs b/substrate/client/src/light/fetcher.rs index 3527203b3fdd8..0430bc9019534 100644 --- a/substrate/client/src/light/fetcher.rs +++ b/substrate/client/src/light/fetcher.rs @@ -16,8 +16,8 @@ //! Light client data fetcher. Fetches requested data from remote full nodes. -use std::sync::Arc; use futures::IntoFuture; +use std::sync::Arc; use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT}; use state_machine::CodeExecutor; @@ -42,7 +42,7 @@ pub struct RemoteCallRequest { /// is correct (see FetchedDataChecker) and return already checked data. pub trait Fetcher: Send + Sync { /// Remote call result future. - type RemoteCallResult: IntoFuture; + type RemoteCallResult: IntoFuture; /// Fetch remote call result. fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; @@ -51,7 +51,11 @@ pub trait Fetcher: Send + Sync { /// Light client remote data checker. pub trait FetchChecker: Send + Sync { /// Check remote method execution proof. 
- fn check_execution_proof(&self, request: &RemoteCallRequest, remote_proof: Vec>) -> ClientResult; + fn check_execution_proof( + &self, + request: &RemoteCallRequest, + remote_proof: Vec>, + ) -> ClientResult; } /// Remote data checker. @@ -76,15 +80,20 @@ impl LightDataChecker { } impl FetchChecker for LightDataChecker - where - Block: BlockT, - ::Hash: From<[u8; 32]> + Into<[u8; 32]>, // TODO: remove when patricia_trie generic. - <::Header as HeaderT>::Number: As, - S: BlockchainStorage, - E: CodeExecutor, - F: Fetcher, +where + Block: BlockT, + ::Hash: From<[u8; 32]> + Into<[u8; 32]>, /* TODO: remove when + * patricia_trie generic. */ + <::Header as HeaderT>::Number: As, + S: BlockchainStorage, + E: CodeExecutor, + F: Fetcher, { - fn check_execution_proof(&self, request: &RemoteCallRequest, remote_proof: Vec>) -> ClientResult { + fn check_execution_proof( + &self, + request: &RemoteCallRequest, + remote_proof: Vec>, + ) -> ClientResult { check_execution_proof(&*self.blockchain, &self.executor, request, remote_proof) } } diff --git a/substrate/client/src/light/mod.rs b/substrate/client/src/light/mod.rs index d55ab5f3ac8de..fbcf497d9d53c 100644 --- a/substrate/client/src/light/mod.rs +++ b/substrate/client/src/light/mod.rs @@ -23,8 +23,8 @@ pub mod fetcher; use std::sync::Arc; -use runtime_primitives::BuildStorage; use runtime_primitives::traits::Block as BlockT; +use runtime_primitives::BuildStorage; use state_machine::CodeExecutor; use client::Client; @@ -35,12 +35,17 @@ use light::call_executor::RemoteCallExecutor; use light::fetcher::{Fetcher, LightDataChecker}; /// Create an instance of light client blockchain backend. -pub fn new_light_blockchain, F>(storage: S) -> Arc> { +pub fn new_light_blockchain, F>( + storage: S, +) -> Arc> { Arc::new(Blockchain::new(storage)) } /// Create an instance of light client backend. -pub fn new_light_backend, F: Fetcher>(blockchain: Arc>, fetcher: Arc) -> Arc> { +pub fn new_light_backend, F: Fetcher>( + blockchain: Arc>, + fetcher: Arc, +) -> Arc> { blockchain.set_fetcher(Arc::downgrade(&fetcher)); Arc::new(Backend::new(blockchain)) } @@ -51,11 +56,11 @@ pub fn new_light( fetcher: Arc, genesis_storage: GS, ) -> ClientResult, RemoteCallExecutor, F>, B>> - where - B: BlockT, - S: BlockchainStorage, - F: Fetcher, - GS: BuildStorage, +where + B: BlockT, + S: BlockchainStorage, + F: Fetcher, + GS: BuildStorage, { let executor = RemoteCallExecutor::new(backend.blockchain().clone(), fetcher); Client::new(backend, executor, genesis_storage) @@ -66,10 +71,10 @@ pub fn new_fetch_checker( blockchain: Arc>, executor: E, ) -> LightDataChecker - where - B: BlockT, - S: BlockchainStorage, - E: CodeExecutor, +where + B: BlockT, + S: BlockchainStorage, + E: CodeExecutor, { LightDataChecker::new(blockchain, executor) } diff --git a/substrate/codec/src/joiner.rs b/substrate/codec/src/joiner.rs index f5775082fd854..3cf43074177a3 100644 --- a/substrate/codec/src/joiner.rs +++ b/substrate/codec/src/joiner.rs @@ -16,8 +16,8 @@ //! Trait -use core::iter::Extend; use super::slicable::Slicable; +use core::iter::Extend; /// Trait to allow itself to be serialised into a value which can be extended /// by bytes. 
@@ -25,7 +25,10 @@ pub trait Joiner {
 	fn and(self, value: &V) -> Self;
 }

-impl Joiner for T where T: for<'a> Extend<&'a u8> {
+impl Joiner for T
+where
+	T: for<'a> Extend<&'a u8>,
+{
 	fn and(mut self, value: &V) -> Self {
 		value.using_encoded(|s| self.extend(s));
 		self
diff --git a/substrate/codec/src/keyedvec.rs b/substrate/codec/src/keyedvec.rs
index 353c7ec0e97c8..2b36775f2c016 100644
--- a/substrate/codec/src/keyedvec.rs
+++ b/substrate/codec/src/keyedvec.rs
@@ -16,9 +16,9 @@

 //! Serialiser and prepender.

-use slicable::Slicable;
-use core::iter::Extend;
 use alloc::vec::Vec;
+use core::iter::Extend;
+use slicable::Slicable;

 /// Trait to allow itselg to be serialised and prepended by a given slice.
 pub trait KeyedVec {
diff --git a/substrate/codec/src/lib.rs b/substrate/codec/src/lib.rs
index d38e3f6f723d8..2f4dcfca42e53 100644
--- a/substrate/codec/src/lib.rs
+++ b/substrate/codec/src/lib.rs
@@ -38,6 +38,6 @@ mod slicable;
 mod joiner;
 mod keyedvec;

-pub use self::slicable::{Input, Slicable, encode_slice};
 pub use self::joiner::Joiner;
 pub use self::keyedvec::KeyedVec;
+pub use self::slicable::{encode_slice, Input, Slicable};
diff --git a/substrate/codec/src/slicable.rs b/substrate/codec/src/slicable.rs
index da402c8375727..b97f0a1405440 100644
--- a/substrate/codec/src/slicable.rs
+++ b/substrate/codec/src/slicable.rs
@@ -16,11 +16,11 @@

 //! Serialisation.

-use alloc::vec::Vec;
-use alloc::boxed::Box;
-use core::{mem, slice};
 use super::joiner::Joiner;
+use alloc::boxed::Box;
+use alloc::vec::Vec;
 use arrayvec::ArrayVec;
+use core::{mem, slice};

 /// Trait that allows reading of data into a slice.
 pub trait Input {
@@ -74,7 +74,10 @@ pub trait Slicable: Sized {
 /// Encode a bytes slice as `Slicable` that can be decoded into a vector.
 pub fn encode_slice(bytes: &[u8]) -> Vec {
 	let len = bytes.len();
-	assert!(len <= u32::max_value() as usize, "Attempted to serialize a collection with too many elements.");
+	assert!(
+		len <= u32::max_value() as usize,
+		"Attempted to serialize a collection with too many elements."
+	);

 	let mut r: Vec = Vec::new().and(&(len as u32));
 	r.extend_from_slice(bytes);
@@ -96,11 +99,11 @@ impl Slicable for Result {
 			Ok(ref t) => {
 				v.push(0);
 				t.using_encoded(|s| v.extend(s));
-			}
+			},
 			Err(ref e) => {
 				v.push(1);
 				e.using_encoded(|s| v.extend(s));
-			}
+			},
 		}
 		v
 	}
@@ -143,7 +146,7 @@ impl Slicable for Option {
 			Some(ref t) => {
 				v.push(1);
 				t.using_encoded(|s| v.extend(s));
-			}
+			},
 			None => v.push(0),
 		}
 		v
@@ -219,7 +222,10 @@ impl Slicable for Vec {
 		use core::iter::Extend;

 		let len = self.len();
-		assert!(len <= u32::max_value() as usize, "Attempted to serialize vec with too many elements.");
+		assert!(
+			len <= u32::max_value() as usize,
+			"Attempted to serialize vec with too many elements."
+		);

 		let mut r: Vec = Vec::new().and(&(len as u32));
 		for item in self {
@@ -311,12 +317,24 @@ mod inner_tuple_impl {
 // note: the copy bound and static lifetimes are necessary for safety of `Slicable` blanket
 // implementation.
 trait EndianSensitive: Copy + 'static {
-	fn to_le(self) -> Self { self }
-	fn to_be(self) -> Self { self }
-	fn from_le(self) -> Self { self }
-	fn from_be(self) -> Self { self }
-	fn as_be_then T>(&self, f: F) -> T { f(&self) }
-	fn as_le_then T>(&self, f: F) -> T { f(&self) }
+	fn to_le(self) -> Self {
+		self
+	}
+	fn to_be(self) -> Self {
+		self
+	}
+	fn from_le(self) -> Self {
+		self
+	}
+	fn from_be(self) -> Self {
+		self
+	}
+	fn as_be_then T>(&self, f: F) -> T {
+		f(&self)
+	}
+	fn as_le_then T>(&self, f: F) -> T {
+		f(&self)
+	}
 }

 macro_rules! impl_endians {
@@ -404,10 +422,11 @@ macro_rules! impl_non_endians {
 }

 impl_endians!(u16, u32, u64, u128, usize, i16, i32, i64, i128, isize);
-impl_non_endians!(i8, [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8],
-	[u8; 10], [u8; 12], [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40],
-	[u8; 48], [u8; 56], [u8; 64], [u8; 80], [u8; 96], [u8; 112], [u8; 128], bool);
-
+impl_non_endians!(
+	i8, [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 10], [u8; 12],
+	[u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], [u8; 48], [u8; 56],
+	[u8; 64], [u8; 80], [u8; 96], [u8; 112], [u8; 128], bool
+);

 #[cfg(test)]
 mod tests {
@@ -416,8 +435,6 @@ mod tests {
 	#[test]
 	fn vec_is_slicable() {
 		let v = b"Hello world".to_vec();
-		v.using_encoded(|ref slice|
-			assert_eq!(slice, &b"\x0b\0\0\0Hello world")
-		);
+		v.using_encoded(|ref slice| assert_eq!(slice, &b"\x0b\0\0\0Hello world"));
 	}
 }
diff --git a/substrate/ed25519/src/lib.rs b/substrate/ed25519/src/lib.rs
index e02108524604b..ed4bdcdd33056 100644
--- a/substrate/ed25519/src/lib.rs
+++ b/substrate/ed25519/src/lib.rs
@@ -16,15 +16,15 @@

 //! Simple Ed25519 API.

-extern crate ring;
 extern crate base58;
+extern crate blake2_rfc;
+extern crate ring;
 extern crate substrate_primitives as primitives;
 extern crate untrusted;
-extern crate blake2_rfc;

-use ring::{rand, signature};
+use base58::{FromBase58, ToBase58};
 use primitives::{hash::H512, AuthorityId};
-use base58::{ToBase58, FromBase58};
+use ring::{rand, signature};

 #[cfg(test)]
 #[macro_use]
@@ -93,18 +93,18 @@ impl Public {

 	/// Some if the string is a properly encoded SS58Check address.
 	pub fn from_ss58check(s: &str) -> Result {
-		let d = s.from_base58().map_err(|_| PublicError::BadBase58)?;	// failure here would be invalid encoding.
+		let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding.
 		if d.len() != 35 {
 			// Invalid length.
-			return Err(PublicError::BadLength);
+			return Err(PublicError::BadLength)
 		}
 		if d[0] != 42 {
 			// Invalid version.
-			return Err(PublicError::UnknownVersion);
+			return Err(PublicError::UnknownVersion)
 		}
 		if d[33..35] != blake2_rfc::blake2b::blake2b(64, &[], &d[0..33]).as_bytes()[0..2] {
 			// Invalid checksum.
-			return Err(PublicError::InvalidChecksum);
+			return Err(PublicError::InvalidChecksum)
 		}
 		Ok(Self::from_slice(&d[1..33]))
 	}
@@ -187,7 +187,12 @@ impl ::std::fmt::Display for Public {

 impl ::std::fmt::Debug for Public {
 	fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
 		let s = self.to_ss58check();
-		write!(f, "{} ({}...)", ::primitives::hexdisplay::HexDisplay::from(&self.0), &s[0..8])
+		write!(
+			f,
+			"{} ({}...)",
+			::primitives::hexdisplay::HexDisplay::from(&self.0),
+			&s[0..8]
+		)
 	}
 }

@@ -195,8 +200,10 @@ impl Pair {
 	/// Generate new secure (random) key pair, yielding it and the corresponding pkcs#8 bytes.
pub fn generate_with_pkcs8() -> (Self, [u8; PKCS_LEN]) { let rng = rand::SystemRandom::new(); - let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).expect("system randomness is available; qed"); - let pair = Self::from_pkcs8(&pkcs8_bytes).expect("just-generated pkcs#8 data is valid; qed"); + let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng) + .expect("system randomness is available; qed"); + let pair = + Self::from_pkcs8(&pkcs8_bytes).expect("just-generated pkcs#8 data is valid; qed"); (pair, pkcs8_bytes) } @@ -282,15 +289,22 @@ mod test { use super::*; fn _test_primitives_signature_and_local_the_same() { - fn takes_two(_: T, _: T) { } + fn takes_two(_: T, _: T) {} takes_two(Signature::default(), primitives::Signature::default()) } #[test] fn test_vector_should_work() { - let pair: Pair = Pair::from_seed(&hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60")); + let pair: Pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); let public = pair.public(); - assert_eq!(public, Public::from_raw(hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a"))); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); let message = b""; let signature: Signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b").into(); assert!(&pair.sign(&message[..]) == &signature); @@ -312,7 +326,12 @@ mod test { let pair = Pair::from_seed(b"12345678901234567890123456789012"); let public = pair.public(); - assert_eq!(public, Public::from_raw(hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee"))); + assert_eq!( + public, + Public::from_raw(hex!( + "2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee" + )) + ); let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); println!("Correct signature: {}", HexDisplay::from(&signature.0)); diff --git a/substrate/environmental/src/lib.rs b/substrate/environmental/src/lib.rs index 86e1100ecccad..e4e3e94f2b364 100644 --- a/substrate/environmental/src/lib.rs +++ b/substrate/environmental/src/lib.rs @@ -27,17 +27,17 @@ //! // create a place for the global reference to exist. //! environmental!(counter: u32); //! fn stuff() { -//! // do some stuff, accessing the named reference as desired. -//! counter::with(|i| *i += 1); -//! } +//! // do some stuff, accessing the named reference as desired. +//! counter::with(|i| *i += 1); +//! } //! fn main() { -//! // declare a stack variable of the same type as our global declaration. -//! let mut counter_value = 41u32; -//! // call stuff, setting up our `counter` environment as a refrence to our counter_value var. -//! counter::using(&mut counter_value, stuff); -//! println!("The answer is {:?}", counter_value); // will print 42! -//! stuff(); // safe! doesn't do anything. -//! } +//! // declare a stack variable of the same type as our global declaration. +//! let mut counter_value = 41u32; +//! // call stuff, setting up our `counter` environment as a refrence to our counter_value var. +//! counter::using(&mut counter_value, stuff); +//! println!("The answer is {:?}", counter_value); // will print 42! +//! stuff(); // safe! doesn't do anything. +//! } //! 
``` #![cfg_attr(not(feature = "std"), no_std)] @@ -53,7 +53,7 @@ include!("../without_std.rs"); pub fn using R>( global: &'static imp::LocalKey>>, protected: &mut T, - f: F + f: F, ) -> R { // store the `protected` reference as a pointer so we can provide it to logic running within // `f`. @@ -101,7 +101,7 @@ pub fn with R>( // safe because it's only non-zero when it's being called from using, which // is holding on to the underlying reference (and not using it itself) safely. Some(mutator(&mut *ptr)) - } + }, None => None, } }) @@ -127,19 +127,28 @@ pub fn with R>( /// #[macro_use] extern crate environmental; /// environmental!(counter: u32); /// fn main() { -/// let mut counter_value = 41u32; -/// counter::using(&mut counter_value, || { -/// let odd = counter::with(|value| -/// if *value % 2 == 1 { -/// *value += 1; true -/// } else { -/// *value -= 3; false -/// }).unwrap(); // safe because we're inside a counter::using -/// println!("counter was {}", match odd { true => "odd", _ => "even" }); -/// }); +/// let mut counter_value = 41u32; +/// counter::using(&mut counter_value, || { +/// let odd = counter::with(|value| { +/// if *value % 2 == 1 { +/// *value += 1; +/// true +/// } else { +/// *value -= 3; +/// false +/// } +/// }).unwrap(); // safe because we're inside a counter::using +/// println!( +/// "counter was {}", +/// match odd { +/// true => "odd", +/// _ => "even", +/// } +/// ); +/// }); /// -/// println!("The answer is {:?}", counter_value); // 42 -/// } +/// println!("The answer is {:?}", counter_value); // 42 +/// } /// ``` /// /// Roughly the same, but with a trait object: @@ -150,18 +159,18 @@ pub fn with R>( /// trait Increment { fn increment(&mut self); } /// /// impl Increment for i32 { -/// fn increment(&mut self) { *self += 1 } +/// fn increment(&mut self) { *self += 1 } /// } /// /// environmental!(val: Increment + 'static); /// /// fn main() { -/// let mut local = 0i32; -/// val::using(&mut local, || { -/// val::with(|v| for _ in 0..5 { v.increment() }); -/// }); +/// let mut local = 0i32; +/// val::using(&mut local, || { +/// val::with(|v| for _ in 0..5 { v.increment() }); +/// }); /// -/// assert_eq!(local, 5); +/// assert_eq!(local, 5); /// } /// ``` #[macro_export] @@ -226,7 +235,9 @@ mod tests { fn simple_works() { environmental!(counter: u32); - fn stuff() { counter::with(|value| *value += 1); }; + fn stuff() { + counter::with(|value| *value += 1); + }; // declare a stack variable of the same type as our global declaration. let mut local = 41u32; @@ -234,7 +245,7 @@ mod tests { // call stuff, setting up our `counter` environment as a refrence to our local counter var. counter::using(&mut local, stuff); assert_eq!(local, 42); - stuff(); // safe! doesn't do anything. + stuff(); // safe! doesn't do anything. 
} #[test] @@ -258,8 +269,12 @@ mod tests { } impl Foo for i32 { - fn get(&self) -> i32 { *self } - fn set(&mut self, x: i32) { *self = x } + fn get(&self) -> i32 { + *self + } + fn set(&mut self, x: i32) { + *self = x + } } environmental!(foo: Foo + 'static); @@ -306,7 +321,9 @@ mod tests { #[test] fn use_non_static_trait() { - trait Sum { fn sum(&self) -> usize; } + trait Sum { + fn sum(&self) -> usize; + } impl<'a> Sum for &'a [usize] { fn sum(&self) -> usize { self.iter().fold(0, |a, c| a + c) @@ -316,9 +333,7 @@ mod tests { environmental!(sum: trait Sum); let numbers = vec![1, 2, 3, 4, 5]; let mut numbers = &numbers[..]; - let got_sum = sum::using(&mut numbers, || { - sum::with(|x| x.sum()) - }).unwrap(); + let got_sum = sum::using(&mut numbers, || sum::with(|x| x.sum())).unwrap(); assert_eq!(got_sum, 15); } diff --git a/substrate/executor/src/lib.rs b/substrate/executor/src/lib.rs index ddf631ba5f949..2eb85b685a383 100644 --- a/substrate/executor/src/lib.rs +++ b/substrate/executor/src/lib.rs @@ -26,24 +26,25 @@ //! I leave it as is for now as it might be removed before this is ever done. #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "128"] +extern crate ed25519; extern crate substrate_codec as codec; -extern crate substrate_runtime_io as runtime_io; extern crate substrate_primitives as primitives; +extern crate substrate_runtime_io as runtime_io; +extern crate substrate_runtime_version as runtime_version; extern crate substrate_serializer as serializer; extern crate substrate_state_machine as state_machine; -extern crate substrate_runtime_version as runtime_version; -extern crate ed25519; -extern crate serde; -extern crate wasmi; extern crate byteorder; +extern crate parking_lot; extern crate rustc_hex; +extern crate serde; extern crate triehash; -extern crate parking_lot; extern crate twox_hash; -#[macro_use] extern crate log; +extern crate wasmi; +#[macro_use] +extern crate log; #[macro_use] extern crate lazy_static; @@ -65,11 +66,11 @@ mod native_executor; mod sandbox; pub mod error; -pub use wasm_executor::WasmExecutor; -pub use native_executor::{with_native_environment, NativeExecutor, NativeExecutionDispatch}; -pub use state_machine::Externalities; -pub use runtime_version::RuntimeVersion; pub use codec::Slicable; +pub use native_executor::{with_native_environment, NativeExecutionDispatch, NativeExecutor}; +pub use runtime_version::RuntimeVersion; +pub use state_machine::Externalities; +pub use wasm_executor::WasmExecutor; /// Provides runtime information. pub trait RuntimeInfo { @@ -77,9 +78,6 @@ pub trait RuntimeInfo { const NATIVE_VERSION: Option; /// Extract RuntimeVersion of given :code block - fn runtime_version ( - &self, - ext: &mut E, - code: &[u8] - ) -> Option; + fn runtime_version(&self, ext: &mut E, code: &[u8]) + -> Option; } diff --git a/substrate/executor/src/native_executor.rs b/substrate/executor/src/native_executor.rs index 0191c41ff5f59..b0154cface649 100644 --- a/substrate/executor/src/native_executor.rs +++ b/substrate/executor/src/native_executor.rs @@ -14,16 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use codec::Slicable; use error::{Error, ErrorKind, Result}; -use state_machine::{CodeExecutor, Externalities}; -use wasm_executor::WasmExecutor; -use wasmi::Module as WasmModule; +use parking_lot::{Mutex, MutexGuard}; use runtime_version::RuntimeVersion; +use state_machine::{CodeExecutor, Externalities}; use std::collections::HashMap; -use codec::Slicable; -use twox_hash::XxHash; use std::hash::Hasher; -use parking_lot::{Mutex, MutexGuard}; +use twox_hash::XxHash; +use wasm_executor::WasmExecutor; +use wasmi::Module as WasmModule; use RuntimeInfo; // For the internal Runtime Cache: @@ -31,7 +31,7 @@ use RuntimeInfo; enum RunWith { InvalidVersion(WasmModule), NativeRuntime(RuntimeVersion), - WasmRuntime(RuntimeVersion, WasmModule) + WasmRuntime(RuntimeVersion, WasmModule), } type CacheType = HashMap; @@ -57,29 +57,31 @@ fn fetch_cached_runtime_version<'a, E: Externalities>( cache: &'a mut MutexGuard, ext: &mut E, code: &[u8], - ref_version: RuntimeVersion + ref_version: RuntimeVersion, ) -> &'a RunWith { - cache.entry(gen_cache_key(code)) - .or_insert_with(|| { - let module = WasmModule::from_buffer(code).expect("all modules compiled with rustc are valid wasm code; qed"); - let version = WasmExecutor.call_in_wasm_module(ext, &module, "version", &[]).ok() - .and_then(|v| RuntimeVersion::decode(&mut v.as_slice())); - - - if let Some(v) = version { - if ref_version.can_call_with(&v) { - RunWith::NativeRuntime(v) - } else { - RunWith::WasmRuntime(v, module) - } + cache.entry(gen_cache_key(code)).or_insert_with(|| { + let module = WasmModule::from_buffer(code) + .expect("all modules compiled with rustc are valid wasm code; qed"); + let version = WasmExecutor + .call_in_wasm_module(ext, &module, "version", &[]) + .ok() + .and_then(|v| RuntimeVersion::decode(&mut v.as_slice())); + + if let Some(v) = version { + if ref_version.can_call_with(&v) { + RunWith::NativeRuntime(v) } else { - RunWith::InvalidVersion(module) + RunWith::WasmRuntime(v, module) } + } else { + RunWith::InvalidVersion(module) + } }) } fn safe_call(f: F) -> Result - where F: ::std::panic::UnwindSafe + FnOnce() -> U +where + F: ::std::panic::UnwindSafe + FnOnce() -> U, { ::std::panic::catch_unwind(f).map_err(|_| ErrorKind::Runtime.into()) } @@ -88,7 +90,8 @@ fn safe_call(f: F) -> Result /// /// If the inner closure panics, it will be caught and return an error. 
pub fn with_native_environment(ext: &mut Externalities, f: F) -> Result - where F: ::std::panic::UnwindSafe + FnOnce() -> U +where + F: ::std::panic::UnwindSafe + FnOnce() -> U, { ::runtime_io::with_externalities(ext, move || safe_call(f)) } @@ -120,7 +123,8 @@ impl NativeExecutor { // FIXME: set this entry at compile time RUNTIMES_CACHE.lock().insert( gen_cache_key(D::native_equivalent()), - RunWith::NativeRuntime(D::VERSION)); + RunWith::NativeRuntime(D::VERSION), + ); NativeExecutor { _dummy: Default::default(), @@ -147,7 +151,7 @@ impl RuntimeInfo for NativeExecutor let mut c = RUNTIMES_CACHE.lock(); match fetch_cached_runtime_version(&mut c, ext, code, D::VERSION) { RunWith::NativeRuntime(v) | RunWith::WasmRuntime(v, _) => Some(v.clone()), - RunWith::InvalidVersion(_m) => None + RunWith::InvalidVersion(_m) => None, } } } @@ -165,7 +169,8 @@ impl CodeExecutor for NativeExecutor D::dispatch(ext, method, data), - RunWith::WasmRuntime(_, m) | RunWith::InvalidVersion(m) => WasmExecutor.call_in_wasm_module(ext, m, method, data) + RunWith::WasmRuntime(_, m) | RunWith::InvalidVersion(m) => + WasmExecutor.call_in_wasm_module(ext, m, method, data), } } } diff --git a/substrate/executor/src/sandbox.rs b/substrate/executor/src/sandbox.rs index 51cbb85fb5a03..f72e0d534ffbd 100644 --- a/substrate/executor/src/sandbox.rs +++ b/substrate/executor/src/sandbox.rs @@ -18,16 +18,16 @@ //! This module implements sandboxing support in the runtime. -use std::collections::HashMap; -use std::rc::Rc; use codec::Slicable; use primitives::sandbox as sandbox_primitives; +use std::collections::HashMap; +use std::rc::Rc; use wasm_utils::DummyUserError; use wasmi; use wasmi::memory_units::Pages; use wasmi::{ Externals, FuncRef, ImportResolver, MemoryInstance, MemoryRef, Module, ModuleInstance, - ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind + ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, }; /// Index of a function inside the supervisor. @@ -99,7 +99,8 @@ impl ImportResolver for Imports { module_name.as_bytes().to_vec(), field_name.as_bytes().to_vec(), ); - let mem = self.memories_map + let mem = self + .memories_map .get(&key) .ok_or_else(|| { ::wasmi::Error::Instantiation(format!( @@ -209,18 +210,20 @@ impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals< let index = GuestFuncIndex(index); let dispatch_thunk = self.sandbox_instance.dispatch_thunk.clone(); - let func_idx = self.sandbox_instance + let func_idx = self + .sandbox_instance .guest_to_supervisor_mapping .func_by_guest_index(index) .expect( "`invoke_index` is called with indexes registered via `FuncInstance::alloc_host`; `FuncInstance::alloc_host` is called with indexes that was obtained from `guest_to_supervisor_mapping`; `func_by_guest_index` called with `index` can't return `None`; - qed" + qed", ); // Serialize arguments into a byte vector. - let invoke_args_data: Vec = args.as_ref() + let invoke_args_data: Vec = args + .as_ref() .iter() .cloned() .map(sandbox_primitives::TypedValue::from) @@ -231,7 +234,8 @@ impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals< // Move serialized arguments inside the memory and invoke dispatch thunk and // then free allocated memory. 
- let invoke_args_ptr = self.supervisor_externals + let invoke_args_ptr = self + .supervisor_externals .allocate(invoke_args_data.len() as u32); self.supervisor_externals .write_memory(invoke_args_ptr, &invoke_args_data)?; @@ -256,11 +260,12 @@ impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals< let ptr = (v as u64 >> 32) as u32; let len = (v & 0xFFFFFFFF) as u32; (ptr, len) - } + }, _ => return Err(trap()), }; - let serialized_result_val = self.supervisor_externals + let serialized_result_val = self + .supervisor_externals .read_memory(serialized_result_val_ptr, serialized_result_val_len)?; self.supervisor_externals .deallocate(serialized_result_val_ptr); @@ -323,15 +328,10 @@ impl SandboxInstance { supervisor_externals: &mut FE, state: u32, ) -> Result, wasmi::Error> { - with_guest_externals( - supervisor_externals, - self, - state, - |guest_externals| { - self.instance - .invoke_export(export_name, args, guest_externals) - }, - ) + with_guest_externals(supervisor_externals, self, state, |guest_externals| { + self.instance + .invoke_export(export_name, args, guest_externals) + }) } } @@ -339,7 +339,8 @@ fn decode_environment_definition( raw_env_def: &[u8], memories: &[Option], ) -> Result<(Imports, GuestToSupervisorFunctionMapping), DummyUserError> { - let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..]).ok_or_else(|| DummyUserError)?; + let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..]) + .ok_or_else(|| DummyUserError)?; let mut func_map = HashMap::new(); let mut memories_map = HashMap::new(); @@ -354,7 +355,7 @@ fn decode_environment_definition( let externals_idx = guest_to_supervisor_mapping.define(SupervisorFuncIndex(func_idx as usize)); func_map.insert((module, field), externals_idx); - } + }, sandbox_primitives::ExternEntity::Memory(memory_idx) => { let memory_ref = memories .get(memory_idx as usize) @@ -362,7 +363,7 @@ fn decode_environment_definition( .ok_or_else(|| DummyUserError)? .ok_or_else(|| DummyUserError)?; memories_map.insert((module, field), memory_ref); - } + }, } } @@ -499,7 +500,7 @@ impl Store { /// Returns `Err` if `memory_idx` isn't a valid index of an memory. pub fn memory_teardown(&mut self, memory_idx: u32) -> Result<(), DummyUserError> { if memory_idx as usize >= self.memories.len() { - return Err(DummyUserError); + return Err(DummyUserError) } self.memories[memory_idx as usize] = None; Ok(()) @@ -508,7 +509,7 @@ impl Store { /// Teardown the instance at the specified index. 
pub fn instance_teardown(&mut self, instance_idx: u32) -> Result<(), DummyUserError> { if instance_idx as usize >= self.instances.len() { - return Err(DummyUserError); + return Err(DummyUserError) } self.instances[instance_idx as usize] = None; Ok(()) @@ -523,16 +524,19 @@ impl Store { #[cfg(test)] mod tests { - use wasm_executor::WasmExecutor; - use state_machine::{TestExternalities, CodeExecutor}; + use state_machine::{CodeExecutor, TestExternalities}; use wabt; + use wasm_executor::WasmExecutor; #[test] fn sandbox_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -551,10 +555,13 @@ mod tests { call $assert ) ) - "#).unwrap(); + "#, + ).unwrap(); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_sandbox", &code).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_sandbox", &code) + .unwrap(), vec![1], ); } @@ -562,9 +569,12 @@ mod tests { #[test] fn sandbox_trap() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) (func (export "call") @@ -572,10 +582,13 @@ mod tests { call $assert ) ) - "#).unwrap(); + "#, + ).unwrap(); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_sandbox", &code).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_sandbox", &code) + .unwrap(), vec![0], ); } @@ -583,9 +596,12 @@ mod tests { #[test] fn start_called() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -610,10 +626,13 @@ mod tests { call $assert ) ) - "#).unwrap(); + "#, + ).unwrap(); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_sandbox", &code).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_sandbox", &code) + .unwrap(), vec![1], ); } @@ -621,9 +640,12 @@ mod tests { #[test] fn invoke_args() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) @@ -644,10 +666,13 @@ mod tests { ) ) ) - "#).unwrap(); + "#, + ).unwrap(); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_sandbox_args", &code).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_sandbox_args", &code) + 
.unwrap(), vec![1], ); } @@ -655,9 +680,12 @@ mod tests { #[test] fn return_val() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -666,10 +694,13 @@ mod tests { ) ) ) - "#).unwrap(); + "#, + ).unwrap(); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_sandbox_return_val", &code).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_sandbox_return_val", &code) + .unwrap(), vec![1], ); } diff --git a/substrate/executor/src/wasm_executor.rs b/substrate/executor/src/wasm_executor.rs index f48a3a93bc042..1175b3d5a0355 100644 --- a/substrate/executor/src/wasm_executor.rs +++ b/substrate/executor/src/wasm_executor.rs @@ -16,21 +16,19 @@ //! Rust implementation of Substrate contracts. -use std::cmp::Ordering; -use std::collections::HashMap; -use wasmi::{ - Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, -}; -use wasmi::RuntimeValue::{I32, I64}; -use wasmi::memory_units::{Pages, Bytes}; -use state_machine::{Externalities, CodeExecutor}; use error::{Error, ErrorKind, Result}; -use wasm_utils::{DummyUserError}; -use primitives::{blake2_256, twox_128, twox_256}; use primitives::hexdisplay::HexDisplay; use primitives::sandbox as sandbox_primitives; -use triehash::ordered_trie_root; +use primitives::{blake2_256, twox_128, twox_256}; use sandbox; +use state_machine::{CodeExecutor, Externalities}; +use std::cmp::Ordering; +use std::collections::HashMap; +use triehash::ordered_trie_root; +use wasm_utils::DummyUserError; +use wasmi::memory_units::{Bytes, Pages}; +use wasmi::RuntimeValue::{I32, I64}; +use wasmi::{ImportsBuilder, MemoryInstance, MemoryRef, Module, ModuleInstance, TableRef}; struct Heap { end: u32, @@ -59,8 +57,7 @@ impl Heap { self.end += size; r } - fn deallocate(&mut self, _offset: u32) { - } + fn deallocate(&mut self, _offset: u32) {} } struct FunctionExecutor<'e, E: Externalities + 'e> { @@ -102,7 +99,9 @@ impl<'e, E: Externalities> sandbox::SandboxCapabilities for FunctionExecutor<'e, self.memory.set(ptr, data).map_err(|_| DummyUserError) } fn read_memory(&self, ptr: u32, len: u32) -> ::std::result::Result, DummyUserError> { - self.memory.get(ptr, len as usize).map_err(|_| DummyUserError) + self.memory + .get(ptr, len as usize) + .map_err(|_| DummyUserError) } } @@ -112,7 +111,7 @@ trait WritePrimitive { impl WritePrimitive for MemoryInstance { fn write_primitive(&self, offset: u32, t: u32) -> ::std::result::Result<(), DummyUserError> { - use byteorder::{LittleEndian, ByteOrder}; + use byteorder::{ByteOrder, LittleEndian}; let mut r = [0u8; 4]; LittleEndian::write_u32(&mut r, t); self.set(offset, &r).map_err(|_| DummyUserError) @@ -125,8 +124,10 @@ trait ReadPrimitive { impl ReadPrimitive for MemoryInstance { fn read_primitive(&self, offset: u32) -> ::std::result::Result { - use byteorder::{LittleEndian, ByteOrder}; - Ok(LittleEndian::read_u32(&self.get(offset, 4).map_err(|_| DummyUserError)?)) + use byteorder::{ByteOrder, LittleEndian}; + Ok(LittleEndian::read_u32( + &self.get(offset, 4).map_err(|_| DummyUserError)?, + )) } } @@ -142,7 +143,7 @@ fn ascii_format(asciish: &[u8]) -> String { latch = true; } r.push_str(&format!("{:02x}", *c)); - } + }, } } r @@ -469,7 +470,6 @@ 
impl_function_executor!(this: FunctionExecutor<'e, E>, pub struct WasmExecutor; impl WasmExecutor { - /// Call a given method in the given wasm-module runtime. pub fn call_in_wasm_module( &self, @@ -481,8 +481,7 @@ impl WasmExecutor { // start module instantiation. Don't run 'start' function yet. let intermediate_instance = ModuleInstance::new( module, - &ImportsBuilder::new() - .with_resolver("env", FunctionExecutor::::resolver()) + &ImportsBuilder::new().with_resolver("env", FunctionExecutor::::resolver()), )?; // extract a reference to a linear memory, optional reference to a table @@ -507,21 +506,18 @@ impl WasmExecutor { let size = data.len() as u32; let offset = fec.heap.allocate(size); - memory.set(offset, &data).expect("heap always gives a sensible offset to write"); - - let returned = instance.invoke_export( - method, - &[ - I32(offset as i32), - I32(size as i32) - ], - &mut fec - )?; + memory + .set(offset, &data) + .expect("heap always gives a sensible offset to write"); + + let returned = + instance.invoke_export(method, &[I32(offset as i32), I32(size as i32)], &mut fec)?; if let Some(I64(r)) = returned { let offset = r as u32; let length = (r >> 32) as u32 as usize; - memory.get(offset, length) + memory + .get(offset, length) .map_err(|_| ErrorKind::Runtime.into()) } else { Err(ErrorKind::InvalidReturn.into()) @@ -547,8 +543,8 @@ impl CodeExecutor for WasmExecutor { #[cfg(test)] mod tests { use super::*; - use rustc_hex::FromHex; use codec::Slicable; + use rustc_hex::FromHex; use state_machine::TestExternalities; // TODO: move into own crate. @@ -561,16 +557,22 @@ mod tests { #[test] fn returning_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); - let output = WasmExecutor.call(&mut ext, &test_code[..], "test_empty_return", &[]).unwrap(); + let output = WasmExecutor + .call(&mut ext, &test_code[..], "test_empty_return", &[]) + .unwrap(); assert_eq!(output, vec![0u8; 0]); } #[test] fn panicking_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); let output = WasmExecutor.call(&mut ext, &test_code[..], "test_panic", &[]); assert!(output.is_err()); @@ -583,9 +585,13 @@ mod tests { fn storage_should_work() { let mut ext = TestExternalities::default(); ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); - let output = WasmExecutor.call(&mut ext, &test_code[..], "test_data_in", b"Hello world").unwrap(); + let output = WasmExecutor + .call(&mut ext, &test_code[..], "test_data_in", b"Hello world") + .unwrap(); assert_eq!(output, b"all ok!".to_vec()); @@ -605,10 +611,14 @@ mod tests { ext.set_storage(b"aba".to_vec(), b"3".to_vec()); ext.set_storage(b"abb".to_vec(), b"4".to_vec()); ext.set_storage(b"bbb".to_vec(), b"5".to_vec()); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + 
"../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); // This will clear all entries which prefix is "ab". - let output = WasmExecutor.call(&mut ext, &test_code[..], "test_clear_prefix", b"ab").unwrap(); + let output = WasmExecutor + .call(&mut ext, &test_code[..], "test_clear_prefix", b"ab") + .unwrap(); assert_eq!(output, b"all ok!".to_vec()); @@ -623,13 +633,19 @@ mod tests { #[test] fn blake2_256_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_blake2_256", &[]).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_blake2_256", &[]) + .unwrap(), blake2_256(&b""[..]).encode() ); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_blake2_256", b"Hello world!").unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_blake2_256", b"Hello world!") + .unwrap(), blake2_256(&b"Hello world!"[..]).encode() ); } @@ -637,27 +653,41 @@ mod tests { #[test] fn twox_256_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_twox_256", &[]).unwrap(), - FromHex::from_hex("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a").unwrap() + WasmExecutor + .call(&mut ext, &test_code[..], "test_twox_256", &[]) + .unwrap(), + FromHex::from_hex("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") + .unwrap() ); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_twox_256", b"Hello world!").unwrap(), - FromHex::from_hex("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74").unwrap() + WasmExecutor + .call(&mut ext, &test_code[..], "test_twox_256", b"Hello world!") + .unwrap(), + FromHex::from_hex("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") + .unwrap() ); } #[test] fn twox_128_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_twox_128", &[]).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_twox_128", &[]) + .unwrap(), FromHex::from_hex("99e9d85137db46ef4bbea33613baafd5").unwrap() ); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_twox_128", b"Hello world!").unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_twox_128", b"Hello world!") + .unwrap(), FromHex::from_hex("b27dfd7f223f177f2a13647b533599af").unwrap() ); } @@ -665,7 +695,9 @@ mod tests { #[test] fn ed25519_verify_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); let key = ::ed25519::Pair::from_seed(&blake2_256(b"test")); let sig = key.sign(b"all ok!"); let mut calldata = 
vec![]; @@ -673,7 +705,9 @@ mod tests { calldata.extend_from_slice(sig.as_ref()); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_ed25519_verify", &calldata).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_ed25519_verify", &calldata) + .unwrap(), vec![1] ); @@ -683,7 +717,9 @@ mod tests { calldata.extend_from_slice(other_sig.as_ref()); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_ed25519_verify", &calldata).unwrap(), + WasmExecutor + .call(&mut ext, &test_code[..], "test_ed25519_verify", &calldata) + .unwrap(), vec![0] ); } @@ -691,12 +727,17 @@ mod tests { #[test] fn enumerated_trie_root_should_work() { let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); assert_eq!( - WasmExecutor.call(&mut ext, &test_code[..], "test_enumerated_trie_root", &[]).unwrap(), - ordered_trie_root(vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]).0.encode() + WasmExecutor + .call(&mut ext, &test_code[..], "test_enumerated_trie_root", &[]) + .unwrap(), + ordered_trie_root(vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]) + .0 + .encode() ); } - } diff --git a/substrate/executor/src/wasm_utils.rs b/substrate/executor/src/wasm_utils.rs index 8bd9f98feb375..05dcbcb011e22 100644 --- a/substrate/executor/src/wasm_utils.rs +++ b/substrate/executor/src/wasm_utils.rs @@ -16,9 +16,9 @@ //! Rust implementation of Substrate contracts. -use wasmi::{ValueType, RuntimeValue, HostError}; -use wasmi::nan_preserving_float::{F32, F64}; use std::fmt; +use wasmi::nan_preserving_float::{F32, F64}; +use wasmi::{HostError, RuntimeValue, ValueType}; #[derive(Debug)] pub struct DummyUserError; @@ -27,20 +27,83 @@ impl fmt::Display for DummyUserError { write!(f, "DummyUserError") } } -impl HostError for DummyUserError { -} +impl HostError for DummyUserError {} -pub trait ConvertibleToWasm { const VALUE_TYPE: ValueType; type NativeType; fn to_runtime_value(self) -> RuntimeValue; } -impl ConvertibleToWasm for i32 { type NativeType = i32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self) } } -impl ConvertibleToWasm for u32 { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as i32) } } -impl ConvertibleToWasm for i64 { type NativeType = i64; const VALUE_TYPE: ValueType = ValueType::I64; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I64(self) } } -impl ConvertibleToWasm for u64 { type NativeType = u64; const VALUE_TYPE: ValueType = ValueType::I64; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I64(self as i64) } } -impl ConvertibleToWasm for F32 { type NativeType = F32; const VALUE_TYPE: ValueType = ValueType::F32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::F32(self) } } -impl ConvertibleToWasm for F64 { type NativeType = F64; const VALUE_TYPE: ValueType = ValueType::F64; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::F64(self) } } -impl ConvertibleToWasm for isize { type NativeType = i32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as i32) } } -impl ConvertibleToWasm for usize { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { 
RuntimeValue::I32(self as u32 as i32) } } -impl ConvertibleToWasm for *const T { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as isize as i32) } } -impl ConvertibleToWasm for *mut T { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as isize as i32) } } +pub trait ConvertibleToWasm { + const VALUE_TYPE: ValueType; + type NativeType; + fn to_runtime_value(self) -> RuntimeValue; +} +impl ConvertibleToWasm for i32 { + type NativeType = i32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self) + } +} +impl ConvertibleToWasm for u32 { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as i32) + } +} +impl ConvertibleToWasm for i64 { + type NativeType = i64; + const VALUE_TYPE: ValueType = ValueType::I64; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I64(self) + } +} +impl ConvertibleToWasm for u64 { + type NativeType = u64; + const VALUE_TYPE: ValueType = ValueType::I64; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I64(self as i64) + } +} +impl ConvertibleToWasm for F32 { + type NativeType = F32; + const VALUE_TYPE: ValueType = ValueType::F32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::F32(self) + } +} +impl ConvertibleToWasm for F64 { + type NativeType = F64; + const VALUE_TYPE: ValueType = ValueType::F64; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::F64(self) + } +} +impl ConvertibleToWasm for isize { + type NativeType = i32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as i32) + } +} +impl ConvertibleToWasm for usize { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as u32 as i32) + } +} +impl ConvertibleToWasm for *const T { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as isize as i32) + } +} +impl ConvertibleToWasm for *mut T { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as isize as i32) + } +} #[macro_export] macro_rules! convert_args { @@ -115,7 +178,7 @@ macro_rules! unmarshall_args { #[inline(always)] pub fn constrain_closure(f: F) -> F where - F: FnOnce() -> Result + F: FnOnce() -> Result, { f } diff --git a/substrate/extrinsic-pool/src/api.rs b/substrate/extrinsic-pool/src/api.rs index 31ec0f5346daa..e7c262910791f 100644 --- a/substrate/extrinsic-pool/src/api.rs +++ b/substrate/extrinsic-pool/src/api.rs @@ -25,11 +25,15 @@ pub trait Error: ::std::error::Error + Send + Sized { /// This implementation is optional and used only to /// provide more descriptive error messages for end users /// of RPC API. - fn into_pool_error(self) -> Result { Err(self) } + fn into_pool_error(self) -> Result { + Err(self) + } } impl Error for txpool::Error { - fn into_pool_error(self) -> Result { Ok(self) } + fn into_pool_error(self) -> Result { + Ok(self) + } } /// Extrinsic pool. 
diff --git a/substrate/extrinsic-pool/src/listener.rs b/substrate/extrinsic-pool/src/listener.rs index 6bf110e55f977..4c91097db38ba 100644 --- a/substrate/extrinsic-pool/src/listener.rs +++ b/substrate/extrinsic-pool/src/listener.rs @@ -14,11 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::{ - sync::Arc, - fmt, - collections::HashMap, -}; +use std::{collections::HashMap, fmt, sync::Arc}; use txpool; use watcher; @@ -26,15 +22,21 @@ use watcher; /// Extrinsic pool default listener. #[derive(Default)] pub struct Listener { - watchers: HashMap> + watchers: HashMap>, } impl Listener { /// Creates a new watcher for given verified extrinsic. /// /// The watcher can be used to subscribe to lifecycle events of that extrinsic. - pub fn create_watcher>(&mut self, xt: Arc) -> watcher::Watcher { - let sender = self.watchers.entry(*xt.hash()).or_insert_with(watcher::Sender::default); + pub fn create_watcher>( + &mut self, + xt: Arc, + ) -> watcher::Watcher { + let sender = self + .watchers + .entry(*xt.hash()) + .or_insert_with(watcher::Sender::default); sender.new_watcher() } @@ -43,7 +45,10 @@ impl Li self.fire(hash, |watcher| watcher.broadcast(peers)); } - fn fire(&mut self, hash: &H, fun: F) where F: FnOnce(&mut watcher::Sender) { + fn fire(&mut self, hash: &H, fun: F) + where + F: FnOnce(&mut watcher::Sender), + { let clean = if let Some(h) = self.watchers.get_mut(hash) { fun(h); h.is_done() @@ -57,9 +62,10 @@ impl Li } } -impl txpool::Listener for Listener where +impl txpool::Listener for Listener +where H: ::std::hash::Hash + Eq + Copy + fmt::Debug + fmt::LowerHex + Default, - T: txpool::VerifiedTransaction, + T: txpool::VerifiedTransaction, { fn added(&mut self, tx: &Arc, old: Option<&Arc>) { if let Some(old) = old { diff --git a/substrate/extrinsic-pool/src/pool.rs b/substrate/extrinsic-pool/src/pool.rs index ca8df7c6acad3..c6ab5add239c3 100644 --- a/substrate/extrinsic-pool/src/pool.rs +++ b/substrate/extrinsic-pool/src/pool.rs @@ -22,31 +22,29 @@ use std::{ }; use futures::sync::mpsc; -use parking_lot::{RwLock, Mutex}; +use parking_lot::{Mutex, RwLock}; use txpool; use listener::Listener; use watcher::Watcher; /// Extrinsics pool. -pub struct Pool where +pub struct Pool +where Hash: ::std::hash::Hash + Eq + Copy + fmt::Debug + fmt::LowerHex, S: txpool::Scoring, - VEx: txpool::VerifiedTransaction, + VEx: txpool::VerifiedTransaction, { _error: Mutex>, - pool: RwLock, - >>, + pool: RwLock>>, import_notification_sinks: Mutex>>>, } -impl Pool where +impl Pool +where Hash: ::std::hash::Hash + Eq + Copy + fmt::Debug + fmt::LowerHex + Default, S: txpool::Scoring, - VEx: txpool::VerifiedTransaction, + VEx: txpool::VerifiedTransaction, E: From, { /// Create a new transaction pool. @@ -63,7 +61,8 @@ impl Pool where let result = self.pool.write().import(xt)?; let weak = Arc::downgrade(&result); - self.import_notification_sinks.lock() + self.import_notification_sinks + .lock() .retain(|sink| sink.unbounded_send(weak.clone()).is_ok()); Ok(result) @@ -84,26 +83,28 @@ impl Pool where } /// Imports a bunch of unverified extrinsics to the pool - pub fn submit(&self, verifier: V, xts: T) -> Result>, E> where - V: txpool::Verifier, + pub fn submit(&self, verifier: V, xts: T) -> Result>, E> + where + V: txpool::Verifier, E: From, - T: IntoIterator + T: IntoIterator, { - xts - .into_iter() + xts.into_iter() .map(|xt| verifier.verify_transaction(xt)) - .map(|xt| { - Ok(self.pool.write().import(xt?)?) 
- }) + .map(|xt| Ok(self.pool.write().import(xt?)?)) .collect() } /// Import a single extrinsic and starts to watch their progress in the pool. - pub fn submit_and_watch(&self, verifier: V, xt: Ex) -> Result, E> where - V: txpool::Verifier, + pub fn submit_and_watch(&self, verifier: V, xt: Ex) -> Result, E> + where + V: txpool::Verifier, E: From, { - let xt = self.submit(verifier, vec![xt])?.pop().expect("One extrinsic passed; one result returned; qed"); + let xt = self + .submit(verifier, vec![xt])? + .pop() + .expect("One extrinsic passed; one result returned; qed"); Ok(self.pool.write().listener_mut().create_watcher(xt)) } @@ -118,7 +119,12 @@ impl Pool where } /// Cull transactions from the queue. - pub fn cull(&self, senders: Option<&[::Sender]>, ready: R) -> usize where + pub fn cull( + &self, + senders: Option<&[::Sender]>, + ready: R, + ) -> usize + where R: txpool::Ready, { self.pool.write().cull(senders, ready) @@ -137,14 +143,17 @@ impl Pool where /// Removes all transactions from given sender pub fn remove_sender(&self, sender: VEx::Sender) -> Vec> { let mut pool = self.pool.write(); - let pending = pool.pending_from_sender(|_: &VEx| txpool::Readiness::Ready, &sender).collect(); + let pending = pool + .pending_from_sender(|_: &VEx| txpool::Readiness::Ready, &sender) + .collect(); // remove all transactions from this sender pool.cull(Some(&[sender]), |_: &VEx| txpool::Readiness::Stale); pending } /// Retrieve the pending set. Be careful to not leak the pool `ReadGuard` to prevent deadlocks. - pub fn pending(&self, ready: R, f: F) -> T where + pub fn pending(&self, ready: R, f: F) -> T + where R: txpool::Ready, F: FnOnce(txpool::PendingIterator>) -> T, { diff --git a/substrate/extrinsic-pool/src/watcher.rs b/substrate/extrinsic-pool/src/watcher.rs index e4d8b9921f8ff..cedfb8acf449d 100644 --- a/substrate/extrinsic-pool/src/watcher.rs +++ b/substrate/extrinsic-pool/src/watcher.rs @@ -48,9 +48,7 @@ impl Sender { pub fn new_watcher(&mut self) -> Watcher { let (tx, receiver) = mpsc::unbounded(); self.receivers.push(tx); - Watcher { - receiver, - } + Watcher { receiver } } /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. @@ -80,6 +78,7 @@ impl Sender { } fn send(&mut self, status: Status) { - self.receivers.retain(|sender| sender.unbounded_send(status.clone()).is_ok()) + self.receivers + .retain(|sender| sender.unbounded_send(status.clone()).is_ok()) } } diff --git a/substrate/keyring/src/lib.rs b/substrate/keyring/src/lib.rs index 9749ef13449cf..8cfeab5fdee65 100644 --- a/substrate/keyring/src/lib.rs +++ b/substrate/keyring/src/lib.rs @@ -16,13 +16,15 @@ //! Support code for the runtime. -#[macro_use] extern crate hex_literal; -#[macro_use] extern crate lazy_static; +#[macro_use] +extern crate hex_literal; +#[macro_use] +extern crate lazy_static; pub extern crate ed25519; +use ed25519::{Pair, Public, Signature}; use std::collections::HashMap; use std::ops::Deref; -use ed25519::{Pair, Public, Signature}; /// Set of test accounts. #[derive(Clone, Copy, PartialEq, Eq, Hash)] @@ -78,7 +80,9 @@ impl Keyring { Keyring::Eve => Pair::from_seed(b"Eve "), Keyring::Ferdie => Pair::from_seed(b"Ferdie "), Keyring::One => Pair::from_seed(b"12345678901234567890123456789012"), - Keyring::Two => Pair::from_seed(&hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60")), + Keyring::Two => Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )), } } } @@ -109,11 +113,15 @@ lazy_static! 
{ Keyring::Ferdie, Keyring::One, Keyring::Two, - ].iter().map(|&i| (i, i.pair())).collect() + ].iter() + .map(|&i| (i, i.pair())) + .collect() }; - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() + PRIVATE_KEYS + .iter() + .map(|(&name, pair)| (name, pair.public())) + .collect() }; } @@ -167,8 +175,20 @@ mod tests { #[test] fn should_work() { - assert!(Keyring::Alice.sign(b"I am Alice!").verify(b"I am Alice!", Keyring::Alice)); - assert!(!Keyring::Alice.sign(b"I am Alice!").verify(b"I am Bob!", Keyring::Alice)); - assert!(!Keyring::Alice.sign(b"I am Alice!").verify(b"I am Alice!", Keyring::Bob)); + assert!( + Keyring::Alice + .sign(b"I am Alice!") + .verify(b"I am Alice!", Keyring::Alice) + ); + assert!( + !Keyring::Alice + .sign(b"I am Alice!") + .verify(b"I am Bob!", Keyring::Alice) + ); + assert!( + !Keyring::Alice + .sign(b"I am Alice!") + .verify(b"I am Alice!", Keyring::Bob) + ); } } diff --git a/substrate/keystore/src/lib.rs b/substrate/keystore/src/lib.rs index 97210b29ef015..235f08986856e 100644 --- a/substrate/keystore/src/lib.rs +++ b/substrate/keystore/src/lib.rs @@ -16,13 +16,13 @@ //! Keystore (and session key management) for ed25519 based chains like Polkadot. -extern crate ethcore_crypto as crypto; -extern crate subtle; extern crate ed25519; +extern crate ethcore_crypto as crypto; +extern crate hex; extern crate rand; -extern crate serde_json; extern crate serde; -extern crate hex; +extern crate serde_json; +extern crate subtle; #[macro_use] extern crate serde_derive; @@ -34,9 +34,9 @@ extern crate error_chain; extern crate tempdir; use std::collections::HashMap; -use std::path::PathBuf; use std::fs::{self, File}; use std::io::{self, Write}; +use std::path::PathBuf; use crypto::Keccak256; use ed25519::{Pair, Public, PKCS_LEN}; @@ -75,16 +75,18 @@ struct EncryptedKey { impl EncryptedKey { fn encrypt(plain: &[u8; PKCS_LEN], password: &str, iterations: u32) -> Self { - use rand::{Rng, OsRng}; + use rand::{OsRng, Rng}; - let mut rng = OsRng::new().expect("OS Randomness available on all supported platforms; qed"); + let mut rng = + OsRng::new().expect("OS Randomness available on all supported platforms; qed"); let salt: [u8; 32] = rng.gen(); let iv: [u8; 16] = rng.gen(); // two parts of derived key // DK = [ DK[0..15] DK[16..31] ] = [derived_left_bits, derived_right_bits] - let (derived_left_bits, derived_right_bits) = crypto::derive_key_iterations(password.as_bytes(), &salt, iterations); + let (derived_left_bits, derived_right_bits) = + crypto::derive_key_iterations(password.as_bytes(), &salt, iterations); // preallocated (on-stack in case of `Secret`) buffer to hold cipher // length = length(plain) as we are using CTR-approach @@ -113,12 +115,16 @@ impl EncryptedKey { let mac = crypto::derive_mac(&derived_right_bits, &self.ciphertext).keccak256(); if subtle::slices_equal(&mac[..], &self.mac[..]) != 1 { - return Err(ErrorKind::InvalidPassword.into()); + return Err(ErrorKind::InvalidPassword.into()) } let mut plain = [0; PKCS_LEN]; - crypto::aes::decrypt_128_ctr(&derived_left_bits, &self.iv, &self.ciphertext, &mut plain[..]) - .expect("input lengths of key and iv are both 16; qed"); + crypto::aes::decrypt_128_ctr( + &derived_left_bits, + &self.iv, + &self.ciphertext, + &mut plain[..], + ).expect("input lengths of key and iv are both 16; qed"); Ok(plain) } } @@ -135,7 +141,10 @@ impl Store { /// Create a new store at the given path. 
pub fn open(path: PathBuf) -> Result { fs::create_dir_all(&path)?; - Ok(Store { path, additional: HashMap::new() }) + Ok(Store { + path, + additional: HashMap::new(), + }) } /// Generate a new key, placing it into the store. @@ -167,7 +176,7 @@ impl Store { pub fn load(&self, public: &Public, password: &str) -> Result { if let Some(ref seed) = self.additional.get(public) { let pair = Pair::from_seed(seed); - return Ok(pair); + return Ok(pair) } let path = self.key_file_path(public); let file = File::open(path)?; @@ -187,7 +196,9 @@ impl Store { // skip directories and non-unicode file names (hex is unicode) if let Some(name) = path.file_name().and_then(|n| n.to_str()) { - if name.len() != 64 { continue } + if name.len() != 64 { + continue + } match hex::decode(name) { Ok(ref hex) if hex.len() == 32 => { @@ -195,7 +206,7 @@ impl Store { buf.copy_from_slice(&hex[..]); public_keys.push(Public(buf)); - } + }, _ => continue, } } diff --git a/substrate/misbehavior-check/src/lib.rs b/substrate/misbehavior-check/src/lib.rs index 634fd5f2b63c9..272f3b2dd9fa7 100644 --- a/substrate/misbehavior-check/src/lib.rs +++ b/substrate/misbehavior-check/src/lib.rs @@ -37,7 +37,7 @@ use runtime_primitives::bft::{Action, Message, MisbehaviorKind}; fn check_message_sig( message: Message, signature: &Signature, - from: &AuthorityId + from: &AuthorityId, ) -> bool { let msg: Vec = message.encode(); runtime_io::ed25519_verify(&signature.0, &msg, from) @@ -68,16 +68,30 @@ pub fn evaluate_misbehavior( kind: &MisbehaviorKind, ) -> bool { match *kind { - MisbehaviorKind::BftDoublePrepare(round, (h_1, ref s_1), (h_2, ref s_2)) => { - s_1 != s_2 && - check_message_sig::(prepare::(parent_hash, round, h_1), s_1, misbehaved) && - check_message_sig::(prepare::(parent_hash, round, h_2), s_2, misbehaved) - } - MisbehaviorKind::BftDoubleCommit(round, (h_1, ref s_1), (h_2, ref s_2)) => { - s_1 != s_2 && - check_message_sig::(commit::(parent_hash, round, h_1), s_1, misbehaved) && - check_message_sig::(commit::(parent_hash, round, h_2), s_2, misbehaved) - } + MisbehaviorKind::BftDoublePrepare(round, (h_1, ref s_1), (h_2, ref s_2)) => + s_1 != s_2 + && check_message_sig::( + prepare::(parent_hash, round, h_1), + s_1, + misbehaved, + ) + && check_message_sig::( + prepare::(parent_hash, round, h_2), + s_2, + misbehaved, + ), + MisbehaviorKind::BftDoubleCommit(round, (h_1, ref s_1), (h_2, ref s_2)) => + s_1 != s_2 + && check_message_sig::( + commit::(parent_hash, round, h_1), + s_1, + misbehaved, + ) + && check_message_sig::( + commit::(parent_hash, round, h_2), + s_2, + misbehaved, + ), } } @@ -85,19 +99,24 @@ pub fn evaluate_misbehavior( mod tests { use super::*; - use substrate_bft::generic; use keyring::ed25519; use keyring::Keyring; + use substrate_bft::generic; - use runtime_primitives::testing::{H256, Block as RawBlock}; + use runtime_primitives::testing::{Block as RawBlock, H256}; type Block = RawBlock; - fn sign_prepare(key: &ed25519::Pair, round: u32, hash: H256, parent_hash: H256) -> (H256, Signature) { + fn sign_prepare( + key: &ed25519::Pair, + round: u32, + hash: H256, + parent_hash: H256, + ) -> (H256, Signature) { let msg = substrate_bft::sign_message::( generic::Message::Vote(generic::Vote::Prepare(round as _, hash)), key, - parent_hash + parent_hash, ); match msg { @@ -106,11 +125,16 @@ mod tests { } } - fn sign_commit(key: &ed25519::Pair, round: u32, hash: H256, parent_hash: H256) -> (H256, Signature) { + fn sign_commit( + key: &ed25519::Pair, + round: u32, + hash: H256, + parent_hash: H256, + ) -> (H256, Signature) 
{ let msg = substrate_bft::sign_message::( generic::Message::Vote(generic::Vote::Commit(round as _, hash)), key, - parent_hash + parent_hash, ); match msg { @@ -138,26 +162,26 @@ mod tests { // same signature twice is not misbehavior. let signed = sign_prepare(&key, 1, hash_1, parent_hash); - assert!(evaluate_misbehavior::( - &key.public().into(), - parent_hash, - &MisbehaviorKind::BftDoublePrepare( - 1, - signed, - signed, - ) - ) == false); + assert!( + evaluate_misbehavior::( + &key.public().into(), + parent_hash, + &MisbehaviorKind::BftDoublePrepare(1, signed, signed,) + ) == false + ); // misbehavior has wrong target. - assert!(evaluate_misbehavior::( - &Keyring::Two.to_raw_public().into(), - parent_hash, - &MisbehaviorKind::BftDoublePrepare( - 1, - sign_prepare(&key, 1, hash_1, parent_hash), - sign_prepare(&key, 1, hash_2, parent_hash), - ) - ) == false); + assert!( + evaluate_misbehavior::( + &Keyring::Two.to_raw_public().into(), + parent_hash, + &MisbehaviorKind::BftDoublePrepare( + 1, + sign_prepare(&key, 1, hash_1, parent_hash), + sign_prepare(&key, 1, hash_2, parent_hash), + ) + ) == false + ); } #[test] @@ -179,25 +203,25 @@ mod tests { // same signature twice is not misbehavior. let signed = sign_commit(&key, 1, hash_1, parent_hash); - assert!(evaluate_misbehavior::( - &key.public().into(), - parent_hash, - &MisbehaviorKind::BftDoubleCommit( - 1, - signed, - signed, - ) - ) == false); + assert!( + evaluate_misbehavior::( + &key.public().into(), + parent_hash, + &MisbehaviorKind::BftDoubleCommit(1, signed, signed,) + ) == false + ); // misbehavior has wrong target. - assert!(evaluate_misbehavior::( - &Keyring::Two.to_raw_public().into(), - parent_hash, - &MisbehaviorKind::BftDoubleCommit( - 1, - sign_commit(&key, 1, hash_1, parent_hash), - sign_commit(&key, 1, hash_2, parent_hash), - ) - ) == false); + assert!( + evaluate_misbehavior::( + &Keyring::Two.to_raw_public().into(), + parent_hash, + &MisbehaviorKind::BftDoubleCommit( + 1, + sign_commit(&key, 1, hash_1, parent_hash), + sign_commit(&key, 1, hash_2, parent_hash), + ) + ) == false + ); } } diff --git a/substrate/network/src/blocks.rs b/substrate/network/src/blocks.rs index f5b32ad38739f..cb502e23cc6ff 100644 --- a/substrate/network/src/blocks.rs +++ b/substrate/network/src/blocks.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see .? -use std::mem; -use std::cmp; -use std::ops::Range; -use std::collections::{HashMap, BTreeMap}; -use std::collections::hash_map::Entry; +use message; use network::PeerId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; -use message; +use std::cmp; +use std::collections::hash_map::Entry; +use std::collections::{BTreeMap, HashMap}; +use std::mem; +use std::ops::Range; const MAX_PARALLEL_DOWNLOADS: u32 = 1; @@ -34,14 +34,14 @@ pub struct BlockData { #[derive(Debug)] enum BlockRangeState { - Downloading { - len: u64, - downloading: u32, - }, + Downloading { len: u64, downloading: u32 }, Complete(Vec>), } -impl BlockRangeState where B::Header: HeaderT { +impl BlockRangeState +where + B::Header: HeaderT, +{ pub fn len(&self) -> u64 { match *self { BlockRangeState::Downloading { len, .. } => len, @@ -58,7 +58,10 @@ pub struct BlockCollection { peer_requests: HashMap, } -impl BlockCollection where B::Header: HeaderT { +impl BlockCollection +where + B::Header: HeaderT, +{ /// Create a new instance. 
pub fn new() -> Self { BlockCollection { @@ -76,27 +79,45 @@ impl BlockCollection where B::Header: HeaderT { /// Insert a set of blocks into collection. pub fn insert(&mut self, start: u64, blocks: Vec>, peer_id: PeerId) { if blocks.is_empty() { - return; + return } match self.blocks.get(&start) { Some(&BlockRangeState::Downloading { .. }) => { trace!(target: "sync", "Ignored block data still marked as being downloaded: {}", start); debug_assert!(false); - return; + return }, Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { trace!(target: "sync", "Ignored block data already downloaded: {}", start); - return; + return }, _ => (), } - self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter().map(|b| BlockData { origin: peer_id, block: b }).collect())); + self.blocks.insert( + start, + BlockRangeState::Complete( + blocks + .into_iter() + .map(|b| BlockData { + origin: peer_id, + block: b, + }) + .collect(), + ), + ); } - /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. - pub fn needed_blocks(&mut self, peer_id: PeerId, count: usize, peer_best: u64, common: u64) -> Option> { + /// Returns a set of block hashes that require a header download. The returned set is marked as + /// being downloaded. + pub fn needed_blocks( + &mut self, + peer_id: PeerId, + count: usize, + peer_best: u64, + common: u64, + ) -> Option> { // First block number that we need to download let first_different = common + 1; let count = count as u64; @@ -106,16 +127,28 @@ impl BlockCollection where B::Header: HeaderT { loop { let next = downloading_iter.next(); break match &(prev, next) { - &(Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) if downloading < MAX_PARALLEL_DOWNLOADS => - (*start .. *start + *len, downloading), + &( + Some(( + start, + &BlockRangeState::Downloading { + ref len, + downloading, + }, + )), + _, + ) if downloading < MAX_PARALLEL_DOWNLOADS => + (*start..*start + *len, downloading), &(Some((start, r)), Some((next_start, _))) if start + r.len() < *next_start => - (*start + r.len() .. cmp::min(*next_start, *start + r.len() + count), 0), // gap - &(Some((start, r)), None) => - (start + r.len() .. start + r.len() + count, 0), // last range - &(None, None) => - (first_different .. first_different + count, 0), // empty - &(None, Some((start, _))) if *start > first_different => - (first_different .. 
cmp::min(first_different + count, *start), 0), // gap at the start + ( + *start + r.len()..cmp::min(*next_start, *start + r.len() + count), + 0, + ), // gap + &(Some((start, r)), None) => (start + r.len()..start + r.len() + count, 0), /* last range */ + &(None, None) => (first_different..first_different + count, 0), /* empty */ + &(None, Some((start, _))) if *start > first_different => ( + first_different..cmp::min(first_different + count, *start), + 0, + ), /* gap at the start */ _ => { prev = next; continue @@ -126,18 +159,28 @@ impl BlockCollection where B::Header: HeaderT { // crop to peers best if range.start > peer_best { trace!(target: "sync", "Out of range for peer {} ({} vs {})", peer_id, range.start, peer_best); - return None; + return None } range.end = cmp::min(peer_best + 1, range.end); self.peer_requests.insert(peer_id, range.start); - self.blocks.insert(range.start, BlockRangeState::Downloading{ len: range.end - range.start, downloading: downloading + 1 }); + self.blocks.insert( + range.start, + BlockRangeState::Downloading { + len: range.end - range.start, + downloading: downloading + 1, + }, + ); if range.end <= range.start { - panic!("Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", range, count, peer_best, common, self.blocks); + panic!( + "Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", + range, count, peer_best, common, self.blocks + ); } Some(range) } - /// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain. + /// Get a valid chain of blocks ordered in descending order and ready for importing into + /// blockchain. pub fn drain(&mut self, from: u64) -> Vec> { let mut drained = Vec::new(); let mut ranges = Vec::new(); @@ -146,10 +189,10 @@ impl BlockCollection where B::Header: HeaderT { for (start, range_data) in &mut self.blocks { match range_data { &mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => { - prev = *start + blocks.len() as u64; - let mut blocks = mem::replace(blocks, Vec::new()); - drained.append(&mut blocks); - ranges.push(*start); + prev = *start + blocks.len() as u64; + let mut blocks = mem::replace(blocks, Vec::new()); + drained.append(&mut blocks); + ranges.push(*start); }, _ => break, } @@ -167,17 +210,19 @@ impl BlockCollection where B::Header: HeaderT { Entry::Occupied(entry) => { let start = entry.remove(); let remove = match self.blocks.get_mut(&start) { - Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) if *downloading > 1 => { + Some(&mut BlockRangeState::Downloading { + ref mut downloading, + .. + }) if *downloading > 1 => + { *downloading = *downloading - 1; false - }, - Some(&mut BlockRangeState::Downloading { .. }) => { - true - }, + } + Some(&mut BlockRangeState::Downloading { .. }) => true, _ => { debug_assert!(false); false - } + }, }; if remove { self.blocks.remove(&start); @@ -192,32 +237,33 @@ impl BlockCollection where B::Header: HeaderT { mod test { use super::{BlockCollection, BlockData, BlockRangeState}; use message; - use runtime_primitives::testing::Block as RawBlock; use primitives::H256; + use runtime_primitives::testing::Block as RawBlock; type Block = RawBlock; fn is_empty(bc: &BlockCollection) -> bool { - bc.blocks.is_empty() && - bc.peer_requests.is_empty() + bc.blocks.is_empty() && bc.peer_requests.is_empty() } fn generate_blocks(n: usize) -> Vec> { - (0 .. 
n).map(|_| message::generic::BlockData { - hash: H256::random(), - header: None, - body: None, - message_queue: None, - receipt: None, - justification: None, - }).collect() + (0..n) + .map(|_| message::generic::BlockData { + hash: H256::random(), + header: None, + body: None, + message_queue: None, + receipt: None, + justification: None, + }) + .collect() } #[test] fn create_clear() { let mut bc = BlockCollection::new(); assert!(is_empty(&bc)); - bc.insert(1, generate_blocks(100), 0); + bc.insert(1, generate_blocks(100), 0); assert!(!is_empty(&bc)); bc.clear(); assert!(is_empty(&bc)); @@ -232,29 +278,56 @@ mod test { let peer2 = 2; let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0, 40, 150, 0), Some(1 .. 41)); - assert_eq!(bc.needed_blocks(peer1, 40, 150, 0), Some(41 .. 81)); - assert_eq!(bc.needed_blocks(peer2, 40, 150, 0), Some(81 .. 121)); + assert_eq!(bc.needed_blocks(peer0, 40, 150, 0), Some(1..41)); + assert_eq!(bc.needed_blocks(peer1, 40, 150, 0), Some(41..81)); + assert_eq!(bc.needed_blocks(peer2, 40, 150, 0), Some(81..121)); bc.clear_peer_download(peer1); bc.insert(41, blocks[41..81].to_vec(), peer1); assert_eq!(bc.drain(1), vec![]); - assert_eq!(bc.needed_blocks(peer1, 40, 150, 0), Some(121 .. 151)); + assert_eq!(bc.needed_blocks(peer1, 40, 150, 0), Some(121..151)); bc.clear_peer_download(peer0); bc.insert(1, blocks[1..11].to_vec(), peer0); - assert_eq!(bc.needed_blocks(peer0, 40, 150, 0), Some(11 .. 41)); - assert_eq!(bc.drain(1), blocks[1..11].iter().map(|b| BlockData { block: b.clone(), origin: 0 }).collect::>()); + assert_eq!(bc.needed_blocks(peer0, 40, 150, 0), Some(11..41)); + assert_eq!( + bc.drain(1), + blocks[1..11] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: 0 + }) + .collect::>() + ); bc.clear_peer_download(peer0); bc.insert(11, blocks[11..41].to_vec(), peer0); let drained = bc.drain(12); - assert_eq!(drained[..30], blocks[11..41].iter().map(|b| BlockData { block: b.clone(), origin: 0 }).collect::>()[..]); - assert_eq!(drained[30..], blocks[41..81].iter().map(|b| BlockData { block: b.clone(), origin: 1 }).collect::>()[..]); + assert_eq!( + drained[..30], + blocks[11..41] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: 0 + }) + .collect::>()[..] + ); + assert_eq!( + drained[30..], + blocks[41..81] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: 1 + }) + .collect::>()[..] + ); bc.clear_peer_download(peer2); - assert_eq!(bc.needed_blocks(peer2, 40, 150, 80), Some(81 .. 121)); + assert_eq!(bc.needed_blocks(peer2, 40, 150, 80), Some(81..121)); bc.clear_peer_download(peer2); bc.insert(81, blocks[81..121].to_vec(), peer2); bc.clear_peer_download(peer1); @@ -262,21 +335,51 @@ mod test { assert_eq!(bc.drain(80), vec![]); let drained = bc.drain(81); - assert_eq!(drained[..40], blocks[81..121].iter().map(|b| BlockData { block: b.clone(), origin: 2 }).collect::>()[..]); - assert_eq!(drained[40..], blocks[121..150].iter().map(|b| BlockData { block: b.clone(), origin: 1 }).collect::>()[..]); + assert_eq!( + drained[..40], + blocks[81..121] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: 2 + }) + .collect::>()[..] + ); + assert_eq!( + drained[40..], + blocks[121..150] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: 1 + }) + .collect::>()[..] 
+ ); } #[test] fn large_gap() { let mut bc: BlockCollection = BlockCollection::new(); - bc.blocks.insert(100, BlockRangeState::Downloading { - len: 128, - downloading: 1, - }); - let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: 0 }).collect(); + bc.blocks.insert( + 100, + BlockRangeState::Downloading { + len: 128, + downloading: 1, + }, + ); + let blocks = generate_blocks(10) + .into_iter() + .map(|b| BlockData { + block: b, + origin: 0, + }) + .collect(); bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); - assert_eq!(bc.needed_blocks(0, 128, 10000, 000), Some(1 .. 100)); - assert_eq!(bc.needed_blocks(0, 128, 10000, 600), Some(100 + 128 .. 100 + 128 + 128)); + assert_eq!(bc.needed_blocks(0, 128, 10000, 000), Some(1..100)); + assert_eq!( + bc.needed_blocks(0, 128, 10000, 600), + Some(100 + 128..100 + 128 + 128) + ); } } diff --git a/substrate/network/src/chain.rs b/substrate/network/src/chain.rs index a2eff9b54d8a5..2a2f5fbc7b98c 100644 --- a/substrate/network/src/chain.rs +++ b/substrate/network/src/chain.rs @@ -16,16 +16,25 @@ //! Blockchain access trait -use client::{self, Client as PolkadotClient, ImportResult, ClientInfo, BlockStatus, BlockOrigin, CallExecutor}; use client::error::Error; -use state_machine; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; -use runtime_primitives::generic::BlockId; +use client::{ + self, BlockOrigin, BlockStatus, CallExecutor, Client as PolkadotClient, ClientInfo, + ImportResult, +}; use runtime_primitives::bft::Justification; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; +use state_machine; pub trait Client: Send + Sync { /// Import a new block. Parent is supposed to be existing in the blockchain. - fn import(&self, is_best: bool, header: Block::Header, justification: Justification, body: Option>) -> Result; + fn import( + &self, + is_best: bool, + header: Block::Header, + justification: Justification, + body: Option>, + ) -> Result; /// Get blockchain info. fn info(&self) -> Result, Error>; @@ -34,7 +43,10 @@ pub trait Client: Send + Sync { fn block_status(&self, id: &BlockId) -> Result; /// Get block hash by number. - fn block_hash(&self, block_number: ::Number) -> Result, Error>; + fn block_hash( + &self, + block_number: ::Number, + ) -> Result, Error>; /// Get block header. fn header(&self, id: &BlockId) -> Result, Error>; @@ -43,22 +55,43 @@ pub trait Client: Send + Sync { fn body(&self, id: &BlockId) -> Result>, Error>; /// Get block justification. - fn justification(&self, id: &BlockId) -> Result>, Error>; + fn justification( + &self, + id: &BlockId, + ) -> Result>, Error>; /// Get method execution proof. 
- fn execution_proof(&self, block: &Block::Hash, method: &str, data: &[u8]) -> Result<(Vec, Vec>), Error>; + fn execution_proof( + &self, + block: &Block::Hash, + method: &str, + data: &[u8], + ) -> Result<(Vec, Vec>), Error>; } -impl Client for PolkadotClient where +impl Client for PolkadotClient +where B: client::backend::Backend + Send + Sync + 'static, E: CallExecutor + Send + Sync + 'static, Block: BlockT, - Error: From<<>::State as state_machine::backend::Backend>::Error>, { - - fn import(&self, is_best: bool, header: Block::Header, justification: Justification, body: Option>) -> Result { + Error: From< + <>::State as state_machine::backend::Backend>::Error, + >, +{ + fn import( + &self, + is_best: bool, + header: Block::Header, + justification: Justification, + body: Option>, + ) -> Result { // TODO: defer justification check. let justified_header = self.check_justification(header, justification.into())?; - let origin = if is_best { BlockOrigin::NetworkBroadcast } else { BlockOrigin::NetworkInitialSync }; + let origin = if is_best { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; (self as &PolkadotClient).import_block(origin, justified_header, body) } @@ -70,7 +103,10 @@ impl Client for PolkadotClient where (self as &PolkadotClient).block_status(id) } - fn block_hash(&self, block_number: ::Number) -> Result, Error> { + fn block_hash( + &self, + block_number: ::Number, + ) -> Result, Error> { (self as &PolkadotClient).block_hash(block_number) } @@ -82,11 +118,23 @@ impl Client for PolkadotClient where (self as &PolkadotClient).body(id) } - fn justification(&self, id: &BlockId) -> Result>, Error> { + fn justification( + &self, + id: &BlockId, + ) -> Result>, Error> { (self as &PolkadotClient).justification(id) } - fn execution_proof(&self, block: &Block::Hash, method: &str, data: &[u8]) -> Result<(Vec, Vec>), Error> { - (self as &PolkadotClient).execution_proof(&BlockId::Hash(block.clone()), method, data) + fn execution_proof( + &self, + block: &Block::Hash, + method: &str, + data: &[u8], + ) -> Result<(Vec, Vec>), Error> { + (self as &PolkadotClient).execution_proof( + &BlockId::Hash(block.clone()), + method, + data, + ) } } diff --git a/substrate/network/src/config.rs b/substrate/network/src/config.rs index 7e21a5ded3b02..892c6ff203201 100644 --- a/substrate/network/src/config.rs +++ b/substrate/network/src/config.rs @@ -25,8 +25,6 @@ pub struct ProtocolConfig { impl Default for ProtocolConfig { fn default() -> ProtocolConfig { - ProtocolConfig { - roles: Role::FULL, - } + ProtocolConfig { roles: Role::FULL } } } diff --git a/substrate/network/src/consensus.rs b/substrate/network/src/consensus.rs index 60abb9b2d4277..9a6ce7a0cd251 100644 --- a/substrate/network/src/consensus.rs +++ b/substrate/network/src/consensus.rs @@ -16,15 +16,15 @@ //! Consensus related bits of the network service. -use std::collections::{HashMap, HashSet}; use futures::sync::mpsc; -use std::time::{Instant, Duration}; use io::SyncIo; -use protocol::Protocol; +use message::{self, generic::Message as GenericMessage}; use network::PeerId; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; +use protocol::Protocol; use runtime_primitives::generic::BlockId; -use message::{self, generic::Message as GenericMessage}; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; +use std::collections::{HashMap, HashSet}; +use std::time::{Duration, Instant}; // TODO: Add additional spam/DoS attack protection. 
const MESSAGE_LIFETIME: Duration = Duration::from_secs(600); @@ -36,12 +36,18 @@ struct PeerConsensus { /// Consensus network protocol handler. Manages statements and candidate requests. pub struct Consensus { peers: HashMap>, - bft_message_sink: Option<(mpsc::UnboundedSender>, B::Hash)>, + bft_message_sink: Option<( + mpsc::UnboundedSender>, + B::Hash, + )>, messages: Vec<(B::Hash, Instant, message::Message)>, message_hashes: HashSet, } -impl Consensus where B::Header: HeaderT { +impl Consensus +where + B::Header: HeaderT, +{ /// Create a new instance. pub fn new() -> Self { Consensus { @@ -58,7 +64,13 @@ impl Consensus where B::Header: HeaderT { } /// Handle new connected peer. - pub fn new_peer(&mut self, io: &mut SyncIo, protocol: &Protocol, peer_id: PeerId, roles: &[message::Role]) { + pub fn new_peer( + &mut self, + io: &mut SyncIo, + protocol: &Protocol, + peer_id: PeerId, + roles: &[message::Role], + ) { if roles.iter().any(|r| *r == message::Role::Validator) { trace!(target:"sync", "Registering validator {}", peer_id); // Send out all known messages. @@ -68,13 +80,17 @@ impl Consensus where B::Header: HeaderT { known_messages.insert(hash.clone()); protocol.send_message(io, peer_id, message.clone()); } - self.peers.insert(peer_id, PeerConsensus { - known_messages, - }); + self.peers.insert(peer_id, PeerConsensus { known_messages }); } } - fn propagate(&mut self, io: &mut SyncIo, protocol: &Protocol, message: message::Message, hash: B::Hash) { + fn propagate( + &mut self, + io: &mut SyncIo, + protocol: &Protocol, + message: message::Message, + hash: B::Hash, + ) { for (id, ref mut peer) in self.peers.iter_mut() { if peer.known_messages.insert(hash.clone()) { protocol.send_message(io, *id, message.clone()); @@ -88,22 +104,30 @@ impl Consensus where B::Header: HeaderT { } } - pub fn on_bft_message(&mut self, io: &mut SyncIo, protocol: &Protocol, peer_id: PeerId, message: message::LocalizedBftMessage, hash: B::Hash) { + pub fn on_bft_message( + &mut self, + io: &mut SyncIo, + protocol: &Protocol, + peer_id: PeerId, + message: message::LocalizedBftMessage, + hash: B::Hash, + ) { if self.message_hashes.contains(&hash) { trace!(target:"sync", "Ignored already known BFT message from {}", peer_id); - return; + return } - match (protocol.chain().info(), protocol.chain().header(&BlockId::Hash(message.parent_hash))) { + match ( + protocol.chain().info(), + protocol.chain().header(&BlockId::Hash(message.parent_hash)), + ) { (_, Err(e)) | (Err(e), _) => { debug!(target:"sync", "Error reading blockchain: {:?}", e); - return; + return }, - (Ok(info), Ok(Some(header))) => { - if header.number() < &info.chain.best_number { - trace!(target:"sync", "Ignored ancient BFT message from {}, hash={}", peer_id, message.parent_hash); - return; - } + (Ok(info), Ok(Some(header))) => if header.number() < &info.chain.best_number { + trace!(target:"sync", "Ignored ancient BFT message from {}, hash={}", peer_id, message.parent_hash); + return }, (Ok(_), Ok(None)) => {}, } @@ -122,7 +146,7 @@ impl Consensus where B::Header: HeaderT { } } else { trace!(target:"sync", "Ignored BFT statement from unregistered peer {}", peer_id); - return; + return } let message = GenericMessage::BftMessage(message); @@ -131,7 +155,10 @@ impl Consensus where B::Header: HeaderT { self.propagate(io, protocol, message, hash); } - pub fn bft_messages(&mut self, parent_hash: B::Hash) -> mpsc::UnboundedReceiver> { + pub fn bft_messages( + &mut self, + parent_hash: B::Hash, + ) -> mpsc::UnboundedReceiver> { let (sink, stream) = 
mpsc::unbounded(); for &(_, _, ref message) in self.messages.iter() { @@ -141,7 +168,8 @@ impl Consensus where B::Header: HeaderT { }; if bft_message.parent_hash == parent_hash { - sink.unbounded_send(bft_message.clone()).expect("receiving end known to be open; qed"); + sink.unbounded_send(bft_message.clone()) + .expect("receiving end known to be open; qed"); } } @@ -149,7 +177,12 @@ impl Consensus where B::Header: HeaderT { stream } - pub fn send_bft_message(&mut self, io: &mut SyncIo, protocol: &Protocol, message: message::LocalizedBftMessage) { + pub fn send_bft_message( + &mut self, + io: &mut SyncIo, + protocol: &Protocol, + message: message::LocalizedBftMessage, + ) { // Broadcast message to all validators. trace!(target:"sync", "Broadcasting BFT message {:?}", message); let message = GenericMessage::BftMessage(message); @@ -158,7 +191,12 @@ impl Consensus where B::Header: HeaderT { self.propagate(io, protocol, message, hash); } - pub fn peer_disconnected(&mut self, _io: &mut SyncIo, _protocol: &Protocol, peer_id: PeerId) { + pub fn peer_disconnected( + &mut self, + _io: &mut SyncIo, + _protocol: &Protocol, + peer_id: PeerId, + ) { self.peers.remove(&peer_id); } @@ -167,13 +205,12 @@ impl Consensus where B::Header: HeaderT { let before = self.messages.len(); let now = Instant::now(); self.messages.retain(|&(ref hash, timestamp, ref message)| { - if timestamp >= now - MESSAGE_LIFETIME && - best_header.map_or(true, |header| - match *message { - GenericMessage::BftMessage(ref msg) => &msg.parent_hash != header.parent_hash(), - _ => true, - }) - { + if timestamp >= now - MESSAGE_LIFETIME && best_header.map_or(true, |header| { + match *message { + GenericMessage::BftMessage(ref msg) => &msg.parent_hash != header.parent_hash(), + _ => true, + } + }) { true } else { hashes.remove(hash); @@ -191,11 +228,11 @@ impl Consensus where B::Header: HeaderT { #[cfg(test)] mod tests { + use super::{Consensus, MESSAGE_LIFETIME}; + use message::{self, generic::Message as GenericMessage}; use runtime_primitives::bft::Justification; - use runtime_primitives::testing::{H256, Header, Block as RawBlock}; + use runtime_primitives::testing::{Block as RawBlock, H256, Header}; use std::time::Instant; - use message::{self, generic::Message as GenericMessage}; - use super::{Consensus, MESSAGE_LIFETIME}; type Block = RawBlock; @@ -255,7 +292,9 @@ mod tests { // make timestamp expired consensus.messages.clear(); - consensus.messages.push((m2_hash, now - MESSAGE_LIFETIME, m2)); + consensus + .messages + .push((m2_hash, now - MESSAGE_LIFETIME, m2)); consensus.collect_garbage(None); assert!(consensus.messages.is_empty()); assert!(consensus.message_hashes.is_empty()); diff --git a/substrate/network/src/error.rs b/substrate/network/src/error.rs index 120cfe0b4f358..7cb5153a72d76 100644 --- a/substrate/network/src/error.rs +++ b/substrate/network/src/error.rs @@ -16,8 +16,8 @@ //! Polkadot service possible errors. -use network::Error as NetworkError; use client; +use network::Error as NetworkError; error_chain! { foreign_links { diff --git a/substrate/network/src/io.rs b/substrate/network/src/io.rs index 2a67888b657af..ce66d1a8e19e2 100644 --- a/substrate/network/src/io.rs +++ b/substrate/network/src/io.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see .? -use network::{NetworkContext, PeerId, Error as NetworkError, SessionInfo}; +use network::{Error as NetworkError, NetworkContext, PeerId, SessionInfo}; /// IO interface for the syncing handler. 
/// Provides peer connection management and an interface to the blockchain client. @@ -43,9 +43,7 @@ pub struct NetSyncIo<'s> { impl<'s> NetSyncIo<'s> { /// Creates a new instance from the `NetworkContext` and the blockchain client reference. pub fn new(network: &'s NetworkContext) -> NetSyncIo<'s> { - NetSyncIo { - network: network, - } + NetSyncIo { network } } } @@ -58,7 +56,7 @@ impl<'s> SyncIo for NetSyncIo<'s> { self.network.disconnect_peer(peer_id); } - fn send(&mut self, peer_id: PeerId, data: Vec) -> Result<(), NetworkError>{ + fn send(&mut self, peer_id: PeerId, data: Vec) -> Result<(), NetworkError> { self.network.send(peer_id, 0, data) } @@ -74,5 +72,3 @@ impl<'s> SyncIo for NetSyncIo<'s> { self.network.peer_client_version(peer_id) } } - - diff --git a/substrate/network/src/lib.rs b/substrate/network/src/lib.rs index d15cc4855d295..5370735b2652a 100644 --- a/substrate/network/src/lib.rs +++ b/substrate/network/src/lib.rs @@ -19,32 +19,39 @@ //! Implements polkadot protocol version as specified here: //! https://github.com/paritytech/polkadot/wiki/Network-protocol -extern crate ethcore_network_devp2p as network_devp2p; -extern crate ethcore_network as network; +extern crate ed25519; extern crate ethcore_io as core_io; +extern crate ethcore_network as network; +extern crate ethcore_network_devp2p as network_devp2p; +extern crate futures; extern crate linked_hash_map; -extern crate rand; extern crate parking_lot; -extern crate substrate_primitives as primitives; -extern crate substrate_state_machine as state_machine; -extern crate substrate_serializer as ser; -extern crate substrate_client as client; -extern crate substrate_runtime_support as runtime_support; -extern crate substrate_runtime_primitives as runtime_primitives; -extern crate substrate_bft; -extern crate substrate_codec as codec; +extern crate rand; extern crate serde; extern crate serde_json; -extern crate futures; -extern crate ed25519; -#[macro_use] extern crate serde_derive; -#[macro_use] extern crate log; -#[macro_use] extern crate bitflags; -#[macro_use] extern crate error_chain; +extern crate substrate_bft; +extern crate substrate_client as client; +extern crate substrate_codec as codec; +extern crate substrate_primitives as primitives; +extern crate substrate_runtime_primitives as runtime_primitives; +extern crate substrate_runtime_support as runtime_support; +extern crate substrate_serializer as ser; +extern crate substrate_state_machine as state_machine; +#[macro_use] +extern crate serde_derive; +#[macro_use] +extern crate log; +#[macro_use] +extern crate bitflags; +#[macro_use] +extern crate error_chain; -#[cfg(test)] extern crate env_logger; -#[cfg(test)] extern crate substrate_keyring as keyring; -#[cfg(test)] extern crate substrate_test_client as test_client; +#[cfg(test)] +extern crate env_logger; +#[cfg(test)] +extern crate substrate_keyring as keyring; +#[cfg(test)] +extern crate substrate_test_client as test_client; mod service; mod sync; @@ -58,14 +65,22 @@ mod consensus; mod on_demand; pub mod error; -#[cfg(test)] mod test; +#[cfg(test)] +mod test; -pub use service::{Service, FetchFuture, ConsensusService, BftMessageStream, - TransactionPool, Params, ManageNetwork, SyncProvider}; -pub use protocol::{ProtocolStatus}; -pub use sync::{Status as SyncStatus, SyncState}; -pub use network::{NonReservedPeerMode, NetworkConfiguration, ConnectionFilter, ConnectionDirection}; -pub use message::{generic as generic_message, BftMessage, LocalizedBftMessage, ConsensusVote, SignedConsensusVote, 
SignedConsensusMessage, SignedConsensusProposal}; +pub use config::{ProtocolConfig, Role}; pub use error::Error; -pub use config::{Role, ProtocolConfig}; +pub use message::{ + generic as generic_message, BftMessage, ConsensusVote, LocalizedBftMessage, + SignedConsensusMessage, SignedConsensusProposal, SignedConsensusVote, +}; +pub use network::{ + ConnectionDirection, ConnectionFilter, NetworkConfiguration, NonReservedPeerMode, +}; pub use on_demand::{OnDemand, OnDemandService, RemoteCallResponse}; +pub use protocol::ProtocolStatus; +pub use service::{ + BftMessageStream, ConsensusService, FetchFuture, ManageNetwork, Params, Service, SyncProvider, + TransactionPool, +}; +pub use sync::{Status as SyncStatus, SyncState}; diff --git a/substrate/network/src/message.rs b/substrate/network/src/message.rs index 3b96b5dae383f..720b106e029f0 100644 --- a/substrate/network/src/message.rs +++ b/substrate/network/src/message.rs @@ -14,12 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see .? -//! Network packet message types. These get serialized and put into the lower level protocol payload. +//! Network packet message types. These get serialized and put into the lower level protocol +//! payload. use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; use service::Role as RoleFlags; -pub use self::generic::{BlockAnnounce, RemoteCallRequest, ConsensusVote, SignedConsensusVote, FromBlock, Body}; +pub use self::generic::{ + BlockAnnounce, Body, ConsensusVote, FromBlock, RemoteCallRequest, SignedConsensusVote, +}; pub type RequestId = u64; @@ -33,54 +36,32 @@ pub type Message = generic::Message< >; /// Type alias for using the status type using block type parameters. -pub type Status = generic::Status< - ::Hash, - <::Header as HeaderT>::Number, ->; +pub type Status = + generic::Status<::Hash, <::Header as HeaderT>::Number>; /// Type alias for using the block request type using block type parameters. -pub type BlockRequest = generic::BlockRequest< - ::Hash, - <::Header as HeaderT>::Number, ->; +pub type BlockRequest = + generic::BlockRequest<::Hash, <::Header as HeaderT>::Number>; /// Type alias for using the localized bft message type using block type parameters. -pub type LocalizedBftMessage = generic::LocalizedBftMessage< - B, - ::Hash, ->; +pub type LocalizedBftMessage = generic::LocalizedBftMessage::Hash>; /// Type alias for using the BlockData type using block type parameters. -pub type BlockData = generic::BlockData< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockData = + generic::BlockData<::Header, ::Hash, ::Extrinsic>; /// Type alias for using the BlockResponse type using block type parameters. -pub type BlockResponse = generic::BlockResponse< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockResponse = + generic::BlockResponse<::Header, ::Hash, ::Extrinsic>; /// Type alias for using the BftMessage type using block type parameters. -pub type BftMessage = generic::BftMessage< - B, - ::Hash, ->; +pub type BftMessage = generic::BftMessage::Hash>; /// Type alias for using the SignedConsensusProposal type using block type parameters. -pub type SignedConsensusProposal = generic::SignedConsensusProposal< - B, - ::Hash, ->; +pub type SignedConsensusProposal = generic::SignedConsensusProposal::Hash>; /// Type alias for using the SignedConsensusProposal type using block type parameters. 
-pub type SignedConsensusMessage = generic::SignedConsensusProposal< - B, - ::Hash, ->; +pub type SignedConsensusMessage = generic::SignedConsensusProposal::Hash>; /// A set of transactions. pub type Transactions = Vec; @@ -114,7 +95,8 @@ impl Role { } } -impl From for Vec where { +impl From for Vec // where +{ fn from(flags: RoleFlags) -> Vec { let mut roles = Vec::new(); if !(flags & RoleFlags::FULL).is_empty() { @@ -168,19 +150,19 @@ pub struct RemoteCallResponse { /// Generic types. pub mod generic { - use primitives::AuthorityId; use codec::Slicable; - use runtime_primitives::bft::Justification; use ed25519; + use primitives::AuthorityId; use primitives::Signature; + use runtime_primitives::bft::Justification; - use super::{Role, BlockAttribute, RemoteCallResponse, RequestId, Transactions, Direction}; + use super::{BlockAttribute, Direction, RemoteCallResponse, RequestId, Role, Transactions}; use primitives::bytes; /// Emulates Poc-1 extrinsic primitive. #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] - pub struct V1Extrinsic(#[serde(with="bytes")] pub Vec); + pub struct V1Extrinsic(#[serde(with = "bytes")] pub Vec); // Alternative block format for poc-1 compatibility. // TODO: remove this after poc-2 #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] @@ -193,17 +175,21 @@ pub mod generic { Extrinsics(Vec), } - impl Body where Extrinsic: Slicable { + impl Body + where + Extrinsic: Slicable, + { /// Extracts extrinsic from the body. pub fn to_extrinsics(self) -> Vec { match self { Body::Extrinsics(e) => e, - Body::V1(e) => { - e.into_iter().filter_map(|bytes| { + Body::V1(e) => e + .into_iter() + .filter_map(|bytes| { let bytes = bytes.0.encode(); Slicable::decode(&mut bytes.as_slice()) - }).collect() - } + }) + .collect(), } } } @@ -216,7 +202,7 @@ pub mod generic { /// The hash of the header justified. pub hash: H, /// The signatures and signers of the hash. - pub signatures: Vec<([u8; 32], Signature)> + pub signatures: Vec<([u8; 32], Signature)>, } // TODO: remove this after poc-2 @@ -235,13 +221,15 @@ pub mod generic { pub fn to_justification(self) -> Justification { match self { BlockJustification::V2(j) => j, - BlockJustification::V1(j) => { - Justification { - round_number: j.round_number, - hash: j.hash, - signatures: j.signatures.into_iter().map(|(a, s)| (a.into(), s)).collect(), - } - } + BlockJustification::V1(j) => Justification { + round_number: j.round_number, + hash: j.hash, + signatures: j + .signatures + .into_iter() + .map(|(a, s)| (a.into(), s)) + .collect(), + }, } } } @@ -394,7 +382,8 @@ pub mod generic { pub to: Option, /// Sequence direction. pub direction: Direction, - /// Maximum number of blocks to return. An implementation defined maximum is used when unspecified. + /// Maximum number of blocks to return. An implementation defined maximum is used when + /// unspecified. pub max: Option, } diff --git a/substrate/network/src/on_demand.rs b/substrate/network/src/on_demand.rs index a6b225d917e30..828baf8600a05 100644 --- a/substrate/network/src/on_demand.rs +++ b/substrate/network/src/on_demand.rs @@ -16,21 +16,21 @@ //! On-demand requests service. 
-use std::collections::VecDeque; -use std::sync::{Arc, Weak}; -use std::time::{Instant, Duration}; -use futures::{Async, Future, Poll}; -use futures::sync::oneshot::{channel, Receiver, Sender}; -use linked_hash_map::LinkedHashMap; -use linked_hash_map::Entry; -use parking_lot::Mutex; use client; -use client::light::fetcher::{Fetcher, FetchChecker, RemoteCallRequest}; +use client::light::fetcher::{FetchChecker, Fetcher, RemoteCallRequest}; +use futures::sync::oneshot::{channel, Receiver, Sender}; +use futures::{Async, Future, Poll}; use io::SyncIo; +use linked_hash_map::Entry; +use linked_hash_map::LinkedHashMap; use message; use network::PeerId; -use service; +use parking_lot::Mutex; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; +use service; +use std::collections::VecDeque; +use std::sync::{Arc, Weak}; +use std::time::{Duration, Instant}; /// Remote request timeout. const REQUEST_TIMEOUT: Duration = Duration::from_secs(15); @@ -47,7 +47,12 @@ pub trait OnDemandService: Send + Sync { fn maintain_peers(&self, io: &mut SyncIo); /// When call response is received from remote node. - fn on_remote_call_response(&self, io: &mut SyncIo, peer: PeerId, response: message::RemoteCallResponse); + fn on_remote_call_response( + &self, + io: &mut SyncIo, + peer: PeerId, + response: message::RemoteCallResponse, + ); } /// On-demand requests service. Dispatches requests to appropriate peers. @@ -77,7 +82,10 @@ struct Request { } enum RequestData { - RemoteCall(RemoteCallRequest, Sender>), + RemoteCall( + RemoteCallRequest, + Sender>, + ), } enum Accept { @@ -90,7 +98,8 @@ impl Future for RemoteCallResponse { type Error = client::error::Error; fn poll(&mut self) -> Poll { - self.receiver.poll() + self.receiver + .poll() .map_err(|_| client::error::ErrorKind::RemoteFetchCancelled.into()) .and_then(|r| match r { Async::Ready(Ok(ready)) => Ok(Async::Ready(ready)), @@ -100,9 +109,10 @@ impl Future for RemoteCallResponse { } } -impl OnDemand where +impl OnDemand +where E: service::ExecuteInContext, - B::Header: HeaderT, + B::Header: HeaderT, { /// Creates new on-demand service. pub fn new(checker: Arc>) -> Self { @@ -114,7 +124,7 @@ impl OnDemand where pending_requests: VecDeque::new(), active_peers: LinkedHashMap::new(), idle_peers: VecDeque::new(), - }) + }), } } @@ -132,7 +142,14 @@ impl OnDemand where } /// Try to accept response from given peer. - fn accept_response) -> Accept>(&self, rtype: &str, io: &mut SyncIo, peer: PeerId, request_id: u64, try_accept: F) { + fn accept_response) -> Accept>( + &self, + rtype: &str, + io: &mut SyncIo, + peer: PeerId, + request_id: u64, + try_accept: F, + ) { let mut core = self.core.lock(); let request = match core.remove(peer, request_id) { Some(request) => request, @@ -140,7 +157,7 @@ impl OnDemand where trace!(target: "sync", "Invalid remote {} response from peer {}", rtype, peer); io.disconnect_peer(peer); core.remove_peer(peer); - return; + return }, }; @@ -163,14 +180,18 @@ impl OnDemand where } } -impl OnDemandService for OnDemand where +impl OnDemandService for OnDemand +where B: BlockT, E: service::ExecuteInContext, - B::Header: HeaderT, + B::Header: HeaderT, { fn on_connect(&self, peer: PeerId, role: service::Role) { - if !role.intersects(service::Role::FULL | service::Role::COLLATOR | service::Role::VALIDATOR) { // TODO: correct? - return; + if !role + .intersects(service::Role::FULL | service::Role::COLLATOR | service::Role::VALIDATOR) + { + // TODO: correct? 
+ return } let mut core = self.core.lock(); @@ -193,38 +214,51 @@ impl OnDemandService for OnDemand where core.dispatch(); } - fn on_remote_call_response(&self, io: &mut SyncIo, peer: PeerId, response: message::RemoteCallResponse) { - self.accept_response("call", io, peer, response.id, |request| match request.data { - RequestData::RemoteCall(request, sender) => match self.checker.check_execution_proof(&request, response.proof) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteCall(request, sender)), - }, + fn on_remote_call_response( + &self, + io: &mut SyncIo, + peer: PeerId, + response: message::RemoteCallResponse, + ) { + self.accept_response("call", io, peer, response.id, |request| { + match request.data { + RequestData::RemoteCall(request, sender) => + match self.checker.check_execution_proof(&request, response.proof) { + Ok(response) => { + // we do not bother if receiver has been dropped already + let _ = sender.send(Ok(response)); + Accept::Ok + }, + Err(error) => + Accept::CheckFailed(error, RequestData::RemoteCall(request, sender)), + }, + } }) } } -impl Fetcher for OnDemand where +impl Fetcher for OnDemand +where B: BlockT, E: service::ExecuteInContext, - B::Header: HeaderT, + B::Header: HeaderT, { type RemoteCallResult = RemoteCallResponse; fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { let (sender, receiver) = channel(); - self.schedule_request(RequestData::RemoteCall(request, sender), - RemoteCallResponse { receiver }) + self.schedule_request( + RequestData::RemoteCall(request, sender), + RemoteCallResponse { receiver }, + ) } } -impl OnDemandCore where +impl OnDemandCore +where B: BlockT, - E: service::ExecuteInContext , - B::Header: HeaderT + E: service::ExecuteInContext, + B::Header: HeaderT, { pub fn add_peer(&mut self, peer: PeerId) { self.idle_peers.push_back(peer); @@ -233,7 +267,7 @@ impl OnDemandCore where pub fn remove_peer(&mut self, peer: PeerId) { if let Some(request) = self.active_peers.remove(&peer) { self.pending_requests.push_front(request); - return; + return } if let Some(idle_index) = self.idle_peers.iter().position(|i| *i == peer) { @@ -250,7 +284,10 @@ impl OnDemandCore where _ => return bad_peers, } - let (bad_peer, request) = self.active_peers.pop_front().expect("front() is Some as checked above"); + let (bad_peer, request) = self + .active_peers + .pop_front() + .expect("front() is Some as checked above"); self.pending_requests.push_front(request); bad_peers.push(bad_peer); } @@ -292,7 +329,10 @@ impl OnDemandCore where None => return, }; - let mut request = self.pending_requests.pop_front().expect("checked in loop condition; qed"); + let mut request = self + .pending_requests + .pop_front() + .expect("checked in loop condition; qed"); request.timestamp = Instant::now(); trace!(target: "sync", "Dispatching remote request {} to peer {}", request.id, peer); @@ -307,43 +347,50 @@ impl OnDemandCore where impl Request { pub fn message(&self) -> message::Message { match self.data { - RequestData::RemoteCall(ref data, _) => message::generic::Message::RemoteCallRequest(message::RemoteCallRequest { - id: self.id, - block: data.block, - method: data.method.clone(), - data: data.call_data.clone(), - }), + RequestData::RemoteCall(ref data, _) => + message::generic::Message::RemoteCallRequest(message::RemoteCallRequest { + id: self.id, + block: data.block, + method: data.method.clone(), + 
data: data.call_data.clone(), + }), } } } #[cfg(test)] mod tests { - use std::collections::VecDeque; - use std::sync::Arc; - use std::time::Instant; - use futures::Future; - use parking_lot::RwLock; + use super::{OnDemand, OnDemandService, REQUEST_TIMEOUT}; use client; - use client::light::fetcher::{Fetcher, FetchChecker, RemoteCallRequest}; + use client::light::fetcher::{FetchChecker, Fetcher, RemoteCallRequest}; + use futures::Future; use io::NetSyncIo; use message; use network::PeerId; + use parking_lot::RwLock; use protocol::Protocol; - use service::{Role, ExecuteInContext}; + use service::{ExecuteInContext, Role}; + use std::collections::VecDeque; + use std::sync::Arc; + use std::time::Instant; use test::TestIo; - use super::{REQUEST_TIMEOUT, OnDemand, OnDemandService}; use test_client::runtime::{Block, Hash}; struct DummyExecutor; - struct DummyFetchChecker { ok: bool } + struct DummyFetchChecker { + ok: bool, + } impl ExecuteInContext for DummyExecutor { fn execute_in_context)>(&self, _closure: F) {} } impl FetchChecker for DummyFetchChecker { - fn check_execution_proof(&self, _request: &RemoteCallRequest, _remote_proof: Vec>) -> client::error::Result { + fn check_execution_proof( + &self, + _request: &RemoteCallRequest, + _remote_proof: Vec>, + ) -> client::error::Result { match self.ok { true => Ok(client::CallResult { return_data: vec![42], @@ -366,11 +413,20 @@ mod tests { core.idle_peers.len() + core.active_peers.len() } - fn receive_call_response(on_demand: &OnDemand, network: &mut TestIo, peer: PeerId, id: message::RequestId) { - on_demand.on_remote_call_response(network, peer, message::RemoteCallResponse { - id: id, - proof: vec![vec![2]], - }); + fn receive_call_response( + on_demand: &OnDemand, + network: &mut TestIo, + peer: PeerId, + id: message::RequestId, + ) { + on_demand.on_remote_call_response( + network, + peer, + message::RemoteCallResponse { + id, + proof: vec![vec![2]], + }, + ); } #[test] @@ -380,7 +436,16 @@ mod tests { on_demand.on_connect(1, Role::FULL); on_demand.on_connect(2, Role::COLLATOR); on_demand.on_connect(3, Role::VALIDATOR); - assert_eq!(vec![1, 2, 3], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); + assert_eq!( + vec![1, 2, 3], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::>() + ); } #[test] @@ -400,17 +465,58 @@ mod tests { on_demand.on_connect(0, Role::FULL); on_demand.on_connect(1, Role::FULL); - assert_eq!(vec![0, 1], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); + assert_eq!( + vec![0, 1], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::>() + ); assert!(on_demand.core.lock().active_peers.is_empty()); - on_demand.remote_call(RemoteCallRequest { block: Default::default(), method: "test".into(), call_data: vec![] }); - assert_eq!(vec![1], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); - assert_eq!(vec![0], on_demand.core.lock().active_peers.keys().cloned().collect::>()); - - on_demand.core.lock().active_peers[&0].timestamp = Instant::now() - REQUEST_TIMEOUT - REQUEST_TIMEOUT; + on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + method: "test".into(), + call_data: vec![], + }); + assert_eq!( + vec![1], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::>() + ); + assert_eq!( + vec![0], + on_demand + .core + .lock() + .active_peers + .keys() + .cloned() + .collect::>() + ); + + on_demand.core.lock().active_peers[&0].timestamp = + Instant::now() - REQUEST_TIMEOUT - REQUEST_TIMEOUT; 
on_demand.maintain_peers(&mut network); assert!(on_demand.core.lock().idle_peers.is_empty()); - assert_eq!(vec![1], on_demand.core.lock().active_peers.keys().cloned().collect::>()); + assert_eq!( + vec![1], + on_demand + .core + .lock() + .active_peers + .keys() + .cloned() + .collect::>() + ); assert!(network.to_disconnect.contains(&0)); } @@ -421,7 +527,11 @@ mod tests { let mut network = TestIo::new(&queue, None); on_demand.on_connect(0, Role::FULL); - on_demand.remote_call(RemoteCallRequest { block: Default::default(), method: "test".into(), call_data: vec![] }); + on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + method: "test".into(), + call_data: vec![], + }); receive_call_response(&*on_demand, &mut network, 0, 1); assert!(network.to_disconnect.contains(&0)); assert_eq!(on_demand.core.lock().pending_requests.len(), 1); @@ -432,7 +542,11 @@ mod tests { let (_x, on_demand) = dummy(false); let queue = RwLock::new(VecDeque::new()); let mut network = TestIo::new(&queue, None); - on_demand.remote_call(RemoteCallRequest { block: Default::default(), method: "test".into(), call_data: vec![] }); + on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + method: "test".into(), + call_data: vec![], + }); on_demand.on_connect(0, Role::FULL); receive_call_response(&*on_demand, &mut network, 0, 0); @@ -458,7 +572,11 @@ mod tests { let mut network = TestIo::new(&queue, None); on_demand.on_connect(0, Role::FULL); - let response = on_demand.remote_call(RemoteCallRequest { block: Default::default(), method: "test".into(), call_data: vec![] }); + let response = on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + method: "test".into(), + call_data: vec![], + }); let thread = ::std::thread::spawn(move || { let result = response.wait().unwrap(); assert_eq!(result.return_data, vec![42]); @@ -467,4 +585,4 @@ mod tests { receive_call_response(&*on_demand, &mut network, 0, 0); thread.join().unwrap(); } -} \ No newline at end of file +} diff --git a/substrate/network/src/protocol.rs b/substrate/network/src/protocol.rs index d46ff839c3f85..145d2daff2d28 100644 --- a/substrate/network/src/protocol.rs +++ b/substrate/network/src/protocol.rs @@ -14,26 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see .? 
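Besides the import reordering at the top, the protocol.rs hunks below show two smaller mechanical rewrites: early `return;` statements lose their trailing semicolon, and constructor fields written as `config: config` collapse to the field-init shorthand `config`. A tiny self-contained sketch of the shorthand, with invented types:

// Invented example type; only the struct-literal shorthand matters here.
struct Settings {
	roles: u32,
	name: String,
}

fn make_settings(roles: u32, name: String) -> Settings {
	// Shorthand for `Settings { roles: roles, name: name }`.
	Settings { roles, name }
}
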
+use network::PeerId; +use parking_lot::{Mutex, RwLock}; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{Block as BlockT, Hashing, HashingFor, Header as HeaderT}; +use serde_json; use std::collections::{HashMap, HashSet}; -use std::{mem, cmp}; use std::sync::Arc; use std::time; -use parking_lot::{RwLock, Mutex}; -use serde_json; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hashing, HashingFor}; -use runtime_primitives::generic::BlockId; -use network::PeerId; +use std::{cmp, mem}; -use message::{self, Message}; -use message::generic::Message as GenericMessage; -use sync::{ChainSync, Status as SyncStatus, SyncState}; -use consensus::Consensus; -use service::{Role, TransactionPool, BftMessageStream}; -use config::ProtocolConfig; use chain::Client; -use on_demand::OnDemandService; -use io::SyncIo; +use config::ProtocolConfig; +use consensus::Consensus; use error; +use io::SyncIo; +use message::generic::Message as GenericMessage; +use message::{self, Message}; +use on_demand::OnDemandService; +use service::{BftMessageStream, Role, TransactionPool}; +use sync::{ChainSync, Status as SyncStatus, SyncState}; const REQUEST_TIMEOUT_SEC: u64 = 40; const PROTOCOL_VERSION: u32 = 0; @@ -101,28 +101,29 @@ pub struct PeerInfo { pub best_number: ::Number, } -impl Protocol where - B::Header: HeaderT +impl Protocol +where + B::Header: HeaderT, { /// Create a new instance. pub fn new( config: ProtocolConfig, chain: Arc>, on_demand: Option>>, - transaction_pool: Arc> - ) -> error::Result { + transaction_pool: Arc>, + ) -> error::Result { let info = chain.info()?; let sync = ChainSync::new(config.roles, &info); let protocol = Protocol { - config: config, - chain: chain, - on_demand: on_demand, + config, + chain, + on_demand, genesis_hash: info.chain.genesis_hash, sync: RwLock::new(sync), consensus: Mutex::new(Consensus::new()), peers: RwLock::new(HashMap::new()), handshaking_peers: RwLock::new(HashMap::new()), - transaction_pool: transaction_pool, + transaction_pool, }; Ok(protocol) } @@ -145,8 +146,8 @@ impl Protocol where debug!(target: "sync", "Invalid packet from {}: {}", peer_id, e); trace!(target: "sync", "Invalid packet: {}", String::from_utf8_lossy(data)); io.disable_peer(peer_id); - return; - } + return + }, }; match message { @@ -162,28 +163,31 @@ impl Protocol where None => { debug!(target: "sync", "Unexpected response packet from {}", peer_id); io.disable_peer(peer_id); - return; - } + return + }, } } else { debug!(target: "sync", "Unexpected packet from {}", peer_id); io.disable_peer(peer_id); - return; + return } }; if request.id != r.id { trace!(target: "sync", "Ignoring mismatched response packet from {} (expected {} got {})", peer_id, request.id, r.id); - return; + return } self.on_block_response(io, peer_id, request, r); }, GenericMessage::BlockAnnounce(announce) => { self.on_block_announce(io, peer_id, announce); }, - GenericMessage::BftMessage(m) => self.on_bft_message(io, peer_id, m, HashingFor::::hash(data)), + GenericMessage::BftMessage(m) => + self.on_bft_message(io, peer_id, m, HashingFor::::hash(data)), GenericMessage::Transactions(m) => self.on_transactions(io, peer_id, m), - GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(io, peer_id, request), - GenericMessage::RemoteCallResponse(response) => self.on_remote_call_response(io, peer_id, response), + GenericMessage::RemoteCallRequest(request) => + self.on_remote_call_request(io, peer_id, request), + GenericMessage::RemoteCallResponse(response) => + 
self.on_remote_call_response(io, peer_id, response), } } @@ -215,7 +219,9 @@ impl Protocol where /// Called when a new peer is connected pub fn on_peer_connected(&self, io: &mut SyncIo, peer_id: PeerId) { trace!(target: "sync", "Connected {}: {}", peer_id, io.peer_info(peer_id)); - self.handshaking_peers.write().insert(peer_id, time::Instant::now()); + self.handshaking_peers + .write() + .insert(peer_id, time::Instant::now()); self.send_status(io, peer_id); } @@ -242,7 +248,10 @@ impl Protocol where message::FromBlock::Hash(h) => BlockId::Hash(h), message::FromBlock::Number(n) => BlockId::Number(n), }; - let max = cmp::min(request.max.unwrap_or(u32::max_value()), MAX_BLOCK_DATA_RESPONSE) as usize; + let max = cmp::min( + request.max.unwrap_or(u32::max_value()), + MAX_BLOCK_DATA_RESPONSE, + ) as usize; // TODO: receipts, etc. let (mut get_header, mut get_body, mut get_justification) = (false, false, false); for a in request.fields { @@ -255,16 +264,26 @@ impl Protocol where } } while let Some(header) = self.chain.header(&id).unwrap_or(None) { - if blocks.len() >= max{ - break; + if blocks.len() >= max { + break } let number = header.number().clone(); let hash = header.hash(); - let justification = if get_justification { self.chain.justification(&BlockId::Hash(hash)).unwrap_or(None) } else { None }; + let justification = if get_justification { + self.chain + .justification(&BlockId::Hash(hash)) + .unwrap_or(None) + } else { + None + }; let block_data = message::generic::BlockData { - hash: hash, + hash, header: if get_header { Some(header) } else { None }, - body: (if get_body { self.chain.body(&BlockId::Hash(hash)).unwrap_or(None) } else { None }).map(|body| message::Body::Extrinsics(body)), + body: (if get_body { + self.chain.body(&BlockId::Hash(hash)).unwrap_or(None) + } else { + None + }).map(|body| message::Body::Extrinsics(body)), receipt: None, message_queue: None, justification: justification.map(|j| message::generic::BlockJustification::V2(j)), @@ -274,29 +293,45 @@ impl Protocol where message::Direction::Ascending => id = BlockId::Number(number + 1), message::Direction::Descending => { if number == 0 { - break; + break } id = BlockId::Number(number - 1) - } + }, } } let response = message::generic::BlockResponse { id: request.id, - blocks: blocks, + blocks, }; trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); self.send_message(io, peer, GenericMessage::BlockResponse(response)) } - fn on_block_response(&self, io: &mut SyncIo, peer: PeerId, request: message::BlockRequest, response: message::BlockResponse) { + fn on_block_response( + &self, + io: &mut SyncIo, + peer: PeerId, + request: message::BlockRequest, + response: message::BlockResponse, + ) { // TODO: validate response trace!(target: "sync", "BlockResponse {} from {} with {} blocks", response.id, peer, response.blocks.len()); - self.sync.write().on_block_data(io, self, peer, request, response); + self.sync + .write() + .on_block_data(io, self, peer, request, response); } - fn on_bft_message(&self, io: &mut SyncIo, peer: PeerId, message: message::LocalizedBftMessage, hash: B::Hash) { + fn on_bft_message( + &self, + io: &mut SyncIo, + peer: PeerId, + message: message::LocalizedBftMessage, + hash: B::Hash, + ) { trace!(target: "sync", "BFT message from {}: {:?}", peer, message); - self.consensus.lock().on_bft_message(io, self, peer, message, hash); + self.consensus + .lock() + .on_bft_message(io, self, peer, message, hash); } /// See `ConsensusService` trait. 
@@ -322,9 +357,11 @@ impl Protocol where { let peers = self.peers.read(); let handshaking_peers = self.handshaking_peers.read(); - for (peer_id, timestamp) in peers.iter() + for (peer_id, timestamp) in peers + .iter() .filter_map(|(id, peer)| peer.request_timestamp.as_ref().map(|r| (id, r))) - .chain(handshaking_peers.iter()) { + .chain(handshaking_peers.iter()) + { if (tick - *timestamp).as_secs() > REQUEST_TIMEOUT_SEC { trace!(target: "sync", "Timeout {}", peer_id); io.disconnect_peer(*peer_id); @@ -338,13 +375,11 @@ impl Protocol where } pub fn peer_info(&self, peer: PeerId) -> Option> { - self.peers.read().get(&peer).map(|p| { - PeerInfo { - roles: p.roles, - protocol_version: p.protocol_version, - best_hash: p.best_hash, - best_number: p.best_number, - } + self.peers.read().get(&peer).map(|p| PeerInfo { + roles: p.roles, + protocol_version: p.protocol_version, + best_hash: p.best_hash, + best_number: p.best_number, }) } @@ -353,7 +388,7 @@ impl Protocol where trace!(target: "sync", "New peer {} {:?}", peer_id, status); if io.is_expired() { trace!(target: "sync", "Status packet from expired session {}:{}", peer_id, io.peer_info(peer_id)); - return; + return } { @@ -361,17 +396,17 @@ impl Protocol where let mut handshaking_peers = self.handshaking_peers.write(); if peers.contains_key(&peer_id) { debug!(target: "sync", "Unexpected status packet from {}:{}", peer_id, io.peer_info(peer_id)); - return; + return } if status.genesis_hash != self.genesis_hash { io.disable_peer(peer_id); trace!(target: "sync", "Peer {} genesis hash mismatch (ours: {}, theirs: {})", peer_id, self.genesis_hash, status.genesis_hash); - return; + return } if status.version != PROTOCOL_VERSION { io.disable_peer(peer_id); trace!(target: "sync", "Peer {} unsupported eth protocol ({})", peer_id, status.version); - return; + return } let peer = Peer { @@ -391,16 +426,25 @@ impl Protocol where } self.sync.write().new_peer(io, self, peer_id); - self.consensus.lock().new_peer(io, self, peer_id, &status.roles); - self.on_demand.as_ref().map(|s| s.on_connect(peer_id, message::Role::as_flags(&status.roles))); + self.consensus + .lock() + .new_peer(io, self, peer_id, &status.roles); + self.on_demand + .as_ref() + .map(|s| s.on_connect(peer_id, message::Role::as_flags(&status.roles))); } /// Called when peer sends us new transactions - fn on_transactions(&self, _io: &mut SyncIo, peer_id: PeerId, transactions: message::Transactions) { + fn on_transactions( + &self, + _io: &mut SyncIo, + peer_id: PeerId, + transactions: message::Transactions, + ) { // Accept transactions only when fully synced if self.sync.read().status().state != SyncState::Idle { trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id); - return; + return } trace!(target: "sync", "Received {} transactions from {}", transactions.len(), peer_id); let mut peers = self.peers.write(); @@ -419,7 +463,7 @@ impl Protocol where // Accept transactions only when fully synced if self.sync.read().status().state != SyncState::Idle { - return; + return } let transactions = self.transaction_pool.transactions(); @@ -441,7 +485,10 @@ impl Protocol where if let Some(id) = node_id { for hash in hashes { - propagated_to.entry(hash).or_insert_with(Vec::new).push(id.clone()); + propagated_to + .entry(hash) + .or_insert_with(Vec::new) + .push(id.clone()); } } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), peer_id); @@ -478,7 +525,12 @@ impl Protocol where self.consensus.lock().restart(); } - pub fn on_block_announce(&self, io: &mut SyncIo, 
peer_id: PeerId, announce: message::BlockAnnounce) { + pub fn on_block_announce( + &self, + io: &mut SyncIo, + peer_id: PeerId, + announce: message::BlockAnnounce, + ) { let header = announce.header; let hash = header.hash(); { @@ -487,7 +539,9 @@ impl Protocol where peer.known_blocks.insert(hash.clone()); } } - self.sync.write().on_block_announce(io, self, peer_id, hash, &header); + self.sync + .write() + .on_block_announce(io, self, peer_id, hash, &header); } pub fn on_block_imported(&self, io: &mut SyncIo, hash: B::Hash, header: &B::Header) { @@ -495,7 +549,7 @@ impl Protocol where // blocks are not announced by light clients if self.config.roles & Role::LIGHT == Role::LIGHT { - return; + return } // send out block announcements @@ -504,18 +558,30 @@ impl Protocol where for (peer_id, ref mut peer) in peers.iter_mut() { if peer.known_blocks.insert(hash.clone()) { trace!(target: "sync", "Announcing block {:?} to {}", hash, peer_id); - self.send_message(io, *peer_id, GenericMessage::BlockAnnounce(message::BlockAnnounce { - header: header.clone() - })); + self.send_message( + io, + *peer_id, + GenericMessage::BlockAnnounce(message::BlockAnnounce { + header: header.clone(), + }), + ); } } self.consensus.lock().collect_garbage(Some(&header)); } - fn on_remote_call_request(&self, io: &mut SyncIo, peer_id: PeerId, request: message::RemoteCallRequest) { + fn on_remote_call_request( + &self, + io: &mut SyncIo, + peer_id: PeerId, + request: message::RemoteCallRequest, + ) { trace!(target: "sync", "Remote call request {} from {} ({} at {})", request.id, peer_id, request.method, request.block); - let proof = match self.chain.execution_proof(&request.block, &request.method, &request.data) { + let proof = match self + .chain + .execution_proof(&request.block, &request.method, &request.data) + { Ok((_, proof)) => proof, Err(error) => { trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", @@ -524,14 +590,26 @@ impl Protocol where }, }; - self.send_message(io, peer_id, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { - id: request.id, proof, - })); + self.send_message( + io, + peer_id, + GenericMessage::RemoteCallResponse(message::RemoteCallResponse { + id: request.id, + proof, + }), + ); } - fn on_remote_call_response(&self, io: &mut SyncIo, peer_id: PeerId, response: message::RemoteCallResponse) { + fn on_remote_call_response( + &self, + io: &mut SyncIo, + peer_id: PeerId, + response: message::RemoteCallResponse, + ) { trace!(target: "sync", "Remote call response {} from {}", response.id, peer_id); - self.on_demand.as_ref().map(|s| s.on_remote_call_response(io, peer_id, response)); + self.on_demand + .as_ref() + .map(|s| s.on_remote_call_response(io, peer_id, response)); } pub fn chain(&self) -> &Client { diff --git a/substrate/network/src/service.rs b/substrate/network/src/service.rs index abae7e4fc77bd..197477aceaa80 100644 --- a/substrate/network/src/service.rs +++ b/substrate/network/src/service.rs @@ -14,23 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see .? 
-use std::collections::HashMap; -use std::sync::Arc; -use std::io; -use std::time::Duration; -use futures::sync::{oneshot, mpsc}; -use network::{NetworkProtocolHandler, NetworkContext, PeerId, ProtocolId, -NetworkConfiguration , NonReservedPeerMode, ErrorKind}; -use network_devp2p::{NetworkService}; -use core_io::{TimerToken}; -use io::NetSyncIo; -use protocol::{Protocol, ProtocolStatus, PeerInfo as ProtocolPeerInfo}; -use config::{ProtocolConfig}; -use error::Error; use chain::Client; +use config::ProtocolConfig; +use core_io::TimerToken; +use error::Error; +use futures::sync::{mpsc, oneshot}; +use io::NetSyncIo; use message::LocalizedBftMessage; +use network::{ + ErrorKind, NetworkConfiguration, NetworkContext, NetworkProtocolHandler, NonReservedPeerMode, + PeerId, ProtocolId, +}; +use network_devp2p::NetworkService; use on_demand::OnDemandService; +use protocol::{PeerInfo as ProtocolPeerInfo, Protocol, ProtocolStatus}; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; +use std::collections::HashMap; +use std::io; +use std::sync::Arc; +use std::time::Duration; /// Polkadot devp2p protocol id pub const DOT_PROTOCOL_ID: ProtocolId = *b"dot"; @@ -139,21 +141,32 @@ pub struct Params { } /// Polkadot network service. Handles network IO and manages connectivity. -pub struct Service where B::Header: HeaderT { +pub struct Service +where + B::Header: HeaderT, +{ /// Network service network: NetworkService, /// Devp2p protocol handler handler: Arc>, } -impl Service where B::Header: HeaderT { +impl Service +where + B::Header: HeaderT, +{ /// Creates and register protocol with the network service pub fn new(params: Params) -> Result>, Error> { let service = NetworkService::new(params.network_config.clone(), None)?; let sync = Arc::new(Service { network: service, handler: Arc::new(ProtocolHandler { - protocol: Protocol::new(params.config, params.chain, params.on_demand, params.transaction_pool)?, + protocol: Protocol::new( + params.config, + params.chain, + params.on_demand, + params.transaction_pool, + )?, }), }); @@ -163,14 +176,18 @@ impl Service where B::Header: HeaderT { /// Called when a new block is imported by the client. pub fn on_block_imported(&self, hash: B::Hash, header: &B::Header) { self.network.with_context(DOT_PROTOCOL_ID, |context| { - self.handler.protocol.on_block_imported(&mut NetSyncIo::new(context), hash, header) + self.handler + .protocol + .on_block_imported(&mut NetSyncIo::new(context), hash, header) }); } /// Called when new transactons are imported by the client. 
pub fn trigger_repropagate(&self) { self.network.with_context(DOT_PROTOCOL_ID, |context| { - self.handler.protocol.propagate_transactions(&mut NetSyncIo::new(context)); + self.handler + .protocol + .propagate_transactions(&mut NetSyncIo::new(context)); }); } @@ -181,7 +198,12 @@ impl Service where B::Header: HeaderT { Err(err) => warn!("Error starting network: {}", err), _ => {}, }; - self.network.register_protocol(self.handler.clone(), DOT_PROTOCOL_ID, &[(0, V0_PACKET_COUNT)]) + self.network + .register_protocol( + self.handler.clone(), + DOT_PROTOCOL_ID, + &[(0, V0_PACKET_COUNT)], + ) .unwrap_or_else(|e| warn!("Error registering polkadot protocol: {:?}", e)); } @@ -191,13 +213,19 @@ impl Service where B::Header: HeaderT { } } -impl Drop for Service where B::Header: HeaderT { +impl Drop for Service +where + B::Header: HeaderT, +{ fn drop(&mut self) { self.stop(); } } -impl ExecuteInContext for Service where B::Header: HeaderT { +impl ExecuteInContext for Service +where + B::Header: HeaderT, +{ fn execute_in_context)>(&self, closure: F) { self.network.with_context(DOT_PROTOCOL_ID, |context| { closure(&mut NetSyncIo::new(context), &self.handler.protocol) @@ -205,7 +233,10 @@ impl ExecuteInContext for Service where B::Header: He } } -impl SyncProvider for Service where B::Header: HeaderT { +impl SyncProvider for Service +where + B::Header: HeaderT, +{ /// Get sync status fn status(&self) -> ProtocolStatus { self.handler.protocol.status() @@ -213,25 +244,34 @@ impl SyncProvider for Service where B::Header: Header /// Get sync peers fn peers(&self) -> Vec> { - self.network.with_context_eval(DOT_PROTOCOL_ID, |ctx| { - let peer_ids = self.network.connected_peers(); - - peer_ids.into_iter().filter_map(|peer_id| { - let session_info = match ctx.session_info(peer_id) { - None => return None, - Some(info) => info, - }; - - Some(PeerInfo { - id: session_info.id.map(|id| format!("{:x}", id)), - client_version: session_info.client_version, - capabilities: session_info.peer_capabilities.into_iter().map(|c| c.to_string()).collect(), - remote_address: session_info.remote_address, - local_address: session_info.local_address, - dot_info: self.handler.protocol.peer_info(peer_id), - }) - }).collect() - }).unwrap_or_else(Vec::new) + self.network + .with_context_eval(DOT_PROTOCOL_ID, |ctx| { + let peer_ids = self.network.connected_peers(); + + peer_ids + .into_iter() + .filter_map(|peer_id| { + let session_info = match ctx.session_info(peer_id) { + None => return None, + Some(info) => info, + }; + + Some(PeerInfo { + id: session_info.id.map(|id| format!("{:x}", id)), + client_version: session_info.client_version, + capabilities: session_info + .peer_capabilities + .into_iter() + .map(|c| c.to_string()) + .collect(), + remote_address: session_info.remote_address, + local_address: session_info.local_address, + dot_info: self.handler.protocol.peer_info(peer_id), + }) + }) + .collect() + }) + .unwrap_or_else(Vec::new) } fn node_id(&self) -> Option { @@ -240,9 +280,12 @@ impl SyncProvider for Service where B::Header: Header } /// ConsensusService -impl ConsensusService for Service where B::Header: HeaderT { +impl ConsensusService for Service +where + B::Header: HeaderT, +{ fn connect_to_authorities(&self, _addresses: &[String]) { - //TODO: implement me + // TODO: implement me } fn bft_messages(&self, parent_hash: B::Hash) -> BftMessageStream { @@ -251,12 +294,17 @@ impl ConsensusService for Service where B::Header: He fn send_bft_message(&self, message: LocalizedBftMessage) { 
self.network.with_context(DOT_PROTOCOL_ID, |context| { - self.handler.protocol.send_bft_message(&mut NetSyncIo::new(context), message); + self.handler + .protocol + .send_bft_message(&mut NetSyncIo::new(context), message); }); } } -impl NetworkProtocolHandler for ProtocolHandler where B::Header: HeaderT { +impl NetworkProtocolHandler for ProtocolHandler +where + B::Header: HeaderT, +{ fn initialize(&self, io: &NetworkContext) { io.register_timer(TICK_TOKEN, TICK_TIMEOUT) .expect("Error registering sync timer"); @@ -266,22 +314,27 @@ impl NetworkProtocolHandler for ProtocolHandler where B: } fn read(&self, io: &NetworkContext, peer: &PeerId, _packet_id: u8, data: &[u8]) { - self.protocol.handle_packet(&mut NetSyncIo::new(io), *peer, data); + self.protocol + .handle_packet(&mut NetSyncIo::new(io), *peer, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { - self.protocol.on_peer_connected(&mut NetSyncIo::new(io), *peer); + self.protocol + .on_peer_connected(&mut NetSyncIo::new(io), *peer); } fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - self.protocol.on_peer_disconnected(&mut NetSyncIo::new(io), *peer); + self.protocol + .on_peer_disconnected(&mut NetSyncIo::new(io), *peer); } fn timeout(&self, io: &NetworkContext, timer: TimerToken) { match timer { TICK_TOKEN => self.protocol.tick(&mut NetSyncIo::new(io)), - PROPAGATE_TOKEN => self.protocol.propagate_transactions(&mut NetSyncIo::new(io)), - _ => {} + PROPAGATE_TOKEN => self + .protocol + .propagate_transactions(&mut NetSyncIo::new(io)), + _ => {}, } } } @@ -302,22 +355,30 @@ pub trait ManageNetwork: Send + Sync { fn stop_network(&self); } - -impl ManageNetwork for Service where B::Header: HeaderT { +impl ManageNetwork for Service +where + B::Header: HeaderT, +{ fn accept_unreserved_peers(&self) { - self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); + self.network + .set_non_reserved_mode(NonReservedPeerMode::Accept); } fn deny_unreserved_peers(&self) { - self.network.set_non_reserved_mode(NonReservedPeerMode::Deny); + self.network + .set_non_reserved_mode(NonReservedPeerMode::Deny); } fn remove_reserved_peer(&self, peer: String) -> Result<(), String> { - self.network.remove_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) + self.network + .remove_reserved_peer(&peer) + .map_err(|e| format!("{:?}", e)) } fn add_reserved_peer(&self, peer: String) -> Result<(), String> { - self.network.add_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) + self.network + .add_reserved_peer(&peer) + .map_err(|e| format!("{:?}", e)) } fn start_network(&self) { diff --git a/substrate/network/src/sync.rs b/substrate/network/src/sync.rs index e75fcd16f9d57..d52f4fdbd61ac 100644 --- a/substrate/network/src/sync.rs +++ b/substrate/network/src/sync.rs @@ -14,16 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see .? 
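The sync.rs changes that follow repeat the `where`-clause convention already visible in the service.rs impls above: bounds move out of the `impl` header onto their own lines, one bound per line with a trailing comma, and the opening brace gets a line of its own. A compilable sketch of that layout, using placeholder traits and names rather than the real ones:

// Placeholder trait standing in for the real block/header traits.
trait HasHeader {
	type Header;
}

struct ChainTracker<B> {
	_marker: std::marker::PhantomData<B>,
}

impl<B> ChainTracker<B>
where
	B: HasHeader,
	B::Header: Clone,
{
	fn new() -> Self {
		ChainTracker { _marker: std::marker::PhantomData }
	}
}
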
-use std::collections::HashMap; +use blocks::{self, BlockCollection}; +use client::{BlockStatus, ClientInfo, ImportResult}; use io::SyncIo; -use protocol::Protocol; +use message::{self, generic::Message as GenericMessage}; use network::PeerId; -use client::{ImportResult, BlockStatus, ClientInfo}; -use blocks::{self, BlockCollection}; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; +use protocol::Protocol; use runtime_primitives::generic::BlockId; -use message::{self, generic::Message as GenericMessage}; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT}; use service::Role; +use std::collections::HashMap; // Maximum blocks to request in a single packet. const MAX_BLOCKS_TO_REQUEST: usize = 128; @@ -60,7 +60,7 @@ pub enum SyncState { /// Initial sync is complete, keep-up sync is active. Idle, /// Actively catching up with the chain. - Downloading + Downloading, } /// Syncing status and statistics @@ -72,14 +72,15 @@ pub struct Status { pub best_seen_block: Option<::Number>, } -impl ChainSync where - B::Header: HeaderT, +impl ChainSync +where + B::Header: HeaderT, { /// Create a new instance. pub fn new(role: Role, info: &ClientInfo) -> Self { let mut required_block_attributes = vec![ message::BlockAttribute::Header, - message::BlockAttribute::Justification + message::BlockAttribute::Justification, ]; if role.intersects(Role::FULL | Role::VALIDATOR | Role::COLLATOR) { required_block_attributes.push(message::BlockAttribute::Body); @@ -91,23 +92,27 @@ impl ChainSync where blocks: BlockCollection::new(), best_queued_hash: info.best_queued_hash.unwrap_or(info.chain.best_hash), best_queued_number: info.best_queued_number.unwrap_or(info.chain.best_number), - required_block_attributes: required_block_attributes, + required_block_attributes, } } fn best_seen_block(&self) -> Option { - self.peers.values().max_by_key(|p| p.best_number).map(|p| p.best_number) + self.peers + .values() + .max_by_key(|p| p.best_number) + .map(|p| p.best_number) } /// Returns sync status pub fn status(&self) -> Status { let best_seen = self.best_seen_block(); let state = match &best_seen { - &Some(n) if n > self.best_queued_number && n - self.best_queued_number > 5 => SyncState::Downloading, + &Some(n) if n > self.best_queued_number && n - self.best_queued_number > 5 => + SyncState::Downloading, _ => SyncState::Idle, }; Status { - state: state, + state, best_seen_block: best_seen, } } @@ -115,7 +120,12 @@ impl ChainSync where /// Handle new connected peer. 
pub fn new_peer(&mut self, io: &mut SyncIo, protocol: &Protocol, peer_id: PeerId) { if let Some(info) = protocol.peer_info(peer_id) { - match (protocol.chain().block_status(&BlockId::Hash(info.best_hash)), info.best_number) { + match ( + protocol + .chain() + .block_status(&BlockId::Hash(info.best_hash)), + info.best_number, + ) { (Err(e), _) => { debug!(target:"sync", "Error reading blockchain: {:?}", e); io.disconnect_peer(peer_id); @@ -132,42 +142,58 @@ impl ChainSync where let our_best = self.best_queued_number; if our_best > 0 { debug!(target:"sync", "New peer with unknown best hash {} ({}), searching for common ancestor.", info.best_hash, info.best_number); - self.peers.insert(peer_id, PeerSync { - common_hash: self.genesis_hash, - common_number: 0, - best_hash: info.best_hash, - best_number: info.best_number, - state: PeerSyncState::AncestorSearch(our_best), - }); + self.peers.insert( + peer_id, + PeerSync { + common_hash: self.genesis_hash, + common_number: 0, + best_hash: info.best_hash, + best_number: info.best_number, + state: PeerSyncState::AncestorSearch(our_best), + }, + ); Self::request_ancestry(io, protocol, peer_id, our_best) } else { // We are at genesis, just start downloading debug!(target:"sync", "New peer with best hash {} ({}).", info.best_hash, info.best_number); - self.peers.insert(peer_id, PeerSync { - common_hash: self.genesis_hash, - common_number: 0, - best_hash: info.best_hash, - best_number: info.best_number, - state: PeerSyncState::Available, - }); + self.peers.insert( + peer_id, + PeerSync { + common_hash: self.genesis_hash, + common_number: 0, + best_hash: info.best_hash, + best_number: info.best_number, + state: PeerSyncState::Available, + }, + ); self.download_new(io, protocol, peer_id) } }, (Ok(BlockStatus::Queued), _) | (Ok(BlockStatus::InChain), _) => { debug!(target:"sync", "New peer with known best hash {} ({}).", info.best_hash, info.best_number); - self.peers.insert(peer_id, PeerSync { - common_hash: info.best_hash, - common_number: info.best_number, - best_hash: info.best_hash, - best_number: info.best_number, - state: PeerSyncState::Available, - }); - } + self.peers.insert( + peer_id, + PeerSync { + common_hash: info.best_hash, + common_number: info.best_number, + best_hash: info.best_hash, + best_number: info.best_number, + state: PeerSyncState::Available, + }, + ); + }, } } } - pub fn on_block_data(&mut self, io: &mut SyncIo, protocol: &Protocol, peer_id: PeerId, _request: message::BlockRequest, response: message::BlockResponse) { + pub fn on_block_data( + &mut self, + io: &mut SyncIo, + protocol: &Protocol, + peer_id: PeerId, + _request: message::BlockRequest, + response: message::BlockResponse, + ) { let count = response.blocks.len(); let mut imported: usize = 0; let new_blocks = if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { @@ -181,10 +207,14 @@ impl ChainSync where }, PeerSyncState::DownloadingStale(_) => { peer.state = PeerSyncState::Available; - response.blocks.into_iter().map(|b| blocks::BlockData { - origin: peer_id, - block: b - }).collect() + response + .blocks + .into_iter() + .map(|b| blocks::BlockData { + origin: peer_id, + block: b, + }) + .collect() }, PeerSyncState::AncestorSearch(n) => { match response.blocks.get(0) { @@ -205,25 +235,26 @@ impl ChainSync where let n = n - 1; peer.state = PeerSyncState::AncestorSearch(n); Self::request_ancestry(io, protocol, peer_id, n); - return; + return }, - Ok(_) => { // genesis mismatch + Ok(_) => { + // genesis mismatch trace!(target:"sync", "Ancestry search: genesis mismatch 
for peer {}", peer_id); io.disable_peer(peer_id); - return; + return }, Err(e) => { debug!(target:"sync", "Error reading blockchain: {:?}", e); io.disconnect_peer(peer_id); - return; - } + return + }, } }, None => { trace!(target:"sync", "Invalid response when searching for ancestor from {}", peer_id); io.disconnect_peer(peer_id); - return; - } + return + }, } }, PeerSyncState::Available => Vec::new(), @@ -251,8 +282,8 @@ impl ChainSync where Err(e) => { debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); self.restart(io, protocol); - return; - } + return + }, } let result = protocol.chain().import( @@ -278,31 +309,31 @@ impl ChainSync where Ok(ImportResult::UnknownParent) => { debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent); self.restart(io, protocol); - return; + return }, Ok(ImportResult::KnownBad) => { debug!(target: "sync", "Bad block {}: {:?}", number, hash); - io.disable_peer(origin); //TODO: use persistent ID + io.disable_peer(origin); // TODO: use persistent ID self.restart(io, protocol); - return; - } + return + }, Err(e) => { debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); self.restart(io, protocol); - return; - } + return + }, } }, (None, _) => { debug!(target: "sync", "Header {} was not provided by {} ", block.hash, origin); - io.disable_peer(origin); //TODO: use persistent ID - return; + io.disable_peer(origin); // TODO: use persistent ID + return }, (_, None) => { debug!(target: "sync", "Justification set for block {} was not provided by {} ", block.hash, origin); - io.disable_peer(origin); //TODO: use persistent ID - return; - } + io.disable_peer(origin); // TODO: use persistent ID + return + }, } } trace!(target: "sync", "Imported {} of {}", imported, count); @@ -323,7 +354,11 @@ impl ChainSync where } // Update common blocks for (_, peer) in self.peers.iter_mut() { - trace!("Updating peer info ours={}, theirs={}", number, peer.best_number); + trace!( + "Updating peer info ours={}, theirs={}", + number, + peer.best_number + ); if peer.best_number >= number { peer.common_number = number; peer.common_hash = *hash; @@ -336,7 +371,14 @@ impl ChainSync where self.block_imported(&hash, best_header.number().clone()) } - pub fn on_block_announce(&mut self, io: &mut SyncIo, protocol: &Protocol, peer_id: PeerId, hash: B::Hash, header: &B::Header) { + pub fn on_block_announce( + &mut self, + io: &mut SyncIo, + protocol: &Protocol, + peer_id: PeerId, + hash: B::Hash, + header: &B::Header, + ) { let number = *header.number(); if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { if number > peer.best_number { @@ -347,7 +389,7 @@ impl ChainSync where peer.common_number = number } } else { - return; + return } if !self.is_known_or_already_downloading(protocol, &hash) { @@ -369,8 +411,14 @@ impl ChainSync where } fn is_known_or_already_downloading(&self, protocol: &Protocol, hash: &B::Hash) -> bool { - self.peers.iter().any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) - || protocol.chain().block_status(&BlockId::Hash(*hash)).ok().map_or(false, |s| s != BlockStatus::Unknown) + self.peers + .iter() + .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + || protocol + .chain() + .block_status(&BlockId::Hash(*hash)) + .ok() + .map_or(false, |s| s != BlockStatus::Unknown) } pub fn peer_disconnected(&mut self, io: &mut SyncIo, protocol: &Protocol, peer_id: PeerId) { @@ -394,7 +442,7 @@ impl ChainSync where debug!(target:"sync", "Error reading 
blockchain: {:?}", e); self.best_queued_hash = self.genesis_hash; self.best_queued_number = 0; - } + }, } } @@ -404,7 +452,13 @@ impl ChainSync where } // Download old block. - fn download_stale(&mut self, io: &mut SyncIo, protocol: &Protocol, peer_id: PeerId, hash: &B::Hash) { + fn download_stale( + &mut self, + io: &mut SyncIo, + protocol: &Protocol, + peer_id: PeerId, + hash: &B::Hash, + ) { if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { match peer.state { PeerSyncState::Available => { @@ -429,22 +483,25 @@ impl ChainSync where if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { trace!(target: "sync", "Considering new block download from {}, common block is {}, best is {:?}", peer_id, peer.common_number, peer.best_number); match peer.state { - PeerSyncState::Available => { - if let Some(range) = self.blocks.needed_blocks(peer_id, MAX_BLOCKS_TO_REQUEST, peer.best_number, peer.common_number) { - trace!(target: "sync", "Requesting blocks from {}, ({} to {})", peer_id, range.start, range.end); - let request = message::generic::BlockRequest { - id: 0, - fields: self.required_block_attributes.clone(), - from: message::FromBlock::Number(range.start), - to: None, - direction: message::Direction::Ascending, - max: Some((range.end - range.start) as u32), - }; - peer.state = PeerSyncState::DownloadingNew(range.start); - protocol.send_message(io, peer_id, GenericMessage::BlockRequest(request)); - } else { - trace!(target: "sync", "Nothing to request"); - } + PeerSyncState::Available => if let Some(range) = self.blocks.needed_blocks( + peer_id, + MAX_BLOCKS_TO_REQUEST, + peer.best_number, + peer.common_number, + ) { + trace!(target: "sync", "Requesting blocks from {}, ({} to {})", peer_id, range.start, range.end); + let request = message::generic::BlockRequest { + id: 0, + fields: self.required_block_attributes.clone(), + from: message::FromBlock::Number(range.start), + to: None, + direction: message::Direction::Ascending, + max: Some((range.end - range.start) as u32), + }; + peer.state = PeerSyncState::DownloadingNew(range.start); + protocol.send_message(io, peer_id, GenericMessage::BlockRequest(request)); + } else { + trace!(target: "sync", "Nothing to request"); }, _ => (), } @@ -455,7 +512,10 @@ impl ChainSync where trace!(target: "sync", "Requesting ancestry block #{} from {}", block, peer_id); let request = message::generic::BlockRequest { id: 0, - fields: vec![message::BlockAttribute::Header, message::BlockAttribute::Justification], + fields: vec![ + message::BlockAttribute::Header, + message::BlockAttribute::Justification, + ], from: message::FromBlock::Number(block), to: None, direction: message::Direction::Ascending, diff --git a/substrate/network/src/test/consensus.rs b/substrate/network/src/test/consensus.rs index 28b8da1155b7c..565057d645d32 100644 --- a/substrate/network/src/test/consensus.rs +++ b/substrate/network/src/test/consensus.rs @@ -15,8 +15,8 @@ // along with Polkadot. If not, see . 
use super::*; -use message::{Message, generic}; use futures::Stream; +use message::{generic, Message}; use test_client::runtime::Block; #[test] @@ -29,17 +29,19 @@ fn bft_messages_include_those_sent_before_asking_for_stream() { let peer = net.peer(0); let mut io = TestIo::new(&peer.queue, None); - let bft_message = generic::BftMessage::Consensus(generic::SignedConsensusMessage::Vote(generic::SignedConsensusVote { - vote: generic::ConsensusVote::AdvanceRound(0), - sender: Default::default(), - signature: Default::default(), - })); + let bft_message = generic::BftMessage::Consensus(generic::SignedConsensusMessage::Vote( + generic::SignedConsensusVote { + vote: generic::ConsensusVote::AdvanceRound(0), + sender: Default::default(), + signature: Default::default(), + }, + )); let parent_hash = peer.genesis_hash(); let localized = ::message::LocalizedBftMessage:: { message: bft_message, - parent_hash: parent_hash, + parent_hash, }; let message: Message = generic::Message::BftMessage(localized.clone()); diff --git a/substrate/network/src/test/mod.rs b/substrate/network/src/test/mod.rs index 104bb8b9814cf..db2557c6733f8 100644 --- a/substrate/network/src/test/mod.rs +++ b/substrate/network/src/test/mod.rs @@ -17,23 +17,23 @@ mod consensus; mod sync; -use std::collections::{VecDeque, HashSet, HashMap}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; -use parking_lot::RwLock; use client; use client::block_builder::BlockBuilder; -use runtime_primitives::traits::Block as BlockT; -use runtime_primitives::generic::BlockId; +use codec::Slicable; +use config::ProtocolConfig; use io::SyncIo; +use keyring::Keyring; +use network::{Error as NetworkError, PeerId, SessionInfo}; +use parking_lot::RwLock; use protocol::Protocol; -use config::ProtocolConfig; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::Block as BlockT; use service::TransactionPool; -use network::{PeerId, SessionInfo, Error as NetworkError}; -use keyring::Keyring; -use codec::Slicable; +use test_client::runtime::{Block, Extrinsic, Hash, Transfer}; use test_client::{self, TestClient}; -use test_client::runtime::{Block, Hash, Transfer, Extrinsic}; pub struct TestIo<'p> { pub queue: &'p RwLock>, @@ -43,11 +43,12 @@ pub struct TestIo<'p> { pub peers_info: HashMap, } -impl<'p> TestIo<'p> where { +impl<'p> TestIo<'p> // where +{ pub fn new(queue: &'p RwLock>, sender: Option) -> TestIo<'p> { TestIo { - queue: queue, - sender: sender, + queue, + sender, to_disconnect: HashSet::new(), packets: Vec::new(), peers_info: HashMap::new(), @@ -76,14 +77,15 @@ impl<'p> SyncIo for TestIo<'p> { fn send(&mut self, peer_id: PeerId, data: Vec) -> Result<(), NetworkError> { self.packets.push(TestPacket { - data: data, + data, recipient: peer_id, }); Ok(()) } fn peer_info(&self, peer_id: PeerId) -> String { - self.peers_info.get(&peer_id) + self.peers_info + .get(&peer_id) .cloned() .unwrap_or_else(|| peer_id.to_string()) } @@ -110,13 +112,22 @@ impl Peer { fn start(&self) { // Update the sync state to the latest chain state. 
let info = self.client.info().expect("In-mem client does not fail"); - let header = self.client.header(&BlockId::Hash(info.chain.best_hash)).unwrap().unwrap(); - self.sync.on_block_imported(&mut TestIo::new(&self.queue, None), info.chain.best_hash, &header); + let header = self + .client + .header(&BlockId::Hash(info.chain.best_hash)) + .unwrap() + .unwrap(); + self.sync.on_block_imported( + &mut TestIo::new(&self.queue, None), + info.chain.best_hash, + &header, + ); } /// Called on connection to other indicated peer. fn on_connect(&self, other: PeerId) { - self.sync.on_peer_connected(&mut TestIo::new(&self.queue, Some(other)), other); + self.sync + .on_peer_connected(&mut TestIo::new(&self.queue, Some(other)), other); } /// Called on disconnect from other indicated peer. @@ -155,16 +166,20 @@ impl Peer { self.sync.abort(); } - fn flush(&self) { - } + fn flush(&self) {} - fn generate_blocks(&self, count: usize, mut edit_block: F) where F: FnMut(&mut BlockBuilder) { - for _ in 0 .. count { + fn generate_blocks(&self, count: usize, mut edit_block: F) + where + F: FnMut(&mut BlockBuilder), + { + for _ in 0..count { let mut builder = self.client.new_block().unwrap(); edit_block(&mut builder); let block = builder.bake().unwrap(); trace!("Generating {}, (#{})", block.hash(), block.header.number); - self.client.justify_and_import(client::BlockOrigin::File, block).unwrap(); + self.client + .justify_and_import(client::BlockOrigin::File, block) + .unwrap(); } } @@ -178,8 +193,16 @@ impl Peer { amount: 1, nonce, }; - let signature = Keyring::from_raw_public(transfer.from.0).unwrap().sign(&transfer.encode()).into(); - builder.push(Extrinsic { transfer, signature }).unwrap(); + let signature = Keyring::from_raw_public(transfer.from.0) + .unwrap() + .sign(&transfer.encode()) + .into(); + builder + .push(Extrinsic { + transfer, + signature, + }) + .unwrap(); nonce = nonce + 1; }); } else { @@ -210,7 +233,7 @@ impl TransactionPool for EmptyTransactionPool { pub struct TestNet { pub peers: Vec>, pub started: bool, - pub disconnect_events: Vec<(PeerId, PeerId)>, //disconnected (initiated by, to) + pub disconnect_events: Vec<(PeerId, PeerId)>, // disconnected (initiated by, to) } impl TestNet { @@ -236,8 +259,8 @@ impl TestNet { let tx_pool = Arc::new(EmptyTransactionPool); let sync = Protocol::new(config.clone(), client.clone(), None, tx_pool).unwrap(); self.peers.push(Arc::new(Peer { - sync: sync, - client: client, + sync, + client, queue: RwLock::new(VecDeque::new()), })); } @@ -248,7 +271,7 @@ impl TestNet { pub fn start(&mut self) { if self.started { - return; + return } for peer in 0..self.peers.len() { self.peers[peer].start(); @@ -268,7 +291,8 @@ impl TestNet { let disconnecting = { let recipient = packet.recipient; trace!("--- {} -> {} ---", peer, recipient); - let to_disconnect = self.peers[recipient].receive_message(peer as PeerId, packet); + let to_disconnect = + self.peers[recipient].receive_message(peer as PeerId, packet); for d in &to_disconnect { // notify this that disconnecting peers are disconnecting self.peers[recipient].on_disconnect(*d as PeerId); diff --git a/substrate/network/src/test/sync.rs b/substrate/network/src/test/sync.rs index d67d530cce935..83a9fcffcbd49 100644 --- a/substrate/network/src/test/sync.rs +++ b/substrate/network/src/test/sync.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+use super::*; use client::backend::Backend; use client::blockchain::HeaderBackend as BlockchainHeaderBackend; use sync::SyncState; -use {Role}; -use super::*; +use Role; #[test] #[ignore] @@ -28,7 +28,13 @@ fn sync_from_two_peers_works() { net.peer(1).push_blocks(100, false); net.peer(2).push_blocks(100, false); net.sync(); - assert!(net.peer(0).client.backend().blockchain().equals_to(net.peer(1).client.backend().blockchain())); + assert!( + net.peer(0) + .client + .backend() + .blockchain() + .equals_to(net.peer(1).client.backend().blockchain()) + ); let status = net.peer(0).sync.status(); assert_eq!(status.sync.state, SyncState::Idle); } @@ -43,7 +49,13 @@ fn sync_from_two_peers_with_ancestry_search_works() { net.peer(2).push_blocks(100, false); net.restart_peer(0); net.sync(); - assert!(net.peer(0).client.backend().blockchain().canon_equals_to(net.peer(1).client.backend().blockchain())); + assert!( + net.peer(0) + .client + .backend() + .blockchain() + .canon_equals_to(net.peer(1).client.backend().blockchain()) + ); } #[test] @@ -54,7 +66,13 @@ fn sync_long_chain_works() { net.sync_steps(3); assert_eq!(net.peer(0).sync.status().sync.state, SyncState::Downloading); net.sync(); - assert!(net.peer(0).client.backend().blockchain().equals_to(net.peer(1).client.backend().blockchain())); + assert!( + net.peer(0) + .client + .backend() + .blockchain() + .equals_to(net.peer(1).client.backend().blockchain()) + ); } #[test] @@ -64,7 +82,13 @@ fn sync_no_common_longer_chain_fails() { net.peer(0).push_blocks(20, true); net.peer(1).push_blocks(20, false); net.sync(); - assert!(!net.peer(0).client.backend().blockchain().canon_equals_to(net.peer(1).client.backend().blockchain())); + assert!( + !net.peer(0) + .client + .backend() + .blockchain() + .canon_equals_to(net.peer(1).client.backend().blockchain()) + ); } #[test] @@ -86,9 +110,27 @@ fn sync_after_fork_works() { // peer 1 has the best chain let peer1_chain = net.peer(1).client.backend().blockchain().clone(); net.sync(); - assert!(net.peer(0).client.backend().blockchain().canon_equals_to(&peer1_chain)); - assert!(net.peer(1).client.backend().blockchain().canon_equals_to(&peer1_chain)); - assert!(net.peer(2).client.backend().blockchain().canon_equals_to(&peer1_chain)); + assert!( + net.peer(0) + .client + .backend() + .blockchain() + .canon_equals_to(&peer1_chain) + ); + assert!( + net.peer(1) + .client + .backend() + .blockchain() + .canon_equals_to(&peer1_chain) + ); + assert!( + net.peer(2) + .client + .backend() + .blockchain() + .canon_equals_to(&peer1_chain) + ); } #[test] @@ -119,7 +161,34 @@ fn blocks_are_not_announced_by_light_nodes() { // peer 0 has the best chain // peer 1 has the best chain // peer 2 has genesis-chain only - assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1); - assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1); - assert_eq!(net.peer(2).client.backend().blockchain().info().unwrap().best_number, 0); + assert_eq!( + net.peer(0) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 1 + ); + assert_eq!( + net.peer(1) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 1 + ); + assert_eq!( + net.peer(2) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 0 + ); } diff --git a/substrate/primitives/src/authority_id.rs b/substrate/primitives/src/authority_id.rs index 665a55873669a..c2d610c40a55b 100644 --- a/substrate/primitives/src/authority_id.rs +++ 
b/substrate/primitives/src/authority_id.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . - -#[cfg(feature = "std")] -use serde::{Serialize, Serializer, Deserialize, Deserializer}; use codec::Slicable; +#[cfg(feature = "std")] +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use H256; /// An identifier for an authority in the consensus algorithm. The same size as ed25519::Public. @@ -92,14 +91,20 @@ impl Into for AuthorityId { #[cfg(feature = "std")] impl Serialize for AuthorityId { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { ::bytes::serialize(&self.0, serializer) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for AuthorityId { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { ::bytes::deserialize_check_len(deserializer, ::bytes::ExpectedLen::Exact(32)) .map(|x| AuthorityId::from_slice(&x)) } diff --git a/substrate/primitives/src/bytes.rs b/substrate/primitives/src/bytes.rs index c7e5b9817f9f9..8c1d1ab3deb8e 100644 --- a/substrate/primitives/src/bytes.rs +++ b/substrate/primitives/src/bytes.rs @@ -18,24 +18,25 @@ use core::fmt; -use serde::{de, Serializer, Deserializer}; +use serde::{de, Deserializer, Serializer}; #[cfg(not(feature = "std"))] mod alloc_types { - pub use ::alloc::string::String; - pub use ::alloc::vec::Vec; + pub use alloc::string::String; + pub use alloc::vec::Vec; } #[cfg(feature = "std")] mod alloc_types { - pub use ::std::vec::Vec; - pub use ::std::string::String; + pub use std::string::String; + pub use std::vec::Vec; } pub use self::alloc_types::*; /// Serializes a slice of bytes. -pub fn serialize(bytes: &[u8], serializer: S) -> Result where +pub fn serialize(bytes: &[u8], serializer: S) -> Result +where S: Serializer, { let hex: String = ::rustc_hex::ToHex::to_hex(bytes); @@ -45,20 +46,22 @@ pub fn serialize(bytes: &[u8], serializer: S) -> Result wher /// Serialize a slice of bytes as uint. /// /// The representation will have all leading zeros trimmed. -pub fn serialize_uint(bytes: &[u8], serializer: S) -> Result where +pub fn serialize_uint(bytes: &[u8], serializer: S) -> Result +where S: Serializer, { let non_zero = bytes.iter().take_while(|b| **b == 0).count(); let bytes = &bytes[non_zero..]; if bytes.is_empty() { - return serializer.serialize_str("0x0"); + return serializer.serialize_str("0x0") } let hex: String = ::rustc_hex::ToHex::to_hex(bytes); let has_leading_zero = !hex.is_empty() && &hex[0..1] == "0"; - serializer.serialize_str( - &format!("0x{}", if has_leading_zero { &hex[1..] } else { &hex }) - ) + serializer.serialize_str(&format!( + "0x{}", + if has_leading_zero { &hex[1..] } else { &hex } + )) } /// Expected length of bytes vector. @@ -79,21 +82,24 @@ impl fmt::Display for ExpectedLen { match *self { ExpectedLen::Any => write!(fmt, "even length"), ExpectedLen::Exact(v) => write!(fmt, "length of {}", v * 2), - ExpectedLen::Between(min, max) => write!(fmt, "length between ({}; {}]", min * 2, max * 2), + ExpectedLen::Between(min, max) => + write!(fmt, "length between ({}; {}]", min * 2, max * 2), } } } /// Deserialize into vector of bytes. 
#[cfg(feature = "std")] -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where +pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> +where D: Deserializer<'de>, { deserialize_check_len(deserializer, ExpectedLen::Any) } /// Deserialize into vector of bytes with additional size check. -pub fn deserialize_check_len<'de, D>(deserializer: D, len: ExpectedLen) -> Result, D::Error> where +pub fn deserialize_check_len<'de, D>(deserializer: D, len: ExpectedLen) -> Result, D::Error> +where D: Deserializer<'de>, { struct Visitor { @@ -108,7 +114,7 @@ pub fn deserialize_check_len<'de, D>(deserializer: D, len: ExpectedLen) -> Resul } fn visit_str(self, v: &str) -> Result { - if v.len() < 2 || &v[0..2] != "0x" { + if v.len() < 2 || &v[0..2] != "0x" { return Err(E::custom("prefix is missing")) } @@ -124,10 +130,9 @@ pub fn deserialize_check_len<'de, D>(deserializer: D, len: ExpectedLen) -> Resul } let bytes = match self.len { - ExpectedLen::Between(..) if v.len() % 2 != 0 => { - ::rustc_hex::FromHex::from_hex(&*format!("0{}", &v[2..])) - }, - _ => ::rustc_hex::FromHex::from_hex(&v[2..]) + ExpectedLen::Between(..) if v.len() % 2 != 0 => + ::rustc_hex::FromHex::from_hex(&*format!("0{}", &v[2..])), + _ => ::rustc_hex::FromHex::from_hex(&v[2..]), }; #[cfg(feature = "std")] @@ -139,8 +144,10 @@ pub fn deserialize_check_len<'de, D>(deserializer: D, len: ExpectedLen) -> Resul fn format_err(e: ::rustc_hex::FromHexError) -> String { match e { ::rustc_hex::InvalidHexLength => format!("invalid hex value: invalid length"), - ::rustc_hex::InvalidHexCharacter(c, p) => - format!("invalid hex value: invalid character {} at position {}", c, p), + ::rustc_hex::InvalidHexCharacter(c, p) => format!( + "invalid hex value: invalid character {} at position {}", + c, p + ), } } diff --git a/substrate/primitives/src/hash.rs b/substrate/primitives/src/hash.rs index 8556f649bba67..400bbc5948f3f 100644 --- a/substrate/primitives/src/hash.rs +++ b/substrate/primitives/src/hash.rs @@ -17,23 +17,29 @@ //! A fixed hash type. #[cfg(feature = "std")] -use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "std")] use bytes; macro_rules! impl_rest { - ($name: ident, $len: expr) => { + ($name:ident, $len:expr) => { #[cfg(feature = "std")] impl Serialize for $name { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { bytes::serialize(&self.0, serializer) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for $name { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { bytes::deserialize_check_len(deserializer, bytes::ExpectedLen::Exact($len)) .map(|x| (&*x).into()) } @@ -48,7 +54,7 @@ macro_rules! 
impl_rest { self.0.using_encoded(f) } } - } + }; } construct_hash!(H160, 20); @@ -66,13 +72,25 @@ mod tests { #[test] fn test_h160() { let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000"), + ( + Default::default(), + "0x0000000000000000000000000000000000000000", + ), (H160::from(2), "0x0000000000000000000000000000000000000002"), (H160::from(15), "0x000000000000000000000000000000000000000f"), (H160::from(16), "0x0000000000000000000000000000000000000010"), - (H160::from(1_000), "0x00000000000000000000000000000000000003e8"), - (H160::from(100_000), "0x00000000000000000000000000000000000186a0"), - (H160::from(u64::max_value()), "0x000000000000000000000000ffffffffffffffff"), + ( + H160::from(1_000), + "0x00000000000000000000000000000000000003e8", + ), + ( + H160::from(100_000), + "0x00000000000000000000000000000000000186a0", + ), + ( + H160::from(u64::max_value()), + "0x000000000000000000000000ffffffffffffffff", + ), ]; for (number, expected) in tests { @@ -84,13 +102,34 @@ mod tests { #[test] fn test_h256() { let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000000000000000000000000000"), - (H256::from(2), "0x0000000000000000000000000000000000000000000000000000000000000002"), - (H256::from(15), "0x000000000000000000000000000000000000000000000000000000000000000f"), - (H256::from(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), - (H256::from(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), - (H256::from(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from(u64::max_value()), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), + ( + Default::default(), + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ( + H256::from(2), + "0x0000000000000000000000000000000000000000000000000000000000000002", + ), + ( + H256::from(15), + "0x000000000000000000000000000000000000000000000000000000000000000f", + ), + ( + H256::from(16), + "0x0000000000000000000000000000000000000000000000000000000000000010", + ), + ( + H256::from(1_000), + "0x00000000000000000000000000000000000000000000000000000000000003e8", + ), + ( + H256::from(100_000), + "0x00000000000000000000000000000000000000000000000000000000000186a0", + ), + ( + H256::from(u64::max_value()), + "0x000000000000000000000000000000000000000000000000ffffffffffffffff", + ), ]; for (number, expected) in tests { @@ -101,9 +140,24 @@ mod tests { #[test] fn test_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); + assert!( + ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000\"" + ).unwrap_err() + .is_data() + ); + assert!( + ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000g\"" + ).unwrap_err() + .is_data() + ); + assert!( + ser::from_str::( + "\"0x00000000000000000000000000000000000000000000000000000000000000000\"" + ).unwrap_err() + .is_data() + ); assert!(ser::from_str::("\"\"").unwrap_err().is_data()); assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); diff --git 
a/substrate/primitives/src/hashing.rs b/substrate/primitives/src/hashing.rs index 8534cf4d98f1e..0c05e232073b8 100644 --- a/substrate/primitives/src/hashing.rs +++ b/substrate/primitives/src/hashing.rs @@ -57,7 +57,7 @@ pub fn blake2_128(data: &[u8]) -> [u8; 16] { /// Do a XX 128-bit hash and place result in `dest`. pub fn twox_128_into(data: &[u8], dest: &mut [u8; 16]) { - use ::core::hash::Hasher; + use core::hash::Hasher; let mut h0 = twox_hash::XxHash::with_seed(0); let mut h1 = twox_hash::XxHash::with_seed(1); h0.write(data); @@ -78,8 +78,8 @@ pub fn twox_128(data: &[u8]) -> [u8; 16] { /// Do a XX 256-bit hash and place result in `dest`. pub fn twox_256_into(data: &[u8], dest: &mut [u8; 32]) { - use ::core::hash::Hasher; use byteorder::{ByteOrder, LittleEndian}; + use core::hash::Hasher; let mut h0 = twox_hash::XxHash::with_seed(0); let mut h1 = twox_hash::XxHash::with_seed(1); let mut h2 = twox_hash::XxHash::with_seed(2); diff --git a/substrate/primitives/src/hexdisplay.rs b/substrate/primitives/src/hexdisplay.rs index 42db2212870f0..280dd645ddc64 100644 --- a/substrate/primitives/src/hexdisplay.rs +++ b/substrate/primitives/src/hexdisplay.rs @@ -21,13 +21,15 @@ pub struct HexDisplay<'a>(&'a [u8]); impl<'a> HexDisplay<'a> { /// Create new instance that will display `d` as a hex string when displayed. - pub fn from(d: &'a AsBytesRef) -> Self { HexDisplay(d.as_bytes_ref()) } + pub fn from(d: &'a AsBytesRef) -> Self { + HexDisplay(d.as_bytes_ref()) + } } impl<'a> ::core::fmt::Display for HexDisplay<'a> { fn fmt(&self, fmtr: &mut ::core::fmt::Formatter) -> Result<(), ::core::fmt::Error> { for byte in self.0 { - try!( fmtr.write_fmt(format_args!("{:02x}", byte))); + fmtr.write_fmt(format_args!("{:02x}", byte))?; } Ok(()) } @@ -40,15 +42,21 @@ pub trait AsBytesRef { } impl<'a> AsBytesRef for &'a [u8] { - fn as_bytes_ref(&self) -> &[u8] { self } + fn as_bytes_ref(&self) -> &[u8] { + self + } } impl AsBytesRef for [u8] { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } impl AsBytesRef for ::bytes::Vec { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } macro_rules! impl_non_endians { @@ -59,6 +67,8 @@ macro_rules! impl_non_endians { )* } } -impl_non_endians!([u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], - [u8; 10], [u8; 12], [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], - [u8; 48], [u8; 56], [u8; 64], [u8; 80], [u8; 96], [u8; 112], [u8; 128]); +impl_non_endians!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 10], [u8; 12], + [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], [u8; 48], [u8; 56], + [u8; 64], [u8; 80], [u8; 96], [u8; 112], [u8; 128] +); diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index 9650cb7e7c65e..c2d5fba779084 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -17,12 +17,11 @@ //! Shareable Polkadot types. 
#![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc))] -extern crate rustc_hex; extern crate byteorder; +extern crate rustc_hex; #[macro_use] extern crate crunchy; #[macro_use] @@ -31,13 +30,13 @@ extern crate fixed_hash; extern crate uint as uint_crate; extern crate substrate_codec as codec; +#[cfg(feature = "std")] +extern crate blake2_rfc; #[cfg(feature = "std")] extern crate serde; #[cfg(feature = "std")] extern crate twox_hash; #[cfg(feature = "std")] -extern crate blake2_rfc; -#[cfg(feature = "std")] #[macro_use] extern crate serde_derive; #[cfg(feature = "std")] @@ -61,8 +60,8 @@ macro_rules! map { ) } -use rstd::prelude::*; use rstd::ops::Deref; +use rstd::prelude::*; #[cfg(feature = "std")] pub mod bytes; @@ -92,13 +91,17 @@ pub type Signature = hash::H512; /// Hex-serialised shim for `Vec`. #[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord))] -pub struct Bytes(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Bytes(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl From> for Bytes { - fn from(s: Vec) -> Self { Bytes(s) } + fn from(s: Vec) -> Self { + Bytes(s) + } } impl Deref for Bytes { type Target = [u8]; - fn deref(&self) -> &[u8] { &self.0[..] } -} \ No newline at end of file + fn deref(&self) -> &[u8] { + &self.0[..] + } +} diff --git a/substrate/primitives/src/sandbox.rs b/substrate/primitives/src/sandbox.rs index 4dee6578df1aa..50a48ffa70426 100644 --- a/substrate/primitives/src/sandbox.rs +++ b/substrate/primitives/src/sandbox.rs @@ -16,7 +16,7 @@ //! Definition of a sandbox environment. -use codec::{Slicable, Input}; +use codec::{Input, Slicable}; use rstd::vec::Vec; /// Error error that can be returned from host function. 
@@ -77,7 +77,7 @@ impl TypedValue { #[cfg(feature = "std")] impl From<::wasmi::RuntimeValue> for TypedValue { fn from(val: ::wasmi::RuntimeValue) -> TypedValue { - use ::wasmi::RuntimeValue; + use wasmi::RuntimeValue; match val { RuntimeValue::I32(v) => TypedValue::I32(v), RuntimeValue::I64(v) => TypedValue::I64(v), @@ -90,8 +90,8 @@ impl From<::wasmi::RuntimeValue> for TypedValue { #[cfg(feature = "std")] impl From for ::wasmi::RuntimeValue { fn from(val: TypedValue) -> ::wasmi::RuntimeValue { - use ::wasmi::RuntimeValue; - use ::wasmi::nan_preserving_float::{F32, F64}; + use wasmi::nan_preserving_float::{F32, F64}; + use wasmi::RuntimeValue; match val { TypedValue::I32(v) => RuntimeValue::I32(v), TypedValue::I64(v) => RuntimeValue::I64(v), @@ -108,19 +108,19 @@ impl Slicable for TypedValue { TypedValue::I32(i) => { v.push(ValueType::I32 as u8); i.using_encoded(|s| v.extend(s)); - } + }, TypedValue::I64(i) => { v.push(ValueType::I64 as u8); i.using_encoded(|s| v.extend(s)); - } + }, TypedValue::F32(f_bits) => { v.push(ValueType::F32 as u8); f_bits.using_encoded(|s| v.extend(s)); - } + }, TypedValue::F64(f_bits) => { v.push(ValueType::F64 as u8); f_bits.using_encoded(|s| v.extend(s)); - } + }, } v @@ -163,11 +163,11 @@ impl Slicable for ReturnValue { match *self { ReturnValue::Unit => { v.push(0); - } + }, ReturnValue::Value(ref val) => { v.push(1); val.using_encoded(|s| v.extend(s)); - } + }, } v } @@ -226,11 +226,11 @@ impl Slicable for ExternEntity { ExternEntity::Function(ref index) => { v.push(ExternEntityKind::Function as u8); index.using_encoded(|s| v.extend(s)); - } + }, ExternEntity::Memory(ref mem_id) => { v.push(ExternEntityKind::Memory as u8); mem_id.using_encoded(|s| v.extend(s)); - } + }, } v @@ -241,11 +241,11 @@ impl Slicable for ExternEntity { Some(x) if x == ExternEntityKind::Function as i8 => { let idx = u32::decode(value)?; Some(ExternEntity::Function(idx)) - } + }, Some(x) if x == ExternEntityKind::Memory as i8 => { let mem_id = u32::decode(value)?; Some(ExternEntity::Memory(mem_id)) - } + }, _ => None, } } @@ -305,9 +305,7 @@ impl Slicable for EnvironmentDefinition { fn decode(value: &mut I) -> Option { let entries = Vec::decode(value)?; - Some(EnvironmentDefinition { - entries, - }) + Some(EnvironmentDefinition { entries }) } } @@ -348,28 +346,22 @@ mod tests { #[test] fn env_def_roundtrip() { - roundtrip(EnvironmentDefinition { - entries: vec![], - }); + roundtrip(EnvironmentDefinition { entries: vec![] }); roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"kernel"[..].into(), - field_name: b"memory"[..].into(), - entity: ExternEntity::Memory(1337), - }, - ], + entries: vec![Entry { + module_name: b"kernel"[..].into(), + field_name: b"memory"[..].into(), + entity: ExternEntity::Memory(1337), + }], }); roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"env"[..].into(), - field_name: b"abort"[..].into(), - entity: ExternEntity::Function(228), - }, - ], + entries: vec![Entry { + module_name: b"env"[..].into(), + field_name: b"abort"[..].into(), + entity: ExternEntity::Function(228), + }], }); } } diff --git a/substrate/primitives/src/storage.rs b/substrate/primitives/src/storage.rs index c8929c7646ff6..8149076204817 100644 --- a/substrate/primitives/src/storage.rs +++ b/substrate/primitives/src/storage.rs @@ -23,9 +23,9 @@ use rstd::vec::Vec; /// Contract storage key. 
#[derive(PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord))] -pub struct StorageKey(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct StorageKey(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Contract storage entry data. #[derive(PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord))] -pub struct StorageData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct StorageData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); diff --git a/substrate/primitives/src/uint.rs b/substrate/primitives/src/uint.rs index b14a8aa4fd6ba..d549f8cd13188 100644 --- a/substrate/primitives/src/uint.rs +++ b/substrate/primitives/src/uint.rs @@ -17,16 +17,19 @@ //! An unsigned fixed-size integer. #[cfg(feature = "std")] -use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "std")] use bytes; macro_rules! impl_serde { - ($name: ident, $len: expr) => { + ($name:ident, $len:expr) => { #[cfg(feature = "std")] impl Serialize for $name { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { let mut bytes = [0u8; $len * 8]; self.to_big_endian(&mut bytes); bytes::serialize_uint(&bytes, serializer) @@ -35,12 +38,15 @@ macro_rules! impl_serde { #[cfg(feature = "std")] impl<'de> Deserialize<'de> for $name { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { bytes::deserialize_check_len(deserializer, bytes::ExpectedLen::Between(0, $len * 8)) .map(|x| (&*x).into()) } } - } + }; } construct_uint!(U256, 4); @@ -54,7 +60,7 @@ mod tests { use substrate_serializer as ser; macro_rules! test { - ($name: ident, $test_name: ident) => { + ($name:ident, $test_name:ident) => { #[test] fn $test_name() { let tests = vec![ @@ -68,7 +74,10 @@ mod tests { ($name::from(1_000), "0x3e8"), ($name::from(100_000), "0x186a0"), ($name::from(u64::max_value()), "0xffffffffffffffff"), - ($name::from(u64::max_value()) + 1.into(), "0x10000000000000000"), + ( + $name::from(u64::max_value()) + 1.into(), + "0x10000000000000000", + ), ]; for (number, expected) in tests { @@ -83,7 +92,7 @@ mod tests { assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } - } + }; } test!(U256, test_u256); @@ -96,7 +105,10 @@ mod tests { "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" ); assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"").unwrap_err().is_data() + ser::from_str::( + "\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ).unwrap_err() + .is_data() ); } } diff --git a/substrate/rpc-servers/src/lib.rs b/substrate/rpc-servers/src/lib.rs index ec3d2d6fdd98a..20064bbf315d2 100644 --- a/substrate/rpc-servers/src/lib.rs +++ b/substrate/rpc-servers/src/lib.rs @@ -17,7 +17,6 @@ //! Substrate RPC servers. 
#[warn(missing_docs)] - pub extern crate substrate_rpc as apis; extern crate jsonrpc_core as rpc; @@ -41,10 +40,11 @@ pub fn rpc_handler( chain: C, author: A, system: Y, -) -> RpcHandler where +) -> RpcHandler +where Block: 'static, S: apis::state::StateApi, - C: apis::chain::ChainApi, + C: apis::chain::ChainApi, A: apis::author::AuthorApi, Y: apis::system::SystemApi, { @@ -57,10 +57,7 @@ pub fn rpc_handler( } /// Start HTTP server listening on given address. -pub fn start_http( - addr: &std::net::SocketAddr, - io: RpcHandler, -) -> io::Result { +pub fn start_http(addr: &std::net::SocketAddr, io: RpcHandler) -> io::Result { http::ServerBuilder::new(io) .threads(4) .rest_api(http::RestApi::Unsecure) @@ -69,18 +66,16 @@ pub fn start_http( } /// Start WS server listening on given address. -pub fn start_ws( - addr: &std::net::SocketAddr, - io: RpcHandler, -) -> io::Result { - ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| Metadata::new(context.sender())) - .start(addr) +pub fn start_ws(addr: &std::net::SocketAddr, io: RpcHandler) -> io::Result { + ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| { + Metadata::new(context.sender()) + }).start(addr) .map_err(|err| match err { ws::Error(ws::ErrorKind::Io(io), _) => io, ws::Error(ws::ErrorKind::ConnectionClosed, _) => io::ErrorKind::BrokenPipe.into(), ws::Error(e, _) => { error!("{}", e); io::ErrorKind::Other.into() - } + }, }) } diff --git a/substrate/rpc/src/author/mod.rs b/substrate/rpc/src/author/mod.rs index 1d916b4e29c6d..cffe726d94ec7 100644 --- a/substrate/rpc/src/author/mod.rs +++ b/substrate/rpc/src/author/mod.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use client::{self, Client}; -use extrinsic_pool::api::{Error, ExtrinsicPool}; use codec::Slicable; +use extrinsic_pool::api::{Error, ExtrinsicPool}; use primitives::Bytes; use runtime_primitives::{generic, traits::Block as BlockT}; @@ -60,27 +60,36 @@ impl Author { } } -impl AuthorApi for Author where +impl AuthorApi for Author +where B: client::backend::Backend + Send + Sync + 'static, E: client::CallExecutor + Send + Sync + 'static, Block: BlockT + 'static, - client::error::Error: From<<>::State as state_machine::backend::Backend>::Error>, + client::error::Error: From< + <>::State as state_machine::backend::Backend>::Error, + >, P: ExtrinsicPool, Hash>, P::Error: 'static, Ex: Slicable, { fn submit_extrinsic(&self, xt: Bytes) -> Result { - self.submit_rich_extrinsic(Ex::decode(&mut &xt[..]).ok_or(error::Error::from(error::ErrorKind::BadFormat))?) 
+ self.submit_rich_extrinsic( + Ex::decode(&mut &xt[..]).ok_or(error::Error::from(error::ErrorKind::BadFormat))?, + ) } fn submit_rich_extrinsic(&self, xt: Ex) -> Result { let best_block_hash = self.client.info().unwrap().chain.best_hash; self.pool .submit(generic::BlockId::hash(best_block_hash), vec![xt]) - .map(|mut res| res.pop().expect("One extrinsic passed; one result back; qed")) - .map_err(|e| e.into_pool_error() - .map(Into::into) - .unwrap_or_else(|e| error::ErrorKind::Verification(Box::new(e)).into()) - ) + .map(|mut res| { + res.pop() + .expect("One extrinsic passed; one result back; qed") + }) + .map_err(|e| { + e.into_pool_error() + .map(Into::into) + .unwrap_or_else(|e| error::ErrorKind::Verification(Box::new(e)).into()) + }) } } diff --git a/substrate/rpc/src/author/tests.rs b/substrate/rpc/src/author/tests.rs index 6c5b5fb05e2b1..95d99488bbc48 100644 --- a/substrate/rpc/src/author/tests.rs +++ b/substrate/rpc/src/author/tests.rs @@ -16,10 +16,10 @@ use super::*; -use std::{fmt, sync::Arc}; use extrinsic_pool::api; -use test_client; use parking_lot::Mutex; +use std::{fmt, sync::Arc}; +use test_client; type Extrinsic = u64; type Hash = u64; @@ -33,7 +33,9 @@ struct DummyTxPool { struct Error; impl api::Error for Error {} impl ::std::error::Error for Error { - fn description(&self) -> &str { "Error" } + fn description(&self) -> &str { + "Error" + } } impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { @@ -45,7 +47,11 @@ impl api::ExtrinsicPool for DummyTxPool { type Error = Error; /// Submit extrinsic for inclusion in block. - fn submit(&self, _block: BlockHash, xt: Vec) -> ::std::result::Result, Self::Error> { + fn submit( + &self, + _block: BlockHash, + xt: Vec, + ) -> ::std::result::Result, Self::Error> { let mut submitted = self.submitted.lock(); if submitted.len() < 1 { let hashes = xt.iter().map(|_xt| 1).collect(); @@ -68,9 +74,7 @@ fn submit_transaction_should_not_cause_error() { AuthorApi::submit_extrinsic(&p, u64::encode(&5).into()), Ok(1) ); - assert!( - AuthorApi::submit_extrinsic(&p, u64::encode(&5).into()).is_err() - ); + assert!(AuthorApi::submit_extrinsic(&p, u64::encode(&5).into()).is_err()); } #[test] @@ -80,11 +84,6 @@ fn submit_rich_transaction_should_not_cause_error() { pool: Arc::new(DummyTxPool::default()), }; - assert_matches!( - AuthorApi::submit_rich_extrinsic(&p, 5), - Ok(1) - ); - assert!( - AuthorApi::submit_rich_extrinsic(&p, 5).is_err() - ); + assert_matches!(AuthorApi::submit_rich_extrinsic(&p, 5), Ok(1)); + assert!(AuthorApi::submit_rich_extrinsic(&p, 5).is_err()); } diff --git a/substrate/rpc/src/chain/mod.rs b/substrate/rpc/src/chain/mod.rs index 04bd4896b555a..0374cc620b039 100644 --- a/substrate/rpc/src/chain/mod.rs +++ b/substrate/rpc/src/chain/mod.rs @@ -18,15 +18,15 @@ use std::sync::Arc; -use runtime_primitives::traits::Block as BlockT; +use client::{self, BlockchainEvents, Client}; use runtime_primitives::generic::BlockId; -use client::{self, Client, BlockchainEvents}; +use runtime_primitives::traits::Block as BlockT; use state_machine; use jsonrpc_macros::pubsub; use jsonrpc_pubsub::SubscriptionId; -use rpc::Result as RpcResult; use rpc::futures::{Future, Sink, Stream}; +use rpc::Result as RpcResult; use tokio_core::reactor::Remote; use subscriptions::Subscriptions; @@ -80,25 +80,41 @@ impl Chain { } } -impl ChainApi for Chain where +impl ChainApi for Chain +where Block: BlockT + 'static, B: client::backend::Backend + Send + Sync + 'static, E: client::CallExecutor + Send + Sync + 'static, - 
client::error::Error: From<<>::State as state_machine::backend::Backend>::Error>, + client::error::Error: From< + <>::State as state_machine::backend::Backend>::Error, + >, { type Metadata = ::metadata::Metadata; fn header(&self, hash: Block::Hash) -> Result> { - self.client.header(&BlockId::Hash(hash)).chain_err(|| "Blockchain error") + self.client + .header(&BlockId::Hash(hash)) + .chain_err(|| "Blockchain error") } fn head(&self) -> Result { - Ok(self.client.info().chain_err(|| "Blockchain error")?.chain.best_hash) + Ok(self + .client + .info() + .chain_err(|| "Blockchain error")? + .chain + .best_hash) } - fn subscribe_new_head(&self, _metadata: Self::Metadata, subscriber: pubsub::Subscriber) { + fn subscribe_new_head( + &self, + _metadata: Self::Metadata, + subscriber: pubsub::Subscriber, + ) { self.subscriptions.add(subscriber, |sink| { - let stream = self.client.import_notification_stream() + let stream = self + .client + .import_notification_stream() .filter(|notification| notification.is_new_best) .map(|notification| Ok(notification.header)) .map_err(|e| warn!("Block notification stream error: {:?}", e)); diff --git a/substrate/rpc/src/chain/tests.rs b/substrate/rpc/src/chain/tests.rs index e2e24b8e2c60a..2cf01ae0731ed 100644 --- a/substrate/rpc/src/chain/tests.rs +++ b/substrate/rpc/src/chain/tests.rs @@ -15,10 +15,10 @@ // along with Substrate. If not, see . use super::*; -use jsonrpc_macros::pubsub; use client::BlockOrigin; -use test_client::{self, TestClient}; +use jsonrpc_macros::pubsub; use test_client::runtime::Header; +use test_client::{self, TestClient}; #[test] fn should_return_header() { @@ -40,10 +40,7 @@ fn should_return_header() { } ); - assert_matches!( - client.header(5.into()), - Ok(None) - ); + assert_matches!(client.header(5.into()), Ok(None)); } #[test] @@ -64,7 +61,9 @@ fn should_notify_about_latest_block() { assert_eq!(core.run(id), Ok(Ok(SubscriptionId::Number(0)))); let builder = api.client.new_block().unwrap(); - api.client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); + api.client + .justify_and_import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); } // assert notification send to transport diff --git a/substrate/rpc/src/lib.rs b/substrate/rpc/src/lib.rs index f8099ec33e27e..7ad9d2d320424 100644 --- a/substrate/rpc/src/lib.rs +++ b/substrate/rpc/src/lib.rs @@ -21,8 +21,8 @@ extern crate jsonrpc_core as rpc; extern crate jsonrpc_pubsub; extern crate parking_lot; -extern crate substrate_codec as codec; extern crate substrate_client as client; +extern crate substrate_codec as codec; extern crate substrate_extrinsic_pool as extrinsic_pool; extern crate substrate_primitives as primitives; extern crate substrate_runtime_primitives as runtime_primitives; diff --git a/substrate/rpc/src/metadata.rs b/substrate/rpc/src/metadata.rs index c40a6ad0542c1..1f3a3aa1ce4f0 100644 --- a/substrate/rpc/src/metadata.rs +++ b/substrate/rpc/src/metadata.rs @@ -17,7 +17,7 @@ //! RPC Metadata use std::sync::Arc; -use jsonrpc_pubsub::{Session, PubSubMetadata}; +use jsonrpc_pubsub::{PubSubMetadata, Session}; /// RPC Metadata. 
/// diff --git a/substrate/rpc/src/state/mod.rs b/substrate/rpc/src/state/mod.rs index 4c68beb507d9f..9427ce1fa0574 100644 --- a/substrate/rpc/src/state/mod.rs +++ b/substrate/rpc/src/state/mod.rs @@ -21,13 +21,13 @@ mod error; #[cfg(test)] mod tests; +use client::{self, CallExecutor, Client}; use std::sync::Arc; -use client::{self, Client, CallExecutor}; +use primitives::hexdisplay::HexDisplay; +use primitives::storage::{StorageData, StorageKey}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::Block as BlockT; -use primitives::storage::{StorageKey, StorageData}; -use primitives::hexdisplay::HexDisplay; use state_machine; use self::error::Result; @@ -69,11 +69,14 @@ build_rpc_trait! { } } -impl StateApi for Arc> where +impl StateApi for Arc> +where Block: BlockT + 'static, B: client::backend::Backend + Send + Sync + 'static, E: CallExecutor + Send + Sync + 'static, - client::error::Error: From<<>::State as state_machine::backend::Backend>::Error>, + client::error::Error: From< + <>::State as state_machine::backend::Backend>::Error, + >, { fn storage_at(&self, key: StorageKey, block: Block::Hash) -> Result { trace!(target: "rpc", "Querying storage at {:?} for key {}", block, HexDisplay::from(&key.0)); @@ -82,12 +85,17 @@ impl StateApi for Arc> where fn call_at(&self, method: String, data: Vec, block: Block::Hash) -> Result> { trace!(target: "rpc", "Calling runtime at {:?} for method {} ({})", block, method, HexDisplay::from(&data)); - Ok(self.as_ref().executor().call(&BlockId::Hash(block), &method, &data)?.return_data) + Ok(self + .as_ref() + .executor() + .call(&BlockId::Hash(block), &method, &data)? + .return_data) } fn storage_hash_at(&self, key: StorageKey, block: Block::Hash) -> Result { use runtime_primitives::traits::{Hashing, Header as HeaderT}; - self.storage_at(key, block).map(|x| ::Hashing::hash(&x.0)) + self.storage_at(key, block) + .map(|x| ::Hashing::hash(&x.0)) } fn storage_size_at(&self, key: StorageKey, block: Block::Hash) -> Result { diff --git a/substrate/rpc/src/state/tests.rs b/substrate/rpc/src/state/tests.rs index 1b98711ea5e99..e10589805bd7e 100644 --- a/substrate/rpc/src/state/tests.rs +++ b/substrate/rpc/src/state/tests.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use super::*; use self::error::{Error, ErrorKind}; +use super::*; use test_client::{self, TestClient}; #[test] @@ -35,7 +35,7 @@ fn should_call_contract() { let genesis_hash = client.genesis_hash(); assert_matches!( - StateApi::call_at(&client, "balanceOf".into(), vec![1,2,3], genesis_hash), + StateApi::call_at(&client, "balanceOf".into(), vec![1, 2, 3], genesis_hash), Err(Error(ErrorKind::Client(client::error::ErrorKind::Execution(_)), _)) ) } diff --git a/substrate/rpc/src/subscriptions.rs b/substrate/rpc/src/subscriptions.rs index 60536e5a6d801..97b0459a7b2ad 100644 --- a/substrate/rpc/src/subscriptions.rs +++ b/substrate/rpc/src/subscriptions.rs @@ -21,7 +21,7 @@ use jsonrpc_macros::pubsub; use jsonrpc_pubsub::SubscriptionId; use parking_lot::Mutex; use rpc::futures::sync::oneshot; -use rpc::futures::{Future, future}; +use rpc::futures::{future, Future}; use tokio_core::reactor::Remote; type Id = u64; @@ -52,10 +52,11 @@ impl Subscriptions { /// Second parameter is a function that converts Subscriber sink into a future. /// This future will be driven to completion bu underlying event loop /// or will be cancelled in case #cancel is invoked. 
- pub fn add(&self, subscriber: pubsub::Subscriber, into_future: G) where + pub fn add(&self, subscriber: pubsub::Subscriber, into_future: G) + where G: FnOnce(pubsub::Sink) -> R, - R: future::IntoFuture, - F: future::Future + Send + 'static, + R: future::IntoFuture, + F: future::Future + Send + 'static, { let id = self.next_id.fetch_add(1, atomic::Ordering::AcqRel) as u64; if let Ok(sink) = subscriber.assign_id(id.into()) { @@ -78,7 +79,7 @@ impl Subscriptions { if let SubscriptionId::Number(id) = id { if let Some(tx) = self.active_subscriptions.lock().remove(&id) { let _ = tx.send(()); - return true; + return true } } false diff --git a/substrate/rpc/src/system/tests.rs b/substrate/rpc/src/system/tests.rs index f22cd5a157779..e24a6cdf0a3c2 100644 --- a/substrate/rpc/src/system/tests.rs +++ b/substrate/rpc/src/system/tests.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use super::*; use super::error::*; +use super::*; impl SystemApi for () { fn system_name(&self) -> Result { @@ -39,10 +39,7 @@ fn system_name_works() { #[test] fn system_version_works() { - assert_eq!( - SystemApi::system_version(&()).unwrap(), - "0.2.0".to_owned() - ); + assert_eq!(SystemApi::system_version(&()).unwrap(), "0.2.0".to_owned()); } #[test] diff --git a/substrate/runtime-io/build.rs b/substrate/runtime-io/build.rs index 35eb154f3a69a..55688bad9cc51 100644 --- a/substrate/runtime-io/build.rs +++ b/substrate/runtime-io/build.rs @@ -4,11 +4,11 @@ extern crate rustc_version; use rustc_version::{version, version_meta, Channel}; fn main() { - // Assert we haven't travelled back in time - assert!(version().unwrap().major >= 1); + // Assert we haven't travelled back in time + assert!(version().unwrap().major >= 1); - // Set cfg flags depending on release channel - if let Channel::Nightly = version_meta().unwrap().channel { - println!("cargo:rustc-cfg=feature=\"nightly\""); - } + // Set cfg flags depending on release channel + if let Channel::Nightly = version_meta().unwrap().channel { + println!("cargo:rustc-cfg=feature=\"nightly\""); + } } diff --git a/substrate/runtime-io/src/lib.rs b/substrate/runtime-io/src/lib.rs index b3373fb46dc09..408b1f843a65a 100644 --- a/substrate/runtime-io/src/lib.rs +++ b/substrate/runtime-io/src/lib.rs @@ -21,9 +21,14 @@ #![cfg_attr(not(feature = "std"), feature(panic_implementation))] #![cfg_attr(not(feature = "std"), feature(core_intrinsics))] #![cfg_attr(not(feature = "std"), feature(alloc))] - -#![cfg_attr(feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." 
+)] #[cfg(feature = "std")] include!("../with_std.rs"); diff --git a/substrate/runtime-sandbox/build.rs b/substrate/runtime-sandbox/build.rs index 35eb154f3a69a..55688bad9cc51 100755 --- a/substrate/runtime-sandbox/build.rs +++ b/substrate/runtime-sandbox/build.rs @@ -4,11 +4,11 @@ extern crate rustc_version; use rustc_version::{version, version_meta, Channel}; fn main() { - // Assert we haven't travelled back in time - assert!(version().unwrap().major >= 1); + // Assert we haven't travelled back in time + assert!(version().unwrap().major >= 1); - // Set cfg flags depending on release channel - if let Channel::Nightly = version_meta().unwrap().channel { - println!("cargo:rustc-cfg=feature=\"nightly\""); - } + // Set cfg flags depending on release channel + if let Channel::Nightly = version_meta().unwrap().channel { + println!("cargo:rustc-cfg=feature=\"nightly\""); + } } diff --git a/substrate/runtime-sandbox/src/lib.rs b/substrate/runtime-sandbox/src/lib.rs index f9195c10efe46..28cf92d83c04b 100755 --- a/substrate/runtime-sandbox/src/lib.rs +++ b/substrate/runtime-sandbox/src/lib.rs @@ -39,17 +39,17 @@ #![cfg_attr(not(feature = "std"), feature(alloc))] extern crate substrate_codec as codec; +extern crate substrate_primitives as primitives; extern crate substrate_runtime_io as runtime_io; #[cfg_attr(not(feature = "std"), macro_use)] extern crate substrate_runtime_std as rstd; -extern crate substrate_primitives as primitives; #[cfg(test)] extern crate wabt; use rstd::prelude::*; -pub use primitives::sandbox::{TypedValue, ReturnValue, HostError}; +pub use primitives::sandbox::{HostError, ReturnValue, TypedValue}; mod imp { #[cfg(feature = "std")] @@ -172,14 +172,17 @@ impl EnvironmentDefinitionBuilder { /// This instance can be used for invoking exported functions. pub struct Instance { inner: imp::Instance, - } impl Instance { /// Instantiate a module with the given [`EnvironmentDefinitionBuilder`]. /// /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html - pub fn new(code: &[u8], env_def_builder: &EnvironmentDefinitionBuilder, state: &mut T) -> Result, Error> { + pub fn new( + code: &[u8], + env_def_builder: &EnvironmentDefinitionBuilder, + state: &mut T, + ) -> Result, Error> { Ok(Instance { inner: imp::Instance::new(code, &env_def_builder.inner, state)?, }) diff --git a/substrate/runtime-std/src/lib.rs b/substrate/runtime-std/src/lib.rs index 64267b23d0547..f2d5b38f796e0 100644 --- a/substrate/runtime-std/src/lib.rs +++ b/substrate/runtime-std/src/lib.rs @@ -22,9 +22,14 @@ #![cfg_attr(not(feature = "std"), feature(core_intrinsics))] #![cfg_attr(not(feature = "std"), feature(alloc))] #![cfg_attr(not(feature = "std"), feature(use_extern_macros))] - -#![cfg_attr(feature = "std", doc = "Polkadot runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), doc = "Polkadot's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Polkadot runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Polkadot's runtime standard library as compiled without Rust's standard library." +)] #[macro_export] macro_rules! map { @@ -43,8 +48,8 @@ include!("../without_std.rs"); /// /// This should include only things which are in the normal std prelude. 
pub mod prelude { - pub use ::vec::Vec; - pub use ::boxed::Box; - pub use ::cmp::{Eq, PartialEq}; - pub use ::clone::Clone; + pub use boxed::Box; + pub use clone::Clone; + pub use cmp::{Eq, PartialEq}; + pub use vec::Vec; } diff --git a/substrate/runtime-support/src/dispatch.rs b/substrate/runtime-support/src/dispatch.rs index d9c33bdc4c704..e5dbd0ce78cf4 100644 --- a/substrate/runtime-support/src/dispatch.rs +++ b/substrate/runtime-support/src/dispatch.rs @@ -16,13 +16,13 @@ //! Dispatch system. Just dispatches calls. -pub use rstd::prelude::{Vec, Clone, Eq, PartialEq}; -#[cfg(feature = "std")] -pub use std::fmt; +pub use codec::{Input, Slicable}; +pub use rstd::prelude::{Clone, Eq, PartialEq, Vec}; pub use rstd::result; #[cfg(feature = "std")] use serde; -pub use codec::{Slicable, Input}; +#[cfg(feature = "std")] +pub use std::fmt; pub type Result = result::Result<(), &'static str>; diff --git a/substrate/runtime-support/src/lib.rs b/substrate/runtime-support/src/lib.rs index b07d17c41dcf8..8179ed41787c3 100644 --- a/substrate/runtime-support/src/lib.rs +++ b/substrate/runtime-support/src/lib.rs @@ -21,9 +21,9 @@ #[cfg(feature = "std")] extern crate serde; -extern crate substrate_runtime_std as rstd; -extern crate substrate_runtime_io as runtime_io; extern crate substrate_primitives as primitives; +extern crate substrate_runtime_io as runtime_io; +extern crate substrate_runtime_std as rstd; #[doc(hidden)] pub extern crate substrate_codec as codec; @@ -33,53 +33,54 @@ pub mod dispatch; pub mod storage; mod hashable; -pub use self::storage::{StorageVec, StorageList, StorageValue, StorageMap}; +pub use self::dispatch::{ + AuxCallable, AuxDispatchable, Callable, Dispatchable, IsAuxSubType, IsSubType, Parameter, +}; pub use self::hashable::Hashable; -pub use self::dispatch::{Parameter, Dispatchable, Callable, AuxDispatchable, AuxCallable, IsSubType, IsAuxSubType}; +pub use self::storage::{StorageList, StorageMap, StorageValue, StorageVec}; pub use runtime_io::print; - #[macro_export] macro_rules! fail { - ( $y:expr ) => {{ - return Err($y); - }} + ($y:expr) => {{ + return Err($y) + }}; } #[macro_export] macro_rules! ensure { - ( $x:expr, $y:expr ) => {{ + ($x:expr, $y:expr) => {{ if !$x { fail!($y); - } - }} + } + }}; } #[macro_export] #[cfg(feature = "std")] macro_rules! assert_noop { - ( $x:expr , $y:expr ) => { + ($x:expr, $y:expr) => { let h = runtime_io::storage_root(); assert_err!($x, $y); assert_eq!(h, runtime_io::storage_root()); - } + }; } #[macro_export] #[cfg(feature = "std")] macro_rules! assert_err { - ( $x:expr , $y:expr ) => { + ($x:expr, $y:expr) => { assert_eq!($x, Err($y)); - } + }; } #[macro_export] #[cfg(feature = "std")] macro_rules! assert_ok { - ( $x:expr ) => { + ($x:expr) => { assert_eq!($x, Ok(())); }; - ( $x:expr, $y:expr ) => { + ($x:expr, $y:expr) => { assert_eq!($x, Ok($y)); - } + }; } diff --git a/substrate/runtime-support/src/storage/generator.rs b/substrate/runtime-support/src/storage/generator.rs index d91e860f9ee51..92143850f3b7e 100644 --- a/substrate/runtime-support/src/storage/generator.rs +++ b/substrate/runtime-support/src/storage/generator.rs @@ -35,23 +35,23 @@ //! pub type SessionKey = [u8; 32]; //! //! storage_items! { -//! // public value -//! pub Value: b"putd_key" => SessionKey; -//! // private map. -//! Balances: b"private_map:" => map [AuthorityId => Balance]; -//! // private list. -//! Authorities: b"auth:" => list [AuthorityId]; -//! } +//! // public value +//! pub Value: b"putd_key" => SessionKey; +//! // private map. +//! 
Balances: b"private_map:" => map [AuthorityId => Balance]; +//! // private list. +//! Authorities: b"auth:" => list [AuthorityId]; +//! } //! -//!# fn main() { } +//! # fn main() { } //! ``` use codec; -use rstd::vec::Vec; #[doc(hidden)] pub use rstd::borrow::Borrow; #[doc(hidden)] pub use rstd::marker::PhantomData; +use rstd::vec::Vec; /// Abstraction around storage. pub trait Storage { @@ -63,11 +63,15 @@ pub trait Storage { /// Load the bytes of a key from storage. Can panic if the type is incorrect. Will panic if /// it's not there. - fn require(&self, key: &[u8]) -> T { self.get(key).expect("Required values must be in storage") } + fn require(&self, key: &[u8]) -> T { + self.get(key).expect("Required values must be in storage") + } /// Load the bytes of a key from storage. Can panic if the type is incorrect. The type's /// default is returned if it's not there. - fn get_or_default(&self, key: &[u8]) -> T { self.get(key).unwrap_or_default() } + fn get_or_default(&self, key: &[u8]) -> T { + self.get(key).unwrap_or_default() + } /// Put a value in under a key. fn put(&self, key: &[u8], val: &T); @@ -83,10 +87,14 @@ pub trait Storage { } /// Take a value from storage, deleting it after reading. - fn take_or_panic(&self, key: &[u8]) -> T { self.take(key).expect("Required values must be in storage") } + fn take_or_panic(&self, key: &[u8]) -> T { + self.take(key).expect("Required values must be in storage") + } /// Take a value from storage, deleting it after reading. - fn take_or_default(&self, key: &[u8]) -> T { self.take(key).unwrap_or_default() } + fn take_or_default(&self, key: &[u8]) -> T { + self.take(key).unwrap_or_default() + } } /// A strongly-typed value kept in storage. @@ -516,9 +524,9 @@ macro_rules! __decl_storage_item { } // TODO: revisit this idiom once we get `type`s in `impl`s. -/*impl Module { - type Now = super::Now; -}*/ +// impl Module { +// type Now = super::Now; +// } /// Declares strongly-typed wrappers around codec-compatible types in storage. /// @@ -654,16 +662,26 @@ macro_rules! __decl_store_items { #[macro_export] #[doc(hidden)] macro_rules! __impl_store_fn { - ($traitinstance:ident $name:ident $get_fn:ident ($gettype:ty) $key:expr => $ty:ty) => { + ($traitinstance:ident $name:ident $get_fn:ident($gettype:ty) $key:expr => $ty:ty) => { pub fn $get_fn() -> $gettype { - <$name<$traitinstance> as $crate::storage::generator::StorageValue<$ty>> :: get(&$crate::storage::RuntimeStorage) + <$name<$traitinstance> as $crate::storage::generator::StorageValue<$ty>>::get( + &$crate::storage::RuntimeStorage, + ) } }; - ($traitinstance:ident $name:ident $get_fn:ident ($gettype:ty) $prefix:expr => map [$kty:ty => $ty:ty]) => { + ( + $traitinstance:ident + $name:ident + $get_fn:ident($gettype:ty) + $prefix:expr => map[$kty:ty => $ty:ty] + ) => { pub fn $get_fn>(key: K) -> $gettype { - <$name<$traitinstance> as $crate::storage::generator::StorageMap<$kty, $ty>> :: get(key.borrow(), &$crate::storage::RuntimeStorage) + <$name<$traitinstance> as $crate::storage::generator::StorageMap<$kty, $ty>>::get( + key.borrow(), + &$crate::storage::RuntimeStorage, + ) } - } + }; } #[macro_export] @@ -766,7 +784,9 @@ macro_rules! __impl_store_fns { #[macro_export] #[doc(hidden)] macro_rules! __impl_store_item { - ($name:ident $traitinstance:ident) => { type $name = $name<$traitinstance>; } + ($name:ident $traitinstance:ident) => { + type $name = $name<$traitinstance>; + }; } #[macro_export] @@ -989,10 +1009,10 @@ macro_rules! 
__decl_storage_items { #[cfg(test)] mod tests { - use std::collections::HashMap; - use std::cell::RefCell; - use codec::Slicable; use super::*; + use codec::Slicable; + use std::cell::RefCell; + use std::collections::HashMap; impl Storage for RefCell, Vec>> { fn exists(&self, key: &[u8]) -> bool { @@ -1000,7 +1020,9 @@ mod tests { } fn get(&self, key: &[u8]) -> Option { - self.borrow_mut().get(key).map(|v| T::decode(&mut &v[..]).unwrap()) + self.borrow_mut() + .get(key) + .map(|v| T::decode(&mut &v[..]).unwrap()) } fn put(&self, key: &[u8], val: &T) { diff --git a/substrate/runtime-support/src/storage/mod.rs b/substrate/runtime-support/src/storage/mod.rs index 2c9e2a5887fce..d9fb9de9896ef 100644 --- a/substrate/runtime-support/src/storage/mod.rs +++ b/substrate/runtime-support/src/storage/mod.rs @@ -16,10 +16,10 @@ //! Stuff to do with the runtime's storage. -use rstd::prelude::*; +use codec::{Input, KeyedVec, Slicable}; use rstd::borrow::Borrow; +use rstd::prelude::*; use runtime_io::{self, twox_128}; -use codec::{Slicable, KeyedVec, Input}; pub mod generator; @@ -39,7 +39,7 @@ impl<'a> Input for IncrementalInput<'a> { } } - /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. +/// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(key: &[u8]) -> Option { let key = twox_128(key); runtime_io::read_storage(&key[..], &mut [0; 0][..], 0).map(|_| { @@ -175,7 +175,10 @@ pub trait StorageValue { fn take() -> Self::Query; } -impl StorageValue for U where U: generator::StorageValue { +impl StorageValue for U +where + U: generator::StorageValue, +{ type Query = U::Query; fn key() -> &'static [u8] { @@ -228,7 +231,10 @@ pub trait StorageList { fn clear(); } -impl StorageList for U where U: generator::StorageList { +impl StorageList for U +where + U: generator::StorageList, +{ fn prefix() -> &'static [u8] { >::prefix() } @@ -293,7 +299,10 @@ pub trait StorageMap { fn take>(key: KeyArg) -> Self::Query; } -impl StorageMap for U where U: generator::StorageMap { +impl StorageMap for U +where + U: generator::StorageMap, +{ type Query = U::Query; fn prefix() -> &'static [u8] { @@ -337,15 +346,17 @@ pub trait StorageVec { /// Set the current set of items. fn set_items(items: I) - where - I: IntoIterator, - T: Borrow, + where + I: IntoIterator, + T: Borrow, { let mut count: u32 = 0; for i in items.into_iter() { put(&count.to_keyed_vec(Self::PREFIX), i.borrow()); - count = count.checked_add(1).expect("exceeded runtime storage capacity"); + count = count + .checked_add(1) + .expect("exceeded runtime storage capacity"); } Self::set_count(count); @@ -385,17 +396,16 @@ pub trait StorageVec { } pub mod unhashed { + use super::{runtime_io, IncrementalInput, KeyedVec, Slicable, Vec}; use rstd::borrow::Borrow; - use super::{runtime_io, Slicable, KeyedVec, Vec, IncrementalInput}; - /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. + /// Return the value of the item in storage under `key`, or `None` if there is no explicit + /// entry. 
pub fn get(key: &[u8]) -> Option { runtime_io::read_storage(key, &mut [0; 0][..], 0).map(|_| { - let mut input = IncrementalInput { - key, - pos: 0, - }; - Slicable::decode(&mut input).expect("stroage is not null, therefore must be a valid type") + let mut input = IncrementalInput { key, pos: 0 }; + Slicable::decode(&mut input) + .expect("stroage is not null, therefore must be a valid type") }) } @@ -422,7 +432,8 @@ pub mod unhashed { value.using_encoded(|slice| runtime_io::set_storage(key, slice)); } - /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. + /// Remove `key` from storage, returning its value if it had an explicit entry or `None` + /// otherwise. pub fn take(key: &[u8]) -> Option { let r = get(key); if r.is_some() { @@ -431,8 +442,8 @@ pub mod unhashed { r } - /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, - /// the default for its type. + /// Remove `key` from storage, returning its value, or, if there was no explicit entry in + /// storage, the default for its type. pub fn take_or_default(key: &[u8]) -> T { take(key).unwrap_or_else(Default::default) } @@ -451,7 +462,7 @@ pub mod unhashed { /// Check to see if `key` has an explicit entry in storage. pub fn exists(key: &[u8]) -> bool { - runtime_io::read_storage(key, &mut [0;0][..], 0).is_some() + runtime_io::read_storage(key, &mut [0; 0][..], 0).is_some() } /// Ensure `key` has no explicit entry in storage. @@ -486,15 +497,17 @@ pub mod unhashed { /// Set the current set of items. fn set_items(items: I) - where - I: IntoIterator, - T: Borrow, + where + I: IntoIterator, + T: Borrow, { let mut count: u32 = 0; for i in items.into_iter() { put(&count.to_keyed_vec(Self::PREFIX), i.borrow()); - count = count.checked_add(1).expect("exceeded runtime storage capacity"); + count = count + .checked_add(1) + .expect("exceeded runtime storage capacity"); } Self::set_count(count); @@ -530,7 +543,7 @@ pub mod unhashed { #[cfg(test)] mod tests { use super::*; - use runtime_io::{twox_128, TestExternalities, with_externalities}; + use runtime_io::{twox_128, with_externalities, TestExternalities}; #[test] fn integers_can_be_stored() { @@ -575,7 +588,6 @@ mod tests { let x = b"Hello world".to_vec(); let y = get::>(b":test").unwrap(); assert_eq!(x, y); - }); } diff --git a/substrate/runtime/consensus/src/lib.rs b/substrate/runtime/consensus/src/lib.rs index 30a1be0c94619..701d07bf707ac 100644 --- a/substrate/runtime/consensus/src/lib.rs +++ b/substrate/runtime/consensus/src/lib.rs @@ -32,18 +32,18 @@ extern crate serde; #[macro_use] extern crate serde_derive; +extern crate substrate_codec as codec; +extern crate substrate_primitives; extern crate substrate_runtime_io as runtime_io; extern crate substrate_runtime_primitives as primitives; -extern crate substrate_codec as codec; extern crate substrate_runtime_system as system; -extern crate substrate_primitives; +use primitives::bft::MisbehaviorReport; +use primitives::traits::{MaybeEmpty, MaybeSerializeDebug, RefInto}; use rstd::prelude::*; -use runtime_support::{storage, Parameter}; use runtime_support::dispatch::Result; use runtime_support::storage::unhashed::StorageVec; -use primitives::traits::{RefInto, MaybeSerializeDebug, MaybeEmpty}; -use primitives::bft::MisbehaviorReport; +use runtime_support::{storage, Parameter}; pub const AUTHORITY_AT: &'static [u8] = b":auth:"; pub const AUTHORITY_COUNT: &'static [u8] = b":auth:len"; @@ -59,7 +59,7 @@ pub const CODE: &'static [u8] = b":code"; pub type 
KeyValue = (Vec, Vec); pub trait Trait: system::Trait { - type PublicAux: RefInto + MaybeEmpty; // MaybeEmpty is for Timestamp's usage. + type PublicAux: RefInto + MaybeEmpty; // MaybeEmpty is for Timestamp's usage. type SessionKey: Parameter + Default + MaybeSerializeDebug; } @@ -99,7 +99,10 @@ impl Module { } /// Report some misbehaviour. - fn report_misbehavior(_aux: &T::PublicAux, _report: MisbehaviorReport) -> Result { + fn report_misbehavior( + _aux: &T::PublicAux, + _report: MisbehaviorReport, + ) -> Result { // TODO. Ok(()) } @@ -138,14 +141,16 @@ impl Default for GenesisConfig { } #[cfg(any(feature = "std", test))] -impl primitives::BuildStorage for GenesisConfig -{ +impl primitives::BuildStorage for GenesisConfig { fn build_storage(self) -> ::std::result::Result { - use codec::{Slicable, KeyedVec}; + use codec::{KeyedVec, Slicable}; let auth_count = self.authorities.len() as u32; - let mut r: runtime_io::TestExternalities = self.authorities.into_iter().enumerate().map(|(i, v)| - ((i as u32).to_keyed_vec(AUTHORITY_AT), v.encode()) - ).collect(); + let mut r: runtime_io::TestExternalities = self + .authorities + .into_iter() + .enumerate() + .map(|(i, v)| ((i as u32).to_keyed_vec(AUTHORITY_AT), v.encode())) + .collect(); r.insert(AUTHORITY_COUNT.to_vec(), auth_count.encode()); r.insert(CODE.to_vec(), self.code); Ok(r) diff --git a/substrate/runtime/contract/src/lib.rs b/substrate/runtime/contract/src/lib.rs index bc51673fd44db..c6e09b56289af 100644 --- a/substrate/runtime/contract/src/lib.rs +++ b/substrate/runtime/contract/src/lib.rs @@ -17,7 +17,8 @@ //! Crate for executing smart-contracts. //! //! It provides an means for executing contracts represented in WebAssembly (Wasm for short). -//! Contracts are able to create other contracts, transfer funds to each other and operate on a simple key-value storage. +//! Contracts are able to create other contracts, transfer funds to each other and operate on a +//! simple key-value storage. #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] @@ -25,9 +26,9 @@ extern crate parity_wasm; extern crate pwasm_utils; -extern crate substrate_runtime_std as rstd; -extern crate substrate_runtime_sandbox as sandbox; extern crate substrate_codec as codec; +extern crate substrate_runtime_sandbox as sandbox; +extern crate substrate_runtime_std as rstd; #[cfg(test)] #[macro_use] @@ -36,8 +37,8 @@ extern crate assert_matches; #[cfg(test)] extern crate wabt; -use rstd::prelude::*; use codec::Slicable; +use rstd::prelude::*; use parity_wasm::elements::{self, External, MemoryType}; use pwasm_utils::rules; @@ -62,8 +63,8 @@ pub trait Ext { // TODO: Return the address of the created contract. /// Create a new account for a contract. /// - /// The newly created account will be associated with the `code`. `value` specifies the amount of value - /// transfered from this to the newly created account. + /// The newly created account will be associated with the `code`. `value` specifies the amount + /// of value transfered from this to the newly created account. fn create(&mut self, code: &[u8], value: Self::Balance); /// Transfer some funds to the specified account. @@ -131,7 +132,8 @@ impl<'a, T: Ext + 'a> Runtime<'a, T> { /// Returns `false` if there is not enough gas or addition of the specified /// amount of gas has lead to overflow. On success returns `true`. /// - /// Intuition about the return value sense is to answer the question 'are we allowed to continue?' 
+ /// Intuition about the return value sense is to answer the question 'are we allowed to + /// continue?' fn charge_gas(&mut self, amount: u64) -> bool { match self.gas_used.checked_add(amount) { None => false, @@ -139,23 +141,22 @@ impl<'a, T: Ext + 'a> Runtime<'a, T> { Some(val) => { self.gas_used = val; true - } + }, } } } /// Execute the given code as a contract. -pub fn execute<'a, T: Ext>( - code: &[u8], - ext: &'a mut T, - gas_limit: u64, -) -> Result<(), Error> { +pub fn execute<'a, T: Ext>(code: &[u8], ext: &'a mut T, gas_limit: u64) -> Result<(), Error> { // ext_gas(amount: u32) // // Account for used gas. Traps if gas used is greater than gas limit. // // - amount: How much gas is used. - fn ext_gas(e: &mut Runtime, args: &[sandbox::TypedValue]) -> Result { + fn ext_gas( + e: &mut Runtime, + args: &[sandbox::TypedValue], + ) -> Result { let amount = args[0].as_i32().unwrap() as u32; if e.charge_gas(amount as u64) { Ok(sandbox::ReturnValue::Unit) @@ -173,7 +174,8 @@ pub fn execute<'a, T: Ext>( // - value_non_null: if set to 0, then the entry // at the given location will be removed. // - value_ptr: pointer into the linear memory - // where the value to set is placed. If `value_non_null` is set to 0, then this parameter is ignored. + // where the value to set is placed. If `value_non_null` is set to 0, then this parameter is + // ignored. fn ext_set_storage( e: &mut Runtime, args: &[sandbox::TypedValue], @@ -193,10 +195,7 @@ pub fn execute<'a, T: Ext>( } else { None }; - e.ext_mut().set_storage( - &location, - value, - ); + e.ext_mut().set_storage(&location, value); Ok(sandbox::ReturnValue::Unit) } @@ -211,7 +210,10 @@ pub fn execute<'a, T: Ext>( // memory where the location of the requested value is placed. // - dest_ptr: pointer where contents of the specified storage location // should be placed. - fn ext_get_storage(e: &mut Runtime, args: &[sandbox::TypedValue]) -> Result { + fn ext_get_storage( + e: &mut Runtime, + args: &[sandbox::TypedValue], + ) -> Result { let location_ptr = args[0].as_i32().unwrap() as u32; let dest_ptr = args[1].as_i32().unwrap() as u32; @@ -228,7 +230,10 @@ pub fn execute<'a, T: Ext>( } // ext_transfer(transfer_to: u32, transfer_to_len: u32, value_ptr: u32, value_len: u32) - fn ext_transfer(e: &mut Runtime, args: &[sandbox::TypedValue]) -> Result { + fn ext_transfer( + e: &mut Runtime, + args: &[sandbox::TypedValue], + ) -> Result { let transfer_to_ptr = args[0].as_i32().unwrap() as u32; let transfer_to_len = args[1].as_i32().unwrap() as u32; let value_ptr = args[2].as_i32().unwrap() as u32; @@ -250,7 +255,10 @@ pub fn execute<'a, T: Ext>( } // ext_create(code_ptr: u32, code_len: u32, value_ptr: u32, value_len: u32) - fn ext_create(e: &mut Runtime, args: &[sandbox::TypedValue]) -> Result { + fn ext_create( + e: &mut Runtime, + args: &[sandbox::TypedValue], + ) -> Result { let code_ptr = args[0].as_i32().unwrap() as u32; let code_len = args[1].as_i32().unwrap() as u32; let value_ptr = args[2].as_i32().unwrap() as u32; @@ -291,9 +299,8 @@ pub fn execute<'a, T: Ext>( gas_used: 0, }; - let mut instance = - sandbox::Instance::new(&instrumented_code, &imports, &mut runtime) - .map_err(|_| Error::Instantiate)?; + let mut instance = sandbox::Instance::new(&instrumented_code, &imports, &mut runtime) + .map_err(|_| Error::Instantiate)?; instance .invoke(b"call", &[], &mut runtime) .map(|_| ()) @@ -354,14 +361,15 @@ impl ContractModule { /// Memory section contains declarations of internal linear memories, so if we find one /// we reject such a module. 
fn ensure_no_internal_memory(&self) -> Result<(), Error> { - let module = self.module + let module = self + .module .as_ref() .expect("On entry to the function `module` can't be None; qed"); if module .memory_section() .map_or(false, |ms| ms.entries().len() > 0) { - return Err(Error::InternalMemoryDeclared); + return Err(Error::InternalMemoryDeclared) } Ok(()) } @@ -371,7 +379,8 @@ impl ContractModule { .with_grow_cost(self.config.grow_mem_cost) .with_forbidden_floats(); - let module = self.module + let module = self + .module .take() .expect("On entry to the function `module` can't be `None`; qed"); @@ -383,7 +392,8 @@ impl ContractModule { } fn inject_stack_height_metering(&mut self) -> Result<(), Error> { - let module = self.module + let module = self + .module .take() .expect("On entry to the function `module` can't be `None`; qed"); @@ -397,7 +407,8 @@ impl ContractModule { /// Find the memory import entry and return it's descriptor. fn find_mem_import(&self) -> Option<&MemoryType> { - let import_section = self.module + let import_section = self + .module .as_ref() .expect("On entry to the function `module` can't be `None`; qed") .import_section()?; @@ -405,7 +416,7 @@ impl ContractModule { if let ("env", "memory", &External::Memory(ref memory_type)) = (import.module(), import.field(), import.external()) { - return Some(memory_type); + return Some(memory_type) } } None @@ -440,22 +451,19 @@ fn prepare_contract(original_code: &[u8]) -> Result { match (limits.initial(), limits.maximum()) { (initial, Some(maximum)) if initial > maximum => { // Requested initial number of pages should not exceed the requested maximum. - return Err(Error::Memory); - } + return Err(Error::Memory) + }, (_, Some(maximum)) if maximum > config.max_memory_pages => { // Maximum number of pages should not exceed the configured maximum. - return Err(Error::Memory); - } + return Err(Error::Memory) + }, (_, None) => { // Maximum number of pages should be always declared. // This isn't a hard requirement and can be treated as a maxiumum set // to configured maximum. 
return Err(Error::Memory) - } - (initial, maximum) => sandbox::Memory::new( - initial, - maximum, - ) + }, + (initial, maximum) => sandbox::Memory::new(initial, maximum), } }, @@ -473,9 +481,9 @@ fn prepare_contract(original_code: &[u8]) -> Result { #[cfg(test)] mod tests { use super::*; + use std::collections::HashMap; use std::fmt; use wabt; - use std::collections::HashMap; #[derive(Debug, PartialEq, Eq)] struct CreateEntry { @@ -504,20 +512,13 @@ mod tests { *self.storage.entry(key.to_vec()).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); } fn create(&mut self, code: &[u8], value: Self::Balance) { - self.creates.push( - CreateEntry { - code: code.to_vec(), - endownment: value, - } - ); + self.creates.push(CreateEntry { + code: code.to_vec(), + endownment: value, + }); } fn transfer(&mut self, to: &Self::AccountId, value: Self::Balance) { - self.transfers.push( - TransferEntry { - to: *to, - value, - } - ); + self.transfers.push(TransferEntry { to: *to, value }); } } @@ -528,18 +529,13 @@ mod tests { } fn parse_and_prepare_wat(wat: &str) -> Result { - let wasm = wabt::Wat2Wasm::new() - .validate(false) - .convert(wat) - .unwrap(); + let wasm = wabt::Wat2Wasm::new().validate(false).convert(wat).unwrap(); prepare_contract(wasm.as_ref()) } #[test] fn internal_memory_declaration() { - let r = parse_and_prepare_wat( - r#"(module (memory 1 1))"#, - ); + let r = parse_and_prepare_wat(r#"(module (memory 1 1))"#); assert_matches!(r, Err(Error::InternalMemoryDeclared)); } @@ -548,40 +544,28 @@ mod tests { // This test assumes that maximum page number is configured to a certain number. assert_eq!(Config::default().max_memory_pages, 16); - let r = parse_and_prepare_wat( - r#"(module (import "env" "memory" (memory 1 1)))"#, - ); + let r = parse_and_prepare_wat(r#"(module (import "env" "memory" (memory 1 1)))"#); assert_matches!(r, Ok(_)); // No memory import - let r = parse_and_prepare_wat( - r#"(module)"#, - ); + let r = parse_and_prepare_wat(r#"(module)"#); assert_matches!(r, Ok(_)); // incorrect import name. That's kinda ok, since this will fail // at later stage when imports will be resolved. - let r = parse_and_prepare_wat( - r#"(module (import "vne" "memory" (memory 1 1)))"#, - ); + let r = parse_and_prepare_wat(r#"(module (import "vne" "memory" (memory 1 1)))"#); assert_matches!(r, Ok(_)); // initial exceed maximum - let r = parse_and_prepare_wat( - r#"(module (import "env" "memory" (memory 16 1)))"#, - ); + let r = parse_and_prepare_wat(r#"(module (import "env" "memory" (memory 16 1)))"#); assert_matches!(r, Err(Error::Memory)); // no maximum - let r = parse_and_prepare_wat( - r#"(module (import "env" "memory" (memory 1)))"#, - ); + let r = parse_and_prepare_wat(r#"(module (import "env" "memory" (memory 1)))"#); assert_matches!(r, Err(Error::Memory)); // requested maximum exceed configured maximum - let r = parse_and_prepare_wat( - r#"(module (import "env" "memory" (memory 1 17)))"#, - ); + let r = parse_and_prepare_wat(r#"(module (import "env" "memory" (memory 1 17)))"#); assert_matches!(r, Err(Error::Memory)); } @@ -618,14 +602,10 @@ mod tests { let mut mock_ext = MockExt::default(); execute(&code_transfer, &mut mock_ext, 50_000).unwrap(); - assert_eq!(&mock_ext.transfers, &[TransferEntry { - to: 2, - value: 6, - }]); + assert_eq!(&mock_ext.transfers, &[TransferEntry { to: 2, value: 6 }]); } - const CODE_MEM: &str = -r#" + const CODE_MEM: &str = r#" (module ;; Internal memory is not allowed. 
(memory 1 1) @@ -642,9 +622,6 @@ r#" let mut mock_ext = MockExt::default(); - assert_matches!( - execute(&code_mem, &mut mock_ext, 100_000), - Err(_) - ); + assert_matches!(execute(&code_mem, &mut mock_ext, 100_000), Err(_)); } } diff --git a/substrate/runtime/council/src/lib.rs b/substrate/runtime/council/src/lib.rs index a40a64b27b84f..68813009f102b 100644 --- a/substrate/runtime/council/src/lib.rs +++ b/substrate/runtime/council/src/lib.rs @@ -27,25 +27,28 @@ extern crate serde_derive; extern crate integer_sqrt; extern crate substrate_codec as codec; +#[cfg(any(feature = "std", test))] +extern crate substrate_keyring as keyring; extern crate substrate_primitives; -#[cfg(any(feature = "std", test))] extern crate substrate_keyring as keyring; -#[macro_use] extern crate substrate_runtime_std as rstd; +#[macro_use] +extern crate substrate_runtime_std as rstd; extern crate substrate_runtime_io as runtime_io; -#[macro_use] extern crate substrate_runtime_support; -extern crate substrate_runtime_primitives as primitives; +#[macro_use] +extern crate substrate_runtime_support; extern crate substrate_runtime_consensus as consensus; extern crate substrate_runtime_democracy as democracy; +extern crate substrate_runtime_primitives as primitives; extern crate substrate_runtime_session as session; extern crate substrate_runtime_staking as staking; extern crate substrate_runtime_system as system; #[cfg(test)] extern crate substrate_runtime_timestamp as timestamp; +use primitives::traits::{As, AuxLookup, One, RefInto, Zero}; use rstd::prelude::*; -use primitives::traits::{Zero, One, RefInto, As, AuxLookup}; -use substrate_runtime_support::{StorageValue, StorageMap}; -use substrate_runtime_support::dispatch::Result; use staking::address::Address; +use substrate_runtime_support::dispatch::Result; +use substrate_runtime_support::{StorageMap, StorageValue}; pub mod voting; @@ -53,8 +56,8 @@ pub mod voting; // // all unbonded public operations should be constant time. // all other public operations must be linear time in terms of prior public operations and: -// - those "valid" ones that cost nothing be limited to a constant number per single protected operation -// - the rest costing the same order as the computational complexity +// - those "valid" ones that cost nothing be limited to a constant number per single protected +// operation - the rest costing the same order as the computational complexity // all protected operations must complete in at most O(public operations) // // we assume "beneficial" transactions will have the same access as attack transactions. @@ -62,12 +65,13 @@ pub mod voting; // any storage requirements should be bonded by the same order as the volume. 
// public operations: -// - express approvals (you pay in a "voter" bond the first time you do this; O(1); one extra DB entry, one DB change) -// - remove active voter (you get your "voter" bond back; O(1); one fewer DB entry, one DB change) -// - remove inactive voter (either you or the target is removed; if the target, you get their "voter" bond back; O(1); one fewer DB entry, one DB change) +// - express approvals (you pay in a "voter" bond the first time you do this; O(1); one extra DB +// entry, one DB change) - remove active voter (you get your "voter" bond back; O(1); one fewer DB +// entry, one DB change) - remove inactive voter (either you or the target is removed; if the +// target, you get their "voter" bond back; O(1); one fewer DB entry, one DB change) // - submit candidacy (you pay a "candidate" bond; O(1); one extra DB entry, two DB changes) -// - present winner/runner-up (you may pay a "presentation" bond of O(voters) if the presentation is invalid; O(voters) compute; ) -// protected operations: +// - present winner/runner-up (you may pay a "presentation" bond of O(voters) if the presentation +// is invalid; O(voters) compute; ) protected operations: // - remove candidacy (remove all votes for a candidate) (one fewer DB entry, two DB changes) // to avoid a potentially problematic case of not-enough approvals prior to voting causing a @@ -183,7 +187,6 @@ decl_storage! { } impl Module { - // exposed immutables. /// True if we're currently in a presentation period. @@ -210,20 +213,22 @@ impl Module { None } else { let c = Self::active_council(); - let (next_possible, count, coming) = - if let Some((tally_end, comers, leavers)) = Self::next_finalise() { - // if there's a tally in progress, then next tally can begin immediately afterwards - (tally_end, c.len() - leavers.len() + comers as usize, comers) - } else { - (>::block_number(), c.len(), 0) - }; + let (next_possible, count, coming) = if let Some((tally_end, comers, leavers)) = + Self::next_finalise() + { + // if there's a tally in progress, then next tally can begin immediately afterwards + (tally_end, c.len() - leavers.len() + comers as usize, comers) + } else { + (>::block_number(), c.len(), 0) + }; if count < desired_seats as usize { Some(next_possible) } else { - // next tally begins once enough council members expire to bring members below desired. + // next tally begins once enough council members expire to bring members below + // desired. if desired_seats <= coming { - // the entire amount of desired seats is less than those new members - we'll have - // to wait until they expire. + // the entire amount of desired seats is less than those new members - we'll + // have to wait until they expire. Some(next_possible + Self::term_duration()) } else { Some(c[c.len() - (desired_seats - coming) as usize].1) @@ -237,7 +242,10 @@ impl Module { /// Set candidate approvals. Approval slots stay valid as long as candidates in those slots /// are registered. fn set_approvals(aux: &T::PublicAux, votes: Vec, index: VoteIndex) -> Result { - ensure!(!Self::presentation_active(), "no approval changes during presentation period"); + ensure!( + !Self::presentation_active(), + "no approval changes during presentation period" + ); ensure!(index == Self::vote_index(), "incorrect vote index"); if !>::exists(aux.ref_into()) { // not yet a voter - deduct bond. 
@@ -265,34 +273,56 @@ impl Module { signed_index: u32, who: Address, who_index: u32, - assumed_vote_index: VoteIndex + assumed_vote_index: VoteIndex, ) -> Result { let who = >::lookup(who)?; - ensure!(!Self::presentation_active(), "cannot reap during presentation period"); - ensure!(Self::voter_last_active(aux.ref_into()).is_some(), "reaper must be a voter"); - let last_active = Self::voter_last_active(&who).ok_or("target for inactivity cleanup must be active")?; - ensure!(assumed_vote_index == Self::vote_index(), "vote index not current"); - ensure!(last_active < assumed_vote_index - Self::inactivity_grace_period(), "cannot reap during grace perid"); + ensure!( + !Self::presentation_active(), + "cannot reap during presentation period" + ); + ensure!( + Self::voter_last_active(aux.ref_into()).is_some(), + "reaper must be a voter" + ); + let last_active = + Self::voter_last_active(&who).ok_or("target for inactivity cleanup must be active")?; + ensure!( + assumed_vote_index == Self::vote_index(), + "vote index not current" + ); + ensure!( + last_active < assumed_vote_index - Self::inactivity_grace_period(), + "cannot reap during grace perid" + ); let voters = Self::voters(); let signed_index = signed_index as usize; let who_index = who_index as usize; - ensure!(signed_index < voters.len() && &voters[signed_index] == aux.ref_into(), "bad reporter index"); - ensure!(who_index < voters.len() && voters[who_index] == who, "bad target index"); + ensure!( + signed_index < voters.len() && &voters[signed_index] == aux.ref_into(), + "bad reporter index" + ); + ensure!( + who_index < voters.len() && voters[who_index] == who, + "bad target index" + ); // will definitely kill one of signed or who now. - let valid = !Self::approvals_of(&who).iter() + let valid = !Self::approvals_of(&who) + .iter() .zip(Self::candidates().iter()) - .any(|(&appr, addr)| - appr && - *addr != T::AccountId::default() && - Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active)/*defensive only: all items in candidates list are registered*/ + .any( + |(&appr, addr)| { + appr + && *addr != T::AccountId::default() + && Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active) + }, // defensive only: all items in candidates list are registered ); Self::remove_voter( if valid { &who } else { aux.ref_into() }, if valid { who_index } else { signed_index }, - voters + voters, ); if valid { // This only fails if `who` doesn't exist, which it clearly must do since its the aux. @@ -306,12 +336,21 @@ impl Module { /// Remove a voter. All votes are cancelled and the voter deposit is returned. fn retract_voter(aux: &T::PublicAux, index: u32) -> Result { - ensure!(!Self::presentation_active(), "cannot retract when presenting"); - ensure!(>::exists(aux.ref_into()), "cannot retract non-voter"); + ensure!( + !Self::presentation_active(), + "cannot retract when presenting" + ); + ensure!( + >::exists(aux.ref_into()), + "cannot retract non-voter" + ); let voters = Self::voters(); let index = index as usize; ensure!(index < voters.len(), "retraction index invalid"); - ensure!(&voters[index] == aux.ref_into(), "retraction index mismatch"); + ensure!( + &voters[index] == aux.ref_into(), + "retraction index mismatch" + ); Self::remove_voter(aux.ref_into(), index, voters); >::unreserve(aux.ref_into(), Self::voting_bond()); @@ -322,13 +361,16 @@ impl Module { /// /// Account must have enough transferrable funds in it to pay the bond. 
fn submit_candidacy(aux: &T::PublicAux, slot: u32) -> Result { - ensure!(!Self::is_a_candidate(aux.ref_into()), "duplicate candidate submission"); + ensure!( + !Self::is_a_candidate(aux.ref_into()), + "duplicate candidate submission" + ); let slot = slot as usize; let count = Self::candidate_count() as usize; let candidates = Self::candidates(); ensure!( - (slot == count && count == candidates.len()) || - (slot < candidates.len() && candidates[slot] == T::AccountId::default()), + (slot == count && count == candidates.len()) + || (slot < candidates.len() && candidates[slot] == T::AccountId::default()), "invalid candidate slot" ); // NOTE: This must be last as it has side-effects. @@ -348,42 +390,60 @@ impl Module { } /// Claim that `signed` is one of the top Self::carry_count() + current_vote().1 candidates. - /// Only works if the `block_number >= current_vote().0` and `< current_vote().0 + presentation_duration()`` - /// `signed` should have at least + /// Only works if the `block_number >= current_vote().0` and `< current_vote().0 + + /// presentation_duration()`` `signed` should have at least fn present_winner( aux: &T::PublicAux, candidate: Address, total: T::Balance, - index: VoteIndex + index: VoteIndex, ) -> Result { let candidate = >::lookup(candidate)?; ensure!(index == Self::vote_index(), "index not current"); - let (_, _, expiring) = Self::next_finalise().ok_or("cannot present outside of presentation period")?; + let (_, _, expiring) = + Self::next_finalise().ok_or("cannot present outside of presentation period")?; let stakes = Self::snapshoted_stakes(); let voters = Self::voters(); - let bad_presentation_punishment = Self::present_slash_per_voter() * T::Balance::sa(voters.len() as u64); - ensure!(>::can_slash(aux.ref_into(), bad_presentation_punishment), "presenter must have sufficient slashable funds"); + let bad_presentation_punishment = + Self::present_slash_per_voter() * T::Balance::sa(voters.len() as u64); + ensure!( + >::can_slash(aux.ref_into(), bad_presentation_punishment), + "presenter must have sufficient slashable funds" + ); - let mut leaderboard = Self::leaderboard().ok_or("leaderboard must exist while present phase active")?; - ensure!(total > leaderboard[0].0, "candidate not worthy of leaderboard"); + let mut leaderboard = + Self::leaderboard().ok_or("leaderboard must exist while present phase active")?; + ensure!( + total > leaderboard[0].0, + "candidate not worthy of leaderboard" + ); - if let Some(p) = Self::active_council().iter().position(|&(ref c, _)| c == &candidate) { - ensure!(p < expiring.len(), "candidate must not form a duplicated member if elected"); + if let Some(p) = Self::active_council() + .iter() + .position(|&(ref c, _)| c == &candidate) + { + ensure!( + p < expiring.len(), + "candidate must not form a duplicated member if elected" + ); } let (registered_since, candidate_index): (VoteIndex, u32) = Self::candidate_reg_info(&candidate).ok_or("presented candidate must be current")?; - let actual_total = voters.iter() + let actual_total = voters + .iter() .zip(stakes.iter()) - .filter_map(|(voter, stake)| - match Self::voter_last_active(voter) { - Some(b) if b >= registered_since => - Self::approvals_of(voter).get(candidate_index as usize) - .and_then(|approved| if *approved { Some(*stake) } else { None }), - _ => None, - }) + .filter_map(|(voter, stake)| match Self::voter_last_active(voter) { + Some(b) if b >= registered_since => Self::approvals_of(voter) + .get(candidate_index as usize) + .and_then(|approved| if *approved { Some(*stake) } else { 
None }), + _ => None, + }) .fold(Zero::zero(), |acc, n| acc + n); - let dupe = leaderboard.iter().find(|&&(_, ref c)| c == &candidate).is_some(); + let dupe = leaderboard + .iter() + .find(|&&(_, ref c)| c == &candidate) + .is_some(); if total == actual_total && !dupe { // insert into leaderboard leaderboard[0] = (total, candidate); @@ -394,7 +454,11 @@ impl Module { // we can rest assured it will be Ok since we checked `can_slash` earlier; still // better safe than sorry. let _ = >::slash(aux.ref_into(), bad_presentation_punishment); - Err(if dupe { "duplicate presentation" } else { "incorrect total" }) + Err(if dupe { + "duplicate presentation" + } else { + "incorrect total" + }) } } @@ -454,45 +518,66 @@ impl Module { /// Remove a voter from the system. Trusts that Self::voters()[index] != voter. fn remove_voter(voter: &T::AccountId, index: usize, mut voters: Vec) { - >::put({ voters.swap_remove(index); voters }); + >::put({ + voters.swap_remove(index); + voters + }); >::remove(voter); >::remove(voter); } - /// Close the voting, snapshot the staking and the number of seats that are actually up for grabs. + /// Close the voting, snapshot the staking and the number of seats that are actually up for + /// grabs. fn start_tally() { let active_council = Self::active_council(); let desired_seats = Self::desired_seats() as usize; let number = >::block_number(); - let expiring = active_council.iter().take_while(|i| i.1 == number).map(|i| i.0.clone()).collect::>(); + let expiring = active_council + .iter() + .take_while(|i| i.1 == number) + .map(|i| i.0.clone()) + .collect::>(); if active_council.len() - expiring.len() < desired_seats { let empty_seats = desired_seats - (active_council.len() - expiring.len()); - >::put((number + Self::presentation_duration(), empty_seats as u32, expiring)); + >::put(( + number + Self::presentation_duration(), + empty_seats as u32, + expiring, + )); let voters = Self::voters(); - let votes = voters.iter().map(>::voting_balance).collect::>(); + let votes = voters + .iter() + .map(>::voting_balance) + .collect::>(); >::put(votes); // initialise leaderboard. let leaderboard_size = empty_seats + Self::carry_count() as usize; - >::put(vec![(T::Balance::zero(), T::AccountId::default()); leaderboard_size]); + >::put(vec![ + (T::Balance::zero(), T::AccountId::default()); + leaderboard_size + ]); } } - /// Finalise the vote, removing each of the `removals` and inserting `seats` of the most approved - /// candidates in their place. If the total council members is less than the desired membership - /// a new vote is started. + /// Finalise the vote, removing each of the `removals` and inserting `seats` of the most + /// approved candidates in their place. If the total council members is less than the desired + /// membership a new vote is started. /// Clears all presented candidates, returning the bond of the elected ones. fn finalise_tally() -> Result { >::kill(); let (_, coming, expiring): (T::BlockNumber, u32, Vec) = - >::take().ok_or("finalise can only be called after a tally is started.")?; - let leaderboard: Vec<(T::Balance, T::AccountId)> = >::take().unwrap_or_default(); + >::take() + .ok_or("finalise can only be called after a tally is started.")?; + let leaderboard: Vec<(T::Balance, T::AccountId)> = + >::take().unwrap_or_default(); let new_expiry = >::block_number() + Self::term_duration(); // return bond to winners. 
let candidacy_bond = Self::candidacy_bond(); - for &(_, ref w) in leaderboard.iter() + for &(_, ref w) in leaderboard + .iter() .rev() .take_while(|&&(b, _)| !b.is_zero()) .take(coming as usize) @@ -504,20 +589,24 @@ impl Module { let mut new_council: Vec<_> = Self::active_council() .into_iter() .skip(expiring.len()) - .chain(leaderboard.iter() - .rev() - .take_while(|&&(b, _)| !b.is_zero()) - .take(coming as usize) - .cloned() - .map(|(_, a)| (a, new_expiry))) + .chain( + leaderboard + .iter() + .rev() + .take_while(|&&(b, _)| !b.is_zero()) + .take(coming as usize) + .cloned() + .map(|(_, a)| (a, new_expiry)), + ) .collect(); new_council.sort_by_key(|&(_, expiry)| expiry); >::put(new_council); // clear all except runners-up from candidate list. let candidates = Self::candidates(); - let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. - let runners_up = leaderboard.into_iter() + let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. + let runners_up = leaderboard + .into_iter() .rev() .take_while(|&(b, _)| !b.is_zero()) .skip(coming as usize) @@ -534,7 +623,10 @@ impl Module { } } // discard any superfluous slots. - if let Some(last_index) = new_candidates.iter().rposition(|c| *c != T::AccountId::default()) { + if let Some(last_index) = new_candidates + .iter() + .rposition(|c| *c != T::AccountId::default()) + { new_candidates.truncate(last_index + 1); } >::put(new_candidates); @@ -561,7 +653,6 @@ pub struct GenesisConfig { pub term_duration: T::BlockNumber, pub inactive_grace_period: T::BlockNumber, - // for the council's votes. pub cooloff_period: T::BlockNumber, pub voting_period: T::BlockNumber, @@ -588,8 +679,7 @@ impl Default for GenesisConfig { } #[cfg(any(feature = "std", test))] -impl primitives::BuildStorage for GenesisConfig -{ +impl primitives::BuildStorage for GenesisConfig { fn build_storage(self) -> ::std::result::Result { use codec::Slicable; use runtime_io::twox_128; @@ -616,11 +706,11 @@ impl primitives::BuildStorage for GenesisConfig #[cfg(test)] mod tests { pub use super::*; + use primitives::testing::{Digest, Header}; + use primitives::traits::{BlakeTwo256, HasPublicAux, Identity}; + use primitives::BuildStorage; pub use runtime_io::with_externalities; pub use substrate_primitives::H256; - use primitives::BuildStorage; - use primitives::traits::{HasPublicAux, Identity, BlakeTwo256}; - use primitives::testing::{Digest, Header}; impl_outer_dispatch! { #[derive(Debug, Clone, Eq, Serialize, Deserialize, PartialEq)] @@ -668,57 +758,78 @@ mod tests { impl Trait for Test {} pub fn new_test_ext(with_council: bool) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); - t.extend(consensus::GenesisConfig::{ - code: vec![], - authorities: vec![], - }.build_storage().unwrap()); - t.extend(session::GenesisConfig::{ - session_length: 1, //??? or 2? 
- validators: vec![10, 20], - broken_percent_late: 100, - }.build_storage().unwrap()); - t.extend(staking::GenesisConfig::{ - sessions_per_era: 1, - current_era: 0, - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - intentions: vec![], - validator_count: 2, - bonding_duration: 0, - transaction_base_fee: 0, - transaction_byte_fee: 0, - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - contract_fee: 0, - reclaim_rebate: 0, - early_era_slash: 0, - session_reward: 0, - }.build_storage().unwrap()); - t.extend(democracy::GenesisConfig::{ - launch_period: 1, - voting_period: 3, - minimum_deposit: 1, - }.build_storage().unwrap()); - t.extend(GenesisConfig::{ - candidacy_bond: 9, - voter_bond: 3, - present_slash_per_voter: 1, - carry_count: 2, - inactive_grace_period: 1, - active_council: if with_council { vec![ - (1, 10), - (2, 10), - (3, 10) - ] } else { vec![] }, - approval_voting_period: 4, - presentation_duration: 2, - desired_seats: 2, - term_duration: 5, - cooloff_period: 2, - voting_period: 1, - }.build_storage().unwrap()); - t.extend(timestamp::GenesisConfig::::default().build_storage().unwrap()); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + t.extend( + consensus::GenesisConfig:: { + code: vec![], + authorities: vec![], + }.build_storage() + .unwrap(), + ); + t.extend( + session::GenesisConfig:: { + session_length: 1, //??? or 2? + validators: vec![10, 20], + broken_percent_late: 100, + }.build_storage() + .unwrap(), + ); + t.extend( + staking::GenesisConfig:: { + sessions_per_era: 1, + current_era: 0, + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + intentions: vec![], + validator_count: 2, + bonding_duration: 0, + transaction_base_fee: 0, + transaction_byte_fee: 0, + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + contract_fee: 0, + reclaim_rebate: 0, + early_era_slash: 0, + session_reward: 0, + }.build_storage() + .unwrap(), + ); + t.extend( + democracy::GenesisConfig:: { + launch_period: 1, + voting_period: 3, + minimum_deposit: 1, + }.build_storage() + .unwrap(), + ); + t.extend( + GenesisConfig:: { + candidacy_bond: 9, + voter_bond: 3, + present_slash_per_voter: 1, + carry_count: 2, + inactive_grace_period: 1, + active_council: if with_council { + vec![(1, 10), (2, 10), (3, 10)] + } else { + vec![] + }, + approval_voting_period: 4, + presentation_duration: 2, + desired_seats: 2, + term_duration: 5, + cooloff_period: 2, + voting_period: 1, + }.build_storage() + .unwrap(), + ); + t.extend( + timestamp::GenesisConfig::::default() + .build_storage() + .unwrap(), + ); t } @@ -862,7 +973,10 @@ mod tests { assert_eq!(Council::candidates(), Vec::::new()); assert_ok!(Council::submit_candidacy(&1, 0)); assert_eq!(Council::candidates(), vec![1]); - assert_noop!(Council::submit_candidacy(&1, 1), "duplicate candidate submission"); + assert_noop!( + Council::submit_candidacy(&1, 1), + "duplicate candidate submission" + ); }); } @@ -871,7 +985,10 @@ mod tests { with_externalities(&mut new_test_ext(false), || { System::set_block_number(1); assert_eq!(Council::candidates(), Vec::::new()); - assert_noop!(Council::submit_candidacy(&7, 0), "candidate has not enough funds"); + assert_noop!( + Council::submit_candidacy(&7, 0), + "candidate has not enough funds" + ); }); } @@ -1019,7 +1136,10 @@ mod tests { assert!(Council::presentation_active()); assert_eq!(Council::present_winner(&4, 2.into(), 20, 0), Ok(())); assert_eq!(Council::present_winner(&4, 5.into(), 50, 0), Ok(())); - 
assert_eq!(Council::leaderboard(), Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)])); + assert_eq!( + Council::leaderboard(), + Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)]) + ); assert_ok!(Council::end_block(System::block_number())); @@ -1049,7 +1169,10 @@ mod tests { System::set_block_number(6); assert_ok!(Council::present_winner(&4, 2.into(), 20, 0)); assert_ok!(Council::present_winner(&4, 5.into(), 50, 0)); - assert_eq!(Council::present_winner(&4, 5.into(), 50, 0), Err("duplicate presentation")); + assert_eq!( + Council::present_winner(&4, 5.into(), 50, 0), + Err("duplicate presentation") + ); assert_ok!(Council::end_block(System::block_number())); assert_eq!(Council::active_council(), vec![(5, 11), (2, 11)]); @@ -1078,9 +1201,11 @@ mod tests { assert_ok!(Council::present_winner(&4, 5.into(), 50, 1)); assert_ok!(Council::end_block(System::block_number())); - assert_ok!(Council::reap_inactive_voter(&5, + assert_ok!(Council::reap_inactive_voter( + &5, Council::voters().iter().position(|&i| i == 5).unwrap() as u32, - 2.into(), Council::voters().iter().position(|&i| i == 2).unwrap() as u32, + 2.into(), + Council::voters().iter().position(|&i| i == 2).unwrap() as u32, 2 )); @@ -1109,7 +1234,10 @@ mod tests { assert_ok!(Council::end_block(System::block_number())); System::set_block_number(10); - assert_noop!(Council::present_winner(&4, 2.into(), 20, 1), "candidate must not form a duplicated member if elected"); + assert_noop!( + Council::present_winner(&4, 2.into(), 20, 1), + "candidate must not form a duplicated member if elected" + ); }); } @@ -1137,9 +1265,11 @@ mod tests { System::set_block_number(11); assert_ok!(Council::submit_candidacy(&1, 0)); - assert_ok!(Council::reap_inactive_voter(&5, + assert_ok!(Council::reap_inactive_voter( + &5, Council::voters().iter().position(|&i| i == 5).unwrap() as u32, - 2.into(), Council::voters().iter().position(|&i| i == 2).unwrap() as u32, + 2.into(), + Council::voters().iter().position(|&i| i == 2).unwrap() as u32, 2 )); @@ -1171,11 +1301,16 @@ mod tests { assert_ok!(Council::present_winner(&4, 5.into(), 50, 1)); assert_ok!(Council::end_block(System::block_number())); - assert_noop!(Council::reap_inactive_voter(&2, - 42, - 2.into(), Council::voters().iter().position(|&i| i == 2).unwrap() as u32, - 2 - ), "bad reporter index"); + assert_noop!( + Council::reap_inactive_voter( + &2, + 42, + 2.into(), + Council::voters().iter().position(|&i| i == 2).unwrap() as u32, + 2 + ), + "bad reporter index" + ); }); } @@ -1200,11 +1335,16 @@ mod tests { assert_ok!(Council::present_winner(&4, 5.into(), 50, 1)); assert_ok!(Council::end_block(System::block_number())); - assert_noop!(Council::reap_inactive_voter(&2, - Council::voters().iter().position(|&i| i == 2).unwrap() as u32, - 2.into(), 42, - 2 - ), "bad target index"); + assert_noop!( + Council::reap_inactive_voter( + &2, + Council::voters().iter().position(|&i| i == 2).unwrap() as u32, + 2.into(), + 42, + 2 + ), + "bad target index" + ); }); } @@ -1216,10 +1356,26 @@ mod tests { assert_ok!(Council::submit_candidacy(&3, 1)); assert_ok!(Council::submit_candidacy(&4, 2)); assert_ok!(Council::submit_candidacy(&5, 3)); - assert_ok!(Council::set_approvals(&2, vec![true, false, false, false], 0)); - assert_ok!(Council::set_approvals(&3, vec![false, true, false, false], 0)); - assert_ok!(Council::set_approvals(&4, vec![false, false, true, false], 0)); - assert_ok!(Council::set_approvals(&5, vec![false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &2, + vec![true, false, false, false], + 0 + )); + 
assert_ok!(Council::set_approvals( + &3, + vec![false, true, false, false], + 0 + )); + assert_ok!(Council::set_approvals( + &4, + vec![false, false, true, false], + 0 + )); + assert_ok!(Council::set_approvals( + &5, + vec![false, false, false, true], + 0 + )); assert_ok!(Council::end_block(System::block_number())); System::set_block_number(6); @@ -1238,9 +1394,11 @@ mod tests { assert_ok!(Council::present_winner(&4, 3.into(), 30, 1)); assert_ok!(Council::end_block(System::block_number())); - assert_ok!(Council::reap_inactive_voter(&4, + assert_ok!(Council::reap_inactive_voter( + &4, Council::voters().iter().position(|&i| i == 4).unwrap() as u32, - 2.into(), Council::voters().iter().position(|&i| i == 2).unwrap() as u32, + 2.into(), + Council::voters().iter().position(|&i| i == 2).unwrap() as u32, 2 )); @@ -1271,11 +1429,16 @@ mod tests { assert_ok!(Council::present_winner(&4, 5.into(), 50, 1)); assert_ok!(Council::end_block(System::block_number())); - assert_noop!(Council::reap_inactive_voter(&4, - 0, - 2.into(), Council::voters().iter().position(|&i| i == 2).unwrap() as u32, - 2 - ), "reaper must be a voter"); + assert_noop!( + Council::reap_inactive_voter( + &4, + 0, + 2.into(), + Council::voters().iter().position(|&i| i == 2).unwrap() as u32, + 2 + ), + "reaper must be a voter" + ); }); } @@ -1290,9 +1453,17 @@ mod tests { assert_ok!(Council::submit_candidacy(&3, 2)); assert_ok!(Council::set_approvals(&3, vec![false, false, true], 0)); assert_ok!(Council::submit_candidacy(&4, 3)); - assert_ok!(Council::set_approvals(&4, vec![false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &4, + vec![false, false, false, true], + 0 + )); assert_ok!(Council::submit_candidacy(&5, 4)); - assert_ok!(Council::set_approvals(&5, vec![false, false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &5, + vec![false, false, false, false, true], + 0 + )); assert_ok!(Council::end_block(System::block_number())); System::set_block_number(6); @@ -1300,7 +1471,10 @@ mod tests { assert_ok!(Council::present_winner(&4, 3.into(), 30, 0)); assert_ok!(Council::present_winner(&4, 4.into(), 40, 0)); assert_ok!(Council::present_winner(&4, 5.into(), 50, 0)); - assert_noop!(Council::present_winner(&4, 2.into(), 20, 0), "candidate not worthy of leaderboard"); + assert_noop!( + Council::present_winner(&4, 2.into(), 20, 0), + "candidate not worthy of leaderboard" + ); }); } @@ -1315,9 +1489,17 @@ mod tests { assert_ok!(Council::submit_candidacy(&3, 2)); assert_ok!(Council::set_approvals(&3, vec![false, false, true], 0)); assert_ok!(Council::submit_candidacy(&4, 3)); - assert_ok!(Council::set_approvals(&4, vec![false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &4, + vec![false, false, false, true], + 0 + )); assert_ok!(Council::submit_candidacy(&5, 4)); - assert_ok!(Council::set_approvals(&5, vec![false, false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &5, + vec![false, false, false, false, true], + 0 + )); assert_ok!(Council::end_block(System::block_number())); System::set_block_number(6); @@ -1327,12 +1509,10 @@ mod tests { assert_ok!(Council::present_winner(&4, 4.into(), 40, 0)); assert_ok!(Council::present_winner(&4, 5.into(), 50, 0)); - assert_eq!(Council::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!( + Council::leaderboard(), + Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); }); } @@ -1341,7 +1521,10 @@ mod tests { with_externalities(&mut new_test_ext(false), || { System::set_block_number(4); 
assert!(!Council::presentation_active()); - assert_noop!(Council::present_winner(&5, 5.into(), 1, 0), "cannot present outside of presentation period"); + assert_noop!( + Council::present_winner(&5, 5.into(), 1, 0), + "cannot present outside of presentation period" + ); }); } @@ -1356,7 +1539,10 @@ mod tests { assert_ok!(Council::end_block(System::block_number())); System::set_block_number(6); - assert_noop!(Council::present_winner(&4, 2.into(), 20, 1), "index not current"); + assert_noop!( + Council::present_winner(&4, 2.into(), 20, 1), + "index not current" + ); }); } @@ -1375,7 +1561,10 @@ mod tests { System::set_block_number(6); assert_eq!(Staking::free_balance(&1), 1); assert_eq!(Staking::reserved_balance(&1), 9); - assert_noop!(Council::present_winner(&1, 1.into(), 20, 0), "presenter must have sufficient slashable funds"); + assert_noop!( + Council::present_winner(&1, 1.into(), 20, 0), + "presenter must have sufficient slashable funds" + ); }); } @@ -1393,7 +1582,10 @@ mod tests { assert_ok!(Council::end_block(System::block_number())); System::set_block_number(6); - assert_err!(Council::present_winner(&4, 2.into(), 80, 0), "incorrect total"); + assert_err!( + Council::present_winner(&4, 2.into(), 80, 0), + "incorrect total" + ); assert_eq!(Staking::voting_balance(&4), 38); }); @@ -1412,30 +1604,34 @@ mod tests { assert_ok!(Council::submit_candidacy(&3, 2)); assert_ok!(Council::set_approvals(&3, vec![false, false, true], 0)); assert_ok!(Council::submit_candidacy(&4, 3)); - assert_ok!(Council::set_approvals(&4, vec![false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &4, + vec![false, false, false, true], + 0 + )); assert_ok!(Council::submit_candidacy(&5, 4)); - assert_ok!(Council::set_approvals(&5, vec![false, false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &5, + vec![false, false, false, false, true], + 0 + )); assert_ok!(Council::end_block(System::block_number())); System::set_block_number(6); assert!(Council::presentation_active()); assert_ok!(Council::present_winner(&4, 1.into(), 60, 0)); - assert_eq!(Council::leaderboard(), Some(vec![ - (0, 0), - (0, 0), - (0, 0), - (60, 1) - ])); + assert_eq!( + Council::leaderboard(), + Some(vec![(0, 0), (0, 0), (0, 0), (60, 1)]) + ); assert_ok!(Council::present_winner(&4, 3.into(), 30, 0)); assert_ok!(Council::present_winner(&4, 4.into(), 40, 0)); assert_ok!(Council::present_winner(&4, 5.into(), 50, 0)); - assert_eq!(Council::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!( + Council::leaderboard(), + Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); assert_ok!(Council::end_block(System::block_number())); @@ -1469,9 +1665,17 @@ mod tests { assert_ok!(Council::submit_candidacy(&3, 2)); assert_ok!(Council::set_approvals(&3, vec![false, false, true], 0)); assert_ok!(Council::submit_candidacy(&4, 3)); - assert_ok!(Council::set_approvals(&4, vec![false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &4, + vec![false, false, false, true], + 0 + )); assert_ok!(Council::submit_candidacy(&5, 4)); - assert_ok!(Council::set_approvals(&5, vec![false, false, false, false, true], 0)); + assert_ok!(Council::set_approvals( + &5, + vec![false, false, false, false, true], + 0 + )); assert_ok!(Council::end_block(System::block_number())); System::set_block_number(6); @@ -1482,7 +1686,11 @@ mod tests { assert_ok!(Council::end_block(System::block_number())); System::set_block_number(8); - assert_ok!(Council::set_approvals(&6, vec![false, false, true, false], 1)); 
+ assert_ok!(Council::set_approvals( + &6, + vec![false, false, true, false], + 1 + )); assert_ok!(Council::set_desired_seats(3)); assert_ok!(Council::end_block(System::block_number())); diff --git a/substrate/runtime/council/src/voting.rs b/substrate/runtime/council/src/voting.rs index 8470d1b420404..941b80d24fd08 100644 --- a/substrate/runtime/council/src/voting.rs +++ b/substrate/runtime/council/src/voting.rs @@ -16,14 +16,14 @@ //! Council voting system. -use rstd::prelude::*; +use super::{Module as Council, Trait}; +use primitives::traits::{Executable, Hashing, RefInto}; use rstd::borrow::Borrow; -use primitives::traits::{Executable, RefInto, Hashing}; +use rstd::prelude::*; use runtime_io::print; use substrate_runtime_support::dispatch::Result; -use substrate_runtime_support::{StorageValue, StorageMap, IsSubType}; -use {system, democracy}; -use super::{Trait, Module as Council}; +use substrate_runtime_support::{IsSubType, StorageMap, StorageValue}; +use {democracy, system}; decl_module! { pub struct Module; @@ -57,34 +57,46 @@ decl_storage! { impl Module { pub fn is_vetoed>(proposal: B) -> bool { Self::veto_of(proposal.borrow()) - .map(|(expiry, _): (T::BlockNumber, Vec)| >::block_number() < expiry) + .map(|(expiry, _): (T::BlockNumber, Vec)| { + >::block_number() < expiry + }) .unwrap_or(false) } pub fn will_still_be_councillor_at(who: &T::AccountId, n: T::BlockNumber) -> bool { - >::active_council().iter() + >::active_council() + .iter() .find(|&&(ref a, _)| a == who) .map(|&(_, expires)| expires > n) .unwrap_or(false) } pub fn is_councillor(who: &T::AccountId) -> bool { - >::active_council().iter() + >::active_council() + .iter() .any(|&(ref a, _)| a == who) } pub fn tally(proposal_hash: &T::Hash) -> (u32, u32, u32) { - Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| Self::vote_of((*p, w.clone()))) + Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| { + Self::vote_of((*p, w.clone())) + }) } // Dispatch fn propose(aux: &T::PublicAux, proposal: Box) -> Result { let expiry = >::block_number() + Self::voting_period(); - ensure!(Self::will_still_be_councillor_at(aux.ref_into(), expiry), "proposer would not be on council"); + ensure!( + Self::will_still_be_councillor_at(aux.ref_into(), expiry), + "proposer would not be on council" + ); let proposal_hash = T::Hashing::hash_of(&proposal); - ensure!(!>::exists(proposal_hash), "duplicate proposals not allowed"); + ensure!( + !>::exists(proposal_hash), + "duplicate proposals not allowed" + ); ensure!(!Self::is_vetoed(&proposal_hash), "proposal is vetoed"); let mut proposals = Self::proposals(); @@ -109,18 +121,35 @@ impl Module { } fn veto(aux: &T::PublicAux, proposal_hash: T::Hash) -> Result { - ensure!(Self::is_councillor(aux.ref_into()), "only councillors may veto council proposals"); - ensure!(>::exists(&proposal_hash), "proposal must exist to be vetoed"); + ensure!( + Self::is_councillor(aux.ref_into()), + "only councillors may veto council proposals" + ); + ensure!( + >::exists(&proposal_hash), + "proposal must exist to be vetoed" + ); let mut existing_vetoers = Self::veto_of(&proposal_hash) .map(|pair| pair.1) .unwrap_or_else(Vec::new); - let insert_position = existing_vetoers.binary_search(aux.ref_into()) - .err().ok_or("a councillor may not veto a proposal twice")?; + let insert_position = existing_vetoers + .binary_search(aux.ref_into()) + .err() + .ok_or("a councillor may not veto a proposal twice")?; existing_vetoers.insert(insert_position, aux.ref_into().clone()); - 
Self::set_veto_of(&proposal_hash, >::block_number() + Self::cooloff_period(), existing_vetoers); - - Self::set_proposals(&Self::proposals().into_iter().filter(|&(_, h)| h != proposal_hash).collect::>()); + Self::set_veto_of( + &proposal_hash, + >::block_number() + Self::cooloff_period(), + existing_vetoers, + ); + + Self::set_proposals( + &Self::proposals() + .into_iter() + .filter(|&(_, h)| h != proposal_hash) + .collect::>(), + ); >::remove(proposal_hash); >::remove(proposal_hash); for (c, _) in >::active_council() { @@ -141,7 +170,6 @@ impl Module { // private - fn set_veto_of(proposal: &T::Hash, expiry: T::BlockNumber, vetoers: Vec) { >::insert(proposal, (expiry, vetoers)); } @@ -151,12 +179,18 @@ impl Module { } fn take_tally(proposal_hash: &T::Hash) -> (u32, u32, u32) { - Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| >::take((*p, w.clone()))) + Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| { + >::take((*p, w.clone())) + }) } - fn generic_tally Option>(proposal_hash: &T::Hash, vote_of: F) -> (u32, u32, u32) { + fn generic_tally Option>( + proposal_hash: &T::Hash, + vote_of: F, + ) -> (u32, u32, u32) { let c = >::active_council(); - let (approve, reject) = c.iter() + let (approve, reject) = c + .iter() .filter_map(|&(ref a, _)| vote_of(a, proposal_hash)) .map(|approve| if approve { (1, 0) } else { (0, 1) }) .fold((0, 0), |(a, b), (c, d)| (a + c, b + d)); @@ -173,8 +207,8 @@ impl Module { Some(&(expiry, hash)) if expiry == n => { // yes this is horrible, but fixing it will need substantial work in storage. Self::set_proposals(&proposals[1..].to_vec()); - >::take(hash).map(|p| (p, hash)) /* defensive only: all queued proposal hashes must have associated proposals*/ - } + >::take(hash).map(|p| (p, hash)) // defensive only: all queued proposal hashes must have associated proposals + }, _ => None, } } @@ -182,7 +216,9 @@ impl Module { fn end_block(now: T::BlockNumber) -> Result { while let Some((proposal, proposal_hash)) = Self::take_proposal_if_expiring_at(now) { let tally = Self::take_tally(&proposal_hash); - if let Some(&democracy::PrivCall::cancel_referendum(ref_index)) = IsSubType::>::is_sub_type(&proposal) { + if let Some(&democracy::PrivCall::cancel_referendum(ref_index)) = + IsSubType::>::is_sub_type(&proposal) + { if let (_, 0, 0) = tally { >::internal_cancel_referendum(ref_index); } @@ -190,8 +226,14 @@ impl Module { if tally.0 > tally.1 + tally.2 { Self::kill_veto_of(&proposal_hash); match tally { - (_, 0, 0) => >::internal_start_referendum(proposal, democracy::VoteThreshold::SuperMajorityAgainst).map(|_| ())?, - _ => >::internal_start_referendum(proposal, democracy::VoteThreshold::SimpleMajority).map(|_| ())?, + (_, 0, 0) => >::internal_start_referendum( + proposal, + democracy::VoteThreshold::SuperMajorityAgainst, + ).map(|_| ())?, + _ => >::internal_start_referendum( + proposal, + democracy::VoteThreshold::SimpleMajority, + ).map(|_| ())?, }; } } @@ -217,9 +259,9 @@ impl Executable for Council { #[cfg(test)] mod tests { use super::*; - use ::tests::*; - use substrate_runtime_support::Hashable; use democracy::VoteThreshold; + use substrate_runtime_support::Hashable; + use tests::*; type CouncilVoting = super::Module; @@ -236,7 +278,10 @@ mod tests { assert_eq!(CouncilVoting::is_councillor(&1), true); assert_eq!(CouncilVoting::is_councillor(&4), false); assert_eq!(CouncilVoting::proposals(), Vec::<(u64, H256)>::new()); - assert_eq!(CouncilVoting::proposal_voters(H256::default()), Vec::::new()); + assert_eq!( + 
CouncilVoting::proposal_voters(H256::default()), + Vec::::new() + ); assert_eq!(CouncilVoting::is_vetoed(&H256::default()), false); assert_eq!(CouncilVoting::vote_of((H256::default(), 1)), None); assert_eq!(CouncilVoting::tally(&H256::default()), (0, 0, 3)); @@ -256,8 +301,17 @@ mod tests { with_externalities(&mut new_test_ext(true), || { System::set_block_number(1); let proposal = bonding_duration_proposal(42); - assert_ok!(Democracy::internal_start_referendum(proposal.clone(), VoteThreshold::SuperMajorityApprove), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, 4, proposal, VoteThreshold::SuperMajorityApprove)]); + assert_ok!( + Democracy::internal_start_referendum( + proposal.clone(), + VoteThreshold::SuperMajorityApprove + ), + 0 + ); + assert_eq!( + Democracy::active_referendums(), + vec![(0, 4, proposal, VoteThreshold::SuperMajorityApprove)] + ); let cancellation = cancel_referendum_proposal(0); let hash = cancellation.blake2_256().into(); @@ -279,7 +333,13 @@ mod tests { with_externalities(&mut new_test_ext(true), || { System::set_block_number(1); let proposal = bonding_duration_proposal(42); - assert_ok!(Democracy::internal_start_referendum(proposal.clone(), VoteThreshold::SuperMajorityApprove), 0); + assert_ok!( + Democracy::internal_start_referendum( + proposal.clone(), + VoteThreshold::SuperMajorityApprove + ), + 0 + ); let cancellation = cancel_referendum_proposal(0); let hash = cancellation.blake2_256().into(); @@ -290,7 +350,10 @@ mod tests { System::set_block_number(2); assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(Democracy::active_referendums(), vec![(0, 4, proposal, VoteThreshold::SuperMajorityApprove)]); + assert_eq!( + Democracy::active_referendums(), + vec![(0, 4, proposal, VoteThreshold::SuperMajorityApprove)] + ); }); } @@ -299,7 +362,13 @@ mod tests { with_externalities(&mut new_test_ext(true), || { System::set_block_number(1); let proposal = bonding_duration_proposal(42); - assert_ok!(Democracy::internal_start_referendum(proposal.clone(), VoteThreshold::SuperMajorityApprove), 0); + assert_ok!( + Democracy::internal_start_referendum( + proposal.clone(), + VoteThreshold::SuperMajorityApprove + ), + 0 + ); let cancellation = cancel_referendum_proposal(0); let hash = cancellation.blake2_256().into(); @@ -309,7 +378,10 @@ mod tests { System::set_block_number(2); assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(Democracy::active_referendums(), vec![(0, 4, proposal, VoteThreshold::SuperMajorityApprove)]); + assert_eq!( + Democracy::active_referendums(), + vec![(0, 4, proposal, VoteThreshold::SuperMajorityApprove)] + ); }); } @@ -337,7 +409,10 @@ mod tests { System::set_block_number(3); assert_ok!(CouncilVoting::propose(&1, Box::new(proposal.clone()))); - assert_noop!(CouncilVoting::veto(&2, hash), "a councillor may not veto a proposal twice"); + assert_noop!( + CouncilVoting::veto(&2, hash), + "a councillor may not veto a proposal twice" + ); }); } @@ -351,7 +426,10 @@ mod tests { assert_ok!(CouncilVoting::veto(&2, hash)); System::set_block_number(2); - assert_noop!(CouncilVoting::propose(&1, Box::new(proposal.clone())), "proposal is vetoed"); + assert_noop!( + CouncilVoting::propose(&1, Box::new(proposal.clone())), + "proposal is vetoed" + ); }); } @@ -373,7 +451,15 @@ mod tests { System::set_block_number(4); assert_ok!(CouncilVoting::end_block(System::block_number())); assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, 7, bonding_duration_proposal(42), 
VoteThreshold::SimpleMajority)]); + assert_eq!( + Democracy::active_referendums(), + vec![( + 0, + 7, + bonding_duration_proposal(42), + VoteThreshold::SimpleMajority, + )] + ); }); } @@ -414,7 +500,10 @@ mod tests { System::set_block_number(1); let proposal = bonding_duration_proposal(42); assert_ok!(CouncilVoting::propose(&1, Box::new(proposal.clone()))); - assert_eq!(CouncilVoting::tally(&proposal.blake2_256().into()), (1, 0, 2)); + assert_eq!( + CouncilVoting::tally(&proposal.blake2_256().into()), + (1, 0, 2) + ); assert_ok!(CouncilVoting::end_block(System::block_number())); System::set_block_number(2); @@ -432,13 +521,19 @@ mod tests { assert_ok!(CouncilVoting::propose(&1, Box::new(proposal.clone()))); assert_ok!(CouncilVoting::vote(&2, proposal.blake2_256().into(), true)); assert_ok!(CouncilVoting::vote(&3, proposal.blake2_256().into(), true)); - assert_eq!(CouncilVoting::tally(&proposal.blake2_256().into()), (3, 0, 0)); + assert_eq!( + CouncilVoting::tally(&proposal.blake2_256().into()), + (3, 0, 0) + ); assert_ok!(CouncilVoting::end_block(System::block_number())); System::set_block_number(2); assert_ok!(CouncilVoting::end_block(System::block_number())); assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, 5, proposal, VoteThreshold::SuperMajorityAgainst)]); + assert_eq!( + Democracy::active_referendums(), + vec![(0, 5, proposal, VoteThreshold::SuperMajorityAgainst)] + ); }); } @@ -450,13 +545,19 @@ mod tests { assert_ok!(CouncilVoting::propose(&1, Box::new(proposal.clone()))); assert_ok!(CouncilVoting::vote(&2, proposal.blake2_256().into(), true)); assert_ok!(CouncilVoting::vote(&3, proposal.blake2_256().into(), false)); - assert_eq!(CouncilVoting::tally(&proposal.blake2_256().into()), (2, 1, 0)); + assert_eq!( + CouncilVoting::tally(&proposal.blake2_256().into()), + (2, 1, 0) + ); assert_ok!(CouncilVoting::end_block(System::block_number())); System::set_block_number(2); assert_ok!(CouncilVoting::end_block(System::block_number())); assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, 5, proposal, VoteThreshold::SimpleMajority)]); + assert_eq!( + Democracy::active_referendums(), + vec![(0, 5, proposal, VoteThreshold::SimpleMajority)] + ); }); } @@ -465,7 +566,10 @@ mod tests { with_externalities(&mut new_test_ext(true), || { System::set_block_number(1); let proposal = bonding_duration_proposal(42); - assert_noop!(CouncilVoting::propose(&4, Box::new(proposal)), "proposer would not be on council"); + assert_noop!( + CouncilVoting::propose(&4, Box::new(proposal)), + "proposer would not be on council" + ); }); } } diff --git a/substrate/runtime/democracy/src/lib.rs b/substrate/runtime/democracy/src/lib.rs index c20d60d2de359..68a788158a4eb 100644 --- a/substrate/runtime/democracy/src/lib.rs +++ b/substrate/runtime/democracy/src/lib.rs @@ -35,20 +35,20 @@ extern crate substrate_primitives; extern crate substrate_runtime_std as rstd; extern crate substrate_codec as codec; +extern crate substrate_runtime_consensus as consensus; extern crate substrate_runtime_io as runtime_io; extern crate substrate_runtime_primitives as primitives; -extern crate substrate_runtime_consensus as consensus; extern crate substrate_runtime_session as session; extern crate substrate_runtime_staking as staking; extern crate substrate_runtime_system as system; #[cfg(test)] extern crate substrate_runtime_timestamp as timestamp; +use primitives::traits::{As, Executable, MaybeSerializeDebug, RefInto, Zero}; use 
rstd::prelude::*; use rstd::result; -use primitives::traits::{Zero, Executable, RefInto, As, MaybeSerializeDebug}; -use substrate_runtime_support::{StorageValue, StorageMap, Parameter, Dispatchable, IsSubType}; use substrate_runtime_support::dispatch::Result; +use substrate_runtime_support::{Dispatchable, IsSubType, Parameter, StorageMap, StorageValue}; mod vote_threshold; pub use vote_threshold::{Approved, VoteThreshold}; @@ -111,7 +111,6 @@ decl_storage! { } impl Module { - // exposed immutables. /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal @@ -126,19 +125,24 @@ impl Module { } /// Get all referendums currently active. - pub fn active_referendums() -> Vec<(ReferendumIndex, T::BlockNumber, T::Proposal, VoteThreshold)> { + pub fn active_referendums() -> Vec<(ReferendumIndex, T::BlockNumber, T::Proposal, VoteThreshold)> + { let next = Self::next_tally(); let last = Self::referendum_count(); - (next..last).into_iter() + (next..last) + .into_iter() .filter_map(|i| Self::referendum_info(i).map(|(n, p, t)| (i, n, p, t))) .collect() } /// Get all referendums ready for tally at block `n`. - pub fn maturing_referendums_at(n: T::BlockNumber) -> Vec<(ReferendumIndex, T::BlockNumber, T::Proposal, VoteThreshold)> { + pub fn maturing_referendums_at( + n: T::BlockNumber, + ) -> Vec<(ReferendumIndex, T::BlockNumber, T::Proposal, VoteThreshold)> { let next = Self::next_tally(); let last = Self::referendum_count(); - (next..last).into_iter() + (next..last) + .into_iter() .filter_map(|i| Self::referendum_info(i).map(|(n, p, t)| (i, n, p, t))) .take_while(|&(_, block_number, _, _)| block_number == n) .collect() @@ -146,10 +150,31 @@ impl Module { /// Get the voters for the current proposal. pub fn tally(ref_index: ReferendumIndex) -> (T::Balance, T::Balance) { - Self::voters_for(ref_index).iter() - .map(|a| (>::voting_balance(a), Self::vote_of((ref_index, a.clone())).unwrap_or(false)/*defensive only: all items come from `voters`; for an item to be in `voters` there must be a vote registered; qed*/)) - .map(|(bal, vote)| if vote { (bal, Zero::zero()) } else { (Zero::zero(), bal) }) - .fold((Zero::zero(), Zero::zero()), |(a, b), (c, d)| (a + c, b + d)) + Self::voters_for(ref_index) + .iter() + .map(|a| { + ( + >::voting_balance(a), + Self::vote_of((ref_index, a.clone())).unwrap_or(false), /* defensive only: + * all items come + * from `voters`; + * for an item to + * be in `voters` + * there must be a + * vote registered; + * qed */ + ) + }) + .map(|(bal, vote)| { + if vote { + (bal, Zero::zero()) + } else { + (Zero::zero(), bal) + } + }) + .fold((Zero::zero(), Zero::zero()), |(a, b), (c, d)| { + (a + c, b + d) + }) } // dispatching. @@ -172,8 +197,7 @@ impl Module { /// Propose a sensitive action to be taken. fn second(aux: &T::PublicAux, proposal: PropIndex) -> Result { - let mut deposit = Self::deposit_of(proposal) - .ok_or("can only second an existing proposal")?; + let mut deposit = Self::deposit_of(proposal).ok_or("can only second an existing proposal")?; >::reserve(aux.ref_into(), deposit.0) .map_err(|_| "seconder's balance too low")?; deposit.1.push(aux.ref_into().clone()); @@ -184,9 +208,14 @@ impl Module { /// Vote in a referendum. If `approve_proposal` is true, the vote is to enact the proposal; /// false would be a vote to keep the status quo.. 
fn vote(aux: &T::PublicAux, ref_index: ReferendumIndex, approve_proposal: bool) -> Result { - ensure!(Self::is_active_referendum(ref_index), "vote given for invalid referendum."); - ensure!(!>::voting_balance(aux.ref_into()).is_zero(), - "transactor must have balance to signal approval."); + ensure!( + Self::is_active_referendum(ref_index), + "vote given for invalid referendum." + ); + ensure!( + !>::voting_balance(aux.ref_into()).is_zero(), + "transactor must have balance to signal approval." + ); if !>::exists(&(ref_index, aux.ref_into().clone())) { let mut voters = Self::voters_for(ref_index); voters.push(aux.ref_into().clone()); @@ -201,7 +230,7 @@ impl Module { Self::inject_referendum( >::block_number() + Self::voting_period(), *proposal, - vote_threshold + vote_threshold, ).map(|_| ()) } @@ -214,8 +243,15 @@ impl Module { // exposed mutables. /// Start a referendum. Can be called directly by the council. - pub fn internal_start_referendum(proposal: T::Proposal, vote_threshold: VoteThreshold) -> result::Result { - >::inject_referendum(>::block_number() + >::voting_period(), proposal, vote_threshold) + pub fn internal_start_referendum( + proposal: T::Proposal, + vote_threshold: VoteThreshold, + ) -> result::Result { + >::inject_referendum( + >::block_number() + >::voting_period(), + proposal, + vote_threshold, + ) } /// Remove a referendum. Can be called directly by the council. @@ -229,10 +265,14 @@ impl Module { fn inject_referendum( end: T::BlockNumber, proposal: T::Proposal, - vote_threshold: VoteThreshold + vote_threshold: VoteThreshold, ) -> result::Result { let ref_index = Self::referendum_count(); - if ref_index > 0 && Self::referendum_info(ref_index - 1).map(|i| i.0 > end).unwrap_or(false) { + if ref_index > 0 + && Self::referendum_info(ref_index - 1) + .map(|i| i.0 > end) + .unwrap_or(false) + { Err("Cannot inject a referendum that ends earlier than preceeding referendum")? } @@ -255,18 +295,25 @@ impl Module { // pick out another public referendum if it's time. 
if (now % Self::launch_period()).is_zero() { let mut public_props = Self::public_props(); - if let Some((winner_index, _)) = public_props.iter() - .enumerate() - .max_by_key(|x| Self::locked_for((x.1).0).unwrap_or_else(Zero::zero)/*defensive only: All current public proposals have an amount locked*/) - { + if let Some((winner_index, _)) = public_props.iter().enumerate().max_by_key( + |x| Self::locked_for((x.1).0).unwrap_or_else(Zero::zero), /* defensive only: + * All current public + * proposals have an + * amount locked */ + ) { let (prop_index, proposal, _) = public_props.swap_remove(winner_index); - if let Some((deposit, depositors)) = >::take(prop_index) {//: (T::Balance, Vec) = + if let Some((deposit, depositors)) = >::take(prop_index) { + //: (T::Balance, Vec) = // refund depositors for d in &depositors { >::unreserve(d, deposit); } >::put(public_props); - Self::inject_referendum(now + Self::voting_period(), proposal, VoteThreshold::SuperMajorityApprove)?; + Self::inject_referendum( + now + Self::voting_period(), + proposal, + VoteThreshold::SuperMajorityApprove, + )?; } else { return Err("depositors always exist for current proposals") } @@ -336,8 +383,7 @@ impl Default for GenesisConfig { } #[cfg(any(feature = "std", test))] -impl primitives::BuildStorage for GenesisConfig -{ +impl primitives::BuildStorage for GenesisConfig { fn build_storage(self) -> ::std::result::Result { use codec::Slicable; use runtime_io::twox_128; @@ -356,12 +402,12 @@ impl primitives::BuildStorage for GenesisConfig #[cfg(test)] mod tests { use super::*; - use runtime_io::with_externalities; - use substrate_primitives::H256; - use primitives::BuildStorage; - use primitives::traits::{HasPublicAux, Identity, BlakeTwo256}; use primitives::testing::{Digest, Header}; + use primitives::traits::{BlakeTwo256, HasPublicAux, Identity}; + use primitives::BuildStorage; + use runtime_io::with_externalities; use session::OnSessionChange; + use substrate_primitives::H256; impl_outer_dispatch! { #[derive(Debug, Clone, Eq, Serialize, Deserialize, PartialEq)] @@ -409,39 +455,57 @@ mod tests { } fn new_test_ext() -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); - t.extend(consensus::GenesisConfig::{ - code: vec![], - authorities: vec![], - }.build_storage().unwrap()); - t.extend(session::GenesisConfig::{ - session_length: 1, //??? or 2? - validators: vec![10, 20], - broken_percent_late: 100, - }.build_storage().unwrap()); - t.extend(staking::GenesisConfig::{ - sessions_per_era: 1, - current_era: 0, - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - intentions: vec![], - validator_count: 2, - bonding_duration: 3, - transaction_base_fee: 0, - transaction_byte_fee: 0, - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - contract_fee: 0, - reclaim_rebate: 0, - early_era_slash: 0, - session_reward: 0, - }.build_storage().unwrap()); - t.extend(GenesisConfig::{ - launch_period: 1, - voting_period: 1, - minimum_deposit: 1, - }.build_storage().unwrap()); - t.extend(timestamp::GenesisConfig::::default().build_storage().unwrap()); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + t.extend( + consensus::GenesisConfig:: { + code: vec![], + authorities: vec![], + }.build_storage() + .unwrap(), + ); + t.extend( + session::GenesisConfig:: { + session_length: 1, //??? or 2? 
+ validators: vec![10, 20], + broken_percent_late: 100, + }.build_storage() + .unwrap(), + ); + t.extend( + staking::GenesisConfig:: { + sessions_per_era: 1, + current_era: 0, + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + intentions: vec![], + validator_count: 2, + bonding_duration: 3, + transaction_base_fee: 0, + transaction_byte_fee: 0, + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + contract_fee: 0, + reclaim_rebate: 0, + early_era_slash: 0, + session_reward: 0, + }.build_storage() + .unwrap(), + ); + t.extend( + GenesisConfig:: { + launch_period: 1, + voting_period: 1, + minimum_deposit: 1, + }.build_storage() + .unwrap(), + ); + t.extend( + timestamp::GenesisConfig::::default() + .build_storage() + .unwrap(), + ); t } @@ -463,7 +527,13 @@ mod tests { } fn propose_sessions_per_era(who: u64, value: u64, locked: u64) -> super::Result { - Democracy::propose(&who, Box::new(Proposal::Staking(staking::PrivCall::set_sessions_per_era(value))), locked) + Democracy::propose( + &who, + Box::new(Proposal::Staking(staking::PrivCall::set_sessions_per_era( + value, + ))), + locked, + ) } #[test] @@ -545,7 +615,10 @@ mod tests { fn poor_proposer_should_not_work() { with_externalities(&mut new_test_ext(), || { System::set_block_number(1); - assert_noop!(propose_sessions_per_era(1, 2, 11), "proposer\'s balance too low"); + assert_noop!( + propose_sessions_per_era(1, 2, 11), + "proposer\'s balance too low" + ); }); } @@ -559,7 +632,13 @@ mod tests { } fn propose_bonding_duration(who: u64, value: u64, locked: u64) -> super::Result { - Democracy::propose(&who, Box::new(Proposal::Staking(staking::PrivCall::set_bonding_duration(value))), locked) + Democracy::propose( + &who, + Box::new(Proposal::Staking(staking::PrivCall::set_bonding_duration( + value, + ))), + locked, + ) } #[test] @@ -599,7 +678,11 @@ mod tests { fn simple_passing_should_work() { with_externalities(&mut new_test_ext(), || { System::set_block_number(1); - let r = Democracy::inject_referendum(1, sessions_per_era_proposal(2), VoteThreshold::SuperMajorityApprove).unwrap(); + let r = Democracy::inject_referendum( + 1, + sessions_per_era_proposal(2), + VoteThreshold::SuperMajorityApprove, + ).unwrap(); assert_ok!(Democracy::vote(&1, r, true)); assert_eq!(Democracy::voters_for(r), vec![1]); @@ -617,7 +700,11 @@ mod tests { fn cancel_referendum_should_work() { with_externalities(&mut new_test_ext(), || { System::set_block_number(1); - let r = Democracy::inject_referendum(1, sessions_per_era_proposal(2), VoteThreshold::SuperMajorityApprove).unwrap(); + let r = Democracy::inject_referendum( + 1, + sessions_per_era_proposal(2), + VoteThreshold::SuperMajorityApprove, + ).unwrap(); assert_ok!(Democracy::vote(&1, r, true)); assert_ok!(Democracy::cancel_referendum(r)); @@ -632,7 +719,11 @@ mod tests { fn simple_failing_should_work() { with_externalities(&mut new_test_ext(), || { System::set_block_number(1); - let r = Democracy::inject_referendum(1, sessions_per_era_proposal(2), VoteThreshold::SuperMajorityApprove).unwrap(); + let r = Democracy::inject_referendum( + 1, + sessions_per_era_proposal(2), + VoteThreshold::SuperMajorityApprove, + ).unwrap(); assert_ok!(Democracy::vote(&1, r, false)); assert_eq!(Democracy::voters_for(r), vec![1]); @@ -650,7 +741,11 @@ mod tests { fn controversial_voting_should_work() { with_externalities(&mut new_test_ext(), || { System::set_block_number(1); - let r = Democracy::inject_referendum(1, sessions_per_era_proposal(2), VoteThreshold::SuperMajorityApprove).unwrap(); + let r 
= Democracy::inject_referendum( + 1, + sessions_per_era_proposal(2), + VoteThreshold::SuperMajorityApprove, + ).unwrap(); assert_ok!(Democracy::vote(&1, r, true)); assert_ok!(Democracy::vote(&2, r, false)); assert_ok!(Democracy::vote(&3, r, false)); @@ -671,7 +766,11 @@ mod tests { fn controversial_low_turnout_voting_should_work() { with_externalities(&mut new_test_ext(), || { System::set_block_number(1); - let r = Democracy::inject_referendum(1, sessions_per_era_proposal(2), VoteThreshold::SuperMajorityApprove).unwrap(); + let r = Democracy::inject_referendum( + 1, + sessions_per_era_proposal(2), + VoteThreshold::SuperMajorityApprove, + ).unwrap(); assert_ok!(Democracy::vote(&5, r, false)); assert_ok!(Democracy::vote(&6, r, true)); @@ -691,7 +790,11 @@ mod tests { assert_eq!(Staking::total_stake(), 210); System::set_block_number(1); - let r = Democracy::inject_referendum(1, sessions_per_era_proposal(2), VoteThreshold::SuperMajorityApprove).unwrap(); + let r = Democracy::inject_referendum( + 1, + sessions_per_era_proposal(2), + VoteThreshold::SuperMajorityApprove, + ).unwrap(); assert_ok!(Democracy::vote(&4, r, true)); assert_ok!(Democracy::vote(&5, r, false)); assert_ok!(Democracy::vote(&6, r, true)); diff --git a/substrate/runtime/democracy/src/vote_threshold.rs b/substrate/runtime/democracy/src/vote_threshold.rs index 8512417b03cc8..7e986831cc02b 100644 --- a/substrate/runtime/democracy/src/vote_threshold.rs +++ b/substrate/runtime/democracy/src/vote_threshold.rs @@ -16,9 +16,9 @@ //! Voting thresholds. -use primitives::traits::{Zero, IntegerSquareRoot}; use codec::{Input, Slicable}; -use rstd::ops::{Add, Mul, Div, Rem}; +use primitives::traits::{IntegerSquareRoot, Zero}; +use rstd::ops::{Add, Div, Mul, Rem}; /// A means of determining if a vote is past pass threshold. #[derive(Clone, Copy, PartialEq, Eq)] @@ -59,25 +59,32 @@ pub trait Approved { } /// Return `true` iff `n1 / d1 < n2 / d2`. `d1` and `d2` may not be zero. -fn compare_rationals + Div + Rem + Ord + Copy>(mut n1: T, mut d1: T, mut n2: T, mut d2: T) -> bool { +fn compare_rationals< + T: Zero + Mul + Div + Rem + Ord + Copy, +>( + mut n1: T, + mut d1: T, + mut n2: T, + mut d2: T, +) -> bool { // Uses a continued fractional representation for a non-overflowing compare. // Detailed at https://janmr.com/blog/2014/05/comparing-rational-numbers-without-overflow/. loop { let q1 = n1 / d1; let q2 = n2 / d2; if q1 < q2 { - return true; + return true } if q2 < q1 { - return false; + return false } let r1 = n1 % d1; let r2 = n2 % d2; if r2.is_zero() { - return false; + return false } if r1.is_zero() { - return true; + return true } n1 = d2; n2 = d1; @@ -86,7 +93,17 @@ fn compare_rationals + Div + Rem + Mul + Div + Rem + Copy> Approved for VoteThreshold { +impl< + Balance: IntegerSquareRoot + + Zero + + Ord + + Add + + Mul + + Div + + Rem + + Copy, + > Approved for VoteThreshold +{ /// Given `approve` votes for and `against` votes against from a total electorate size of /// `electorate` (`electorate - (approve + against)` are abstainers), then returns true if the /// overall outcome is in favour of approval. 
@@ -94,7 +111,9 @@ impl + let voters = approve + against; let sqrt_voters = voters.integer_sqrt(); let sqrt_electorate = electorate.integer_sqrt(); - if sqrt_voters.is_zero() { return false; } + if sqrt_voters.is_zero() { + return false + } match *self { VoteThreshold::SuperMajorityApprove => compare_rationals(against, sqrt_voters, approve, sqrt_electorate), @@ -111,7 +130,13 @@ mod tests { #[test] fn should_work() { - assert_eq!(VoteThreshold::SuperMajorityApprove.approved(60, 50, 210), false); - assert_eq!(VoteThreshold::SuperMajorityApprove.approved(100, 50, 210), true); + assert_eq!( + VoteThreshold::SuperMajorityApprove.approved(60, 50, 210), + false + ); + assert_eq!( + VoteThreshold::SuperMajorityApprove.approved(100, 50, 210), + true + ); } } diff --git a/substrate/runtime/executive/src/lib.rs b/substrate/runtime/executive/src/lib.rs index e5a8a1a19e29a..9cb421e1d3cb5 100644 --- a/substrate/runtime/executive/src/lib.rs +++ b/substrate/runtime/executive/src/lib.rs @@ -24,11 +24,11 @@ extern crate serde; #[macro_use] extern crate serde_derive; -extern crate substrate_runtime_std as rstd; -extern crate substrate_runtime_support as runtime_support; -extern crate substrate_runtime_io as runtime_io; extern crate substrate_codec as codec; +extern crate substrate_runtime_io as runtime_io; extern crate substrate_runtime_primitives as primitives; +extern crate substrate_runtime_std as rstd; +extern crate substrate_runtime_support as runtime_support; extern crate substrate_runtime_system as system; #[cfg(test)] extern crate substrate_runtime_timestamp as timestamp; @@ -49,15 +49,17 @@ extern crate substrate_runtime_session as session; #[cfg(test)] extern crate substrate_runtime_staking as staking; -use rstd::prelude::*; +use codec::Slicable; +use primitives::traits::{ + self, Applyable, AuxLookup, CheckEqual, Checkable, Executable, Hashing, Header, MakePayment, + One, Zero, +}; +use primitives::{ApplyError, ApplyOutcome}; use rstd::marker::PhantomData; +use rstd::prelude::*; use rstd::result; use runtime_support::StorageValue; -use primitives::traits::{self, Header, Zero, One, Checkable, Applyable, CheckEqual, Executable, - MakePayment, Hashing, AuxLookup}; -use codec::Slicable; use system::extrinsics_root; -use primitives::{ApplyOutcome, ApplyError}; mod internal { pub enum ApplyError { @@ -73,27 +75,29 @@ mod internal { } } -pub struct Executive< - System, - Block, - Lookup, - Payment, - Finalisation, ->(PhantomData<(System, Block, Lookup, Payment, Finalisation)>); +pub struct Executive( + PhantomData<(System, Block, Lookup, Payment, Finalisation)>, +); impl< - System: system::Trait, - Block: traits::Block, - Lookup: AuxLookup::Address, Target=System::AccountId>, - Payment: MakePayment, - Finalisation: Executable, -> Executive where - Block::Extrinsic: Checkable + Slicable, - ::Checked: Applyable + System: system::Trait, + Block: traits::Block

, + Lookup: AuxLookup::Address, Target = System::AccountId>, + Payment: MakePayment, + Finalisation: Executable, + > Executive +where + Block::Extrinsic: Checkable + Slicable, + ::Checked: + Applyable, { /// Start the execution of a particular block. pub fn initialise_block(header: &System::Header) { - >::initialise(header.number(), header.parent_hash(), header.extrinsics_root()); + >::initialise( + header.number(), + header.parent_hash(), + header.extrinsics_root(), + ); } fn initial_checks(block: &Block) { @@ -102,14 +106,19 @@ impl< // check parent_hash is correct. let n = header.number().clone(); assert!( - n > System::BlockNumber::zero() && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), + n > System::BlockNumber::zero() + && >::block_hash(n - System::BlockNumber::one()) + == *header.parent_hash(), "Parent hash should be valid." ); // check transaction trie root represents the transactions. let xts_root = extrinsics_root::(&block.extrinsics()); header.extrinsics_root().check_equal(&xts_root); - assert!(header.extrinsics_root() == &xts_root, "Transaction trie root must be valid."); + assert!( + header.extrinsics_root() == &xts_root, + "Transaction trie root must be valid." + ); } /// Actually execute all transitioning for `block`. @@ -121,7 +130,9 @@ impl< // execute transactions let (header, extrinsics) = block.deconstruct(); - extrinsics.into_iter().for_each(Self::apply_extrinsic_no_note); + extrinsics + .into_iter() + .for_each(Self::apply_extrinsic_no_note); // post-transactional book-keeping. Finalisation::execute(); @@ -163,26 +174,39 @@ impl< match Self::apply_extrinsic_no_note_with_len(uxt, l) { Ok(internal::ApplyOutcome::Success) => (), Ok(internal::ApplyOutcome::Fail(e)) => runtime_io::print(e), - Err(internal::ApplyError::CantPay) => panic!("All extrinsics should have sender able to pay their fees"), - Err(internal::ApplyError::BadSignature(_)) => panic!("All extrinsics should be properly signed"), - Err(internal::ApplyError::Stale) | Err(internal::ApplyError::Future) => panic!("All extrinsics should have the correct nonce"), + Err(internal::ApplyError::CantPay) => + panic!("All extrinsics should have sender able to pay their fees"), + Err(internal::ApplyError::BadSignature(_)) => + panic!("All extrinsics should be properly signed"), + Err(internal::ApplyError::Stale) | Err(internal::ApplyError::Future) => + panic!("All extrinsics should have the correct nonce"), } } /// Actually apply an extrinsic given its `encoded_len`; this doesn't note its hash. - fn apply_extrinsic_no_note_with_len(uxt: Block::Extrinsic, encoded_len: usize) -> result::Result { + fn apply_extrinsic_no_note_with_len( + uxt: Block::Extrinsic, + encoded_len: usize, + ) -> result::Result { // Verify the signature is good. - let xt = uxt.check(Lookup::lookup).map_err(internal::ApplyError::BadSignature)?; + let xt = uxt + .check(Lookup::lookup) + .map_err(internal::ApplyError::BadSignature)?; if xt.sender() != &Default::default() { // check index let expected_index = >::account_nonce(xt.sender()); - if xt.index() != &expected_index { return Err( - if xt.index() < &expected_index { internal::ApplyError::Stale } else { internal::ApplyError::Future } - ) } + if xt.index() != &expected_index { + return Err(if xt.index() < &expected_index { + internal::ApplyError::Stale + } else { + internal::ApplyError::Future + }) + } // pay any fees. 
- Payment::make_payment(xt.sender(), encoded_len).map_err(|_| internal::ApplyError::CantPay)?; + Payment::make_payment(xt.sender(), encoded_len) + .map_err(|_| internal::ApplyError::CantPay)?; // AUDIT: Under no circumstances may this function panic from here onwards. @@ -195,7 +219,8 @@ impl< >::put(>::get() + 1u32); - r.map(|_| internal::ApplyOutcome::Success).or_else(|e| Ok(internal::ApplyOutcome::Fail(e))) + r.map(|_| internal::ApplyOutcome::Success) + .or_else(|e| Ok(internal::ApplyOutcome::Fail(e))) } fn final_checks(header: &System::Header) { @@ -208,19 +233,22 @@ impl< // check storage root. let storage_root = System::Hashing::storage_root(); header.state_root().check_equal(&storage_root); - assert!(header.state_root() == &storage_root, "Storage root must match that calculated."); + assert!( + header.state_root() == &storage_root, + "Storage root must match that calculated." + ); } } #[cfg(test)] mod tests { use super::*; - use staking::Call; + use primitives::testing::{Block, Digest, Header}; + use primitives::traits::{AuxLookup, BlakeTwo256, HasPublicAux, Header as HeaderT, Identity}; + use primitives::BuildStorage; use runtime_io::with_externalities; + use staking::Call; use substrate_primitives::H256; - use primitives::BuildStorage; - use primitives::traits::{HasPublicAux, Identity, Header as HeaderT, BlakeTwo256, AuxLookup}; - use primitives::testing::{Digest, Header, Block}; struct NullLookup; impl AuxLookup for NullLookup { @@ -265,31 +293,48 @@ mod tests { } type TestXt = primitives::testing::TestXt>; - type Executive = super::Executive, NullLookup, staking::Module, (session::Module, staking::Module)>; + type Executive = super::Executive< + Test, + Block, + NullLookup, + staking::Module, + (session::Module, staking::Module), + >; #[test] fn staking_balance_transfer_dispatch_works() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); - t.extend(staking::GenesisConfig:: { - sessions_per_era: 0, - current_era: 0, - balances: vec![(1, 111)], - intentions: vec![], - validator_count: 0, - bonding_duration: 0, - transaction_base_fee: 10, - transaction_byte_fee: 0, - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - contract_fee: 0, - reclaim_rebate: 0, - early_era_slash: 0, - session_reward: 0, - }.build_storage().unwrap()); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + t.extend( + staking::GenesisConfig:: { + sessions_per_era: 0, + current_era: 0, + balances: vec![(1, 111)], + intentions: vec![], + validator_count: 0, + bonding_duration: 0, + transaction_base_fee: 10, + transaction_byte_fee: 0, + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + contract_fee: 0, + reclaim_rebate: 0, + early_era_slash: 0, + session_reward: 0, + }.build_storage() + .unwrap(), + ); let xt = primitives::testing::TestXt((1, 0, Call::transfer(2.into(), 69))); with_externalities(&mut t, || { - Executive::initialise_block(&Header::new(1, H256::default(), H256::default(), [69u8; 32].into(), Digest::default())); + Executive::initialise_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); Executive::apply_extrinsic(xt).unwrap(); assert_eq!(>::voting_balance(&1), 32); assert_eq!(>::voting_balance(&2), 69); @@ -297,11 +342,29 @@ mod tests { } fn new_test_ext() -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); - t.extend(consensus::GenesisConfig::::default().build_storage().unwrap()); - 
t.extend(session::GenesisConfig::::default().build_storage().unwrap()); - t.extend(staking::GenesisConfig::::default().build_storage().unwrap()); - t.extend(timestamp::GenesisConfig::::default().build_storage().unwrap()); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + t.extend( + consensus::GenesisConfig::::default() + .build_storage() + .unwrap(), + ); + t.extend( + session::GenesisConfig::::default() + .build_storage() + .unwrap(), + ); + t.extend( + staking::GenesisConfig::::default() + .build_storage() + .unwrap(), + ); + t.extend( + timestamp::GenesisConfig::::default() + .build_storage() + .unwrap(), + ); t } @@ -312,9 +375,13 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("b47a0bfc249af6e00c71a45fcd5619c47b6f71cb4d5c62ab7bf1fe9601d5efc4").into(), - extrinsics_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - digest: Digest { logs: vec![], }, + state_root: hex!( + "b47a0bfc249af6e00c71a45fcd5619c47b6f71cb4d5c62ab7bf1fe9601d5efc4" + ).into(), + extrinsics_root: hex!( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ).into(), + digest: Digest { logs: vec![] }, }, extrinsics: vec![], }); @@ -330,8 +397,10 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root: [0u8; 32].into(), - extrinsics_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - digest: Digest { logs: vec![], }, + extrinsics_root: hex!( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ).into(), + digest: Digest { logs: vec![] }, }, extrinsics: vec![], }); @@ -346,9 +415,11 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("b47a0bfc249af6e00c71a45fcd5619c47b6f71cb4d5c62ab7bf1fe9601d5efc4").into(), + state_root: hex!( + "b47a0bfc249af6e00c71a45fcd5619c47b6f71cb4d5c62ab7bf1fe9601d5efc4" + ).into(), extrinsics_root: [0u8; 32].into(), - digest: Digest { logs: vec![], }, + digest: Digest { logs: vec![] }, }, extrinsics: vec![], }); diff --git a/substrate/runtime/primitives/src/bft.rs b/substrate/runtime/primitives/src/bft.rs index 111c265233af2..6553ef7acbba8 100644 --- a/substrate/runtime/primitives/src/bft.rs +++ b/substrate/runtime/primitives/src/bft.rs @@ -16,8 +16,8 @@ //! Message formats for the BFT consensus layer. 
+use codec::{Input, Slicable}; use rstd::prelude::*; -use codec::{Slicable, Input}; use substrate_primitives::{AuthorityId, Signature}; #[derive(Clone, Copy, PartialEq, Eq)] @@ -59,26 +59,26 @@ impl Slicable for Action { v.push(ActionKind::Propose as u8); round.using_encoded(|s| v.extend(s)); block.using_encoded(|s| v.extend(s)); - } + }, Action::ProposeHeader(ref round, ref hash) => { v.push(ActionKind::ProposeHeader as u8); round.using_encoded(|s| v.extend(s)); hash.using_encoded(|s| v.extend(s)); - } + }, Action::Prepare(ref round, ref hash) => { v.push(ActionKind::Prepare as u8); round.using_encoded(|s| v.extend(s)); hash.using_encoded(|s| v.extend(s)); - } + }, Action::Commit(ref round, ref hash) => { v.push(ActionKind::Commit as u8); round.using_encoded(|s| v.extend(s)); hash.using_encoded(|s| v.extend(s)); - } + }, Action::AdvanceRound(ref round) => { v.push(ActionKind::AdvanceRound as u8); round.using_encoded(|s| v.extend(s)); - } + }, } v @@ -89,22 +89,21 @@ impl Slicable for Action { Some(x) if x == ActionKind::Propose as i8 => { let (round, block) = Slicable::decode(value)?; Some(Action::Propose(round, block)) - } + }, Some(x) if x == ActionKind::ProposeHeader as i8 => { let (round, hash) = Slicable::decode(value)?; Some(Action::ProposeHeader(round, hash)) - } + }, Some(x) if x == ActionKind::Prepare as i8 => { let (round, hash) = Slicable::decode(value)?; Some(Action::Prepare(round, hash)) - } + }, Some(x) if x == ActionKind::Commit as i8 => { let (round, hash) = Slicable::decode(value)?; Some(Action::Commit(round, hash)) - } - Some(x) if x == ActionKind::AdvanceRound as i8 => { - Slicable::decode(value).map(Action::AdvanceRound) - } + }, + Some(x) if x == ActionKind::AdvanceRound as i8 => + Slicable::decode(value).map(Action::AdvanceRound), _ => None, } } @@ -147,7 +146,7 @@ pub struct Justification { /// The hash of the header justified. pub hash: H, /// The signatures and signers of the hash. 
- pub signatures: Vec<(AuthorityId, Signature)> + pub signatures: Vec<(AuthorityId, Signature)>, } impl Slicable for Justification { @@ -221,14 +220,18 @@ impl Slicable for MisbehaviorReport { + MisbehaviorKind::BftDoublePrepare( + ref round, + (ref h_a, ref s_a), + (ref h_b, ref s_b), + ) => { (MisbehaviorCode::BftDoublePrepare as i8).using_encoded(|s| v.extend(s)); round.using_encoded(|s| v.extend(s)); h_a.using_encoded(|s| v.extend(s)); s_a.using_encoded(|s| v.extend(s)); h_b.using_encoded(|s| v.extend(s)); s_b.using_encoded(|s| v.extend(s)); - } + }, MisbehaviorKind::BftDoubleCommit(ref round, (ref h_a, ref s_a), (ref h_b, ref s_b)) => { (MisbehaviorCode::BftDoubleCommit as i8).using_encoded(|s| v.extend(s)); round.using_encoded(|s| v.extend(s)); @@ -236,7 +239,7 @@ impl Slicable for MisbehaviorReport Slicable for MisbehaviorReport { - MisbehaviorKind::BftDoublePrepare( - u32::decode(input)?, - (Hash::decode(input)?, Signature::decode(input)?), - (Hash::decode(input)?, Signature::decode(input)?), - ) - } - MisbehaviorCode::BftDoubleCommit => { - MisbehaviorKind::BftDoubleCommit( - u32::decode(input)?, - (Hash::decode(input)?, Signature::decode(input)?), - (Hash::decode(input)?, Signature::decode(input)?), - ) - } + MisbehaviorCode::BftDoublePrepare => MisbehaviorKind::BftDoublePrepare( + u32::decode(input)?, + (Hash::decode(input)?, Signature::decode(input)?), + (Hash::decode(input)?, Signature::decode(input)?), + ), + MisbehaviorCode::BftDoubleCommit => MisbehaviorKind::BftDoubleCommit( + u32::decode(input)?, + (Hash::decode(input)?, Signature::decode(input)?), + (Hash::decode(input)?, Signature::decode(input)?), + ), }; Some(MisbehaviorReport { @@ -292,7 +291,10 @@ mod test { }; let encoded = report.encode(); - assert_eq!(MisbehaviorReport::::decode(&mut &encoded[..]).unwrap(), report); + assert_eq!( + MisbehaviorReport::::decode(&mut &encoded[..]).unwrap(), + report + ); let report = MisbehaviorReport:: { parent_hash: [0; 32].into(), @@ -306,6 +308,9 @@ mod test { }; let encoded = report.encode(); - assert_eq!(MisbehaviorReport::::decode(&mut &encoded[..]).unwrap(), report); + assert_eq!( + MisbehaviorReport::::decode(&mut &encoded[..]).unwrap(), + report + ); } } diff --git a/substrate/runtime/primitives/src/generic.rs b/substrate/runtime/primitives/src/generic.rs index 6f8a46841c748..7a69825bcaee4 100644 --- a/substrate/runtime/primitives/src/generic.rs +++ b/substrate/runtime/primitives/src/generic.rs @@ -22,13 +22,15 @@ use std::fmt; #[cfg(feature = "std")] use serde::{Deserialize, Deserializer}; +use bft::Justification; +use codec::{Input, Slicable}; +use rstd::ops; use rstd::prelude::*; -use codec::{Slicable, Input}; use runtime_support::AuxDispatchable; -use traits::{self, Member, SimpleArithmetic, SimpleBitOps, MaybeDisplay, Block as BlockT, - Header as HeaderT, Hashing as HashingT}; -use rstd::ops; -use bft::Justification; +use traits::{ + self, Block as BlockT, Hashing as HashingT, Header as HeaderT, MaybeDisplay, Member, + SimpleArithmetic, SimpleBitOps, +}; /// Definition of something that the external world might want to say. 
#[derive(PartialEq, Eq, Clone)] @@ -42,10 +44,11 @@ pub struct Extrinsic { pub function: Call, } -impl Slicable for Extrinsic where +impl Slicable for Extrinsic +where Address: Member + Slicable + MaybeDisplay, Index: Member + Slicable + MaybeDisplay + SimpleArithmetic, - Call: Member + Slicable + Call: Member + Slicable, { fn decode(input: &mut I) -> Option { Some(Extrinsic { @@ -86,8 +89,10 @@ impl UncheckedExtrinsic UncheckedExtrinsic> where - Signature: traits::Verify + Default + Eq, +impl + UncheckedExtrinsic> +where + Signature: traits::Verify + Default + Eq, AccountId: Default + Eq, { /// `true` if this extrinsic is signed. @@ -102,7 +107,7 @@ where Address: Member + Default + MaybeDisplay, Index: Member + MaybeDisplay + SimpleArithmetic, Call: Member, - Signature: traits::Verify + Eq + Default, + Signature: traits::Verify + Eq + Default, AccountId: Member + Default + MaybeDisplay, ::MaybeUnsigned: Member, Extrinsic: Slicable, @@ -115,7 +120,8 @@ where &self.extrinsic.signed } - fn check(self, lookup: ThisLookup) -> Result where + fn check(self, lookup: ThisLookup) -> Result + where ThisLookup: FnOnce(Address) -> Result, { if !self.is_signed() { @@ -125,12 +131,11 @@ where function: self.extrinsic.function, })) } else { - let extrinsic: Extrinsic - = Extrinsic { - signed: lookup(self.extrinsic.signed)?, - index: self.extrinsic.index, - function: self.extrinsic.function, - }; + let extrinsic: Extrinsic = Extrinsic { + signed: lookup(self.extrinsic.signed)?, + index: self.extrinsic.index, + function: self.extrinsic.function, + }; if ::verify_encoded_lazy(&self.signature, &extrinsic, &extrinsic.signed) { Ok(CheckedExtrinsic(extrinsic)) } else { @@ -140,7 +145,9 @@ where } } -impl Slicable for UncheckedExtrinsic where +impl Slicable + for UncheckedExtrinsic +where Signature: Slicable, Extrinsic: Slicable, { @@ -153,7 +160,7 @@ impl Slicable for UncheckedExtrinsic Slicable for UncheckedExtrinsic fmt::Debug for UncheckedExtrinsic where +impl fmt::Debug + for UncheckedExtrinsic +where Address: fmt::Debug, Index: fmt::Debug, Call: fmt::Debug, @@ -190,11 +199,9 @@ impl fmt::Debug for UncheckedExtrinsic - (Extrinsic); +pub struct CheckedExtrinsic(Extrinsic); -impl ops::Deref - for CheckedExtrinsic +impl ops::Deref for CheckedExtrinsic where AccountId: Member + MaybeDisplay, Index: Member + MaybeDisplay + SimpleArithmetic, @@ -207,8 +214,7 @@ where } } -impl traits::Applyable - for CheckedExtrinsic +impl traits::Applyable for CheckedExtrinsic where AccountId: Member + MaybeDisplay, Index: Member + MaybeDisplay + SimpleArithmetic, @@ -237,18 +243,22 @@ pub struct Digest { pub logs: Vec, } -impl Slicable for Digest where - Item: Member + Default + Slicable +impl Slicable for Digest +where + Item: Member + Default + Slicable, { fn decode(input: &mut I) -> Option { - Some(Digest { logs: Slicable::decode(input)? }) + Some(Digest { + logs: Slicable::decode(input)?, + }) } fn using_encoded R>(&self, f: F) -> R { self.logs.using_encoded(f) } } -impl traits::Digest for Digest where - Item: Member + Default + Slicable +impl traits::Digest for Digest +where + Item: Member + Default + Slicable, { type Item = Item; fn push(&mut self, item: Self::Item) { @@ -256,7 +266,6 @@ impl traits::Digest for Digest where } } - /// Abstraction over a block header for a substrate chain. 
#[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Debug, Serialize))] @@ -291,7 +300,9 @@ struct DeserializeHeader { } #[cfg(feature = "std")] -impl From> for Header { +impl From> + for Header +{ fn from(other: DeserializeHeader) -> Self { Header { parent_hash: other.parent_hash, @@ -304,7 +315,9 @@ impl From> for } #[cfg(feature = "std")] -impl<'a, Number: 'a, Hashing: 'a + HashingT, DigestItem: 'a> Deserialize<'a> for Header where +impl<'a, Number: 'a, Hashing: 'a + HashingT, DigestItem: 'a> Deserialize<'a> + for Header +where Number: Deserialize<'a>, Hashing::Output: Deserialize<'a>, DigestItem: Deserialize<'a>, @@ -314,7 +327,8 @@ impl<'a, Number: 'a, Hashing: 'a + HashingT, DigestItem: 'a> Deserialize<'a> for } } -impl Slicable for Header where +impl Slicable for Header +where Number: Member + Slicable + MaybeDisplay + SimpleArithmetic + Slicable, Hashing: HashingT, DigestItem: Member + Default + Slicable, @@ -341,51 +355,81 @@ impl Slicable for Header traits::Header for Header where - Number: Member + ::rstd::hash::Hash + Copy + Slicable + MaybeDisplay + SimpleArithmetic + Slicable, +impl traits::Header for Header +where + Number: + Member + ::rstd::hash::Hash + Copy + Slicable + MaybeDisplay + SimpleArithmetic + Slicable, Hashing: HashingT, DigestItem: Member + Default + Slicable, - Hashing::Output: Default + ::rstd::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Slicable, - { + Hashing::Output: + Default + ::rstd::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Slicable, +{ type Number = Number; type Hash = ::Output; type Hashing = Hashing; type Digest = Digest; - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } - fn digest(&self) -> &Self::Digest { &self.digest } - fn set_digest(&mut self, digest: Self::Digest) { self.digest = digest } + fn digest(&self) -> &Self::Digest { + &self.digest + } + fn set_digest(&mut self, digest: Self::Digest) { + self.digest = digest + } fn new( number: Self::Number, extrinsics_root: Self::Hash, state_root: Self::Hash, parent_hash: Self::Hash, - digest: Self::Digest + digest: Self::Digest, ) -> Self { Header { - number, extrinsics_root: extrinsics_root, state_root, parent_hash, digest + number, + extrinsics_root, + state_root, + parent_hash, + digest, } } } -impl Header where - Number: Member + ::rstd::hash::Hash + Copy + Slicable + MaybeDisplay + SimpleArithmetic + Slicable, +impl Header +where + Number: + Member + ::rstd::hash::Hash + Copy + 
Slicable + MaybeDisplay + SimpleArithmetic + Slicable, Hashing: HashingT, DigestItem: Member + Default + Slicable, - Hashing::Output: Default + ::rstd::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Slicable, - { + Hashing::Output: + Default + ::rstd::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Slicable, +{ /// Convenience helper for computing the hash of the header without having /// to import the trait. pub fn hash(&self) -> Hashing::Output { @@ -489,7 +533,9 @@ pub struct SignedBlock { pub justification: Justification, } -impl Slicable for SignedBlock { +impl Slicable + for SignedBlock +{ fn decode(input: &mut I) -> Option { Some(SignedBlock { block: Slicable::decode(input)?, @@ -507,9 +553,9 @@ impl Slicable for SignedB #[cfg(test)] mod tests { + use super::{Digest, Extrinsic, Header, UncheckedExtrinsic}; use codec::Slicable; use substrate_primitives::{H256, H512}; - use super::{Digest, Header, UncheckedExtrinsic, Extrinsic}; type Block = super::Block< Header>, @@ -524,7 +570,9 @@ mod tests { number: 100_000, state_root: [1u8; 32].into(), extrinsics_root: [2u8; 32].into(), - digest: Digest { logs: vec![vec![1, 2, 3], vec![4, 5, 6]] }, + digest: Digest { + logs: vec![vec![1, 2, 3], vec![4, 5, 6]], + }, }, extrinsics: vec![ UncheckedExtrinsic::new( @@ -533,7 +581,7 @@ mod tests { index: 0, function: 100, }, - H512::from([0u8; 64]).into() + H512::from([0u8; 64]).into(), ), UncheckedExtrinsic::new( Extrinsic { @@ -541,9 +589,9 @@ mod tests { index: 100, function: 99, }, - H512::from([255u8; 64]).into() - ) - ] + H512::from([255u8; 64]).into(), + ), + ], }; { diff --git a/substrate/runtime/primitives/src/lib.rs b/substrate/runtime/primitives/src/lib.rs index f073dc7280327..c9bd302a344a1 100644 --- a/substrate/runtime/primitives/src/lib.rs +++ b/substrate/runtime/primitives/src/lib.rs @@ -26,13 +26,13 @@ extern crate serde; #[macro_use] extern crate serde_derive; -extern crate num_traits; extern crate integer_sqrt; -extern crate substrate_runtime_std as rstd; -extern crate substrate_runtime_io as runtime_io; -extern crate substrate_runtime_support as runtime_support; +extern crate num_traits; extern crate substrate_codec as codec; extern crate substrate_primitives; +extern crate substrate_runtime_io as runtime_io; +extern crate substrate_runtime_std as rstd; +extern crate substrate_runtime_support as runtime_support; #[cfg(test)] extern crate serde_json; @@ -50,7 +50,7 @@ pub mod traits; pub mod generic; pub mod bft; -use traits::{Verify, Lazy}; +use traits::{Lazy, Verify}; /// A set of key value pairs for storage. 
#[cfg(feature = "std")] @@ -82,8 +82,12 @@ impl Verify for Ed25519Signature { } impl codec::Slicable for Ed25519Signature { - fn decode(input: &mut I) -> Option { Some(Ed25519Signature(codec::Slicable::decode(input)?,)) } - fn using_encoded R>(&self, f: F) -> R { self.0.using_encoded(f) } + fn decode(input: &mut I) -> Option { + Some(Ed25519Signature(codec::Slicable::decode(input)?)) + } + fn using_encoded R>(&self, f: F) -> R { + self.0.using_encoded(f) + } } impl From for Ed25519Signature { @@ -152,7 +156,8 @@ pub type ApplyResult = Result; #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] pub struct MaybeUnsigned(pub T); -impl MaybeUnsigned where +impl MaybeUnsigned +where T: Default + Eq, ::Signer: Default + Eq, { @@ -165,7 +170,8 @@ impl MaybeUnsigned where } } -impl Verify for MaybeUnsigned where +impl Verify for MaybeUnsigned +where T: Default + Eq, ::Signer: Default + Eq, { @@ -180,8 +186,12 @@ impl Verify for MaybeUnsigned where } impl codec::Slicable for MaybeUnsigned { - fn decode(input: &mut I) -> Option { Some(MaybeUnsigned(codec::Slicable::decode(input)?)) } - fn using_encoded R>(&self, f: F) -> R { self.0.using_encoded(f) } + fn decode(input: &mut I) -> Option { + Some(MaybeUnsigned(codec::Slicable::decode(input)?)) + } + fn using_encoded R>(&self, f: F) -> R { + self.0.using_encoded(f) + } } impl From for MaybeUnsigned { @@ -192,7 +202,11 @@ impl From for MaybeUnsigned { /// Verify a signature on an encoded value in a lazy manner. This can be /// an optimization if the signature scheme has an "unsigned" escape hash. -pub fn verify_encoded_lazy(sig: &V, item: &T, signer: &V::Signer) -> bool { +pub fn verify_encoded_lazy( + sig: &V, + item: &T, + signer: &V::Signer, +) -> bool { // The `Lazy` trait expresses something like `X: FnMut &'a T>`. // unfortunately this is a lifetime relationship that can't // be expressed without generic associated types, better unification of HRTBs in type position, @@ -209,7 +223,10 @@ pub fn verify_encoded_lazy(sig: &V, item: &T, sig } sig.verify( - LazyEncode { inner: || item.encode(), encoded: None }, + LazyEncode { + inner: || item.encode(), + encoded: None, + }, signer, ) } diff --git a/substrate/runtime/primitives/src/testing.rs b/substrate/runtime/primitives/src/testing.rs index 7c712b40105e6..2a4cded123042 100644 --- a/substrate/runtime/primitives/src/testing.rs +++ b/substrate/runtime/primitives/src/testing.rs @@ -16,11 +16,11 @@ //! Testing utilities. 
-use serde::{Serialize, de::DeserializeOwned}; -use std::fmt::Debug; -use codec::{Slicable, Input}; +use codec::{Input, Slicable}; use runtime_support::AuxDispatchable; -use traits::{self, Checkable, Applyable, BlakeTwo256}; +use serde::{de::DeserializeOwned, Serialize}; +use std::fmt::Debug; +use traits::{self, Applyable, BlakeTwo256, Checkable}; pub use substrate_primitives::H256; @@ -80,30 +80,54 @@ impl traits::Header for Header { type Hash = H256; type Digest = Digest; - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } - fn digest(&self) -> &Self::Digest { &self.digest } - fn set_digest(&mut self, digest: Self::Digest) { self.digest = digest } + fn digest(&self) -> &Self::Digest { + &self.digest + } + fn set_digest(&mut self, digest: Self::Digest) { + self.digest = digest + } fn new( number: Self::Number, extrinsics_root: Self::Hash, state_root: Self::Hash, parent_hash: Self::Hash, - digest: Self::Digest + digest: Self::Digest, ) -> Self { Header { - number, extrinsics_root: extrinsics_root, state_root, parent_hash, digest + number, + extrinsics_root, + state_root, + parent_hash, + digest, } } } @@ -113,7 +137,9 @@ pub struct Block, } -impl Slicable for Block { +impl + Slicable for Block +{ fn decode(input: &mut I) -> Option { Some(Block { header: Slicable::decode(input)?, @@ -127,7 +153,19 @@ impl traits::Block for Block { +impl< + Xt: 'static + + Slicable + + Sized + + Send + + Sync + + Serialize + + DeserializeOwned + + Clone + + Eq + + Debug, + > traits::Block for Block +{ type Extrinsic = Xt; type Header = Header; type Hash =
::Hash; @@ -147,9 +185,23 @@ impl(pub (u64, u64, Call)); +pub struct TestXt( + pub (u64, u64, Call), +); -impl Slicable for TestXt { +impl< + Call: AuxDispatchable + + Slicable + + Sized + + Send + + Sync + + Serialize + + DeserializeOwned + + Clone + + Eq + + Debug, + > Slicable for TestXt +{ fn decode(input: &mut I) -> Option { Some(TestXt(Slicable::decode(input)?)) } @@ -157,17 +209,55 @@ impl Checkable for TestXt { +impl< + Call: 'static + + AuxDispatchable + + Slicable + + Sized + + Send + + Sync + + Serialize + + DeserializeOwned + + Clone + + Eq + + Debug, + > Checkable for TestXt +{ type Checked = Self; type Address = u64; type AccountId = u64; - fn sender(&self) -> &u64 { &(self.0).0 } - fn check Result>(self, _lookup: ThisLookup) -> Result { Ok(self) } + fn sender(&self) -> &u64 { + &(self.0).0 + } + fn check Result>( + self, + _lookup: ThisLookup, + ) -> Result { + Ok(self) + } } -impl + Slicable + Sized + Send + Sync + Serialize + DeserializeOwned + Clone + Eq + Debug> Applyable for TestXt { +impl< + Call: AuxDispatchable + + Slicable + + Sized + + Send + + Sync + + Serialize + + DeserializeOwned + + Clone + + Eq + + Debug, + > Applyable for TestXt +{ type AccountId = u64; type Index = u64; - fn sender(&self) -> &u64 { &(self.0).0 } - fn index(&self) -> &u64 { &(self.0).1 } - fn apply(self) -> Result<(), &'static str> { (self.0).2.dispatch(&(self.0).0) } + fn sender(&self) -> &u64 { + &(self.0).0 + } + fn index(&self) -> &u64 { + &(self.0).1 + } + fn apply(self) -> Result<(), &'static str> { + (self.0).2.dispatch(&(self.0).0) + } } diff --git a/substrate/runtime/primitives/src/traits.rs b/substrate/runtime/primitives/src/traits.rs index 218fbc12769f6..05b62860bc29b 100644 --- a/substrate/runtime/primitives/src/traits.rs +++ b/substrate/runtime/primitives/src/traits.rs @@ -16,16 +16,18 @@ //! Primitives for the runtime modules. +use codec::Slicable; +pub use integer_sqrt::IntegerSquareRoot; +pub use num_traits::{Bounded, One, Zero}; +use rstd::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, RemAssign, Sub, SubAssign}; use rstd::prelude::*; use rstd::{self, result}; use runtime_io; -#[cfg(feature = "std")] use std::fmt::{Debug, Display}; -#[cfg(feature = "std")] use serde::{Serialize, de::DeserializeOwned}; +#[cfg(feature = "std")] +use serde::{de::DeserializeOwned, Serialize}; +#[cfg(feature = "std")] +use std::fmt::{Debug, Display}; use substrate_primitives; -use codec::Slicable; -pub use integer_sqrt::IntegerSquareRoot; -pub use num_traits::{Zero, One, Bounded}; -use rstd::ops::{Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, RemAssign}; /// A lazy value. pub trait Lazy { @@ -58,7 +60,9 @@ pub trait MakePayment { } impl MakePayment for () { - fn make_payment(_: &T, _: usize) -> Result<(), &'static str> { Ok(()) } + fn make_payment(_: &T, _: usize) -> Result<(), &'static str> { + Ok(()) + } } /// Extensible conversion trait. Generic over both source and destination types. 
@@ -96,7 +100,9 @@ impl_numerics!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize); pub struct Identity; impl Convert for Identity { - fn convert(a: T) -> T { a } + fn convert(a: T) -> T { + a + } } pub trait MaybeEmpty { @@ -117,27 +123,49 @@ pub trait RefInto { fn ref_into(&self) -> &T; } impl RefInto for T { - fn ref_into(&self) -> &T { &self } + fn ref_into(&self) -> &T { + &self + } } pub trait SimpleArithmetic: - Zero + One + IntegerSquareRoot + As + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - PartialOrd + Ord + Zero + + One + + IntegerSquareRoot + + As + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + PartialOrd + + Ord +{ +} +impl< + T: Zero + + One + + IntegerSquareRoot + + As + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + PartialOrd + + Ord, + > SimpleArithmetic for T {} -impl + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - PartialOrd + Ord -> SimpleArithmetic for T {} /// Trait for things that can be clear (have no bits set). For numeric types, essentially the same /// as `Zero`. @@ -150,20 +178,25 @@ pub trait Clear { } impl Clear for T { - fn is_clear(&self) -> bool { *self == Self::clear() } - fn clear() -> Self { Default::default() } + fn is_clear(&self) -> bool { + *self == Self::clear() + } + fn clear() -> Self { + Default::default() + } } pub trait SimpleBitOps: - Sized + Clear + - rstd::ops::BitOr + - rstd::ops::BitAnd + Sized + Clear + rstd::ops::BitOr + rstd::ops::BitAnd +{ +} +impl< + T: Sized + + Clear + + rstd::ops::BitOr + + rstd::ops::BitAnd, + > SimpleBitOps for T {} -impl + - rstd::ops::BitAnd -> SimpleBitOps for T {} /// Something that can be executed. pub trait Executable { @@ -181,8 +214,9 @@ impl Executable for (A, B) { } /// Abstraction around hashing -pub trait Hashing: 'static + MaybeSerializeDebug + Clone + Eq + PartialEq { // Stupid bug in the Rust compiler believes derived - // traits must be fulfilled by all type parameters. +pub trait Hashing: 'static + MaybeSerializeDebug + Clone + Eq + PartialEq { + // Stupid bug in the Rust compiler believes derived + // traits must be fulfilled by all type parameters. /// The hash type produced. type Output: Member + AsRef<[u8]>; @@ -198,17 +232,12 @@ pub trait Hashing: 'static + MaybeSerializeDebug + Clone + Eq + PartialEq { // S fn enumerated_trie_root(items: &[&[u8]]) -> Self::Output; /// Iterator-based version of `enumerated_trie_root`. - fn ordered_trie_root< - I: IntoIterator, - A: AsRef<[u8]> - >(input: I) -> Self::Output; + fn ordered_trie_root, A: AsRef<[u8]>>(input: I) -> Self::Output; /// The Patricia tree root of the given mapping as an iterator. - fn trie_root< - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]> - >(input: I) -> Self::Output; + fn trie_root, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>>( + input: I, + ) -> Self::Output; /// Acquire the global storage root. 
fn storage_root() -> Self::Output; @@ -227,17 +256,12 @@ impl Hashing for BlakeTwo256 { fn enumerated_trie_root(items: &[&[u8]]) -> Self::Output { runtime_io::enumerated_trie_root(items).into() } - fn trie_root< - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]> - >(input: I) -> Self::Output { + fn trie_root, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>>( + input: I, + ) -> Self::Output { runtime_io::trie_root(input).into() } - fn ordered_trie_root< - I: IntoIterator, - A: AsRef<[u8]> - >(input: I) -> Self::Output { + fn ordered_trie_root, A: AsRef<[u8]>>(input: I) -> Self::Output { runtime_io::ordered_trie_root(input).into() } fn storage_root() -> Self::Output { @@ -255,7 +279,11 @@ impl CheckEqual for substrate_primitives::H256 { fn check_equal(&self, other: &Self) { use substrate_primitives::hexdisplay::HexDisplay; if &self.0 != &other.0 { - println!("Hash: given={}, expected={}", HexDisplay::from(&self.0), HexDisplay::from(&other.0)); + println!( + "Hash: given={}, expected={}", + HexDisplay::from(&self.0), + HexDisplay::from(&other.0) + ); } } @@ -299,7 +327,10 @@ pub trait MaybeDisplay {} #[cfg(not(feature = "std"))] impl MaybeDisplay for T {} -pub trait Member: Send + Sync + Sized + MaybeSerializeDebug + Eq + PartialEq + Clone + 'static {} +pub trait Member: + Send + Sync + Sized + MaybeSerializeDebug + Eq + PartialEq + Clone + 'static +{ +} impl Member for T {} /// Something that acts like a `Digest` - it can have `Log`s `push`ed onto it and these `Log`s are @@ -316,7 +347,14 @@ pub trait Digest { /// You can also create a `new` one from those fields. pub trait Header: Clone + Send + Sync + Slicable + Eq + MaybeSerializeDebug + 'static { type Number: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + SimpleArithmetic + Slicable; - type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Slicable + AsRef<[u8]>; + type Hash: Member + + ::rstd::hash::Hash + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Slicable + + AsRef<[u8]>; type Hashing: Hashing; type Digest: Member + Default; @@ -325,7 +363,7 @@ pub trait Header: Clone + Send + Sync + Slicable + Eq + MaybeSerializeDebug + 's extrinsics_root: Self::Hash, state_root: Self::Hash, parent_hash: Self::Hash, - digest: Self::Digest + digest: Self::Digest, ) -> Self; fn number(&self) -> &Self::Number; @@ -354,8 +392,15 @@ pub trait Header: Clone + Send + Sync + Slicable + Eq + MaybeSerializeDebug + 's /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. 
pub trait Block: Clone + Send + Sync + Slicable + Eq + MaybeSerializeDebug + 'static { type Extrinsic: Member + Slicable; - type Header: Header; - type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Slicable + AsRef<[u8]>; + type Header: Header; + type Hash: Member + + ::rstd::hash::Hash + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Slicable + + AsRef<[u8]>; fn header(&self) -> &Self::Header; fn extrinsics(&self) -> &[Self::Extrinsic]; @@ -376,7 +421,10 @@ pub trait Checkable: Sized + Send + Sync { type AccountId: Member + MaybeDisplay; type Checked: Member; fn sender(&self) -> &Self::Address; - fn check Result>(self, lookup: ThisLookup) -> Result; + fn check Result>( + self, + lookup: ThisLookup, + ) -> Result; } /// A "checkable" piece of information, used by the standard Substrate Executive in order to @@ -394,8 +442,15 @@ impl Checkable for T { type Address = ::Address; type AccountId = ::Address; type Checked = ::Checked; - fn sender(&self) -> &Self::Address { BlindCheckable::sender(self) } - fn check Result>(self, _: ThisLookup) -> Result { BlindCheckable::check(self) } + fn sender(&self) -> &Self::Address { + BlindCheckable::sender(self) + } + fn check Result>( + self, + _: ThisLookup, + ) -> Result { + BlindCheckable::check(self) + } } /// An "executable" piece of information, used by the standard Substrate Executive in order to diff --git a/substrate/runtime/session/src/lib.rs b/substrate/runtime/session/src/lib.rs index 74debe5a84e24..692b8960f9447 100644 --- a/substrate/runtime/session/src/lib.rs +++ b/substrate/runtime/session/src/lib.rs @@ -38,17 +38,17 @@ extern crate substrate_runtime_std as rstd; #[macro_use] extern crate substrate_runtime_support as runtime_support; -extern crate substrate_runtime_io as runtime_io; extern crate substrate_codec as codec; -extern crate substrate_runtime_primitives as primitives; extern crate substrate_runtime_consensus as consensus; +extern crate substrate_runtime_io as runtime_io; +extern crate substrate_runtime_primitives as primitives; extern crate substrate_runtime_system as system; extern crate substrate_runtime_timestamp as timestamp; +use primitives::traits::{As, Convert, Executable, One, RefInto, Zero}; use rstd::prelude::*; -use primitives::traits::{Zero, One, RefInto, Executable, Convert, As}; -use runtime_support::{StorageValue, StorageMap}; use runtime_support::dispatch::Result; +use runtime_support::{StorageMap, StorageValue}; /// A session has changed. pub trait OnSessionChange { @@ -104,7 +104,7 @@ decl_storage! { impl Module { /// The number of validators currently. pub fn validator_count() -> u32 { - >::get().len() as u32 // TODO: can probably optimised + >::get().len() as u32 // TODO: can probably optimised } /// The last length change, if there was one, zero if not. @@ -139,19 +139,23 @@ impl Module { /// Called by `staking::next_era()` only. `next_session` should be called after this in order to /// update the session keys to the next validator set. pub fn set_validators(new: &[T::AccountId]) { - >::put(&new.to_vec()); // TODO: optimise. + >::put(&new.to_vec()); // TODO: optimise. >::set_authorities( - &new.iter().cloned().map(T::ConvertAccountIdToSessionKey::convert).collect::>() + &new.iter() + .cloned() + .map(T::ConvertAccountIdToSessionKey::convert) + .collect::>(), ); } /// Hook to be called after transaction processing. pub fn check_rotate_session() { - // do this last, after the staking system has had chance to switch out the authorities for the - // new set. 
+ // do this last, after the staking system has had chance to switch out the authorities for + // the new set. // check block number and call next_session if necessary. let block_number = >::block_number(); - let is_final_block = ((block_number - Self::last_length_change()) % Self::length()).is_zero(); + let is_final_block = + ((block_number - Self::last_length_change()) % Self::length()).is_zero(); let broken_validation = Self::broken_validation(); if is_final_block || broken_validation { Self::rotate_session(!broken_validation); @@ -207,9 +211,10 @@ impl Module { let block_period = >::block_period(); let blocks_remaining = Self::blocks_remaining(); let blocks_remaining = >::sa(blocks_remaining); - now + blocks_remaining * block_period > - Self::current_start() + Self::ideal_session_duration() * - (T::Moment::sa(100) + Self::broken_percent_late()) / T::Moment::sa(100) + now + blocks_remaining * block_period + > Self::current_start() + + Self::ideal_session_duration() + * (T::Moment::sa(100) + Self::broken_percent_late()) / T::Moment::sa(100) } } @@ -242,12 +247,11 @@ impl Default for GenesisConfig { } #[cfg(any(feature = "std", test))] -impl primitives::BuildStorage for GenesisConfig -{ +impl primitives::BuildStorage for GenesisConfig { fn build_storage(self) -> ::std::result::Result { - use runtime_io::twox_128; use codec::Slicable; use primitives::traits::As; + use runtime_io::twox_128; Ok(map![ twox_128(>::key()).to_vec() => self.session_length.encode(), twox_128(>::key()).to_vec() => T::BlockNumber::sa(0).encode(), @@ -261,11 +265,11 @@ impl primitives::BuildStorage for GenesisConfig #[cfg(test)] mod tests { use super::*; + use primitives::testing::{Digest, Header}; + use primitives::traits::{BlakeTwo256, HasPublicAux, Identity}; + use primitives::BuildStorage; use runtime_io::with_externalities; use substrate_primitives::H256; - use primitives::BuildStorage; - use primitives::traits::{HasPublicAux, Identity, BlakeTwo256}; - use primitives::testing::{Digest, Header}; #[derive(Clone, Eq, PartialEq)] pub struct Test; @@ -300,19 +304,29 @@ mod tests { type Session = Module; fn new_test_ext() -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); - t.extend(consensus::GenesisConfig::{ - code: vec![], - authorities: vec![1, 2, 3], - }.build_storage().unwrap()); - t.extend(timestamp::GenesisConfig::{ - period: 5, - }.build_storage().unwrap()); - t.extend(GenesisConfig::{ - session_length: 2, - validators: vec![1, 2, 3], - broken_percent_late: 30, - }.build_storage().unwrap()); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + t.extend( + consensus::GenesisConfig:: { + code: vec![], + authorities: vec![1, 2, 3], + }.build_storage() + .unwrap(), + ); + t.extend( + timestamp::GenesisConfig:: { period: 5 } + .build_storage() + .unwrap(), + ); + t.extend( + GenesisConfig:: { + session_length: 2, + validators: vec![1, 2, 3], + broken_percent_late: 30, + }.build_storage() + .unwrap(), + ); t } @@ -339,16 +353,16 @@ mod tests { assert_eq!(Session::ideal_session_duration(), 15); // ideal end = 0 + 15 * 3 = 15 // broken_limit = 15 * 130 / 100 = 19 - + System::set_block_number(3); assert_eq!(Session::blocks_remaining(), 2); - Timestamp::set_timestamp(9); // earliest end = 9 + 2 * 5 = 19; OK. + Timestamp::set_timestamp(9); // earliest end = 9 + 2 * 5 = 19; OK. 
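The lateness condition reformatted above reads as plain integer arithmetic. Here is a standalone sketch (illustrative names, not the module's API) reproducing the numbers from the surrounding test: ideal duration 15, block period 5, 30% allowed lateness.

fn validation_is_broken(
	now: u64,              // current timestamp
	blocks_remaining: u64, // blocks left in this session
	block_period: u64,     // target seconds per block
	current_start: u64,    // timestamp at which the session started
	ideal_duration: u64,   // session length * block period
	broken_percent_late: u64,
) -> bool {
	// Broken if even the earliest possible session end overshoots the ideal end
	// by more than the allowed percentage.
	now + blocks_remaining * block_period
		> current_start + ideal_duration * (100 + broken_percent_late) / 100
}

fn main() {
	// Broken limit = 0 + 15 * 130 / 100 = 19, as in the test comments.
	assert!(!validation_is_broken(9, 2, 5, 0, 15, 30)); // earliest end 19: still on time
	assert!(validation_is_broken(15, 1, 5, 0, 15, 30)); // earliest end 20: broken
}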
assert!(!Session::broken_validation()); Session::check_rotate_session(); System::set_block_number(4); assert_eq!(Session::blocks_remaining(), 1); - Timestamp::set_timestamp(15); // another 1 second late. earliest end = 15 + 1 * 5 = 20; broken. + Timestamp::set_timestamp(15); // another 1 second late. earliest end = 15 + 1 * 5 = 20; broken. assert!(Session::broken_validation()); Session::check_rotate_session(); assert_eq!(Session::current_index(), 2); diff --git a/substrate/runtime/staking/src/account_db.rs b/substrate/runtime/staking/src/account_db.rs index b41ce55458e65..8a03143c536b3 100644 --- a/substrate/runtime/staking/src/account_db.rs +++ b/substrate/runtime/staking/src/account_db.rs @@ -16,12 +16,12 @@ //! Auxilliaries to help with managing partial changes to accounts state. -use rstd::prelude::*; +use super::*; +use double_map::StorageDoubleMap; use rstd::cell::RefCell; use rstd::collections::btree_map::{BTreeMap, Entry}; +use rstd::prelude::*; use runtime_support::StorageMap; -use double_map::StorageDoubleMap; -use super::*; pub struct ChangeEntry { balance: Option, @@ -42,10 +42,18 @@ impl Default for ChangeEntry { impl ChangeEntry { pub fn contract_created(b: T::Balance, c: Vec) -> Self { - ChangeEntry { balance: Some(b), code: Some(c), storage: Default::default() } + ChangeEntry { + balance: Some(b), + code: Some(c), + storage: Default::default(), + } } pub fn balance_changed(b: T::Balance) -> Self { - ChangeEntry { balance: Some(b), code: None, storage: Default::default() } + ChangeEntry { + balance: Some(b), + code: None, + storage: Default::default(), + } } } @@ -87,7 +95,7 @@ impl AccountDb for DirectAccountDb { // TODO: enforce this for the other balance-altering functions. if balance < ed { >::on_free_too_low(&address); - continue; + continue } else { if !>::exists(&address) { let outcome = >::new_account(&address, balance); @@ -185,10 +193,10 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { value.code = changed.code; } value.storage.extend(changed.storage.into_iter()); - } + }, Entry::Vacant(e) => { e.insert(changed); - } + }, } } } @@ -206,7 +214,8 @@ impl<'a, 'b: 'a, T: Trait> contract::Ext for StakingExt<'a, 'b, T> { self.account_db.get_storage(&self.account, key) } fn set_storage(&mut self, key: &[u8], value: Option>) { - self.account_db.set_storage(&self.account, key.to_vec(), value); + self.account_db + .set_storage(&self.account, key.to_vec(), value); } fn create(&mut self, code: &[u8], value: Self::Balance) { if let Ok(Some(commit_state)) = diff --git a/substrate/runtime/staking/src/address.rs b/substrate/runtime/staking/src/address.rs index dc0769e4ad124..9fb88bad3cb8d 100644 --- a/substrate/runtime/staking/src/address.rs +++ b/substrate/runtime/staking/src/address.rs @@ -16,28 +16,30 @@ //! Address type that is union of index and id for an account. +use super::{As, Input, Member, Slicable}; use rstd::prelude::*; #[cfg(feature = "std")] use std::fmt; -use super::{Member, Slicable, As, Input}; /// A vetted and verified extrinsic from the external world. #[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash))] -pub enum Address where +pub enum Address +where AccountId: Member, AccountIndex: Member, { /// It's an account ID (pubkey). - #[cfg_attr(feature = "std", serde(deserialize_with="AccountId::deserialize"))] + #[cfg_attr(feature = "std", serde(deserialize_with = "AccountId::deserialize"))] Id(AccountId), /// It's an account index. 
- #[cfg_attr(feature = "std", serde(deserialize_with="AccountIndex::deserialize"))] + #[cfg_attr(feature = "std", serde(deserialize_with = "AccountIndex::deserialize"))] Index(AccountIndex), } #[cfg(feature = "std")] -impl fmt::Display for Address where +impl fmt::Display for Address +where AccountId: Member, AccountIndex: Member, { @@ -46,7 +48,8 @@ impl fmt::Display for Address } } -impl From for Address where +impl From for Address +where AccountId: Member, AccountIndex: Member, { @@ -56,19 +59,28 @@ impl From for Address(a: T, b: T) -> Option { - if a < b { Some(a) } else { None } + if a < b { + Some(a) + } else { + None + } } -impl Slicable for Address where +impl Slicable for Address +where AccountId: Member + Slicable, - AccountIndex: Member + Slicable + PartialOrd + Ord + As + As + As + Copy, + AccountIndex: + Member + Slicable + PartialOrd + Ord + As + As + As + Copy, { fn decode(input: &mut I) -> Option { Some(match input.read_byte()? { x @ 0x00...0xef => Address::Index(As::sa(x)), 0xfc => Address::Index(As::sa(need_more_than(0xef, u16::decode(input)?)?)), 0xfd => Address::Index(As::sa(need_more_than(0xffff, u32::decode(input)?)?)), - 0xfe => Address::Index(need_more_than(As::sa(0xffffffffu32), Slicable::decode(input)?)?), + 0xfe => Address::Index(need_more_than( + As::sa(0xffffffffu32), + Slicable::decode(input)?, + )?), 0xff => Address::Id(Slicable::decode(input)?), _ => return None, }) @@ -81,19 +93,19 @@ impl Slicable for Address wher Address::Id(ref i) => { v.push(255); i.using_encoded(|s| v.extend(s)); - } + }, Address::Index(i) if i > As::sa(0xffffffffu32) => { v.push(254); i.using_encoded(|s| v.extend(s)); - } + }, Address::Index(i) if i > As::sa(0xffffu32) => { v.push(253); As::::as_(i).using_encoded(|s| v.extend(s)); - } + }, Address::Index(i) if i >= As::sa(0xf0u32) => { v.push(252); As::::as_(i).using_encoded(|s| v.extend(s)); - } + }, Address::Index(i) => v.push(As::::as_(i)), } @@ -101,7 +113,8 @@ impl Slicable for Address wher } } -impl Default for Address where +impl Default for Address +where AccountId: Member + Default, AccountIndex: Member, { diff --git a/substrate/runtime/staking/src/double_map.rs b/substrate/runtime/staking/src/double_map.rs index 36c4be98d559c..620fbee72ca16 100644 --- a/substrate/runtime/staking/src/double_map.rs +++ b/substrate/runtime/staking/src/double_map.rs @@ -18,10 +18,10 @@ //! //! This implementation is somewhat specialized to the tracking of the storage of accounts. -use rstd::prelude::*; use codec::Slicable; -use runtime_support::storage::unhashed; +use rstd::prelude::*; use runtime_io::{blake2_256, twox_128}; +use runtime_support::storage::unhashed; /// Returns only a first part of the storage key. 
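The `Slicable` impl reflowed above packs an address into one discriminant byte: indices below 0xf0 inline, 0xfc/0xfd/0xfe prefixes for progressively wider indices, 0xff for a full account id. A standalone sketch of the encode side follows, specialised to u32 indices and a 32-byte id; little-endian integer encoding is assumed here to mirror the codec, and the `need_more_than` canonicality checks on decode are omitted.

enum Address {
	Id([u8; 32]),
	Index(u32),
}

fn encode(addr: &Address) -> Vec<u8> {
	let mut v = Vec::new();
	match *addr {
		Address::Id(ref id) => {
			v.push(0xff); // full account id follows
			v.extend_from_slice(id);
		},
		Address::Index(i) if i < 0xf0 => v.push(i as u8), // 0x00..=0xef inline
		Address::Index(i) if i <= 0xffff => {
			v.push(0xfc); // u16 index follows
			v.extend_from_slice(&(i as u16).to_le_bytes());
		},
		Address::Index(i) => {
			v.push(0xfd); // u32 index follows
			v.extend_from_slice(&i.to_le_bytes());
		},
	}
	v
}

fn main() {
	assert_eq!(encode(&Address::Index(0x42)), vec![0x42]);
	assert_eq!(encode(&Address::Index(0x1234)), vec![0xfc, 0x34, 0x12]);
	assert_eq!(encode(&Address::Index(0x0012_3456)), vec![0xfd, 0x56, 0x34, 0x12, 0x00]);
	assert_eq!(encode(&Address::Id([7u8; 32])).len(), 33);
}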
/// diff --git a/substrate/runtime/staking/src/genesis_config.rs b/substrate/runtime/staking/src/genesis_config.rs index 9d29ab13a0014..8c1f07951067b 100644 --- a/substrate/runtime/staking/src/genesis_config.rs +++ b/substrate/runtime/staking/src/genesis_config.rs @@ -18,16 +18,18 @@ #![cfg(feature = "std")] +use super::{ + BondingDuration, ContractFee, CreationFee, CurrentEra, EarlyEraSlash, EnumSet, + ExistentialDeposit, FreeBalance, Intentions, NextEnumSet, ReclaimRebate, SessionReward, + SessionsPerEra, TotalStake, Trait, TransactionBaseFee, TransactionByteFee, TransferFee, + ValidatorCount, ENUM_SET_SIZE, +}; +use codec::Slicable; +use primitives::traits::{As, Zero}; use rstd::prelude::*; use runtime_io::twox_128; -use codec::Slicable; -use runtime_support::{StorageValue, StorageMap}; -use primitives::traits::{Zero, As}; -use {runtime_io, primitives}; -use super::{Trait, ENUM_SET_SIZE, EnumSet, NextEnumSet, Intentions, CurrentEra, - BondingDuration, ContractFee, CreationFee, TransferFee, ReclaimRebate, - ExistentialDeposit, TransactionByteFee, TransactionBaseFee, TotalStake, - SessionsPerEra, ValidatorCount, FreeBalance, SessionReward, EarlyEraSlash}; +use runtime_support::{StorageMap, StorageValue}; +use {primitives, runtime_io}; #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -50,13 +52,20 @@ pub struct GenesisConfig { pub early_era_slash: T::Balance, } -impl GenesisConfig where T::AccountId: From { +impl GenesisConfig +where + T::AccountId: From, +{ pub fn simple() -> Self { GenesisConfig { sessions_per_era: T::BlockNumber::sa(2), current_era: T::BlockNumber::sa(0), balances: vec![(T::AccountId::from(1), T::Balance::sa(111))], - intentions: vec![T::AccountId::from(1), T::AccountId::from(2), T::AccountId::from(3)], + intentions: vec![ + T::AccountId::from(1), + T::AccountId::from(2), + T::AccountId::from(3), + ], validator_count: 3, bonding_duration: T::BlockNumber::sa(0), transaction_base_fee: T::Balance::sa(0), @@ -82,9 +91,13 @@ impl GenesisConfig where T::AccountId: From { (T::AccountId::from(4), T::Balance::sa(40)), (T::AccountId::from(5), T::Balance::sa(50)), (T::AccountId::from(6), T::Balance::sa(60)), - (T::AccountId::from(7), T::Balance::sa(1)) + (T::AccountId::from(7), T::Balance::sa(1)), + ], + intentions: vec![ + T::AccountId::from(1), + T::AccountId::from(2), + T::AccountId::from(3), ], - intentions: vec![T::AccountId::from(1), T::AccountId::from(2), T::AccountId::from(3)], validator_count: 3, bonding_duration: T::BlockNumber::sa(0), transaction_base_fee: T::Balance::sa(1), @@ -124,7 +137,10 @@ impl Default for GenesisConfig { impl primitives::BuildStorage for GenesisConfig { fn build_storage(self) -> Result { - let total_stake: T::Balance = self.balances.iter().fold(Zero::zero(), |acc, &(_, n)| acc + n); + let total_stake: T::Balance = self + .balances + .iter() + .fold(Zero::zero(), |acc, &(_, n)| acc + n); let mut r: runtime_io::TestExternalities = map![ twox_128(>::key()).to_vec() => T::AccountIndex::sa(self.balances.len() / ENUM_SET_SIZE).encode(), @@ -147,11 +163,18 @@ impl primitives::BuildStorage for GenesisConfig { let ids: Vec<_> = self.balances.iter().map(|x| x.0.clone()).collect(); for i in 0..(ids.len() + ENUM_SET_SIZE - 1) / ENUM_SET_SIZE { - r.insert(twox_128(&>::key_for(T::AccountIndex::sa(i))).to_vec(), - ids[i * ENUM_SET_SIZE..ids.len().min((i + 1) * ENUM_SET_SIZE)].to_owned().encode()); + r.insert( + twox_128(&>::key_for(T::AccountIndex::sa(i))).to_vec(), + ids[i * ENUM_SET_SIZE..ids.len().min((i + 1) * ENUM_SET_SIZE)] + 
.to_owned() + .encode(), + ); } for (who, value) in self.balances.into_iter() { - r.insert(twox_128(&>::key_for(who)).to_vec(), value.encode()); + r.insert( + twox_128(&>::key_for(who)).to_vec(), + value.encode(), + ); } Ok(r) } diff --git a/substrate/runtime/staking/src/lib.rs b/substrate/runtime/staking/src/lib.rs index cf64fca9d7426..48d8fd07d205e 100644 --- a/substrate/runtime/staking/src/lib.rs +++ b/substrate/runtime/staking/src/lib.rs @@ -36,28 +36,31 @@ extern crate substrate_runtime_std as rstd; extern crate substrate_codec as codec; extern crate substrate_primitives; +extern crate substrate_runtime_consensus as consensus; extern crate substrate_runtime_contract as contract; extern crate substrate_runtime_io as runtime_io; extern crate substrate_runtime_primitives as primitives; -extern crate substrate_runtime_consensus as consensus; extern crate substrate_runtime_sandbox as sandbox; extern crate substrate_runtime_session as session; extern crate substrate_runtime_system as system; extern crate substrate_runtime_timestamp as timestamp; -#[cfg(test)] use std::fmt::Debug; use account_db::State; +use address::Address as RawAddress; +use codec::{Input, Slicable}; +use double_map::StorageDoubleMap; +use primitives::traits::{ + As, AuxLookup, Bounded, Executable, Hashing as HashingT, MakePayment, Member, One, RefInto, + SimpleArithmetic, Zero, +}; +use rstd::collections::btree_map::BTreeMap; use rstd::prelude::*; use rstd::{cmp, result}; -use rstd::collections::btree_map::BTreeMap; -use codec::{Input, Slicable}; -use runtime_support::{StorageValue, StorageMap, Parameter}; use runtime_support::dispatch::Result; +use runtime_support::{Parameter, StorageMap, StorageValue}; use session::OnSessionChange; -use primitives::traits::{Zero, One, Bounded, RefInto, SimpleArithmetic, Executable, MakePayment, - As, AuxLookup, Hashing as HashingT, Member}; -use address::Address as RawAddress; -use double_map::StorageDoubleMap; +#[cfg(test)] +use std::fmt::Debug; pub mod address; mod mock; @@ -108,10 +111,11 @@ impl ContractAddressFor for DummyContractAddressFor { } } -impl ContractAddressFor for Hashing where +impl ContractAddressFor for Hashing +where Hashing: HashingT, AccountId: Sized + Slicable + From, - Hashing::Output: Slicable + Hashing::Output: Slicable, { fn contract_address_for(code: &[u8], origin: &AccountId) -> AccountId { let mut dest_pre = Hashing::hash(code).encode(); @@ -122,12 +126,28 @@ impl ContractAddressFor for Hashing where pub trait Trait: system::Trait + session::Trait { /// The balance of an account. - type Balance: Parameter + SimpleArithmetic + Slicable + Default + Copy + As + As + As; + type Balance: Parameter + + SimpleArithmetic + + Slicable + + Default + + Copy + + As + + As + + As; /// Function type to get the contract address given the creator. type DetermineContractAddress: ContractAddressFor; /// Type used for storing an account's index; implies the maximum number of accounts the system /// can hold. - type AccountIndex: Parameter + Member + Slicable + SimpleArithmetic + As + As + As + As + As + Copy; + type AccountIndex: Parameter + + Member + + Slicable + + SimpleArithmetic + + As + + As + + As + + As + + As + + Copy; } decl_module! { @@ -254,7 +274,6 @@ enum NewAccountOutcome { } impl Module { - // PUBLIC IMMUTABLES /// The length of a staking era in blocks. 
@@ -308,7 +327,8 @@ impl Module { fn transfer(aux: &T::PublicAux, dest: Address, value: T::Balance) -> Result { let dest = Self::lookup(dest)?; // commit anything that made it this far to storage - if let Some(commit) = Self::effect_transfer(aux.ref_into(), &dest, value, &DirectAccountDb)? { + if let Some(commit) = Self::effect_transfer(aux.ref_into(), &dest, value, &DirectAccountDb)? + { >::merge(&mut DirectAccountDb, commit); } Ok(()) @@ -319,10 +339,16 @@ impl Module { /// Effects will be felt at the beginning of the next era. fn stake(aux: &T::PublicAux) -> Result { let aux = aux.ref_into(); - ensure!(Self::nominating(aux).is_none(), "Cannot stake if already nominating."); + ensure!( + Self::nominating(aux).is_none(), + "Cannot stake if already nominating." + ); let mut intentions = >::get(); // can't be in the list twice. - ensure!(intentions.iter().find(|&t| t == aux).is_none(), "Cannot stake if already staked."); + ensure!( + intentions.iter().find(|&t| t == aux).is_none(), + "Cannot stake if already staked." + ); intentions.push(aux.clone()); >::put(intentions); >::insert(aux, T::BlockNumber::max_value()); @@ -336,13 +362,17 @@ impl Module { let aux = aux.ref_into(); let position = position as usize; let mut intentions = >::get(); -// let position = intentions.iter().position(|t| t == aux.ref_into()).ok_or("Cannot unstake if not already staked.")?; + // let position = intentions.iter().position(|t| t == aux.ref_into()).ok_or("Cannot + // unstake if not already staked.")?; if intentions.get(position) != Some(aux) { return Err("Invalid index") } intentions.swap_remove(position); >::put(intentions); - >::insert(aux.ref_into(), Self::current_era() + Self::bonding_duration()); + >::insert( + aux.ref_into(), + Self::current_era() + Self::bonding_duration(), + ); Ok(()) } @@ -350,8 +380,17 @@ impl Module { let target = Self::lookup(target)?; let aux = aux.ref_into(); - ensure!(Self::nominating(aux).is_none(), "Cannot nominate if already nominating."); - ensure!(Self::intentions().iter().find(|&t| t == aux.ref_into()).is_none(), "Cannot nominate if already staked."); + ensure!( + Self::nominating(aux).is_none(), + "Cannot nominate if already nominating." + ); + ensure!( + Self::intentions() + .iter() + .find(|&t| t == aux.ref_into()) + .is_none(), + "Cannot nominate if already staked." + ); // update nominators_for let mut t = Self::nominators_for(&target); @@ -390,7 +429,10 @@ impl Module { >::remove(source); // update bondage - >::insert(aux.ref_into(), Self::current_era() + Self::bonding_duration()); + >::insert( + aux.ref_into(), + Self::current_era() + Self::bonding_duration(), + ); Ok(()) } @@ -441,7 +483,7 @@ impl Module { /// is known that the account already exists. pub fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> bool { // Commented out for no - but consider it instructive. -// assert!(!Self::voting_balance(who).is_zero()); + // assert!(!Self::voting_balance(who).is_zero()); if balance < Self::existential_deposit() { Self::on_free_too_low(who); false @@ -472,7 +514,7 @@ impl Module { /// If `who` doesn't exist, nothing is done and an Err returned. 
pub fn reward(who: &T::AccountId, value: T::Balance) -> Result { if Self::voting_balance(who).is_zero() { - return Err("beneficiary account must pre-exist"); + return Err("beneficiary account must pre-exist") } Self::set_free_balance(who, Self::free_balance(who) + value); Ok(()) @@ -536,10 +578,10 @@ impl Module { pub fn transfer_reserved( slashed: &T::AccountId, beneficiary: &T::AccountId, - value: T::Balance + value: T::Balance, ) -> result::Result, &'static str> { if Self::voting_balance(beneficiary).is_zero() { - return Err("beneficiary account must pre-exist"); + return Err("beneficiary account must pre-exist") } let b = Self::reserved_balance(slashed); let slash = cmp::min(b, value); @@ -560,14 +602,20 @@ impl Module { if normal_rotation { // reward let ideal_elapsed = >::ideal_session_duration(); - let per65536: u64 = (T::Moment::sa(65536u64) * ideal_elapsed.clone() / actual_elapsed.max(ideal_elapsed)).as_(); - let reward = Self::session_reward() * T::Balance::sa(per65536) / T::Balance::sa(65536u64); + let per65536: u64 = (T::Moment::sa(65536u64) * ideal_elapsed.clone() + / actual_elapsed.max(ideal_elapsed)) + .as_(); + let reward = + Self::session_reward() * T::Balance::sa(per65536) / T::Balance::sa(65536u64); // apply good session reward for v in >::validators().iter() { let noms = Self::current_nominators_for(v); - let total = noms.iter().map(Self::voting_balance).fold(Self::voting_balance(v), |acc, x| acc + x); + let total = noms + .iter() + .map(Self::voting_balance) + .fold(Self::voting_balance(v), |acc, x| acc + x); if !total.is_zero() { - let safe_mul_rational = |b| b * reward / total;// TODO: avoid overflow + let safe_mul_rational = |b| b * reward / total; // TODO: avoid overflow for n in noms.iter() { let _ = Self::reward(n, safe_mul_rational(Self::voting_balance(n))); } @@ -580,24 +628,32 @@ impl Module { for v in >::validators().iter() { if let Some(rem) = Self::slash(v, early_era_slash) { let noms = Self::current_nominators_for(v); - let total = noms.iter().map(Self::voting_balance).fold(T::Balance::zero(), |acc, x| acc + x); + let total = noms + .iter() + .map(Self::voting_balance) + .fold(T::Balance::zero(), |acc, x| acc + x); if !total.is_zero() { - let safe_mul_rational = |b| b * rem / total;// TODO: avoid overflow + let safe_mul_rational = |b| b * rem / total; // TODO: avoid overflow for n in noms.iter() { - let _ = Self::slash(n, safe_mul_rational(Self::voting_balance(n))); // best effort - not much that can be done on fail. + let _ = Self::slash(n, safe_mul_rational(Self::voting_balance(n))); // best effort - not much that can be done on fail. } } } } } - if ((session_index - Self::last_era_length_change()) % Self::sessions_per_era()).is_zero() || !normal_rotation { + if ((session_index - Self::last_era_length_change()) % Self::sessions_per_era()).is_zero() + || !normal_rotation + { Self::new_era(); } } /// Balance of a (potential) validator that includes all nominators. fn nomination_balance(who: &T::AccountId) -> T::Balance { - Self::nominators_for(who).iter().map(Self::voting_balance).fold(Zero::zero(), |acc, x| acc + x) + Self::nominators_for(who) + .iter() + .map(Self::voting_balance) + .fold(Zero::zero(), |acc, x| acc + x) } /// The era has changed - enact new staking set. 
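The reward and slash loops reformatted above weight each party's share by its voting balance via `safe_mul_rational`. A standalone sketch of that split with plain u64s; treating the validator's own share the same way as the nominators' is an assumption here, and overflow is ignored just as the `TODO: avoid overflow` notes.

fn split_by_voting_balance(
	validator_balance: u64,
	nominator_balances: &[u64],
	amount: u64,
) -> Vec<u64> {
	let total: u64 = validator_balance + nominator_balances.iter().sum::<u64>();
	if total == 0 {
		return Vec::new();
	}
	let safe_mul_rational = |b: u64| b * amount / total;
	// First entry is the validator's share, then one entry per nominator.
	let mut shares = vec![safe_mul_rational(validator_balance)];
	shares.extend(nominator_balances.iter().map(|&b| safe_mul_rational(b)));
	shares
}

fn main() {
	// A validator holding 40 with nominators holding 10 and 50, splitting 10.
	assert_eq!(split_by_voting_balance(40, &[10, 50], 10), vec![4, 1, 5]);
}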
@@ -627,16 +683,17 @@ impl Module { .collect::>(); intentions.sort_unstable_by(|&(ref b1, _), &(ref b2, _)| b2.cmp(&b1)); - >::put( - if intentions.len() > 0 { - let i = (>::get() as usize).min(intentions.len() - 1); - intentions[i].0.clone() - } else { Zero::zero() } - ); - let vals = &intentions.into_iter() - .map(|(_, v)| v) - .take(>::get() as usize) - .collect::>(); + >::put(if intentions.len() > 0 { + let i = (>::get() as usize).min(intentions.len() - 1); + intentions[i].0.clone() + } else { + Zero::zero() + }); + let vals = &intentions + .into_iter() + .map(|(_, v)| v) + .take(>::get() as usize) + .collect::>(); for v in >::validators().iter() { >::remove(v); } @@ -678,10 +735,14 @@ impl Module { let ret = { // we quantise the number of accounts so it stays constant over a reasonable // period of time. - let quantized_account_count: T::AccountIndex = (next_set_index * enum_set_size / quantization + One::one()) * quantization; + let quantized_account_count: T::AccountIndex = + (next_set_index * enum_set_size / quantization + One::one()) * quantization; // then modify the starting balance to be modulo this to allow it to potentially // identify an account index for reuse. - let maybe_try_index = balance % >::sa(quantized_account_count * reclaim_index_modulus); + let maybe_try_index = balance + % >::sa( + quantized_account_count * reclaim_index_modulus, + ); let maybe_try_index = As::::as_(maybe_try_index); // this identifier must end with magic byte 0x69 to trigger this check (a minor @@ -700,7 +761,7 @@ impl Module { try_set[item_index] = who.clone(); >::insert(set_index, try_set); - return NewAccountOutcome::GoodHint; + return NewAccountOutcome::GoodHint } } NewAccountOutcome::BadHint @@ -711,11 +772,12 @@ impl Module { // insert normally as a back up let mut set_index = next_set_index; - // defensive only: this loop should never iterate since we keep NextEnumSet up to date later. + // defensive only: this loop should never iterate since we keep NextEnumSet up to date + // later. let mut set = loop { let set = Self::enum_set(set_index); if set.len() < ENUM_SET_SIZE { - break set; + break set } set_index += One::one(); }; @@ -765,27 +827,30 @@ impl Module { let liability = value + Self::contract_fee(); if from_balance < liability { - return Err("balance too low to send value"); + return Err("balance too low to send value") } if value < Self::existential_deposit() { - return Err("value too low to create account"); + return Err("value too low to create account") } if >::get(transactor) > >::block_number() { - return Err("bondage too high to send value"); + return Err("bondage too high to send value") } let dest = T::DetermineContractAddress::contract_address_for(code, transactor); // early-out if degenerate. if &dest == transactor { - return Ok(None); + return Ok(None) } let mut local = BTreeMap::new(); // two inserts are safe // note that we now know that `&dest != transactor` due to early-out before. 
local.insert(dest, ChangeEntry::contract_created(value, code.to_vec())); - local.insert(transactor.clone(), ChangeEntry::balance_changed(from_balance - liability)); + local.insert( + transactor.clone(), + ChangeEntry::balance_changed(from_balance - liability), + ); Ok(Some(local)) } @@ -796,23 +861,27 @@ impl Module { account_db: &DB, ) -> result::Result>, &'static str> { let would_create = account_db.get_balance(transactor).is_zero(); - let fee = if would_create { Self::creation_fee() } else { Self::transfer_fee() }; + let fee = if would_create { + Self::creation_fee() + } else { + Self::transfer_fee() + }; let liability = value + fee; let from_balance = account_db.get_balance(transactor); if from_balance < liability { - return Err("balance too low to send value"); + return Err("balance too low to send value") } if would_create && value < Self::existential_deposit() { - return Err("value too low to create account"); + return Err("value too low to create account") } if >::get(transactor) > >::block_number() { - return Err("bondage too high to send value"); + return Err("bondage too high to send value") } let to_balance = account_db.get_balance(dest); if to_balance + value <= to_balance { - return Err("destination balance too high to receive value"); + return Err("destination balance too high to receive value") } // TODO: an additional fee, based upon gaslimit/gasprice. @@ -851,8 +920,7 @@ impl Module { } impl Executable for Module { - fn execute() { - } + fn execute() {} } impl OnSessionChange for Module { @@ -867,7 +935,8 @@ impl AuxLookup for Module { fn lookup(a: Self::Source) -> result::Result { match a { address::Address::Id(i) => Ok(i), - address::Address::Index(i) => >::lookup_index(i).ok_or("invalid account index"), + address::Address::Index(i) => + >::lookup_index(i).ok_or("invalid account index"), } } } @@ -875,9 +944,10 @@ impl AuxLookup for Module { impl MakePayment for Module { fn make_payment(transactor: &T::AccountId, encoded_len: usize) -> Result { let b = Self::free_balance(transactor); - let transaction_fee = Self::transaction_base_fee() + Self::transaction_byte_fee() * >::sa(encoded_len as u64); + let transaction_fee = Self::transaction_base_fee() + + Self::transaction_byte_fee() * >::sa(encoded_len as u64); if b < transaction_fee { - return Err("not enough funds for transaction fee"); + return Err("not enough funds for transaction fee") } >::insert(transactor, b - transaction_fee); Ok(()) diff --git a/substrate/runtime/staking/src/mock.rs b/substrate/runtime/staking/src/mock.rs index c2cf48dbd19ab..fd29026e16c7b 100644 --- a/substrate/runtime/staking/src/mock.rs +++ b/substrate/runtime/staking/src/mock.rs @@ -18,13 +18,13 @@ #![cfg(test)] -use primitives::BuildStorage; -use primitives::traits::{HasPublicAux, Identity}; +use super::DummyContractAddressFor; use primitives::testing::{Digest, Header}; -use substrate_primitives::H256; +use primitives::traits::{HasPublicAux, Identity}; +use primitives::BuildStorage; use runtime_io; -use {GenesisConfig, Module, Trait, consensus, session, system, timestamp}; -use super::DummyContractAddressFor; +use substrate_primitives::H256; +use {consensus, session, system, timestamp, GenesisConfig, Module, Trait}; // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
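The `MakePayment` impl reformatted above charges a flat base fee plus a per-byte fee on the encoded extrinsic before anything executes. A standalone sketch with plain u64s and illustrative names; the real impl writes the new free balance back to storage rather than returning it.

fn make_payment(
	free_balance: u64,
	base_fee: u64,
	byte_fee: u64,
	encoded_len: u64,
) -> Result<u64, &'static str> {
	let transaction_fee = base_fee + byte_fee * encoded_len;
	if free_balance < transaction_fee {
		return Err("not enough funds for transaction fee")
	}
	Ok(free_balance - transaction_fee)
}

fn main() {
	assert_eq!(make_payment(100, 10, 1, 20), Ok(70));
	assert_eq!(make_payment(25, 10, 1, 20), Err("not enough funds for transaction fee"));
}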
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] @@ -59,50 +59,78 @@ impl Trait for Test { type AccountIndex = u64; } -pub fn new_test_ext(ext_deposit: u64, session_length: u64, sessions_per_era: u64, current_era: u64, monied: bool, reward: u64) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); - let balance_factor = if ext_deposit > 0 { - 256 - } else { - 1 - }; - t.extend(consensus::GenesisConfig::{ - code: vec![], - authorities: vec![], - }.build_storage().unwrap()); - t.extend(session::GenesisConfig::{ - session_length, - validators: vec![10, 20], - broken_percent_late: 30, - }.build_storage().unwrap()); - t.extend(GenesisConfig::{ - sessions_per_era, - current_era, - balances: if monied { - if reward > 0 { - vec![(1, 10 * balance_factor), (2, 20 * balance_factor), (3, 30 * balance_factor), (4, 40 * balance_factor), (10, balance_factor), (20, balance_factor)] +pub fn new_test_ext( + ext_deposit: u64, + session_length: u64, + sessions_per_era: u64, + current_era: u64, + monied: bool, + reward: u64, +) -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + let balance_factor = if ext_deposit > 0 { 256 } else { 1 }; + t.extend( + consensus::GenesisConfig:: { + code: vec![], + authorities: vec![], + }.build_storage() + .unwrap(), + ); + t.extend( + session::GenesisConfig:: { + session_length, + validators: vec![10, 20], + broken_percent_late: 30, + }.build_storage() + .unwrap(), + ); + t.extend( + GenesisConfig:: { + sessions_per_era, + current_era, + balances: if monied { + if reward > 0 { + vec![ + (1, 10 * balance_factor), + (2, 20 * balance_factor), + (3, 30 * balance_factor), + (4, 40 * balance_factor), + (10, balance_factor), + (20, balance_factor), + ] + } else { + vec![ + (1, 10 * balance_factor), + (2, 20 * balance_factor), + (3, 30 * balance_factor), + (4, 40 * balance_factor), + ] + } } else { - vec![(1, 10 * balance_factor), (2, 20 * balance_factor), (3, 30 * balance_factor), (4, 40 * balance_factor)] - } - } else { - vec![(10, balance_factor), (20, balance_factor)] - }, - intentions: vec![], - validator_count: 2, - bonding_duration: 3, - transaction_base_fee: 0, - transaction_byte_fee: 0, - existential_deposit: ext_deposit, - transfer_fee: 0, - creation_fee: 0, - contract_fee: 0, - reclaim_rebate: 0, - session_reward: reward, - early_era_slash: if monied { 20 } else { 0 }, - }.build_storage().unwrap()); - t.extend(timestamp::GenesisConfig::{ - period: 5 - }.build_storage().unwrap()); + vec![(10, balance_factor), (20, balance_factor)] + }, + intentions: vec![], + validator_count: 2, + bonding_duration: 3, + transaction_base_fee: 0, + transaction_byte_fee: 0, + existential_deposit: ext_deposit, + transfer_fee: 0, + creation_fee: 0, + contract_fee: 0, + reclaim_rebate: 0, + session_reward: reward, + early_era_slash: if monied { 20 } else { 0 }, + }.build_storage() + .unwrap(), + ); + t.extend( + timestamp::GenesisConfig:: { period: 5 } + .build_storage() + .unwrap(), + ); t } diff --git a/substrate/runtime/staking/src/tests.rs b/substrate/runtime/staking/src/tests.rs index 0034c261986de..b4f8bd975c9ae 100644 --- a/substrate/runtime/staking/src/tests.rs +++ b/substrate/runtime/staking/src/tests.rs @@ -19,8 +19,8 @@ #![cfg(test)] use super::*; +use mock::{new_test_ext, Session, Staking, System, Test, Timestamp}; use runtime_io::with_externalities; -use mock::{Session, Staking, System, Timestamp, Test, new_test_ext}; #[test] fn reward_should_work() 
{ @@ -42,23 +42,23 @@ fn rewards_should_work() { assert_eq!(Staking::voting_balance(&10), 1); System::set_block_number(3); - Timestamp::set_timestamp(15); // on time. + Timestamp::set_timestamp(15); // on time. Session::check_rotate_session(); assert_eq!(Staking::current_era(), 0); assert_eq!(Session::current_index(), 1); assert_eq!(Staking::voting_balance(&10), 11); System::set_block_number(6); - Timestamp::set_timestamp(31); // a little late + Timestamp::set_timestamp(31); // a little late Session::check_rotate_session(); assert_eq!(Staking::current_era(), 0); assert_eq!(Session::current_index(), 2); - assert_eq!(Staking::voting_balance(&10), 20); // less reward + assert_eq!(Staking::voting_balance(&10), 20); // less reward System::set_block_number(9); - Timestamp::set_timestamp(50); // very late + Timestamp::set_timestamp(50); // very late Session::check_rotate_session(); assert_eq!(Staking::current_era(), 1); assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::voting_balance(&10), 27); // much less reward + assert_eq!(Staking::voting_balance(&10), 27); // much less reward }); } @@ -73,21 +73,21 @@ fn slashing_should_work() { assert_eq!(Staking::voting_balance(&10), 1); System::set_block_number(3); - Timestamp::set_timestamp(15); // on time. + Timestamp::set_timestamp(15); // on time. Session::check_rotate_session(); assert_eq!(Staking::current_era(), 0); assert_eq!(Session::current_index(), 1); assert_eq!(Staking::voting_balance(&10), 11); System::set_block_number(6); - Timestamp::set_timestamp(30); // on time. + Timestamp::set_timestamp(30); // on time. Session::check_rotate_session(); assert_eq!(Staking::current_era(), 0); assert_eq!(Session::current_index(), 2); assert_eq!(Staking::voting_balance(&10), 21); System::set_block_number(7); - Timestamp::set_timestamp(100); // way too late - early exit. + Timestamp::set_timestamp(100); // way too late - early exit. Session::check_rotate_session(); assert_eq!(Staking::current_era(), 1); assert_eq!(Session::current_index(), 3); @@ -122,7 +122,7 @@ fn dust_account_removal_should_work() { assert_eq!(System::account_nonce(&2), 1); assert_eq!(Staking::voting_balance(&2), 256 * 20); - assert_ok!(Staking::transfer(&2, 5.into(), 256 * 10 + 1)); // index 1 (account 2) becomes zombie + assert_ok!(Staking::transfer(&2, 5.into(), 256 * 10 + 1)); // index 1 (account 2) becomes zombie assert_eq!(Staking::voting_balance(&2), 0); assert_eq!(Staking::voting_balance(&5), 256 * 10 + 1); assert_eq!(System::account_nonce(&2), 0); @@ -136,10 +136,10 @@ fn reclaim_indexing_on_new_accounts_should_work() { assert_eq!(Staking::lookup_index(4), None); assert_eq!(Staking::voting_balance(&2), 256 * 20); - assert_ok!(Staking::transfer(&2, 5.into(), 256 * 20)); // account 2 becomes zombie freeing index 1 for reclaim) + assert_ok!(Staking::transfer(&2, 5.into(), 256 * 20)); // account 2 becomes zombie freeing index 1 for reclaim) assert_eq!(Staking::voting_balance(&2), 0); - assert_ok!(Staking::transfer(&5, 6.into(), 256 * 1 + 0x69)); // account 6 takes index 1. + assert_ok!(Staking::transfer(&5, 6.into(), 256 * 1 + 0x69)); // account 6 takes index 1. 
assert_eq!(Staking::voting_balance(&6), 256 * 1 + 0x69); assert_eq!(Staking::lookup_index(1), Some(6)); }); @@ -153,23 +153,23 @@ fn reserved_balance_should_prevent_reclaim_count() { assert_eq!(Staking::lookup_index(4), None); assert_eq!(Staking::voting_balance(&2), 256 * 20); - assert_ok!(Staking::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved - assert_eq!(Staking::free_balance(&2), 0); // "free" account deleted." - assert_eq!(Staking::voting_balance(&2), 256 * 19 + 1); // reserve still exists. + assert_ok!(Staking::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved + assert_eq!(Staking::free_balance(&2), 0); // "free" account deleted." + assert_eq!(Staking::voting_balance(&2), 256 * 19 + 1); // reserve still exists. assert_eq!(System::account_nonce(&2), 1); - assert_ok!(Staking::transfer(&4, 5.into(), 256 * 1 + 0x69)); // account 4 tries to take index 1 for account 5. + assert_ok!(Staking::transfer(&4, 5.into(), 256 * 1 + 0x69)); // account 4 tries to take index 1 for account 5. assert_eq!(Staking::voting_balance(&5), 256 * 1 + 0x69); - assert_eq!(Staking::lookup_index(1), Some(2)); // but fails. + assert_eq!(Staking::lookup_index(1), Some(2)); // but fails. assert_eq!(System::account_nonce(&2), 1); - assert_eq!(Staking::slash(&2, 256 * 18 + 2), None); // account 2 gets slashed - assert_eq!(Staking::voting_balance(&2), 0); // "free" account deleted." + assert_eq!(Staking::slash(&2, 256 * 18 + 2), None); // account 2 gets slashed + assert_eq!(Staking::voting_balance(&2), 0); // "free" account deleted." assert_eq!(System::account_nonce(&2), 0); - assert_ok!(Staking::transfer(&4, 6.into(), 256 * 1 + 0x69)); // account 4 tries to take index 1 again for account 6. + assert_ok!(Staking::transfer(&4, 6.into(), 256 * 1 + 0x69)); // account 4 tries to take index 1 again for account 6. assert_eq!(Staking::voting_balance(&6), 256 * 1 + 0x69); - assert_eq!(Staking::lookup_index(1), Some(6)); // and succeeds. + assert_eq!(Staking::lookup_index(1), Some(6)); // and succeeds. }); } @@ -199,7 +199,10 @@ fn staking_should_work() { // Block 3: Unstake highest, introduce another staker. No change yet. System::set_block_number(3); assert_ok!(Staking::stake(&3)); - assert_ok!(Staking::unstake(&4, Staking::intentions().iter().position(|&x| x == 4).unwrap() as u32)); + assert_ok!(Staking::unstake( + &4, + Staking::intentions().iter().position(|&x| x == 4).unwrap() as u32 + )); assert_eq!(Staking::current_era(), 1); Session::check_rotate_session(); @@ -221,7 +224,10 @@ fn staking_should_work() { // Block 7: Unstake three. No change yet. 
System::set_block_number(7); - assert_ok!(Staking::unstake(&3, Staking::intentions().iter().position(|&x| x == 3).unwrap() as u32)); + assert_ok!(Staking::unstake( + &3, + Staking::intentions().iter().position(|&x| x == 3).unwrap() as u32 + )); Session::check_rotate_session(); assert_eq!(Session::validators(), vec![1, 3]); @@ -247,7 +253,7 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::nominate(&4, 1.into())); Session::check_rotate_session(); assert_eq!(Staking::current_era(), 1); - assert_eq!(Session::validators(), vec![1, 3]); // 4 + 1, 3 + assert_eq!(Session::validators(), vec![1, 3]); // 4 + 1, 3 assert_eq!(Staking::voting_balance(&1), 10); assert_eq!(Staking::voting_balance(&2), 20); assert_eq!(Staking::voting_balance(&3), 30); @@ -265,7 +271,10 @@ fn nominating_and_rewards_should_work() { System::set_block_number(3); assert_ok!(Staking::stake(&4)); - assert_ok!(Staking::unstake(&3, Staking::intentions().iter().position(|&x| x == 3).unwrap() as u32)); + assert_ok!(Staking::unstake( + &3, + Staking::intentions().iter().position(|&x| x == 3).unwrap() as u32 + )); assert_ok!(Staking::nominate(&3, 1.into())); Session::check_rotate_session(); assert_eq!(Session::validators(), vec![1, 4]); @@ -303,14 +312,14 @@ fn nominating_slashes_should_work() { Session::check_rotate_session(); assert_eq!(Staking::current_era(), 1); - assert_eq!(Session::validators(), vec![1, 3]); // 1 + 4, 3 + 2 + assert_eq!(Session::validators(), vec![1, 3]); // 1 + 4, 3 + 2 assert_eq!(Staking::voting_balance(&1), 10); assert_eq!(Staking::voting_balance(&2), 20); assert_eq!(Staking::voting_balance(&3), 30); assert_eq!(Staking::voting_balance(&4), 40); System::set_block_number(5); - Timestamp::set_timestamp(100); // late + Timestamp::set_timestamp(100); // late assert_eq!(Session::blocks_remaining(), 1); assert!(Session::broken_validation()); Session::check_rotate_session(); @@ -329,10 +338,16 @@ fn double_staking_should_fail() { System::set_block_number(1); assert_ok!(Staking::stake(&1)); assert_noop!(Staking::stake(&1), "Cannot stake if already staked."); - assert_noop!(Staking::nominate(&1, 1.into()), "Cannot nominate if already staked."); + assert_noop!( + Staking::nominate(&1, 1.into()), + "Cannot nominate if already staked." + ); assert_ok!(Staking::nominate(&2, 1.into())); assert_noop!(Staking::stake(&2), "Cannot stake if already nominating."); - assert_noop!(Staking::nominate(&2, 1.into()), "Cannot nominate if already nominating."); + assert_noop!( + Staking::nominate(&2, 1.into()), + "Cannot nominate if already nominating." 
+ ); }); } @@ -433,7 +448,10 @@ fn staking_balance_transfer_when_bonded_should_not_work() { with_externalities(&mut new_test_ext(0, 1, 3, 1, false, 0), || { >::insert(1, 111); assert_ok!(Staking::stake(&1)); - assert_noop!(Staking::transfer(&1, 2.into(), 69), "bondage too high to send value"); + assert_noop!( + Staking::transfer(&1, 2.into(), 69), + "bondage too high to send value" + ); }); } @@ -459,7 +477,10 @@ fn staking_balance_transfer_when_reserved_should_not_work() { with_externalities(&mut new_test_ext(0, 1, 3, 1, false, 0), || { >::insert(1, 111); assert_ok!(Staking::reserve(&1, 69)); - assert_noop!(Staking::transfer(&1, 2.into(), 69), "balance too low to send value"); + assert_noop!( + Staking::transfer(&1, 2.into(), 69), + "balance too low to send value" + ); }); } @@ -568,7 +589,10 @@ fn transferring_reserved_balance_to_nonexistent_should_fail() { with_externalities(&mut new_test_ext(0, 1, 3, 1, false, 0), || { >::insert(1, 111); assert_ok!(Staking::reserve(&1, 111)); - assert_noop!(Staking::transfer_reserved(&1, &2, 42), "beneficiary account must pre-exist"); + assert_noop!( + Staking::transfer_reserved(&1, &2, 42), + "beneficiary account must pre-exist" + ); }); } @@ -612,8 +636,14 @@ fn account_removal_removes_storage() { assert_eq!(>::get(1, b"foo".to_vec()), None); assert_eq!(>::get(1, b"bar".to_vec()), None); - assert_eq!(>::get(2, b"hello".to_vec()), Some(b"3".to_vec())); - assert_eq!(>::get(2, b"world".to_vec()), Some(b"4".to_vec())); + assert_eq!( + >::get(2, b"hello".to_vec()), + Some(b"3".to_vec()) + ); + assert_eq!( + >::get(2, b"world".to_vec()), + Some(b"4".to_vec()) + ); } }); } diff --git a/substrate/runtime/system/src/lib.rs b/substrate/runtime/system/src/lib.rs index e5e0b47a29b9f..124d6f8b1e85c 100644 --- a/substrate/runtime/system/src/lib.rs +++ b/substrate/runtime/system/src/lib.rs @@ -32,21 +32,23 @@ extern crate serde; #[macro_use] extern crate serde_derive; -extern crate substrate_runtime_io as runtime_io; +extern crate safe_mix; extern crate substrate_codec as codec; +extern crate substrate_runtime_io as runtime_io; extern crate substrate_runtime_primitives as primitives; -extern crate safe_mix; +use primitives::traits::{ + self, Bounded, CheckEqual, Hashing, MaybeDisplay, Member, One, SimpleArithmetic, SimpleBitOps, + Zero, +}; use rstd::prelude::*; -use primitives::traits::{self, CheckEqual, SimpleArithmetic, SimpleBitOps, Zero, One, Bounded, - Hashing, Member, MaybeDisplay}; -use runtime_support::{StorageValue, StorageMap, Parameter}; +use runtime_support::{Parameter, StorageMap, StorageValue}; use safe_mix::TripletMix; -#[cfg(any(feature = "std", test))] -use rstd::marker::PhantomData; #[cfg(any(feature = "std", test))] use codec::Slicable; +#[cfg(any(feature = "std", test))] +use rstd::marker::PhantomData; #[cfg(any(feature = "std", test))] use runtime_io::{twox_128, TestExternalities}; @@ -64,17 +66,33 @@ pub fn extrinsics_data_root(xts: Vec>) -> H::Output { pub trait Trait: Eq + Clone { type Index: Parameter + Member + Default + MaybeDisplay + SimpleArithmetic + Copy; - type BlockNumber: Parameter + Member + MaybeDisplay + SimpleArithmetic + Default + Bounded + Copy + rstd::hash::Hash; - type Hash: Parameter + Member + MaybeDisplay + SimpleBitOps + Default + Copy + CheckEqual + rstd::hash::Hash + AsRef<[u8]>; + type BlockNumber: Parameter + + Member + + MaybeDisplay + + SimpleArithmetic + + Default + + Bounded + + Copy + + rstd::hash::Hash; + type Hash: Parameter + + Member + + MaybeDisplay + + SimpleBitOps + + Default + + Copy + + CheckEqual + + 
rstd::hash::Hash + + AsRef<[u8]>; type Hashing: Hashing; type Digest: Parameter + Member + Default + traits::Digest; type AccountId: Parameter + Member + MaybeDisplay + Ord + Default; - type Header: Parameter + traits::Header< - Number = Self::BlockNumber, - Hashing = Self::Hashing, - Hash = Self::Hash, - Digest = Self::Digest - >; + type Header: Parameter + + traits::Header< + Number = Self::BlockNumber, + Hashing = Self::Hashing, + Hash = Self::Hash, + Digest = Self::Digest, + >; } decl_module! { @@ -119,7 +137,13 @@ impl Module { let digest = >::take(); let extrinsics_root = >::take(); let storage_root = T::Hashing::storage_root(); - ::new(number, extrinsics_root, storage_root, parent_hash, digest) + ::new( + number, + extrinsics_root, + storage_root, + parent_hash, + digest, + ) } /// Deposits a log and ensures it matches the blocks log data. @@ -131,11 +155,16 @@ impl Module { /// Calculate the current block's random seed. fn calculate_random() -> T::Hash { - assert!(Self::block_number() > Zero::zero(), "Block number may never be zero"); + assert!( + Self::block_number() > Zero::zero(), + "Block number may never be zero" + ); (0..81) - .scan( - Self::block_number() - One::one(), - |c, _| { if *c > Zero::zero() { *c -= One::one() }; Some(*c) + .scan(Self::block_number() - One::one(), |c, _| { + if *c > Zero::zero() { + *c -= One::one() + }; + Some(*c) }) .map(Self::block_hash) .triplet_mix() @@ -186,7 +215,9 @@ impl Module { /// Remove all extrinsics data and save the extrinsics trie root. pub fn derive_extrinsics() { - let extrinsics = (0..Self::extrinsic_index()).map(>::take).collect(); + let extrinsics = (0..Self::extrinsic_index()) + .map(>::take) + .collect(); let xts_root = extrinsics_data_root::(extrinsics); >::put(xts_root); } @@ -206,11 +237,10 @@ impl Default for GenesisConfig { } #[cfg(any(feature = "std", test))] -impl primitives::BuildStorage for GenesisConfig -{ +impl primitives::BuildStorage for GenesisConfig { fn build_storage(self) -> Result { - use runtime_io::twox_128; use codec::Slicable; + use runtime_io::twox_128; Ok(map![ twox_128(&>::key_for(T::BlockNumber::zero())).to_vec() => [69u8; 32].encode(), diff --git a/substrate/runtime/timestamp/src/lib.rs b/substrate/runtime/timestamp/src/lib.rs index f541e0a11f30a..2f7c34d6e7bb2 100644 --- a/substrate/runtime/timestamp/src/lib.rs +++ b/substrate/runtime/timestamp/src/lib.rs @@ -31,19 +31,20 @@ extern crate substrate_runtime_io as runtime_io; #[macro_use] extern crate serde_derive; +extern crate substrate_codec as codec; #[cfg(test)] extern crate substrate_primitives; +extern crate substrate_runtime_consensus as consensus; extern crate substrate_runtime_primitives as runtime_primitives; extern crate substrate_runtime_system as system; -extern crate substrate_runtime_consensus as consensus; -extern crate substrate_codec as codec; -use runtime_support::{StorageValue, Parameter}; +use runtime_primitives::traits::{As, Executable, MaybeEmpty, SimpleArithmetic, Zero}; use runtime_support::dispatch::Result; -use runtime_primitives::traits::{Executable, MaybeEmpty, SimpleArithmetic, As, Zero}; +use runtime_support::{Parameter, StorageValue}; -pub trait Trait: consensus::Trait where - ::PublicAux: MaybeEmpty +pub trait Trait: consensus::Trait +where + ::PublicAux: MaybeEmpty, { // the position of the required timestamp-set extrinsic. const TIMESTAMP_SET_POSITION: u32; @@ -78,7 +79,10 @@ impl Module { /// Set the current time. 
fn set(aux: &T::PublicAux, now: T::Moment) -> Result { assert!(aux.is_empty()); - assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); + assert!( + !::DidUpdate::exists(), + "Timestamp must be updated only once in the block" + ); assert!( >::extrinsic_index() == T::TIMESTAMP_SET_POSITION, "Timestamp extrinsic must be at position {} in the block", @@ -102,7 +106,10 @@ impl Module { impl Executable for Module { fn execute() { - assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); + assert!( + ::DidUpdate::take(), + "Timestamp must be updated once in the block" + ); } } @@ -124,11 +131,10 @@ impl Default for GenesisConfig { } #[cfg(any(feature = "std", test))] -impl runtime_primitives::BuildStorage for GenesisConfig -{ +impl runtime_primitives::BuildStorage for GenesisConfig { fn build_storage(self) -> ::std::result::Result { - use runtime_io::twox_128; use codec::Slicable; + use runtime_io::twox_128; Ok(map![ twox_128(>::key()).to_vec() => self.period.encode(), twox_128(>::key()).to_vec() => T::Moment::sa(0).encode() @@ -141,10 +147,10 @@ mod tests { use super::*; use runtime_io::with_externalities; - use substrate_primitives::H256; - use runtime_primitives::BuildStorage; - use runtime_primitives::traits::{HasPublicAux, BlakeTwo256}; use runtime_primitives::testing::{Digest, Header}; + use runtime_primitives::traits::{BlakeTwo256, HasPublicAux}; + use runtime_primitives::BuildStorage; + use substrate_primitives::H256; #[derive(Clone, Eq, PartialEq)] pub struct Test; @@ -172,7 +178,9 @@ mod tests { #[test] fn timestamp_works() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); t.extend(GenesisConfig:: { period: 0 }.build_storage().unwrap()); with_externalities(&mut t, || { @@ -185,7 +193,9 @@ mod tests { #[test] #[should_panic(expected = "Timestamp must be updated only once in the block")] fn double_timestamp_should_fail() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); t.extend(GenesisConfig:: { period: 5 }.build_storage().unwrap()); with_externalities(&mut t, || { @@ -196,9 +206,13 @@ mod tests { } #[test] - #[should_panic(expected = "Timestamp but increment by at least between sequential blocks")] + #[should_panic( + expected = "Timestamp but increment by at least between sequential blocks" + )] fn block_period_is_enforced() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap(); + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap(); t.extend(GenesisConfig:: { period: 5 }.build_storage().unwrap()); with_externalities(&mut t, || { diff --git a/substrate/runtime/version/src/lib.rs b/substrate/runtime/version/src/lib.rs index cadd7ad13b593..e35396898e883 100644 --- a/substrate/runtime/version/src/lib.rs +++ b/substrate/runtime/version/src/lib.rs @@ -34,8 +34,8 @@ extern crate substrate_runtime_support as runtime_support; extern crate substrate_codec as codec; -use rstd::prelude::*; use codec::Slicable; +use rstd::prelude::*; #[cfg(feature = "std")] use std::borrow::Cow; @@ -47,20 +47,25 @@ pub type VersionString = &'static str; #[cfg(feature = "std")] #[macro_export] macro_rules! ver_str { - ( $y:expr ) => {{ ::std::borrow::Cow::Borrowed($y) }} + ($y:expr) => {{ + ::std::borrow::Cow::Borrowed($y) + }}; } #[cfg(not(feature = "std"))] #[macro_export] macro_rules! 
ver_str { - ( $y:expr ) => {{ $y }} + ($y:expr) => {{ + $y + }}; } /// Runtime version. /// This should not be thought of as classic Semver (major/minor/tiny). /// This triplet have different semantics and mis-interpretation could cause problems. -/// In particular: bug fixes should result in an increment of `spec_version` and possibly `authoring_version`, -/// absolutely not `impl_version` since they change the semantics of the runtime. +/// In particular: bug fixes should result in an increment of `spec_version` and possibly +/// `authoring_version`, absolutely not `impl_version` since they change the semantics of the +/// runtime. #[derive(Clone)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] pub struct RuntimeVersion { @@ -68,18 +73,18 @@ pub struct RuntimeVersion { /// A different on-chain spec_name to that of the native runtime would normally result /// in node not attempting to sync or author blocks. pub spec_name: VersionString, - + /// Name of the implementation of the spec. This is of little consequence for the node /// and serves only to differentiate code of different implementation teams. For this /// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the /// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different /// `impl_name`. pub impl_name: VersionString, - + /// `authoring_version` is the version of the authorship interface. An authoring node /// will not attempt to author blocks unless this is equal to its native runtime. pub authoring_version: u32, - + /// Version of the runtime specification. A full-node will not attempt to use its native /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, /// `spec_version` and `authoring_version` are the same between Wasm and native. @@ -112,15 +117,14 @@ impl Default for RuntimeVersion { impl RuntimeVersion { /// Check if this version matches other version for calling into runtime. pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { - self.spec_version == other.spec_version && - self.spec_name == other.spec_name && - self.authoring_version == other.authoring_version + self.spec_version == other.spec_version + && self.spec_name == other.spec_name + && self.authoring_version == other.authoring_version } /// Check if this version matches other version for authoring blocks. pub fn can_author_with(&self, other: &RuntimeVersion) -> bool { - self.authoring_version == other.authoring_version && - self.spec_name == other.spec_name + self.authoring_version == other.authoring_version && self.spec_name == other.spec_name } } diff --git a/substrate/serializer/src/lib.rs b/substrate/serializer/src/lib.rs index 74cf155ce77e8..74896c60a5c3e 100644 --- a/substrate/serializer/src/lib.rs +++ b/substrate/serializer/src/lib.rs @@ -24,7 +24,7 @@ extern crate serde; extern crate serde_json; -pub use serde_json::{from_str, from_slice, from_reader, Result, Error}; +pub use serde_json::{from_reader, from_slice, from_str, Error, Result}; const PROOF: &str = "Serializers are infallible; qed"; @@ -39,6 +39,9 @@ pub fn encode(value: &T) -> Vec { } /// Serialize the given data structure as JSON into the IO stream. 
-pub fn to_writer(writer: W, value: &T) -> Result<()> { +pub fn to_writer( + writer: W, + value: &T, +) -> Result<()> { serde_json::to_writer(writer, value) } diff --git a/substrate/state-db/src/lib.rs b/substrate/state-db/src/lib.rs index 8e4e330333b63..9452f7203df2f 100644 --- a/substrate/state-db/src/lib.rs +++ b/substrate/state-db/src/lib.rs @@ -26,32 +26,60 @@ //! their subtrees. //! //! # Pruning. -//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each finalization until pruning -//! constraints are satisfied. +//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each finalization until +//! pruning constraints are satisfied. //! -#[macro_use] extern crate log; +#[macro_use] +extern crate log; extern crate parking_lot; extern crate substrate_codec as codec; extern crate substrate_primitives as primitives; mod unfinalized; mod pruning; -#[cfg(test)] mod test; +#[cfg(test)] +mod test; -use std::fmt; -use parking_lot::RwLock; use codec::Slicable; +use parking_lot::RwLock; +use pruning::RefWindow; use std::collections::HashSet; +use std::fmt; use unfinalized::UnfinalizedOverlay; -use pruning::RefWindow; /// Database value type. pub type DBValue = Vec; /// Basic set of requirements for the Block hash and node key types. -pub trait Hash: Send + Sync + Sized + Eq + PartialEq + Clone + Default + fmt::Debug + Slicable + std::hash::Hash + 'static {} -impl Hash for T {} +pub trait Hash: + Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Slicable + + std::hash::Hash + + 'static +{ +} +impl< + T: Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Slicable + + std::hash::Hash + + 'static, + > Hash for T +{} /// Backend database trait. Read-only. pub trait MetaDb { @@ -61,7 +89,6 @@ pub trait MetaDb { fn get_meta(&self, key: &[u8]) -> Result, Self::Error>; } - /// Backend database trait. Read-only. pub trait HashDb { type Hash: Hash; @@ -98,7 +125,6 @@ pub struct ChangeSet { pub deleted: Vec, } - /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { @@ -111,7 +137,8 @@ pub struct CommitSet { /// Pruning contraints. If none are specified pruning is #[derive(Default, Debug, Clone)] pub struct Constraints { - /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only unfinalized states. + /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only unfinalized + /// states. pub max_blocks: Option, /// Maximum memory in the pruning overlay. pub max_mem: Option, @@ -152,13 +179,15 @@ struct StateDbSync { } impl StateDbSync { - pub fn new(mode: PruningMode, db: &D) -> Result, Error> { + pub fn new( + mode: PruningMode, + db: &D, + ) -> Result, Error> { trace!("StateDb settings: {:?}", mode); let unfinalized: UnfinalizedOverlay = UnfinalizedOverlay::new(db)?; let pruning: Option> = match mode { PruningMode::Constrained(Constraints { - max_mem: Some(_), - .. + max_mem: Some(_), .. 
}) => unimplemented!(), PruningMode::Constrained(_) => Some(RefWindow::new(db)?), PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, @@ -166,12 +195,18 @@ impl StateDbSync { Ok(StateDbSync { mode, unfinalized, - pruning: pruning, + pruning, pinned: Default::default(), }) } - pub fn insert_block(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet) -> CommitSet { + pub fn insert_block( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + mut changeset: ChangeSet, + ) -> CommitSet { if number == 0 { return CommitSet { data: changeset, @@ -187,25 +222,21 @@ impl StateDbSync { meta: Default::default(), } }, - PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { - self.unfinalized.insert(hash, number, parent_hash, changeset) - } + PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => + self.unfinalized + .insert(hash, number, parent_hash, changeset), } } pub fn finalize_block(&mut self, hash: &BlockHash) -> CommitSet { let mut commit = match self.mode { - PruningMode::ArchiveAll => { - CommitSet::default() - }, + PruningMode::ArchiveAll => CommitSet::default(), PruningMode::ArchiveCanonical => { let mut commit = self.unfinalized.finalize(hash); commit.data.deleted.clear(); commit }, - PruningMode::Constrained(_) => { - self.unfinalized.finalize(hash) - }, + PruningMode::Constrained(_) => self.unfinalized.finalize(hash), }; if let Some(ref mut pruning) = self.pruning { pruning.note_finalized(hash, &mut commit); @@ -215,19 +246,24 @@ impl StateDbSync { } fn prune(&mut self, commit: &mut CommitSet) { - if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) { + if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = + (&mut self.pruning, &self.mode) + { loop { if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { - break; + break } - if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) { - break; + if constraints + .max_mem + .map_or(false, |m| pruning.mem_used() > m) + { + break } let pinned = &self.pinned; if pruning.next_hash().map_or(false, |h| pinned.contains(&h)) { - break; + break } pruning.prune_one(commit); @@ -243,9 +279,13 @@ impl StateDbSync { self.pinned.remove(hash); } - pub fn get>(&self, key: &Key, db: &D) -> Result, Error> { + pub fn get>( + &self, + key: &Key, + db: &D, + ) -> Result, Error> { if let Some(value) = self.unfinalized.get(key) { - return Ok(Some(value)); + return Ok(Some(value)) } db.get(key).map_err(|e| Error::Db(e)) } @@ -259,15 +299,26 @@ pub struct StateDb { impl StateDb { /// Creates a new instance. Does not expect any metadata in the database. - pub fn new(mode: PruningMode, db: &D) -> Result, Error> { + pub fn new( + mode: PruningMode, + db: &D, + ) -> Result, Error> { Ok(StateDb { - db: RwLock::new(StateDbSync::new(mode, db)?) + db: RwLock::new(StateDbSync::new(mode, db)?), }) } /// Add a new unfinalized block. - pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> CommitSet { - self.db.write().insert_block(hash, number, parent_hash, changeset) + pub fn insert_block( + &self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> CommitSet { + self.db + .write() + .insert_block(hash, number, parent_hash, changeset) } /// Finalize a previously inserted block. 
@@ -286,7 +337,11 @@ impl StateDb { } /// Get a value from unfinalized/pruning overlay or the backing DB. - pub fn get>(&self, key: &Key, db: &D) -> Result, Error> { + pub fn get>( + &self, + key: &Key, + db: &D, + ) -> Result, Error> { self.db.read().get(key, db) } } @@ -294,19 +349,44 @@ impl StateDb { #[cfg(test)] mod tests { use primitives::H256; - use {StateDb, PruningMode, Constraints}; - use test::{make_db, make_changeset, TestDb}; + use test::{make_changeset, make_db, TestDb}; + use {Constraints, PruningMode, StateDb}; fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); let state_db = StateDb::new(settings, &db).unwrap(); - db.commit(&state_db.insert_block(&H256::from(1), 1, &H256::from(0), make_changeset(&[1], &[91]))); - db.commit(&state_db.insert_block(&H256::from(21), 2, &H256::from(1), make_changeset(&[21], &[921, 1]))); - db.commit(&state_db.insert_block(&H256::from(22), 2, &H256::from(1), make_changeset(&[22], &[922]))); - db.commit(&state_db.insert_block(&H256::from(3), 3, &H256::from(21), make_changeset(&[3], &[93]))); + db.commit(&state_db.insert_block( + &H256::from(1), + 1, + &H256::from(0), + make_changeset(&[1], &[91]), + )); + db.commit(&state_db.insert_block( + &H256::from(21), + 2, + &H256::from(1), + make_changeset(&[21], &[921, 1]), + )); + db.commit(&state_db.insert_block( + &H256::from(22), + 2, + &H256::from(1), + make_changeset(&[22], &[922]), + )); + db.commit(&state_db.insert_block( + &H256::from(3), + 3, + &H256::from(21), + make_changeset(&[3], &[93]), + )); db.commit(&state_db.finalize_block(&H256::from(1))); - db.commit(&state_db.insert_block(&H256::from(4), 4, &H256::from(3), make_changeset(&[4], &[94]))); + db.commit(&state_db.insert_block( + &H256::from(4), + 4, + &H256::from(3), + make_changeset(&[4], &[94]), + )); db.commit(&state_db.finalize_block(&H256::from(21))); db.commit(&state_db.finalize_block(&H256::from(3))); diff --git a/substrate/state-db/src/pruning.rs b/substrate/state-db/src/pruning.rs index b5fbb553d9f09..bcc1ff87e14fe 100644 --- a/substrate/state-db/src/pruning.rs +++ b/substrate/state-db/src/pruning.rs @@ -22,9 +22,9 @@ //! the death list. //! The changes are journaled in the DB. +use codec::{self, Slicable}; use std::collections::{HashMap, HashSet, VecDeque}; -use codec::{Slicable, self}; -use {CommitSet, Error, MetaDb, to_meta_key, Hash}; +use {to_meta_key, CommitSet, Error, Hash, MetaDb}; const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; @@ -73,7 +73,8 @@ fn to_journal_key(block: u64) -> Vec { impl RefWindow { pub fn new(db: &D) -> Result, Error> { - let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())) + let last_pruned = db + .get_meta(&to_meta_key(LAST_PRUNED, &())) .map_err(|e| Error::Db(e))?; let pending_number: u64 = match last_pruned { Some(buffer) => u64::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)? + 1, @@ -83,7 +84,7 @@ impl RefWindow { let mut pruning = RefWindow { death_rows: Default::default(), death_index: Default::default(), - pending_number: pending_number, + pending_number, }; // read the journal trace!(target: "state-db", "Reading pruning journal. Last pruned #{}", pending_number - 1); @@ -91,9 +92,15 @@ impl RefWindow { let journal_key = to_journal_key(block); match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ Some(record) => { - let record: JournalRecord = Slicable::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; + let record: JournalRecord = + Slicable::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + pruning.import( + &record.hash, + journal_key, + record.inserted.into_iter(), + record.deleted, + ); }, None => break, } @@ -102,11 +109,19 @@ impl RefWindow { Ok(pruning) } - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { + fn import>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Vec, + ) { // remove all re-inserted keys from death rows for k in inserted { if let Some(block) = self.death_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); + self.death_rows[(block - self.pending_number) as usize] + .deleted + .remove(&k); } } @@ -115,13 +130,11 @@ impl RefWindow { for k in deleted.iter() { self.death_index.insert(k.clone(), imported_block); } - self.death_rows.push_back( - DeathRow { - hash: hash.clone(), - deleted: deleted.into_iter().collect(), - journal_key: journal_key, - } - ); + self.death_rows.push_back(DeathRow { + hash: hash.clone(), + deleted: deleted.into_iter().collect(), + journal_key, + }); } pub fn window_size(&self) -> u64 { @@ -138,13 +151,19 @@ impl RefWindow { /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. pub fn prune_one(&mut self, commit: &mut CommitSet) { - let pruned = self.death_rows.pop_front().expect("prune_one is only called with a non-empty window"); + let pruned = self + .death_rows + .pop_front() + .expect("prune_one is only called with a non-empty window"); trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); for k in pruned.deleted.iter() { self.death_index.remove(&k); } commit.data.deleted.extend(pruned.deleted.into_iter()); - commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), self.pending_number.encode())); + commit + .meta + .inserted + .push((to_meta_key(LAST_PRUNED, &()), self.pending_number.encode())); commit.meta.deleted.push(pruned.journal_key); self.pending_number += 1; } @@ -152,7 +171,12 @@ impl RefWindow { /// Add a change set to the window. 
Creates a journal record and pushes it to `commit` pub fn note_finalized(&mut self, hash: &BlockHash, commit: &mut CommitSet) { trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); + let inserted = commit + .data + .inserted + .iter() + .map(|(k, _)| k.clone()) + .collect(); let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); let journal_record = JournalRecord { hash: hash.clone(), @@ -161,9 +185,17 @@ impl RefWindow { }; let block = self.pending_number + self.window_size(); let journal_key = to_journal_key(block); - commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); - - self.import(hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); + commit + .meta + .inserted + .push((journal_key.clone(), journal_record.encode())); + + self.import( + hash, + journal_key, + journal_record.inserted.into_iter(), + journal_record.deleted, + ); } } @@ -171,8 +203,8 @@ impl RefWindow { mod tests { use super::RefWindow; use primitives::H256; - use {CommitSet}; - use test::{make_db, make_commit, TestDb}; + use test::{make_commit, make_db, TestDb}; + use CommitSet; fn check_journal(pruning: &RefWindow, db: &TestDb) { let restored: RefWindow = RefWindow::new(db).unwrap(); diff --git a/substrate/state-db/src/test.rs b/substrate/state-db/src/test.rs index d9ff05a6a26a7..0b79889a8056d 100644 --- a/substrate/state-db/src/test.rs +++ b/substrate/state-db/src/test.rs @@ -16,9 +16,9 @@ //! Test utils -use std::collections::HashMap; use primitives::H256; -use {DBValue, ChangeSet, CommitSet, MetaDb, HashDb}; +use std::collections::HashMap; +use {ChangeSet, CommitSet, DBValue, HashDb, MetaDb}; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { @@ -62,7 +62,10 @@ impl TestDb { pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { ChangeSet { - inserted: inserted.iter().map(|v| (H256::from(*v), H256::from(*v).to_vec())).collect(), + inserted: inserted + .iter() + .map(|v| (H256::from(*v), H256::from(*v).to_vec())) + .collect(), deleted: deleted.iter().map(|v| H256::from(*v)).collect(), } } @@ -76,8 +79,10 @@ pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { pub fn make_db(inserted: &[u64]) -> TestDb { TestDb { - data: inserted.iter().map(|v| (H256::from(*v), H256::from(*v).to_vec())).collect(), + data: inserted + .iter() + .map(|v| (H256::from(*v), H256::from(*v).to_vec())) + .collect(), meta: Default::default(), } } - diff --git a/substrate/state-db/src/unfinalized.rs b/substrate/state-db/src/unfinalized.rs index e4cd5aa278366..4bbd9e48a7876 100644 --- a/substrate/state-db/src/unfinalized.rs +++ b/substrate/state-db/src/unfinalized.rs @@ -17,9 +17,9 @@ //! Finalization window. //! Maintains trees of block overlays and allows discarding trees/roots -use std::collections::{HashMap, VecDeque}; -use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; +use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb}; use codec::{self, Slicable}; +use std::collections::{HashMap, VecDeque}; const UNFINALIZED_JOURNAL: &[u8] = b"unfinalized_journal"; const LAST_FINALIZED: &[u8] = b"last_finalized"; @@ -73,10 +73,12 @@ struct BlockOverlay { impl UnfinalizedOverlay { /// Creates a new instance. Does not expect any metadata to be present in the DB. 
pub fn new(db: &D) -> Result, Error> { - let last_finalized = db.get_meta(&to_meta_key(LAST_FINALIZED, &())) + let last_finalized = db + .get_meta(&to_meta_key(LAST_FINALIZED, &())) .map_err(|e| Error::Db(e))?; let last_finalized = match last_finalized { - Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)?), + Some(buffer) => + Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)?), None => None, }; let mut levels = VecDeque::new(); @@ -93,7 +95,8 @@ impl UnfinalizedOverlay { let journal_key = to_journal_key(block, index); match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { Some(record) => { - let record: JournalRecord = Slicable::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; + let record: JournalRecord = + Slicable::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; let overlay = BlockOverlay { hash: record.hash.clone(), journal_key, @@ -110,7 +113,7 @@ impl UnfinalizedOverlay { } } if level.is_empty() { - break; + break } levels.push_back(level); block += 1; @@ -118,36 +121,58 @@ trace!(target: "state-db", "Finished reading unfinalized journal, {} entries", total); } Ok(UnfinalizedOverlay { - last_finalized: last_finalized, + last_finalized, levels, parents, }) } - /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. - pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> CommitSet { + /// Insert a new block into the overlay. If inserted on the second level or lower expects + /// parent to be present in the window. + pub fn insert( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> CommitSet { let mut commit = CommitSet::default(); if self.levels.is_empty() && self.last_finalized.is_none() { // assume that parent was finalized let last_finalized = (parent_hash.clone(), number - 1); - commit.meta.inserted.push((to_meta_key(LAST_FINALIZED, &()), last_finalized.encode())); + commit + .meta + .inserted + .push((to_meta_key(LAST_FINALIZED, &()), last_finalized.encode())); self.last_finalized = Some(last_finalized); } else if self.last_finalized.is_some() { - assert!(number >= self.front_block_number() && number < (self.front_block_number() + self.levels.len() as u64 + 1)); + assert!( + number >= self.front_block_number() + && number < (self.front_block_number() + self.levels.len() as u64 + 1) + ); // check for valid parent if inserting on second level or higher if number == self.front_block_number() { - assert!(self.last_finalized.as_ref().map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1)); + assert!( + self.last_finalized + .as_ref() + .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) + ); } else { assert!(self.parents.contains_key(&parent_hash)); } } - let level = if self.levels.is_empty() || number == self.front_block_number() + self.levels.len() as u64 { + let level = if self.levels.is_empty() + || number == self.front_block_number() + self.levels.len() as u64 + { self.levels.push_back(Vec::new()); - self.levels.back_mut().expect("can't be empty after insertion; qed") + self.levels + .back_mut() + .expect("can't be empty after insertion; qed") } else { let front_block_number = self.front_block_number(); - self.levels.get_mut((number - front_block_number) as usize) - .expect("number is [front_block_number ..
front_block_number + levels.len()) is asserted in precondition; qed") + self.levels.get_mut((number - front_block_number) as usize).expect( + "number is [front_block_number .. front_block_number + levels.len()) is asserted in precondition; qed", + ) }; let index = level.len() as u64; @@ -182,11 +207,20 @@ impl UnfinalizedOverlay { ) { if let Some((level, sublevels)) = levels.split_first_mut() { level.retain(|ref overlay| { - let parent = parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone(); + let parent = parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") + .clone(); if parent == *hash { parents.remove(&overlay.hash); discarded_journals.push(overlay.journal_key.clone()); - Self::discard(sublevels, parents, discarded_journals, number + 1, &overlay.hash); + Self::discard( + sublevels, + parents, + discarded_journals, + number + 1, + &overlay.hash, + ); false } else { true @@ -196,7 +230,10 @@ impl UnfinalizedOverlay { } fn front_block_number(&self) -> u64 { - self.last_finalized.as_ref().map(|&(_, n)| n + 1).unwrap_or(0) + self.last_finalized + .as_ref() + .map(|&(_, n)| n + 1) + .unwrap_or(0) } /// Select a top-level root and finalized it. Discards all sibling subtrees and the root. @@ -204,7 +241,9 @@ impl UnfinalizedOverlay { pub fn finalize(&mut self, hash: &BlockHash) -> CommitSet { trace!(target: "state-db", "Finalizing {:?}", hash); let level = self.levels.pop_front().expect("no blocks to finalize"); - let index = level.iter().position(|overlay| overlay.hash == *hash) + let index = level + .iter() + .position(|overlay| overlay.hash == *hash) .expect("attempting to finalize unknown block"); let mut commit = CommitSet::default(); @@ -220,7 +259,13 @@ impl UnfinalizedOverlay { // required for recursive processing. 
A more efficient implementaion // that does not require converting to vector is possible let mut vec: Vec<_> = self.levels.drain(..).collect(); - Self::discard(&mut vec, &mut self.parents, &mut discarded_journals, 0, &overlay.hash); + Self::discard( + &mut vec, + &mut self.parents, + &mut discarded_journals, + 0, + &overlay.hash, + ); self.levels.extend(vec.into_iter()); } // cleanup journal entry @@ -228,7 +273,10 @@ impl UnfinalizedOverlay { } commit.meta.deleted.append(&mut discarded_journals); let last_finalized = (hash.clone(), self.front_block_number()); - commit.meta.inserted.push((to_meta_key(LAST_FINALIZED, &()), last_finalized.encode())); + commit + .meta + .inserted + .push((to_meta_key(LAST_FINALIZED, &()), last_finalized.encode())); self.last_finalized = Some(last_finalized); trace!(target: "state-db", "Discarded {} records", commit.meta.deleted.len()); commit @@ -239,7 +287,7 @@ impl UnfinalizedOverlay { for level in self.levels.iter() { for overlay in level.iter() { if let Some(value) = overlay.values.get(&key) { - return Some(value.clone()); + return Some(value.clone()) } } } @@ -250,9 +298,9 @@ impl UnfinalizedOverlay { #[cfg(test)] mod tests { use super::UnfinalizedOverlay; - use {ChangeSet}; use primitives::H256; - use test::{make_db, make_changeset}; + use test::{make_changeset, make_db}; + use ChangeSet; fn contains(overlay: &UnfinalizedOverlay, key: u64) -> bool { overlay.get(&H256::from(key)) == Some(H256::from(key).to_vec()) @@ -261,7 +309,7 @@ mod tests { #[test] fn created_from_empty_db() { let db = make_db(&[]); - let overlay: UnfinalizedOverlay = UnfinalizedOverlay::new(&db).unwrap(); + let overlay: UnfinalizedOverlay = UnfinalizedOverlay::new(&db).unwrap(); assert_eq!(overlay.last_finalized, None); assert!(overlay.levels.is_empty()); assert!(overlay.parents.is_empty()); @@ -382,7 +430,6 @@ mod tests { assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); } - #[test] fn complex_tree() { let mut db = make_db(&[]); diff --git a/substrate/state-machine/src/backend.rs b/substrate/state-machine/src/backend.rs index 457a40dcc5382..c2654e93fe07d 100644 --- a/substrate/state-machine/src/backend.rs +++ b/substrate/state-machine/src/backend.rs @@ -16,10 +16,10 @@ //! State machine backends. These manage the code and storage of contracts. -use std::{error, fmt}; use std::collections::HashMap; use std::sync::Arc; -use trie_backend::{TryIntoTrieBackend, TrieBackend}; +use std::{error, fmt}; +use trie_backend::{TrieBackend, TryIntoTrieBackend}; /// A state backend is used to read state data and can have changes committed /// to it. @@ -42,7 +42,8 @@ pub trait Backend: TryIntoTrieBackend { /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. fn storage_root(&self, delta: I) -> ([u8; 32], Self::Transaction) - where I: IntoIterator, Option>)>; + where + I: IntoIterator, Option>)>; /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(Vec, Vec)>; @@ -60,7 +61,9 @@ impl fmt::Display for Void { } impl error::Error for Void { - fn description(&self) -> &str { "unreachable error" } + fn description(&self) -> &str { + "unreachable error" + } } /// In-memory backend. 
Fully recomputes tries on each commit but useful for @@ -84,8 +87,12 @@ impl InMemory { let mut inner: HashMap<_, _> = (&*self.inner).clone(); for (key, val) in changes { match val { - Some(v) => { inner.insert(key, v); }, - None => { inner.remove(&key); }, + Some(v) => { + inner.insert(key, v); + }, + None => { + inner.remove(&key); + }, } } @@ -110,26 +117,36 @@ impl Backend for InMemory { } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.inner.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f); + self.inner + .keys() + .filter(|key| key.starts_with(prefix)) + .map(|k| &**k) + .for_each(f); } fn storage_root(&self, delta: I) -> ([u8; 32], Self::Transaction) - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { let existing_pairs = self.inner.iter().map(|(k, v)| (k.clone(), Some(v.clone()))); let transaction: Vec<_> = delta.into_iter().collect(); - let root = ::triehash::trie_root(existing_pairs.chain(transaction.iter().cloned()) - .collect::>() - .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) + let root = ::triehash::trie_root( + existing_pairs + .chain(transaction.iter().cloned()) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))), ).0; (root, transaction) } fn pairs(&self) -> Vec<(Vec, Vec)> { - self.inner.iter().map(|(k, v)| (k.clone(), v.clone())).collect() + self.inner + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect() } } @@ -146,7 +163,7 @@ impl TryIntoTrieBackend for InMemory { for (key, value) in self.inner.iter() { if let Err(e) = trie.insert(&key, &value) { warn!(target: "trie", "Failed to write to trie: {}", e); - return None; + return None } } } diff --git a/substrate/state-machine/src/ext.rs b/substrate/state-machine/src/ext.rs index 5c5b9b573627d..3cd483cc3de47 100644 --- a/substrate/state-machine/src/ext.rs +++ b/substrate/state-machine/src/ext.rs @@ -16,8 +16,8 @@ //! Conrete externalities implementation. -use std::{error, fmt}; use backend::Backend; +use std::{error, fmt}; use {Externalities, OverlayedChanges}; /// Errors that can occur when interacting with the externalities. @@ -72,7 +72,9 @@ impl<'a, B: 'a + Backend> Ext<'a, B> { /// Get the transaction necessary to update the backend. pub fn transaction(mut self) -> B::Transaction { let _ = self.storage_root(); - self.transaction.expect("transaction always set after calling storage root; qed").0 + self.transaction + .expect("transaction always set after calling storage root; qed") + .0 } /// Invalidates the currently cached storage root and the db transaction. 
@@ -88,7 +90,9 @@ impl<'a, B: 'a + Backend> Ext<'a, B> { pub fn storage_pairs(&self) -> Vec<(Vec, Vec)> { use std::collections::HashMap; - self.backend.pairs().iter() + self.backend + .pairs() + .iter() .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) .chain(self.overlay.committed.clone().into_iter()) .chain(self.overlay.prospective.clone().into_iter()) @@ -100,11 +104,18 @@ impl<'a, B: 'a + Backend> Ext<'a, B> { } impl<'a, B: 'a> Externalities for Ext<'a, B> - where B: Backend +where + B: Backend, { fn storage(&self, key: &[u8]) -> Option> { - self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| - self.backend.storage(key).expect("Externalities not allowed to fail within runtime")) + self.overlay + .storage(key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| { + self.backend + .storage(key) + .expect("Externalities not allowed to fail within runtime") + }) } fn place_storage(&mut self, key: Vec, value: Option>) { @@ -124,12 +135,15 @@ impl<'a, B: 'a> Externalities for Ext<'a, B> } fn storage_root(&mut self) -> [u8; 32] { - if let Some((_, ref root)) = self.transaction { - return root.clone(); + if let Some((_, ref root)) = self.transaction { + return root.clone() } // compute and memoize - let delta = self.overlay.committed.iter() + let delta = self + .overlay + .committed + .iter() .chain(self.overlay.prospective.iter()) .map(|(k, v)| (k.clone(), v.clone())); diff --git a/substrate/state-machine/src/lib.rs b/substrate/state-machine/src/lib.rs index e0119edeaaa40..40c60a0658af2 100644 --- a/substrate/state-machine/src/lib.rs +++ b/substrate/state-machine/src/lib.rs @@ -27,14 +27,14 @@ extern crate log; extern crate ethereum_types; extern crate hashdb; extern crate memorydb; -extern crate triehash; extern crate patricia_trie; +extern crate triehash; extern crate byteorder; extern crate parking_lot; -use std::collections::HashMap; use std::collections::hash_map::Drain; +use std::collections::HashMap; use std::fmt; pub mod backend; @@ -43,10 +43,10 @@ mod testing; mod proving_backend; mod trie_backend; -pub use testing::TestExternalities; -pub use ext::Ext; pub use backend::Backend; -pub use trie_backend::{TryIntoTrieBackend, TrieBackend, TrieH256, Storage, DBValue}; +pub use ext::Ext; +pub use testing::TestExternalities; +pub use trie_backend::{DBValue, Storage, TrieBackend, TrieH256, TryIntoTrieBackend}; /// The overlayed changes to state to be queried on top of the backend. /// @@ -63,7 +63,8 @@ impl OverlayedChanges { /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. pub fn storage(&self, key: &[u8]) -> Option> { - self.prospective.get(key) + self.prospective + .get(key) .or_else(|| self.committed.get(key)) .map(|x| x.as_ref().map(AsRef::as_ref)) } @@ -114,7 +115,9 @@ pub enum ExecutionError { } impl fmt::Display for ExecutionError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Externalities Error") } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Externalities Error") + } } /// Externalities: pinned to specific active address. @@ -135,7 +138,8 @@ pub trait Externalities { /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); - /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). + /// Set or clear a storage entry (`key`) of current contract being called (effective + /// immediately). 
fn place_storage(&mut self, key: Vec, value: Option>); /// Get the identity of the chain. @@ -174,32 +178,28 @@ pub fn execute( exec: &Exec, method: &str, call_data: &[u8], -) -> Result<(Vec, B::Transaction), Box> -{ +) -> Result<(Vec, B::Transaction), Box> { let result = { let mut externalities = ext::Ext::new(overlay, backend); // make a copy. - let code = externalities.storage(b":code") + let code = externalities + .storage(b":code") .ok_or(Box::new(ExecutionError::CodeEntryDoesNotExist) as Box)? .to_vec(); - exec.call( - &mut externalities, - &code, - method, - call_data, - ).map(move |out| (out, externalities.transaction())) + exec.call(&mut externalities, &code, method, call_data) + .map(move |out| (out, externalities.transaction())) }; match result { Ok(x) => { overlay.commit_prospective(); Ok(x) - } + }, Err(e) => { overlay.discard_prospective(); Err(Box::new(e)) - } + }, } } @@ -218,9 +218,9 @@ pub fn prove_execution( exec: &Exec, method: &str, call_data: &[u8], -) -> Result<(Vec, Vec>, ::Transaction), Box> -{ - let trie_backend = backend.try_into_trie_backend() +) -> Result<(Vec, Vec>, ::Transaction), Box> { + let trie_backend = backend + .try_into_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; let proving_backend = proving_backend::ProvingBackend::new(trie_backend); let (result, transaction) = execute(&proving_backend, overlay, exec, method, call_data)?; @@ -236,17 +236,16 @@ pub fn execution_proof_check( exec: &Exec, method: &str, call_data: &[u8], -) -> Result<(Vec, memorydb::MemoryDB), Box> -{ +) -> Result<(Vec, memorydb::MemoryDB), Box> { let backend = proving_backend::create_proof_check_backend(root.into(), proof)?; execute(&backend, overlay, exec, method, call_data) } #[cfg(test)] mod tests { - use super::*; use super::backend::InMemory; use super::ext::Ext; + use super::*; struct DummyCodeExecutor; @@ -260,7 +259,9 @@ mod tests { _method: &str, _data: &[u8], ) -> Result, Self::Error> { - Ok(vec![ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0]]) + Ok(vec![ + ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0], + ]) } } @@ -319,14 +320,24 @@ mod tests { ], }; let mut ext = Ext::new(&mut overlay, &backend); - const ROOT: [u8; 32] = hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3"); + const ROOT: [u8; 32] = + hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3"); assert_eq!(ext.storage_root(), ROOT); } #[test] fn execute_works() { - assert_eq!(execute(&trie_backend::tests::test_trie(), - &mut Default::default(), &DummyCodeExecutor, "test", &[]).unwrap().0, vec![66]); + assert_eq!( + execute( + &trie_backend::tests::test_trie(), + &mut Default::default(), + &DummyCodeExecutor, + "test", + &[] + ).unwrap() + .0, + vec![66] + ); } #[test] @@ -334,12 +345,23 @@ mod tests { // fetch execution proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let (remote_result, remote_proof, _) = prove_execution(remote_backend, - &mut Default::default(), &DummyCodeExecutor, "test", &[]).unwrap(); + let (remote_result, remote_proof, _) = prove_execution( + remote_backend, + &mut Default::default(), + &DummyCodeExecutor, + "test", + &[], + ).unwrap(); // check proof locally - let (local_result, _) = execution_proof_check(remote_root, remote_proof, - &mut Default::default(), &DummyCodeExecutor, "test", &[]).unwrap(); + let (local_result, _) = execution_proof_check( + 
remote_root, + remote_proof, + &mut Default::default(), + &DummyCodeExecutor, + "test", + &[], + ).unwrap(); // check that both results are correct assert_eq!(remote_result, vec![66]); diff --git a/substrate/state-machine/src/proving_backend.rs b/substrate/state-machine/src/proving_backend.rs index 70961adc96902..5428f31237465 100644 --- a/substrate/state-machine/src/proving_backend.rs +++ b/substrate/state-machine/src/proving_backend.rs @@ -16,13 +16,13 @@ //! Proving state machine backend. -use std::cell::RefCell; use ethereum_types::H256 as TrieH256; use hashdb::HashDB; use memorydb::MemoryDB; -use patricia_trie::{TrieDB, TrieError, Trie, Recorder}; -use trie_backend::{TrieBackend, Ephemeral}; -use {Error, ExecutionError, Backend, TryIntoTrieBackend}; +use patricia_trie::{Recorder, Trie, TrieDB, TrieError}; +use std::cell::RefCell; +use trie_backend::{Ephemeral, TrieBackend}; +use {Backend, Error, ExecutionError, TryIntoTrieBackend}; /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. @@ -43,7 +43,9 @@ impl ProvingBackend { /// Consume the backend, extracting the gathered proof in lexicographical order /// by value. pub fn extract_proof(self) -> Vec> { - self.proof_recorder.into_inner().drain() + self.proof_recorder + .into_inner() + .drain() .into_iter() .map(|n| n.data.to_vec()) .collect() @@ -56,17 +58,19 @@ impl Backend for ProvingBackend { fn storage(&self, key: &[u8]) -> Result>, Self::Error> { let mut read_overlay = MemoryDB::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let map_e = |e: Box| format!("Trie lookup error: {}", e); - let mut proof_recorder = self.proof_recorder.try_borrow_mut() + let mut proof_recorder = self + .proof_recorder + .try_borrow_mut() .expect("only fails when already borrowed; storage() is non-reentrant; qed"); - TrieDB::new(&eph, &self.backend.root()).map_err(map_e)? - .get_with(key, &mut *proof_recorder).map(|x| x.map(|val| val.to_vec())).map_err(map_e) + TrieDB::new(&eph, &self.backend.root()) + .map_err(map_e)? + .get_with(key, &mut *proof_recorder) + .map(|x| x.map(|val| val.to_vec())) + .map_err(map_e) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -78,7 +82,8 @@ impl Backend for ProvingBackend { } fn storage_root(&self, delta: I) -> ([u8; 32], MemoryDB) - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { self.backend.storage_root(delta) } @@ -91,25 +96,27 @@ impl TryIntoTrieBackend for ProvingBackend { } /// Create proof check backend. 
-pub fn create_proof_check_backend(root: TrieH256, proof: Vec>) -> Result> { +pub fn create_proof_check_backend( + root: TrieH256, + proof: Vec>, +) -> Result> { let mut db = MemoryDB::new(); for item in proof { db.insert(&item); } if !db.contains(&root) { - return Err(Box::new(ExecutionError::InvalidProof) as Box); + return Err(Box::new(ExecutionError::InvalidProof) as Box) } - Ok(TrieBackend::with_memorydb(db, root)) } #[cfg(test)] mod tests { - use backend::{InMemory}; - use trie_backend::tests::test_trie; use super::*; + use backend::InMemory; + use trie_backend::tests::test_trie; fn test_proving() -> ProvingBackend { ProvingBackend::new(test_trie()) @@ -136,7 +143,10 @@ mod tests { fn passes_throgh_backend_calls() { let trie_backend = test_trie(); let proving_backend = test_proving(); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); + assert_eq!( + trie_backend.storage(b"key").unwrap(), + proving_backend.storage(b"key").unwrap() + ); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); @@ -147,7 +157,9 @@ mod tests { #[test] fn proof_recorded_and_checked() { - let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); + let contents = (0..64) + .map(|i| (vec![i], Some(vec![i]))) + .collect::>(); let in_memory = InMemory::default(); let in_memory = in_memory.update(contents); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; diff --git a/substrate/state-machine/src/testing.rs b/substrate/state-machine/src/testing.rs index 7b85b523cd47a..0e769a011d7c6 100644 --- a/substrate/state-machine/src/testing.rs +++ b/substrate/state-machine/src/testing.rs @@ -16,8 +16,8 @@ //! Test implementation for Externalities. -use std::collections::HashMap; use super::Externalities; +use std::collections::HashMap; use triehash::trie_root; /// Simple HashMap based Externalities impl. @@ -30,18 +30,22 @@ impl Externalities for TestExternalities { fn place_storage(&mut self, key: Vec, maybe_value: Option>) { match maybe_value { - Some(value) => { self.insert(key, value); } - None => { self.remove(&key); } + Some(value) => { + self.insert(key, value); + }, + None => { + self.remove(&key); + }, } } fn clear_prefix(&mut self, prefix: &[u8]) { - self.retain(|key, _| - !key.starts_with(prefix) - ) + self.retain(|key, _| !key.starts_with(prefix)) } - fn chain_id(&self) -> u64 { 42 } + fn chain_id(&self) -> u64 { + 42 + } fn storage_root(&mut self) -> [u8; 32] { trie_root(self.clone()).0 @@ -58,7 +62,8 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3"); + const ROOT: [u8; 32] = + hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3"); assert_eq!(ext.storage_root(), ROOT); } } diff --git a/substrate/state-machine/src/trie_backend.rs b/substrate/state-machine/src/trie_backend.rs index 628bb04fbf941..90b3d6daafd60 100644 --- a/substrate/state-machine/src/trie_backend.rs +++ b/substrate/state-machine/src/trie_backend.rs @@ -16,14 +16,14 @@ //! Trie-based state machine backend. 
-use std::collections::HashMap; -use std::sync::Arc; -use hashdb::HashDB; -use memorydb::MemoryDB; -use patricia_trie::{TrieDB, TrieDBMut, TrieError, Trie, TrieMut}; -use {Backend}; pub use ethereum_types::H256 as TrieH256; pub use hashdb::DBValue; +use hashdb::HashDB; +use memorydb::MemoryDB; +use patricia_trie::{Trie, TrieDB, TrieDBMut, TrieError, TrieMut}; +use std::collections::HashMap; +use std::sync::Arc; +use Backend; /// Backend trie storage trait. pub trait Storage: Send + Sync { @@ -95,8 +95,11 @@ impl Backend for TrieBackend { let map_e = |e: Box| format!("Trie lookup error: {}", e); - TrieDB::new(&eph, &self.root).map_err(map_e)? - .get(key).map(|x| x.map(|val| val.to_vec())).map_err(map_e) + TrieDB::new(&eph, &self.root) + .map_err(map_e)? + .get(key) + .map(|x| x.map(|val| val.to_vec())) + .map_err(map_e) } fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { @@ -116,7 +119,7 @@ impl Backend for TrieBackend { let (key, _) = x?; if !key.starts_with(prefix) { - break; + break } f(&key); @@ -153,12 +156,13 @@ impl Backend for TrieBackend { Err(e) => { debug!(target: "trie", "Error extracting trie values: {}", e); Vec::new() - } + }, } } fn storage_root(&self, delta: I) -> ([u8; 32], MemoryDB) - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { let mut write_overlay = MemoryDB::default(); let mut root = self.root; @@ -168,7 +172,8 @@ impl Backend for TrieBackend { overlay: &mut write_overlay, }; - let mut trie = TrieDBMut::from_existing(&mut eph, &mut root).expect("prior state root to exist"); // TODO: handle gracefully + let mut trie = + TrieDBMut::from_existing(&mut eph, &mut root).expect("prior state root to exist"); // TODO: handle gracefully for (key, change) in delta { let result = match change { Some(val) => trie.insert(&key, &val), @@ -198,10 +203,7 @@ pub struct Ephemeral<'a> { impl<'a> Ephemeral<'a> { pub fn new(storage: &'a TrieBackendStorage, overlay: &'a mut MemoryDB) -> Self { - Ephemeral { - storage, - overlay, - } + Ephemeral { storage, overlay } } } @@ -212,13 +214,11 @@ impl<'a> HashDB for Ephemeral<'a> { fn get(&self, key: &TrieH256) -> Option { match self.overlay.raw(key) { - Some((val, i)) => { - if i <= 0 { - None - } else { - Some(val) - } - } + Some((val, i)) => if i <= 0 { + None + } else { + Some(val) + }, None => match self.storage.get(&key) { Ok(x) => x, Err(e) => { @@ -258,10 +258,8 @@ impl TrieBackendStorage { pub fn get(&self, key: &TrieH256) -> Result, String> { match *self { TrieBackendStorage::Storage(ref db) => - db.get(key) - .map_err(|e| format!("Trie lookup error: {}", e)), - TrieBackendStorage::MemoryDb(ref db) => - Ok(db.get(key)), + db.get(key).map_err(|e| format!("Trie lookup error: {}", e)), + TrieBackendStorage::MemoryDb(ref db) => Ok(db.get(key)), } } } @@ -291,7 +289,10 @@ pub mod tests { #[test] fn read_from_storage_returns_some() { - assert_eq!(test_trie().storage(b"key").unwrap(), Some(b"value".to_vec())); + assert_eq!( + test_trie().storage(b"key").unwrap(), + Some(b"value".to_vec()) + ); } #[test] @@ -306,7 +307,11 @@ pub mod tests { #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::with_memorydb(MemoryDB::new(), Default::default()).pairs().is_empty()); + assert!( + TrieBackend::with_memorydb(MemoryDB::new(), Default::default()) + .pairs() + .is_empty() + ); } #[test] @@ -316,12 +321,19 @@ pub mod tests { #[test] fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); + assert!( + test_trie() + 
.storage_root(::std::iter::empty()) + .1 + .drain() + .is_empty() + ); } #[test] fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); + let (new_root, mut tx) = + test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); assert!(!tx.drain().is_empty()); assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); } diff --git a/substrate/telemetry/src/lib.rs b/substrate/telemetry/src/lib.rs index 335a93ee68458..2c1a491db10c5 100644 --- a/substrate/telemetry/src/lib.rs +++ b/substrate/telemetry/src/lib.rs @@ -15,26 +15,26 @@ // along with Substrate. If not, see . //! Telemtetry utils. -//! +//! //! `telemetry` macro be used from whereever in the Substrate codebase //! in order to send real-time logging information to the telemetry //! server (if there is one). We use the async drain adapter of `slog` //! so that the logging thread doesn't get held up at all. extern crate parking_lot; -extern crate websocket as ws; extern crate slog_async; extern crate slog_json; +extern crate websocket as ws; #[macro_use] extern crate log; #[macro_use(o, kv)] extern crate slog; extern crate slog_scope; -use std::io; use parking_lot::Mutex; use slog::Drain; pub use slog_scope::with_logger; +use std::io; /// Configuration for telemetry. pub struct TelemetryConfig { @@ -48,17 +48,19 @@ pub struct TelemetryConfig { pub fn init_telemetry(config: TelemetryConfig) -> slog_scope::GlobalLoggerGuard { let log = slog::Logger::root( slog_async::Async::new( - slog_json::Json::default( - TelemetryWriter { - buffer: vec![], - out: Mutex::new( - ws::ClientBuilder::new(&config.url).ok().and_then(|mut x| x.connect(None).ok()) - ), - config, - first_time: true, // ensures that on_connect will be called. - } - ).fuse() - ).build().fuse(), o!() + slog_json::Json::default(TelemetryWriter { + buffer: vec![], + out: Mutex::new( + ws::ClientBuilder::new(&config.url) + .ok() + .and_then(|mut x| x.connect(None).ok()), + ), + config, + first_time: true, // ensures that on_connect will be called. + }).fuse(), + ).build() + .fuse(), + o!(), ); slog_scope::set_global_logger(log) } @@ -85,7 +87,9 @@ impl TelemetryWriter { } let mut client = self.out.lock(); if client.is_none() { - *client = ws::ClientBuilder::new(&self.config.url).ok().and_then(|mut x| x.connect(None).ok()); + *client = ws::ClientBuilder::new(&self.config.url) + .ok() + .and_then(|mut x| x.connect(None).ok()); drop(client); (self.config.on_connect)(); } @@ -109,8 +113,12 @@ impl io::Write for TelemetryWriter { let socket_closed = if let Some(ref mut socket) = *l { if let Ok(s) = ::std::str::from_utf8(&self.buffer[..]) { socket.send_message(&ws::Message::text(s)).is_err() - } else { false } - } else { false }; + } else { + false + } + } else { + false + }; if socket_closed { *l = None; } diff --git a/substrate/test-client/src/client_ext.rs b/substrate/test-client/src/client_ext.rs index 8d1fa946ac978..a14d0749473dc 100644 --- a/substrate/test-client/src/client_ext.rs +++ b/substrate/test-client/src/client_ext.rs @@ -16,12 +16,12 @@ //! Client extension for tests. 
+use bft; use client::{self, Client}; use keyring::Keyring; -use runtime_primitives::StorageMap; -use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; use runtime; -use bft; +use runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; +use runtime_primitives::StorageMap; use {Backend, Executor, NativeExecutor}; /// Extension trait for a test client. @@ -30,7 +30,11 @@ pub trait TestClient { fn new_for_tests() -> Self; /// Justify and import block to the chain. - fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()>; + fn justify_and_import( + &self, + origin: client::BlockOrigin, + block: runtime::Block, + ) -> client::error::Result<()>; /// Returns hash of the genesis block. fn genesis_hash(&self) -> runtime::Hash; @@ -41,7 +45,11 @@ impl TestClient for Client { client::new_in_mem(NativeExecutor::new(), genesis_storage()).unwrap() } - fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()> { + fn justify_and_import( + &self, + origin: client::BlockOrigin, + block: runtime::Block, + ) -> client::error::Result<()> { let justification = fake_justify(&block.header); let justified = self.check_justification(block.header, justification)?; self.import_block(origin, justified, Some(block.extrinsics))?; @@ -70,33 +78,39 @@ fn fake_justify(header: &runtime::Header) -> bft::UncheckedJustification( - bft::generic::Vote::Commit(1, hash).into(), - key, - header.parent_hash - ); - - match msg { - bft::generic::LocalizedMessage::Vote(vote) => vote.signature, - _ => panic!("signing vote leads to signed vote"), - } - }).collect(), + signatures: authorities + .iter() + .map(|key| { + let msg = bft::sign_message::( + bft::generic::Vote::Commit(1, hash).into(), + key, + header.parent_hash, + ); + + match msg { + bft::generic::LocalizedMessage::Vote(vote) => vote.signature, + _ => panic!("signing vote leads to signed vote"), + } + }) + .collect(), round_number: 1, } } fn genesis_config() -> GenesisConfig { - GenesisConfig::new_simple(vec![ - Keyring::Alice.to_raw_public().into(), - Keyring::Bob.to_raw_public().into(), - Keyring::Charlie.to_raw_public().into(), - ], 1000) + GenesisConfig::new_simple( + vec![ + Keyring::Alice.to_raw_public().into(), + Keyring::Bob.to_raw_public().into(), + Keyring::Charlie.to_raw_public().into(), + ], + 1000, + ) } fn genesis_storage() -> StorageMap { - let mut storage = genesis_config().genesis_map(); - let block: runtime::Block = client::genesis::construct_genesis_block(&storage); - storage.extend(additional_storage_with_genesis(&block)); - storage + let mut storage = genesis_config().genesis_map(); + let block: runtime::Block = client::genesis::construct_genesis_block(&storage); + storage.extend(additional_storage_with_genesis(&block)); + storage } diff --git a/substrate/test-client/src/lib.rs b/substrate/test-client/src/lib.rs index fed17e46cda72..82507307bd19a 100644 --- a/substrate/test-client/src/lib.rs +++ b/substrate/test-client/src/lib.rs @@ -22,12 +22,13 @@ extern crate substrate_bft as bft; extern crate substrate_codec as codec; extern crate substrate_keyring as keyring; extern crate substrate_primitives as primitives; -extern crate substrate_runtime_support as runtime_support; extern crate substrate_runtime_primitives as runtime_primitives; -#[macro_use] extern crate substrate_executor as executor; +extern crate substrate_runtime_support as runtime_support; +#[macro_use] +extern crate substrate_executor as executor; -pub 
diff --git a/substrate/test-client/src/lib.rs b/substrate/test-client/src/lib.rs
index fed17e46cda72..82507307bd19a 100644
--- a/substrate/test-client/src/lib.rs
+++ b/substrate/test-client/src/lib.rs
@@ -22,12 +22,13 @@ extern crate substrate_bft as bft;
 extern crate substrate_codec as codec;
 extern crate substrate_keyring as keyring;
 extern crate substrate_primitives as primitives;
-extern crate substrate_runtime_support as runtime_support;
 extern crate substrate_runtime_primitives as runtime_primitives;
-#[macro_use] extern crate substrate_executor as executor;
+extern crate substrate_runtime_support as runtime_support;
+#[macro_use]
+extern crate substrate_executor as executor;
 
-pub extern crate substrate_test_runtime as runtime;
 pub extern crate substrate_client as client;
+pub extern crate substrate_test_runtime as runtime;
 
 mod client_ext;
 
diff --git a/substrate/test-runtime/src/genesismap.rs b/substrate/test-runtime/src/genesismap.rs
index 10bf1751f4c99..b66009f33d003 100644
--- a/substrate/test-runtime/src/genesismap.rs
+++ b/substrate/test-runtime/src/genesismap.rs
@@ -16,11 +16,11 @@
 
 //! Tool for creating the genesis block.
 
-use std::collections::HashMap;
-use runtime_io::twox_128;
-use codec::{KeyedVec, Joiner};
+use codec::{Joiner, KeyedVec};
 use primitives::AuthorityId;
+use runtime_io::twox_128;
 use runtime_primitives::traits::Block;
+use std::collections::HashMap;
 
 /// Configuration of a general Substrate test genesis block.
 pub struct GenesisConfig {
@@ -38,16 +38,24 @@ impl GenesisConfig {
 
 	pub fn genesis_map(&self) -> HashMap<Vec<u8>, Vec<u8>> {
 		let wasm_runtime = include_bytes!("../wasm/genesis.wasm").to_vec();
-		self.balances.iter()
+		self.balances
+			.iter()
 			.map(|&(account, balance)| (account.to_keyed_vec(b"balance:"), vec![].and(&balance)))
 			.map(|(k, v)| (twox_128(&k[..])[..].to_vec(), v.to_vec()))
-			.chain(vec![
-				(b":code"[..].into(), wasm_runtime),
-				(b":auth:len"[..].into(), vec![].and(&(self.authorities.len() as u32))),
-			].into_iter())
-			.chain(self.authorities.iter()
-				.enumerate()
-				.map(|(i, account)| ((i as u32).to_keyed_vec(b":auth:"), vec![].and(account)))
+			.chain(
+				vec![
+					(b":code"[..].into(), wasm_runtime),
+					(
+						b":auth:len"[..].into(),
+						vec![].and(&(self.authorities.len() as u32)),
+					),
+				].into_iter(),
+			)
+			.chain(
+				self.authorities
+					.iter()
+					.enumerate()
+					.map(|(i, account)| ((i as u32).to_keyed_vec(b":auth:"), vec![].and(account))),
 			)
 			.collect()
 	}
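For readers who want to see the shape of genesis_map outside the diff context, the sketch below reproduces the same three-part chain (hashed balance entries, fixed :code/:auth:len keys, enumerated :auth: entries) with std types only. This is an editor's sketch, not part of the patch: fake_hash, the little-endian encodings and the fixed-size account ids are stand-ins for twox_128, the codec helpers and AuthorityId.

use std::collections::HashMap;

// Stand-in for twox_128: just copies the key.
fn fake_hash(key: &[u8]) -> Vec<u8> {
	key.to_vec()
}

fn genesis_map_sketch(
	balances: &[([u8; 4], u64)],
	authorities: &[[u8; 4]],
) -> HashMap<Vec<u8>, Vec<u8>> {
	balances
		.iter()
		.map(|&(account, balance)| {
			let mut key = b"balance:".to_vec();
			key.extend_from_slice(&account);
			(key, balance.to_le_bytes().to_vec())
		})
		.map(|(k, v)| (fake_hash(&k), v))
		.chain(
			vec![
				(b":code".to_vec(), vec![0u8; 4]),
				(
					b":auth:len".to_vec(),
					(authorities.len() as u32).to_le_bytes().to_vec(),
				),
			].into_iter(),
		)
		.chain(
			authorities
				.iter()
				.enumerate()
				.map(|(i, account)| {
					let mut key = b":auth:".to_vec();
					key.extend_from_slice(&(i as u32).to_le_bytes());
					(key, account.to_vec())
				}),
		)
		.collect()
}

fn main() {
	let map = genesis_map_sketch(&[([1, 1, 1, 1], 1000)], &[[9, 9, 9, 9]]);
	println!("{} genesis entries", map.len());
}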
diff --git a/substrate/test-runtime/src/lib.rs b/substrate/test-runtime/src/lib.rs
index 1b07035e966e8..8590c1eae1c39 100644
--- a/substrate/test-runtime/src/lib.rs
+++ b/substrate/test-runtime/src/lib.rs
@@ -18,9 +18,9 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-extern crate substrate_runtime_std as rstd;
 extern crate substrate_codec as codec;
 extern crate substrate_runtime_primitives as runtime_primitives;
+extern crate substrate_runtime_std as rstd;
 
 #[cfg(feature = "std")]
 extern crate serde;
@@ -46,17 +46,17 @@ extern crate substrate_runtime_io as runtime_io;
 #[macro_use]
 extern crate substrate_runtime_version as runtime_version;
 
-
-#[cfg(feature = "std")] pub mod genesismap;
+#[cfg(feature = "std")]
+pub mod genesismap;
 pub mod system;
 
-use rstd::prelude::*;
 use codec::Slicable;
+use rstd::prelude::*;
 
-use runtime_primitives::traits::{BlindCheckable, BlakeTwo256};
+pub use primitives::hash::H256;
+use runtime_primitives::traits::{BlakeTwo256, BlindCheckable};
 use runtime_primitives::Ed25519Signature;
 use runtime_version::RuntimeVersion;
-pub use primitives::hash::H256;
 
 /// Test runtime version.
 pub const VERSION: RuntimeVersion = RuntimeVersion {
@@ -92,7 +92,12 @@ impl Slicable for Transfer {
 	}
 
 	fn decode<I: Input>(input: &mut I) -> Option<Self> {
-		Slicable::decode(input).map(|(from, to, amount, nonce)| Transfer { from, to, amount, nonce })
+		Slicable::decode(input).map(|(from, to, amount, nonce)| Transfer {
+			from,
+			to,
+			amount,
+			nonce,
+		})
 	}
 }
 
@@ -113,7 +118,10 @@ impl Slicable for Extrinsic {
 	}
 
 	fn decode<I: Input>(input: &mut I) -> Option<Self> {
-		Slicable::decode(input).map(|(transfer, signature)| Extrinsic { transfer, signature })
+		Slicable::decode(input).map(|(transfer, signature)| Extrinsic {
+			transfer,
+			signature,
+		})
 	}
 }
 
@@ -125,7 +133,11 @@ impl BlindCheckable for Extrinsic {
 		&self.transfer.from
 	}
 	fn check(self) -> Result<Self, &'static str> {
-		if ::runtime_primitives::verify_encoded_lazy(&self.signature, &self.transfer, &self.transfer.from) {
+		if ::runtime_primitives::verify_encoded_lazy(
+			&self.signature,
+			&self.transfer,
+			&self.transfer.from,
+		) {
 			Ok(self)
 		} else {
 			Err("bad signature")
@@ -155,7 +167,11 @@ pub fn run_tests(mut input: &[u8]) -> Vec<u8> {
 	print("run_tests...");
 	let block = Block::decode(&mut input).unwrap();
 	print("deserialised block.");
-	let stxs = block.extrinsics.iter().map(Slicable::encode).collect::<Vec<_>>();
+	let stxs = block
+		.extrinsics
+		.iter()
+		.map(Slicable::encode)
+		.collect::<Vec<_>>();
 	print("reserialised transactions.");
 	[stxs.len() as u8].encode()
 }
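The Transfer and Extrinsic decode hunks above all follow the same pattern: decode a tuple, then build the struct with field-init shorthand, which rustfmt now lays out one field per line. Below is an editor's dependency-free sketch of that pattern, with a plain Option in place of the Slicable/Input machinery; the field types are illustrative only.

#[derive(Debug)]
struct Transfer {
	from: u64,
	to: u64,
	amount: u64,
	nonce: u64,
}

fn decode(raw: Option<(u64, u64, u64, u64)>) -> Option<Transfer> {
	// Each decoded tuple element maps straight onto the struct field of the
	// same name; once the literal no longer fits on one line it is laid out
	// one field per line with trailing commas.
	raw.map(|(from, to, amount, nonce)| Transfer {
		from,
		to,
		amount,
		nonce,
	})
}

fn main() {
	println!("{:?}", decode(Some((1, 2, 69, 0))));
}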
diff --git a/substrate/test-runtime/src/system.rs b/substrate/test-runtime/src/system.rs
index f549954aa8a70..baa1db8680427 100644
--- a/substrate/test-runtime/src/system.rs
+++ b/substrate/test-runtime/src/system.rs
@@ -17,17 +17,17 @@
 //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code
 //! and depositing logs.
 
-use rstd::prelude::*;
-use runtime_io::{storage_root, enumerated_trie_root};
-use runtime_support::storage::{self, StorageValue, StorageMap};
-use runtime_primitives::traits::{Hashing, BlakeTwo256};
+use super::{AccountId, Block, BlockNumber, Extrinsic, H256 as Hash, Header};
 use codec::{KeyedVec, Slicable};
-use super::{AccountId, BlockNumber, Extrinsic, H256 as Hash, Block, Header};
+use rstd::prelude::*;
+use runtime_io::{enumerated_trie_root, storage_root};
+use runtime_primitives::traits::{BlakeTwo256, Hashing};
+use runtime_support::storage::{self, StorageMap, StorageValue};
 
 const NONCE_OF: &[u8] = b"nonce:";
 const BALANCE_OF: &[u8] = b"balance:";
-const AUTHORITY_AT: &'static[u8] = b":auth:";
-const AUTHORITY_COUNT: &'static[u8] = b":auth:len";
+const AUTHORITY_AT: &'static [u8] = b":auth:";
+const AUTHORITY_COUNT: &'static [u8] = b":auth:len";
 
 storage_items! {
 	ExtrinsicIndex: b"sys:xti" => required u32;
@@ -47,9 +47,13 @@ pub fn nonce_of(who: AccountId) -> u64 {
 
 /// Get authorities ar given block.
 pub fn authorities() -> Vec<::primitives::AuthorityId> {
-	let len: u32 = storage::unhashed::get(AUTHORITY_COUNT).expect("There are always authorities in test-runtime");
+	let len: u32 = storage::unhashed::get(AUTHORITY_COUNT)
+		.expect("There are always authorities in test-runtime");
 	(0..len)
-		.map(|i| storage::unhashed::get(&i.to_keyed_vec(AUTHORITY_AT)).expect("Authority is properly encoded in test-runtime"))
+		.map(|i| {
+			storage::unhashed::get(&i.to_keyed_vec(AUTHORITY_AT))
+				.expect("Authority is properly encoded in test-runtime")
+		})
 		.collect()
 }
 
@@ -65,19 +69,32 @@ pub fn execute_block(block: Block) {
 	let ref header = block.header;
 
 	// check transaction trie root represents the transactions.
-	let txs = block.extrinsics.iter().map(Slicable::encode).collect::<Vec<_>>();
+	let txs = block
+		.extrinsics
+		.iter()
+		.map(Slicable::encode)
+		.collect::<Vec<_>>();
 	let txs = txs.iter().map(Vec::as_slice).collect::<Vec<_>>();
 	let txs_root = enumerated_trie_root(&txs).into();
 	info_expect_equal_hash(&header.extrinsics_root, &txs_root);
-	assert!(header.extrinsics_root == txs_root, "Transaction trie root must be valid.");
+	assert!(
+		header.extrinsics_root == txs_root,
+		"Transaction trie root must be valid."
+	);
 
 	// execute transactions
-	block.extrinsics.iter().for_each(execute_transaction_backend);
+	block
+		.extrinsics
+		.iter()
+		.for_each(execute_transaction_backend);
 
 	// check storage root.
 	let storage_root = storage_root().into();
 	info_expect_equal_hash(&header.state_root, &storage_root);
-	assert!(header.state_root == storage_root, "Storage root must match that calculated.");
+	assert!(
+		header.state_root == storage_root,
+		"Storage root must match that calculated."
+	);
 }
 
 /// Execute a transaction outside of the block execution function.
@@ -123,7 +140,10 @@ fn execute_transaction_backend(utx: &Extrinsic) {
 	// check nonce
 	let nonce_key = tx.from.to_keyed_vec(NONCE_OF);
 	let expected_nonce: u64 = storage::get_or(&nonce_key, 0);
-	assert!(tx.nonce == expected_nonce, "All transactions should have the correct nonce");
+	assert!(
+		tx.nonce == expected_nonce,
+		"All transactions should have the correct nonce"
+	);
 
 	// increment nonce in storage
 	storage::put(&nonce_key, &(expected_nonce + 1));
@@ -133,7 +153,10 @@ fn execute_transaction_backend(utx: &Extrinsic) {
 	let from_balance: u64 = storage::get_or(&from_balance_key, 0);
 
 	// enact transfer
-	assert!(tx.amount <= from_balance, "All transactions should transfer at most the sender balance");
+	assert!(
+		tx.amount <= from_balance,
+		"All transactions should transfer at most the sender balance"
+	);
 	let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF);
 	let to_balance: u64 = storage::get_or(&to_balance_key, 0);
 	storage::put(&from_balance_key, &(from_balance - tx.amount));
@@ -144,7 +167,11 @@ fn info_expect_equal_hash(given: &Hash, expected: &Hash) {
 	use primitives::hexdisplay::HexDisplay;
 
 	if given != expected {
-		println!("Hash: given={}, expected={}", HexDisplay::from(&given.0), HexDisplay::from(&expected.0));
+		println!(
+			"Hash: given={}, expected={}",
+			HexDisplay::from(&given.0),
+			HexDisplay::from(&expected.0)
+		);
 	}
 }
 
@@ -161,10 +188,10 @@ fn info_expect_equal_hash(given: &Hash, expected: &Hash) {
 mod tests {
 	use super::*;
 
-	use runtime_io::{with_externalities, twox_128, TestExternalities};
 	use codec::{Joiner, KeyedVec};
 	use keyring::Keyring;
-	use ::{Header, Digest, Extrinsic, Transfer};
+	use runtime_io::{twox_128, with_externalities, TestExternalities};
+	use {Digest, Extrinsic, Header, Transfer};
 
 	fn new_test_ext() -> TestExternalities {
 		map![
@@ -178,8 +205,14 @@ mod tests {
 	}
 
 	fn construct_signed_tx(tx: Transfer) -> Extrinsic {
-		let signature = Keyring::from_raw_public(tx.from.0).unwrap().sign(&tx.encode()).into();
-		Extrinsic { transfer: tx, signature }
+		let signature = Keyring::from_raw_public(tx.from.0)
+			.unwrap()
+			.sign(&tx.encode())
+			.into();
+		Extrinsic {
+			transfer: tx,
+			signature,
+		}
 	}
 
 	#[test]
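The two execute_transaction_backend hunks above only re-wrap the assert!s, but the checks they guard are easy to lose in the diff noise: the nonce must match, the sender balance must cover the amount, and then both balances are updated. Below is an editor's std-only sketch of that bookkeeping; the in-memory maps and string account ids are stand-ins for runtime storage and AccountId and are purely illustrative.

use std::collections::HashMap;

struct Transfer {
	from: &'static str,
	to: &'static str,
	amount: u64,
	nonce: u64,
}

fn apply(
	balances: &mut HashMap<&'static str, u64>,
	nonces: &mut HashMap<&'static str, u64>,
	tx: &Transfer,
) {
	// check nonce
	let expected_nonce = *nonces.get(tx.from).unwrap_or(&0);
	assert!(
		tx.nonce == expected_nonce,
		"All transactions should have the correct nonce"
	);
	nonces.insert(tx.from, expected_nonce + 1);

	// check sender balance, then enact the transfer
	let from_balance = *balances.get(tx.from).unwrap_or(&0);
	assert!(
		tx.amount <= from_balance,
		"All transactions should transfer at most the sender balance"
	);
	let to_balance = *balances.get(tx.to).unwrap_or(&0);
	balances.insert(tx.from, from_balance - tx.amount);
	balances.insert(tx.to, to_balance + tx.amount);
}

fn main() {
	let mut balances = HashMap::from([("alice", 111), ("bob", 0)]);
	let mut nonces = HashMap::new();
	apply(&mut balances, &mut nonces, &Transfer { from: "alice", to: "bob", amount: 69, nonce: 0 });
	println!("{:?}", balances);
}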
@@ -189,9 +222,12 @@ mod tests {
 		let h = Header {
 			parent_hash: [69u8; 32].into(),
 			number: 1,
-			state_root: hex!("97dfcd1f8cbf8845fcb544f89332f1a94c1137f7d1b199ef0b0a6ed217015c3e").into(),
-			extrinsics_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
-			digest: Digest { logs: vec![], },
+			state_root: hex!("97dfcd1f8cbf8845fcb544f89332f1a94c1137f7d1b199ef0b0a6ed217015c3e")
+				.into(),
+			extrinsics_root: hex!(
+				"56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+			).into(),
+			digest: Digest { logs: vec![] },
 		};
 
 		let b = Block {
@@ -217,18 +253,20 @@ mod tests {
 			header: Header {
 				parent_hash: [69u8; 32].into(),
 				number: 1,
-				state_root: hex!("0dd8210adaf581464cc68555814a787ed491f8c608d0a0dbbf2208a6d44190b1").into(),
-				extrinsics_root: hex!("951508f2cc0071500a74765ab0fb2f280fdcdd329d5f989dda675010adee99d6").into(),
-				digest: Digest { logs: vec![], },
+				state_root: hex!(
+					"0dd8210adaf581464cc68555814a787ed491f8c608d0a0dbbf2208a6d44190b1"
+				).into(),
+				extrinsics_root: hex!(
+					"951508f2cc0071500a74765ab0fb2f280fdcdd329d5f989dda675010adee99d6"
+				).into(),
+				digest: Digest { logs: vec![] },
 			},
-			extrinsics: vec![
-				construct_signed_tx(Transfer {
-					from: Keyring::Alice.to_raw_public().into(),
-					to: Keyring::Bob.to_raw_public().into(),
-					amount: 69,
-					nonce: 0,
-				})
-			],
+			extrinsics: vec![construct_signed_tx(Transfer {
+				from: Keyring::Alice.to_raw_public().into(),
+				to: Keyring::Bob.to_raw_public().into(),
+				amount: 69,
+				nonce: 0,
+			})],
 		};
 
 		with_externalities(&mut t, || {
@@ -242,9 +280,13 @@ mod tests {
 			header: Header {
 				parent_hash: b.header.hash(),
 				number: 2,
-				state_root: hex!("c93f2fd494c386fa32ee76b6198a7ccf5db12c02c3a79755fd2d4646ec2bf8d7").into(),
-				extrinsics_root: hex!("3563642676d7e042c894eedc579ba2d6eeedf9a6c66d9d557599effc9f674372").into(),
-				digest: Digest { logs: vec![], },
+				state_root: hex!(
+					"c93f2fd494c386fa32ee76b6198a7ccf5db12c02c3a79755fd2d4646ec2bf8d7"
+				).into(),
+				extrinsics_root: hex!(
+					"3563642676d7e042c894eedc579ba2d6eeedf9a6c66d9d557599effc9f674372"
+				).into(),
+				digest: Digest { logs: vec![] },
 			},
 			extrinsics: vec![
 				construct_signed_tx(Transfer {