From 18001f62d95923b26d6d902f57c3057ce8347862 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Tue, 26 Nov 2019 15:38:21 +0900 Subject: [PATCH 01/52] Introduce on_open_block `on_open_block` is called when the block is created, before processing any transactions included in the block. --- core/src/block.rs | 5 +++++ core/src/consensus/mod.rs | 5 +++++ core/src/consensus/tendermint/engine.rs | 9 +++++++++ core/src/miner/miner.rs | 1 + 4 files changed, 20 insertions(+) diff --git a/core/src/block.rs b/core/src/block.rs index 9a6584d090..ab35efe3cf 100644 --- a/core/src/block.rs +++ b/core/src/block.rs @@ -313,6 +313,10 @@ impl<'x> OpenBlock<'x> { self.block.header.set_seal(seal); Ok(()) } + + pub fn inner_mut(&mut self) -> &mut ExecutedBlock { + &mut self.block + } } /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields. @@ -492,6 +496,7 @@ pub fn enact( let mut b = OpenBlock::try_new(engine, db, parent, Address::default(), vec![])?; b.populate_from(header); + engine.on_open_block(b.inner_mut())?; b.push_transactions(transactions, client, parent.number(), parent.timestamp())?; let term_common_params = client.term_common_params(BlockId::Hash(*header.parent_hash())); diff --git a/core/src/consensus/mod.rs b/core/src/consensus/mod.rs index c0d78b1485..329e9e2420 100644 --- a/core/src/consensus/mod.rs +++ b/core/src/consensus/mod.rs @@ -221,6 +221,11 @@ pub trait ConsensusEngine: Sync + Send { /// Stops any services that the may hold the Engine and makes it safe to drop. fn stop(&self) {} + /// Block transformation functions, before the transactions. + fn on_open_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + Ok(()) + } + /// Block transformation functions, after the transactions. fn on_close_block( &self, diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs index 3ea3282c7a..4ea4c9396b 100644 --- a/core/src/consensus/tendermint/engine.rs +++ b/core/src/consensus/tendermint/engine.rs @@ -134,6 +134,15 @@ impl ConsensusEngine for Tendermint { fn stop(&self) {} + /// Block transformation functions, before the transactions. + fn on_open_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + let metadata = block.state().metadata()?.expect("Metadata must exist"); + if block.header().number() == metadata.last_term_finished_block_num() + 1 { + // FIXME: on_term_open + } + Ok(()) + } + fn on_close_block( &self, block: &mut ExecutedBlock, diff --git a/core/src/miner/miner.rs b/core/src/miner/miner.rs index 08a2ba0a99..5b7c42d865 100644 --- a/core/src/miner/miner.rs +++ b/core/src/miner/miner.rs @@ -527,6 +527,7 @@ impl Miner { return Ok(None) } } + self.engine.on_open_block(open_block.inner_mut())?; let mut invalid_transactions = Vec::new(); From 3c8d9f0f8e8896cb61c4675a4824606027aecb49 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Thu, 7 Nov 2019 17:36:05 +0900 Subject: [PATCH 02/52] Implement TrieDB::is_complete --- util/merkle/src/triedb.rs | 64 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/util/merkle/src/triedb.rs b/util/merkle/src/triedb.rs index d10a4f00ca..d03939a4cd 100644 --- a/util/merkle/src/triedb.rs +++ b/util/merkle/src/triedb.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
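The completeness check this patch adds is a depth-first walk over node hashes: a trie is complete when every node reachable from the root is present in the backing database (an empty trie, `BLAKE_NULL_RLP`, is trivially complete, and an undecodable node counts as missing). A minimal stand-alone sketch of the same idea, with a plain `HashMap` and a toy `Node` standing in for the real `HashDB` and RLP nodes:

    use std::collections::HashMap;

    // Toy stand-ins for the real RLP node and backing store.
    enum Node {
        Leaf,
        Branch(Vec<[u8; 32]>), // child hashes
    }

    fn is_complete(db: &HashMap<[u8; 32], Node>, root: &[u8; 32]) -> bool {
        match db.get(root) {
            None => false, // a referenced node is missing from the store
            Some(Node::Leaf) => true,
            Some(Node::Branch(children)) => children.iter().all(|c| is_complete(db, c)),
        }
    }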
-use ccrypto::blake256; +use ccrypto::{blake256, BLAKE_NULL_RLP}; use hashdb::HashDB; use primitives::H256; @@ -105,6 +105,26 @@ impl<'db> TrieDB<'db> { None => Ok(None), } } + + /// Check if every leaf of the trie exists + pub fn is_complete(&self) -> bool { + *self.root == BLAKE_NULL_RLP || self.is_complete_aux(self.root) + } + + /// Check if every leaf of the trie starting from `hash` exists + fn is_complete_aux(&self, hash: &H256) -> bool { + if let Some(node_rlp) = self.db.get(hash) { + match RlpNode::decoded(node_rlp.as_ref()) { + Some(RlpNode::Branch(.., children)) => { + children.iter().flatten().all(|child| self.is_complete_aux(child)) + } + Some(RlpNode::Leaf(..)) => true, + None => false, + } + } else { + false + } + } } impl<'db> Trie for TrieDB<'db> { @@ -126,6 +146,19 @@ mod tests { use crate::*; use memorydb::*; + fn delete_any_child(db: &mut MemoryDB, root: &H256) { + let node_rlp = db.get(root).unwrap(); + match RlpNode::decoded(&node_rlp).unwrap() { + RlpNode::Leaf(..) => { + db.remove(root); + } + RlpNode::Branch(.., children) => { + let first_child = children.iter().find(|c| c.is_some()).unwrap().unwrap(); + db.remove(&first_child); + } + } + } + #[test] fn get() { let mut memdb = MemoryDB::new(); @@ -141,4 +174,33 @@ mod tests { assert_eq!(t.get(b"B"), Ok(Some(b"ABCBA".to_vec()))); assert_eq!(t.get(b"C"), Ok(None)); } + + #[test] + fn is_complete_success() { + let mut memdb = MemoryDB::new(); + let mut root = H256::new(); + { + let mut t = TrieDBMut::new(&mut memdb, &mut root); + t.insert(b"A", b"ABC").unwrap(); + t.insert(b"B", b"ABCBA").unwrap(); + } + + let t = TrieDB::try_new(&memdb, &root).unwrap(); + assert!(t.is_complete()); + } + + #[test] + fn is_complete_fail() { + let mut memdb = MemoryDB::new(); + let mut root = H256::new(); + { + let mut t = TrieDBMut::new(&mut memdb, &mut root); + t.insert(b"A", b"ABC").unwrap(); + t.insert(b"B", b"ABCBA").unwrap(); + } + delete_any_child(&mut memdb, &root); + + let t = TrieDB::try_new(&memdb, &root).unwrap(); + assert!(!t.is_complete()); + } } From 1dff7e26673e94fd239adea436a63e67dae5690a Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 19 Nov 2019 16:35:15 +0900 Subject: [PATCH 03/52] Add complete_register to ConsensusEngine This is a work-around to the currently order-dependant and non-deterministic initialization of Tendermint ConsensusEngine/Worker. --- codechain/run_node.rs | 1 + core/src/consensus/mod.rs | 2 ++ core/src/consensus/tendermint/engine.rs | 10 ++++++---- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/codechain/run_node.rs b/codechain/run_node.rs index 9ec3646293..9f731500d9 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -367,6 +367,7 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { // drop the scheme to free up genesis state. 
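The `complete_register` call added below is a synchronization point: the engine sends a `Restore` event carrying a reply channel to its worker and blocks on a bounded(1) crossbeam channel until the worker answers, so the restore only runs once every extension is registered. A stand-alone sketch of that rendezvous pattern, with an illustrative `Event` enum and worker thread in place of the real Tendermint worker:

    use crossbeam_channel as crossbeam;
    use std::thread;

    enum Event {
        // Carries the reply sender; the worker answers once restoration is done.
        Restore(crossbeam::Sender<()>),
    }

    fn main() {
        let (tx, rx) = crossbeam::unbounded::<Event>();
        let worker = thread::spawn(move || {
            if let Ok(Event::Restore(reply)) = rx.recv() {
                // ... restore consensus state here ...
                reply.send(()).unwrap();
            }
        });

        // complete_register(): block until the worker has restored its state.
        let (result, receiver) = crossbeam::bounded(1);
        tx.send(Event::Restore(result)).unwrap();
        receiver.recv().unwrap();
        worker.join().unwrap();
    }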
drop(scheme); + client.client().engine().complete_register(); cinfo!(TEST_SCRIPT, "Initialization complete"); diff --git a/core/src/consensus/mod.rs b/core/src/consensus/mod.rs index 329e9e2420..de08a49a7c 100644 --- a/core/src/consensus/mod.rs +++ b/core/src/consensus/mod.rs @@ -265,6 +265,8 @@ pub trait ConsensusEngine: Sync + Send { fn register_chain_notify(&self, _: &Client) {} + fn complete_register(&self) {} + fn get_best_block_from_best_proposal_header(&self, header: &HeaderView) -> BlockHash { header.hash() } diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs index 4ea4c9396b..4dd8984163 100644 --- a/core/src/consensus/tendermint/engine.rs +++ b/core/src/consensus/tendermint/engine.rs @@ -281,10 +281,6 @@ impl ConsensusEngine for Tendermint { let extension = service.register_extension(move |api| TendermintExtension::new(inner, timeouts, api)); let client = Arc::downgrade(&self.client().unwrap()); self.extension_initializer.send((extension, client)).unwrap(); - - let (result, receiver) = crossbeam::bounded(1); - self.inner.send(worker::Event::Restore(result)).unwrap(); - receiver.recv().unwrap(); } fn register_time_gap_config_to_worker(&self, time_gap_params: TimeGapParams) { @@ -303,6 +299,12 @@ impl ConsensusEngine for Tendermint { client.add_notify(Arc::downgrade(&self.chain_notify) as Weak); } + fn complete_register(&self) { + let (result, receiver) = crossbeam::bounded(1); + self.inner.send(worker::Event::Restore(result)).unwrap(); + receiver.recv().unwrap(); + } + fn get_best_block_from_best_proposal_header(&self, header: &HeaderView) -> BlockHash { header.parent_hash() } From fc6a3ded0dbdf36e7f79315f1af291b734fa2a72 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Wed, 20 Nov 2019 18:00:54 +0900 Subject: [PATCH 04/52] Change waitNodeUntilTerm to return TermMetadata --- test/src/e2e.dynval/setup.ts | 4 ++-- test/src/helper/spawn.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/src/e2e.dynval/setup.ts b/test/src/e2e.dynval/setup.ts index 493f6ef602..2c4aefa645 100644 --- a/test/src/e2e.dynval/setup.ts +++ b/test/src/e2e.dynval/setup.ts @@ -451,7 +451,7 @@ interface TermWaiter { target: number; termPeriods: number; } - ): Promise; + ): Promise; } export function setTermTestTimeout( @@ -483,7 +483,7 @@ export function setTermTestTimeout( termPeriods: number; } ) { - await node.waitForTermChange( + return await node.waitForTermChange( waiterParams.target, termPeriodsToTime(waiterParams.termPeriods, 0.5) ); diff --git a/test/src/helper/spawn.ts b/test/src/helper/spawn.ts index 4b2eb4bc95..9b3277ddaa 100644 --- a/test/src/helper/spawn.ts +++ b/test/src/helper/spawn.ts @@ -861,7 +861,7 @@ export default class CodeChain { while (true) { const termMetadata = await stake.getTermMetadata(this.sdk); if (termMetadata && termMetadata.currentTermId >= target) { - break; + return termMetadata; } await wait(1000); if (timeout) { From 80e6282d970aa4695acc9921395b056256deff84 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Wed, 20 Nov 2019 12:22:58 +0900 Subject: [PATCH 05/52] Refactor rpc startup code --- codechain/rpc.rs | 71 +++++++++++++++---------------------------- codechain/rpc_apis.rs | 6 ++-- codechain/run_node.rs | 63 +++++++++++++++++++++----------------- 3 files changed, 64 insertions(+), 76 deletions(-) diff --git a/codechain/rpc.rs b/codechain/rpc.rs index 5a3f696ca4..b3219e25d8 100644 --- a/codechain/rpc.rs +++ b/codechain/rpc.rs @@ -15,8 +15,8 @@ // along with this program. If not, see . 
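The refactor below inverts the old flow: `setup_rpc_server` now builds the `MetaIoHandler` from the API dependencies, and the `rpc_*_start` functions only bind a prebuilt handler to a socket, so each transport gets its own freshly constructed handler. A dependency-free sketch of that shape (the trait and types here are stand-ins for the jsonrpc ones, not the real API):

    trait Middleware {}
    struct LogMiddleware;
    impl Middleware for LogMiddleware {}

    struct Handler<M: Middleware>(M);

    // Built once per transport by the caller...
    fn setup_rpc_server() -> Handler<impl Middleware> {
        Handler(LogMiddleware)
    }

    // ...and the transport only binds the handler it is given.
    fn rpc_http_start<M: Middleware>(_server: Handler<M>, url: &str) -> Result<(), String> {
        println!("RPC Listening on {}", url);
        Ok(())
    }

    fn main() -> Result<(), String> {
        rpc_http_start(setup_rpc_server(), "127.0.0.1:8080")
    }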
use std::io; -use std::net::SocketAddr; +use crate::config::Config; use crate::rpc_apis; use crpc::{ jsonrpc_core, start_http, start_ipc, start_ws, HttpServer, IpcServer, MetaIoHandler, Middleware, WsError, WsServer, @@ -33,38 +33,27 @@ pub struct RpcHttpConfig { } pub fn rpc_http_start( - cfg: RpcHttpConfig, - enable_devel_api: bool, - deps: &rpc_apis::ApiDependencies, + server: MetaIoHandler<(), impl Middleware<()>>, + config: RpcHttpConfig, ) -> Result { - let url = format!("{}:{}", cfg.interface, cfg.port); + let url = format!("{}:{}", config.interface, config.port); let addr = url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))?; - let server = setup_http_rpc_server(&addr, cfg.cors.clone(), cfg.hosts.clone(), enable_devel_api, deps)?; - cinfo!(RPC, "RPC Listening on {}", url); - if let Some(hosts) = cfg.hosts { - cinfo!(RPC, "Allowed hosts are {:?}", hosts); - } - if let Some(cors) = cfg.cors { - cinfo!(RPC, "CORS domains are {:?}", cors); - } - Ok(server) -} - -fn setup_http_rpc_server( - url: &SocketAddr, - cors_domains: Option>, - allowed_hosts: Option>, - enable_devel_api: bool, - deps: &rpc_apis::ApiDependencies, -) -> Result { - let server = setup_rpc_server(enable_devel_api, deps); - let start_result = start_http(url, cors_domains, allowed_hosts, server); + let start_result = start_http(&addr, config.cors.clone(), config.hosts.clone(), server); match start_result { Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => { Err(format!("RPC address {} is already in use, make sure that another instance of a CodeChain node is not running or change the address using the --jsonrpc-port option.", url)) }, Err(e) => Err(format!("RPC error: {:?}", e)), - Ok(server) => Ok(server), + Ok(server) => { + cinfo!(RPC, "RPC Listening on {}", url); + if let Some(hosts) = config.hosts { + cinfo!(RPC, "Allowed hosts are {:?}", hosts); + } + if let Some(cors) = config.cors { + cinfo!(RPC, "CORS domains are {:?}", cors); + } + Ok(server) + }, } } @@ -74,19 +63,17 @@ pub struct RpcIpcConfig { } pub fn rpc_ipc_start( - cfg: &RpcIpcConfig, - enable_devel_api: bool, - deps: &rpc_apis::ApiDependencies, + server: MetaIoHandler<(), impl Middleware<()>>, + config: RpcIpcConfig, ) -> Result { - let server = setup_rpc_server(enable_devel_api, deps); - let start_result = start_ipc(&cfg.socket_addr, server); + let start_result = start_ipc(&config.socket_addr, server); match start_result { Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => { - Err(format!("IPC address {} is already in use, make sure that another instance of a Codechain node is not running or change the address using the --ipc-path options.", cfg.socket_addr)) + Err(format!("IPC address {} is already in use, make sure that another instance of a Codechain node is not running or change the address using the --ipc-path options.", config.socket_addr)) }, Err(e) => Err(format!("IPC error: {:?}", e)), Ok(server) => { - cinfo!(RPC, "IPC Listening on {}", cfg.socket_addr); + cinfo!(RPC, "IPC Listening on {}", config.socket_addr); Ok(server) }, } @@ -99,15 +86,10 @@ pub struct RpcWsConfig { pub max_connections: usize, } -pub fn rpc_ws_start( - cfg: &RpcWsConfig, - enable_devel_api: bool, - deps: &rpc_apis::ApiDependencies, -) -> Result { - let server = setup_rpc_server(enable_devel_api, deps); - let url = format!("{}:{}", cfg.interface, cfg.port); +pub fn rpc_ws_start(server: MetaIoHandler<(), impl Middleware<()>>, config: RpcWsConfig) -> Result { + let url = format!("{}:{}", config.interface, config.port); let 
addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?; - let start_result = start_ws(&addr, server, cfg.max_connections); + let start_result = start_ws(&addr, server, config.max_connections); match start_result { Err(WsError::Io(ref err)) if err.kind() == io::ErrorKind::AddrInUse => { Err(format!("WebSockets address {} is already in use, make sure that another instance of a Codechain node is not running or change the address using the --ws-port options.", addr)) @@ -120,12 +102,9 @@ pub fn rpc_ws_start( } } -fn setup_rpc_server( - enable_devel_api: bool, - deps: &rpc_apis::ApiDependencies, -) -> MetaIoHandler<(), impl Middleware<()>> { +pub fn setup_rpc_server(config: &Config, deps: &rpc_apis::ApiDependencies) -> MetaIoHandler<(), impl Middleware<()>> { let mut handler = MetaIoHandler::with_middleware(LogMiddleware::new()); - deps.extend_api(enable_devel_api, &mut handler); + deps.extend_api(config, &mut handler); rpc_apis::setup_rpc(handler) } diff --git a/codechain/rpc_apis.rs b/codechain/rpc_apis.rs index 3f4d3c016a..a447b43b42 100644 --- a/codechain/rpc_apis.rs +++ b/codechain/rpc_apis.rs @@ -22,6 +22,8 @@ use cnetwork::{EventSender, NetworkControl}; use crpc::{MetaIoHandler, Middleware, Params, Value}; use csync::BlockSyncEvent; +use crate::config::Config; + pub struct ApiDependencies { pub client: Arc, pub miner: Arc, @@ -31,11 +33,11 @@ pub struct ApiDependencies { } impl ApiDependencies { - pub fn extend_api(&self, enable_devel_api: bool, handler: &mut MetaIoHandler<(), impl Middleware<()>>) { + pub fn extend_api(&self, config: &Config, handler: &mut MetaIoHandler<(), impl Middleware<()>>) { use crpc::v1::*; handler.extend_with(ChainClient::new(Arc::clone(&self.client)).to_delegate()); handler.extend_with(MempoolClient::new(Arc::clone(&self.client)).to_delegate()); - if enable_devel_api { + if config.rpc.enable_devel_api { handler.extend_with( DevelClient::new(Arc::clone(&self.client), Arc::clone(&self.miner), self.block_sync.clone()) .to_delegate(), diff --git a/codechain/run_node.rs b/codechain/run_node.rs index 9f731500d9..85255e6e29 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -42,7 +42,7 @@ use crate::config::{self, load_config}; use crate::constants::{DEFAULT_DB_PATH, DEFAULT_KEYS_PATH}; use crate::dummy_network_service::DummyNetworkService; use crate::json::PasswordFile; -use crate::rpc::{rpc_http_start, rpc_ipc_start, rpc_ws_start}; +use crate::rpc::{rpc_http_start, rpc_ipc_start, rpc_ws_start, setup_rpc_server}; use crate::rpc_apis::ApiDependencies; fn network_start( @@ -316,36 +316,43 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { } }; - let rpc_apis_deps = ApiDependencies { - client: client.client(), - miner: Arc::clone(&miner), - network_control: Arc::clone(&network_service), - account_provider: ap, - block_sync: maybe_sync_sender, - }; + let (rpc_server, ipc_server, ws_server) = { + let rpc_apis_deps = ApiDependencies { + client: client.client(), + miner: Arc::clone(&miner), + network_control: Arc::clone(&network_service), + account_provider: ap, + block_sync: maybe_sync_sender, + }; + + let rpc_server = { + if !config.rpc.disable.unwrap() { + let server = setup_rpc_server(&config, &rpc_apis_deps); + Some(rpc_http_start(server, config.rpc_http_config())?) + } else { + None + } + }; - let rpc_server = { - if !config.rpc.disable.unwrap() { - Some(rpc_http_start(config.rpc_http_config(), config.rpc.enable_devel_api, &rpc_apis_deps)?) 
- } else { - None - } - }; + let ipc_server = { + if !config.ipc.disable.unwrap() { + let server = setup_rpc_server(&config, &rpc_apis_deps); + Some(rpc_ipc_start(server, config.rpc_ipc_config())?) + } else { + None + } + }; - let ipc_server = { - if !config.ipc.disable.unwrap() { - Some(rpc_ipc_start(&config.rpc_ipc_config(), config.rpc.enable_devel_api, &rpc_apis_deps)?) - } else { - None - } - }; + let ws_server = { + if !config.ws.disable.unwrap() { + let server = setup_rpc_server(&config, &rpc_apis_deps); + Some(rpc_ws_start(server, config.rpc_ws_config())?) + } else { + None + } + }; - let ws_server = { - if !config.ws.disable.unwrap() { - Some(rpc_ws_start(&config.rpc_ws_config(), config.rpc.enable_devel_api, &rpc_apis_deps)?) - } else { - None - } + (rpc_server, ipc_server, ws_server) }; if (!config.stratum.disable.unwrap()) && (miner.engine_type() == EngineType::PoW) { From 74c46d6a49ff8fb3f5126b269c491519bd40b26d Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 3 Dec 2019 16:27:48 +0900 Subject: [PATCH 06/52] Refactor send status function --- sync/src/block/extension.rs | 63 ++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 28 deletions(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 1ea7eb4c5b..16fd3ad6be 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -109,6 +109,38 @@ impl Extension { } } + fn send_status(&mut self, id: &NodeId) { + let chain_info = self.client.chain_info(); + self.api.send( + id, + Arc::new( + Message::Status { + total_score: chain_info.best_proposal_score, + best_hash: chain_info.best_proposal_block_hash, + genesis_hash: chain_info.genesis_hash, + } + .rlp_bytes(), + ), + ); + } + + fn send_status_broadcast(&mut self) { + let chain_info = self.client.chain_info(); + for id in self.connected_nodes.iter() { + self.api.send( + id, + Arc::new( + Message::Status { + total_score: chain_info.best_proposal_score, + best_hash: chain_info.best_proposal_block_hash, + genesis_hash: chain_info.genesis_hash, + } + .rlp_bytes(), + ), + ); + } + } + fn send_header_request(&mut self, id: &NodeId, request: RequestMessage) { if let Some(requests) = self.requests.get_mut(id) { ctrace!(SYNC, "Send header request to {}", id); @@ -212,18 +244,8 @@ impl NetworkExtension for Extension { fn on_node_added(&mut self, id: &NodeId, _version: u64) { cinfo!(SYNC, "New peer detected #{}", id); - let chain_info = self.client.chain_info(); - self.api.send( - id, - Arc::new( - Message::Status { - total_score: chain_info.best_proposal_score, - best_hash: chain_info.best_proposal_block_hash, - genesis_hash: chain_info.genesis_hash, - } - .rlp_bytes(), - ), - ); + self.send_status(id); + let t = self.connected_nodes.insert(*id); debug_assert!(t, "{} is already added to peer list", id); @@ -420,22 +442,7 @@ impl Extension { self.body_downloader.remove_target(&imported); self.body_downloader.remove_target(&invalid); - - let chain_info = self.client.chain_info(); - - for id in &self.connected_nodes { - self.api.send( - id, - Arc::new( - Message::Status { - total_score: chain_info.best_proposal_score, - best_hash: chain_info.best_proposal_block_hash, - genesis_hash: chain_info.genesis_hash, - } - .rlp_bytes(), - ), - ); - } + self.send_status_broadcast(); } } From ec0ef2b078f1d154654a0b5a9f112d1fd4f0174f Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 3 Dec 2019 16:42:37 +0900 Subject: [PATCH 07/52] Remove unnecessary cloning in block sync --- sync/src/block/extension.rs | 7 ++----- 1 file changed, 2 
insertions(+), 5 deletions(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 16fd3ad6be..0b40141598 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -408,11 +408,8 @@ pub enum Event { impl Extension { fn new_headers(&mut self, imported: Vec, enacted: Vec, retracted: Vec) { - let peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); - for id in peer_ids { - if let Some(peer) = self.header_downloaders.get_mut(&id) { - peer.mark_as_imported(imported.clone()); - } + for peer in self.header_downloaders.values_mut() { + peer.mark_as_imported(imported.clone()); } let mut headers_to_download: Vec<_> = enacted .into_iter() From 06b3f97add242b8176af21ead2f0017aebc9e783 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Wed, 20 Nov 2019 21:28:24 +0900 Subject: [PATCH 08/52] Enhance error message in waitForTermChange --- test/src/helper/spawn.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/src/helper/spawn.ts b/test/src/helper/spawn.ts index 9b3277ddaa..f2d1cf396b 100644 --- a/test/src/helper/spawn.ts +++ b/test/src/helper/spawn.ts @@ -859,14 +859,16 @@ export default class CodeChain { public async waitForTermChange(target: number, timeout?: number) { const start = Date.now(); while (true) { - const termMetadata = await stake.getTermMetadata(this.sdk); + const termMetadata = (await stake.getTermMetadata(this.sdk))!; if (termMetadata && termMetadata.currentTermId >= target) { return termMetadata; } await wait(1000); if (timeout) { if (Date.now() - start > timeout * 1000) { - throw new Error(`Term didn't changed in ${timeout} s`); + throw new Error( + `Term didn't changed to ${target} in ${timeout} s. It is ${termMetadata.currentTermId} now` + ); } } } From 3b9c38845db6b1c899d48d927dc07ce547dfe3fa Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 26 Nov 2019 11:21:29 +0900 Subject: [PATCH 09/52] Add term_params to Metadata `term_params` should be snapshot every `on_term_close` --- state/src/item/metadata.rs | 136 +++++++++++++++++++++++++------------ 1 file changed, 94 insertions(+), 42 deletions(-) diff --git a/state/src/item/metadata.rs b/state/src/item/metadata.rs index 6ad74076b3..39651888a4 100644 --- a/state/src/item/metadata.rs +++ b/state/src/item/metadata.rs @@ -34,6 +34,7 @@ pub struct Metadata { term: TermMetadata, seq: u64, params: Option, + term_params: Option, } impl Metadata { @@ -45,6 +46,7 @@ impl Metadata { term: Default::default(), seq: 0, params: None, + term_params: None, } } @@ -93,6 +95,14 @@ impl Metadata { self.params = Some(params); } + pub fn term_params(&self) -> Option<&CommonParams> { + self.term_params.as_ref() + } + + pub fn snapshot_term_params(&mut self) { + self.term_params = self.params; + } + pub fn increase_term_id(&mut self, last_term_finished_block_num: u64) { assert!(self.term.last_term_finished_block_num < last_term_finished_block_num); self.term.last_term_finished_block_num = last_term_finished_block_num; @@ -124,25 +134,31 @@ impl CacheableItem for Metadata { const PREFIX: u8 = super::METADATA_PREFIX; +const INITIAL_LEN: usize = 4; +const TERM_LEN: usize = INITIAL_LEN + 2; +const PARAMS_LEN: usize = TERM_LEN + 2; +const TERM_PARAMS_LEN: usize = PARAMS_LEN + 1; +const VALID_LEN: &[usize] = &[INITIAL_LEN, TERM_LEN, PARAMS_LEN, TERM_PARAMS_LEN]; + impl Encodable for Metadata { fn rlp_append(&self, s: &mut RlpStream) { - const INITIAL_LEN: usize = 4; - const TERM_LEN: usize = 2; - const PARAMS_LEN: usize = 2; - let mut len = INITIAL_LEN; - let 
term_changed = self.term != Default::default(); - if term_changed { - len += TERM_LEN; - } - let params_changed = self.seq != 0; - if params_changed { - if !term_changed { - len += TERM_LEN; + let term_params_exist = self.term_params.is_some(); + + let len = if term_params_exist { + if !params_changed { + panic!("Term params only can be changed if params changed"); } - len += PARAMS_LEN; - } + TERM_PARAMS_LEN + } else if params_changed { + PARAMS_LEN + } else if term_changed { + TERM_LEN + } else { + INITIAL_LEN + }; + s.begin_list(len) .append(&PREFIX) .append(&self.number_of_shards) @@ -159,48 +175,63 @@ impl Encodable for Metadata { } s.append(&self.seq).append(self.params.as_ref().unwrap()); } + if term_params_exist { + if !params_changed { + unreachable!("Term params only can be changed if params changed"); + } + s.append(self.term_params.as_ref().unwrap()); + } } } impl Decodable for Metadata { fn decode(rlp: &Rlp) -> Result { - let (term, seq, params) = match rlp.item_count()? { - 4 => (TermMetadata::default(), 0, None), - 6 => ( - TermMetadata { - last_term_finished_block_num: rlp.val_at(4)?, - current_term_id: rlp.val_at(5)?, - }, - 0, - None, - ), - 8 => ( - TermMetadata { - last_term_finished_block_num: rlp.val_at(4)?, - current_term_id: rlp.val_at(5)?, - }, - rlp.val_at(6)?, - Some(rlp.val_at(7)?), - ), - item_count => { - return Err(DecoderError::RlpInvalidLength { - got: item_count, - expected: 4, - }) - } - }; + let item_count = rlp.item_count()?; + if !VALID_LEN.contains(&item_count) { + return Err(DecoderError::RlpInvalidLength { + got: item_count, + expected: 4, + }) + } + let prefix = rlp.val_at::(0)?; if PREFIX != prefix { cdebug!(STATE, "{} is not an expected prefix for asset", prefix); return Err(DecoderError::Custom("Unexpected prefix")) } + let number_of_shards = rlp.val_at(1)?; + let number_of_initial_shards = rlp.val_at(2)?; + let hashes = rlp.list_at(3)?; + + let term = if item_count >= TERM_LEN { + TermMetadata { + last_term_finished_block_num: rlp.val_at(4)?, + current_term_id: rlp.val_at(5)?, + } + } else { + TermMetadata::default() + }; + + let (seq, params) = if item_count >= PARAMS_LEN { + (rlp.val_at(6)?, Some(rlp.val_at(7)?)) + } else { + Default::default() + }; + + let term_params = if item_count >= TERM_PARAMS_LEN { + Some(rlp.val_at(8)?) 
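+                // Index 8 is present only in the longest (TERM_PARAMS_LEN = 9) layout;
+                // shorter historical layouts leave term_params as None.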
+ } else { + Default::default() + }; + Ok(Self { - number_of_shards: rlp.val_at(1)?, - number_of_initial_shards: rlp.val_at(2)?, - hashes: rlp.list_at(3)?, + number_of_shards, + number_of_initial_shards, + hashes, term, seq, params, + term_params, }) } } @@ -266,6 +297,7 @@ mod tests { term: Default::default(), seq: 0, params: None, + term_params: None, }; let mut rlp = RlpStream::new_list(4); rlp.append(&PREFIX).append(&10u16).append(&1u16).append_list::(&[]); @@ -281,6 +313,7 @@ mod tests { term: Default::default(), seq: 3, params: Some(CommonParams::default_for_test()), + term_params: Some(CommonParams::default_for_test()), }; rlp_encode_and_decode_test!(metadata); } @@ -297,6 +330,7 @@ mod tests { }, seq: 0, params: None, + term_params: None, }; rlp_encode_and_decode_test!(metadata); } @@ -313,6 +347,24 @@ mod tests { }, seq: 3, params: Some(CommonParams::default_for_test()), + term_params: Some(CommonParams::default_for_test()), + }; + rlp_encode_and_decode_test!(metadata); + } + + #[test] + fn metadata_with_term_and_seq_but_not_term_params() { + let metadata = Metadata { + number_of_shards: 10, + number_of_initial_shards: 1, + hashes: vec![], + term: TermMetadata { + last_term_finished_block_num: 1, + current_term_id: 100, + }, + seq: 3, + params: Some(CommonParams::default_for_test()), + term_params: None, }; rlp_encode_and_decode_test!(metadata); } From 5c647379789399972224be60228ec47b093679e0 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 26 Nov 2019 14:43:37 +0900 Subject: [PATCH 10/52] Snapshot term_metadata on every term close for the era > 0 --- core/src/consensus/tendermint/engine.rs | 10 +++++++++- state/src/impls/top_level.rs | 6 ++++++ state/src/traits.rs | 1 + 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs index 4dd8984163..184c6cb521 100644 --- a/core/src/consensus/tendermint/engine.rs +++ b/core/src/consensus/tendermint/engine.rs @@ -24,7 +24,7 @@ use std::sync::{Arc, Weak}; use ckey::{public_to_address, Address}; use cnetwork::NetworkService; use crossbeam_channel as crossbeam; -use cstate::{ActionHandler, TopStateView}; +use cstate::{ActionHandler, TopState, TopStateView}; use ctypes::{BlockHash, CommonParams, Header}; use num_rational::Ratio; @@ -244,6 +244,14 @@ impl ConsensusEngine for Tendermint { stake::on_term_close(block.state_mut(), block_number, &inactive_validators)?; + match term { + 0 => {} + _ => match term_common_params.expect("Term common params should exist").era() { + 0 => {} + 1 => block.state_mut().snapshot_term_params()?, + _ => unimplemented!("It is not decided how we handle this"), + }, + } Ok(()) } diff --git a/state/src/impls/top_level.rs b/state/src/impls/top_level.rs index b871efaa58..4d51ec0b3f 100644 --- a/state/src/impls/top_level.rs +++ b/state/src/impls/top_level.rs @@ -999,6 +999,12 @@ impl TopState for TopLevelState { metadata.increase_seq(); Ok(()) } + + fn snapshot_term_params(&mut self) -> StateResult<()> { + let mut metadata = self.get_metadata_mut()?; + metadata.snapshot_term_params(); + Ok(()) + } } fn is_active_account(state: &dyn TopStateView, address: &Address) -> TrieResult { diff --git a/state/src/traits.rs b/state/src/traits.rs index 40d94688f7..c9414611d1 100644 --- a/state/src/traits.rs +++ b/state/src/traits.rs @@ -183,6 +183,7 @@ pub trait TopState { fn remove_action_data(&mut self, key: &H256); fn update_params(&mut self, metadata_seq: u64, params: CommonParams) -> StateResult<()>; + fn snapshot_term_params(&mut 
self) -> StateResult<()>; } pub trait StateWithCache { From 7fba4f56a4899ba2b57777a529917b0560627010 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Tue, 3 Dec 2019 15:52:42 +0900 Subject: [PATCH 11/52] Update the era with the value in current block's state At `on_term_close`, the term_common_params is not updated to the new parameters yet. But the parameters in the current block's state is updated, so we should get the `era` from there. --- core/src/consensus/tendermint/engine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs index 184c6cb521..dd60909093 100644 --- a/core/src/consensus/tendermint/engine.rs +++ b/core/src/consensus/tendermint/engine.rs @@ -246,7 +246,7 @@ impl ConsensusEngine for Tendermint { match term { 0 => {} - _ => match term_common_params.expect("Term common params should exist").era() { + _ => match metadata.params().map_or(0, |p| p.era()) { 0 => {} 1 => block.state_mut().snapshot_term_params()?, _ => unimplemented!("It is not decided how we handle this"), From 9a6ec55eff39d5059a3865284b6e0a7140d4b36b Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 26 Nov 2019 17:10:12 +0900 Subject: [PATCH 12/52] Read term_params from the snapshot --- core/src/client/client.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/core/src/client/client.rs b/core/src/client/client.rs index 6bb39e5543..aaa50d431f 100644 --- a/core/src/client/client.rs +++ b/core/src/client/client.rs @@ -815,11 +815,19 @@ impl TermInfo for Client { } fn term_common_params(&self, id: BlockId) -> Option { - let block_number = self.last_term_finished_block_num(id).expect("The block of the parent hash should exist"); - if block_number == 0 { - None + let state = self.state_at(id)?; + let metadata = state.metadata().unwrap().expect("Metadata always exist"); + + if let Some(term_params) = metadata.term_params() { + Some(*term_params) } else { - Some(self.common_params((block_number).into()).expect("Common params should exist")) + let block_number = + self.last_term_finished_block_num(id).expect("The block of the parent hash should exist"); + if block_number == 0 { + None + } else { + Some(self.common_params((block_number).into()).expect("Common params should exist")) + } } } } From 1101c5b15968a206aff1e485a51c26893254714f Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Wed, 27 Nov 2019 18:45:45 +0900 Subject: [PATCH 13/52] Precalculate the fee in era=1 --- core/src/consensus/solo/mod.rs | 6 +- core/src/consensus/stake/action_data.rs | 205 +++++++++++++++++++----- core/src/consensus/stake/mod.rs | 69 ++++++-- core/src/consensus/tendermint/engine.rs | 68 +++++++- 4 files changed, 277 insertions(+), 71 deletions(-) diff --git a/core/src/consensus/solo/mod.rs b/core/src/consensus/solo/mod.rs index 414e45a00f..77a41dd9bb 100644 --- a/core/src/consensus/solo/mod.rs +++ b/core/src/consensus/solo/mod.rs @@ -118,7 +118,7 @@ impl ConsensusEngine for Solo { self.machine.add_balance(block, &author, block_author_reward)?; return Ok(()) } - stake::add_intermediate_rewards(block.state_mut(), author, block_author_reward)?; + stake::v0::add_intermediate_rewards(block.state_mut(), author, block_author_reward)?; let last_term_finished_block_num = { let header = block.header(); let current_term_period = header.timestamp() / term_seconds; @@ -128,8 +128,8 @@ impl ConsensusEngine for Solo { } header.number() }; - stake::move_current_to_previous_intermediate_rewards(&mut block.state_mut())?; 
- let rewards = stake::drain_previous_rewards(&mut block.state_mut())?; + stake::v0::move_current_to_previous_intermediate_rewards(&mut block.state_mut())?; + let rewards = stake::v0::drain_previous_rewards(&mut block.state_mut())?; for (address, reward) in rewards { self.machine.add_balance(block, &address, reward)?; } diff --git a/core/src/consensus/stake/action_data.rs b/core/src/consensus/stake/action_data.rs index acc1add3b3..af14c8b33e 100644 --- a/core/src/consensus/stake/action_data.rs +++ b/core/src/consensus/stake/action_data.rs @@ -18,7 +18,6 @@ use std::cmp::Ordering; use std::collections::btree_map::{BTreeMap, Entry}; use std::collections::btree_set::{self, BTreeSet}; use std::collections::{btree_map, HashMap, HashSet}; -use std::mem; use std::ops::Deref; use std::vec; @@ -408,51 +407,116 @@ impl IntoIterator for Validators { } } -#[derive(Default, Debug, PartialEq)] -pub struct IntermediateRewards { - previous: BTreeMap, - current: BTreeMap, -} +pub mod v0 { + use std::mem; -impl IntermediateRewards { - pub fn load_from_state(state: &TopLevelState) -> StateResult { - let key = get_intermediate_rewards_key(); - let action_data = state.action_data(&key)?; - let (previous, current) = decode_map_tuple(action_data.as_ref()); + use super::*; - Ok(Self { - previous, - current, - }) + #[derive(Default, Debug, PartialEq)] + pub struct IntermediateRewards { + pub(super) previous: BTreeMap, + pub(super) current: BTreeMap, } - pub fn save_to_state(&self, state: &mut TopLevelState) -> StateResult<()> { - let key = get_intermediate_rewards_key(); - if self.previous.is_empty() && self.current.is_empty() { - state.remove_action_data(&key); - } else { - let encoded = encode_map_tuple(&self.previous, &self.current); - state.update_action_data(&key, encoded)?; + impl IntermediateRewards { + pub fn load_from_state(state: &TopLevelState) -> StateResult { + let key = get_intermediate_rewards_key(); + let action_data = state.action_data(&key)?; + let (previous, current) = decode_map_tuple(action_data.as_ref()); + + Ok(Self { + previous, + current, + }) } - Ok(()) - } - pub fn add_quantity(&mut self, address: Address, quantity: StakeQuantity) { - if quantity == 0 { - return + pub fn save_to_state(&self, state: &mut TopLevelState) -> StateResult<()> { + let key = get_intermediate_rewards_key(); + if self.previous.is_empty() && self.current.is_empty() { + state.remove_action_data(&key); + } else { + let encoded = encode_map_tuple(&self.previous, &self.current); + state.update_action_data(&key, encoded)?; + } + Ok(()) + } + + pub fn add_quantity(&mut self, address: Address, quantity: StakeQuantity) { + if quantity == 0 { + return + } + *self.current.entry(address).or_insert(0) += quantity; + } + + pub fn drain_previous(&mut self) -> BTreeMap { + let mut new = BTreeMap::new(); + mem::swap(&mut new, &mut self.previous); + new + } + + pub fn move_current_to_previous(&mut self) { + assert!(self.previous.is_empty()); + mem::swap(&mut self.previous, &mut self.current); } - *self.current.entry(address).or_insert(0) += quantity; } +} + +pub mod v1 { + use std::mem; + + use super::*; - pub fn drain_previous(&mut self) -> BTreeMap { - let mut new = BTreeMap::new(); - mem::swap(&mut new, &mut self.previous); - new + #[derive(Default, Debug, PartialEq)] + pub struct IntermediateRewards { + pub(super) current: BTreeMap, + pub(super) calculated: BTreeMap, } - pub fn move_current_to_previous(&mut self) { - assert!(self.previous.is_empty()); - mem::swap(&mut self.previous, &mut self.current); + impl 
IntermediateRewards { + pub fn load_from_state(state: &TopLevelState) -> StateResult { + let key = get_intermediate_rewards_key(); + let action_data = state.action_data(&key)?; + let (current, calculated) = decode_map_tuple(action_data.as_ref()); + + Ok(Self { + current, + calculated, + }) + } + + pub fn save_to_state(&self, state: &mut TopLevelState) -> StateResult<()> { + let key = get_intermediate_rewards_key(); + if self.current.is_empty() && self.calculated.is_empty() { + state.remove_action_data(&key); + } else { + let encoded = encode_map_tuple(&self.current, &self.calculated); + state.update_action_data(&key, encoded)?; + } + Ok(()) + } + + pub fn add_quantity(&mut self, address: Address, quantity: StakeQuantity) { + if quantity == 0 { + return + } + *self.current.entry(address).or_insert(0) += quantity; + } + + pub fn update_calculated(&mut self, rewards: BTreeMap) { + self.calculated = rewards; + } + + pub fn drain_current(&mut self) -> BTreeMap { + let mut new = BTreeMap::new(); + mem::swap(&mut new, &mut self.current); + new + } + + pub fn drain_calculated(&mut self) -> BTreeMap { + let mut new = BTreeMap::new(); + mem::swap(&mut new, &mut self.calculated); + new + } } } @@ -1129,39 +1193,39 @@ mod tests { } #[test] - fn load_and_save_intermediate_rewards() { + fn load_and_save_intermediate_rewards_v0() { let mut state = helpers::get_temp_state(); - let rewards = IntermediateRewards::load_from_state(&state).unwrap(); + let rewards = v0::IntermediateRewards::load_from_state(&state).unwrap(); rewards.save_to_state(&mut state).unwrap(); } #[test] - fn add_quantity() { + fn add_quantity_v0() { let address1 = Address::random(); let address2 = Address::random(); let mut state = helpers::get_temp_state(); - let mut origin_rewards = IntermediateRewards::load_from_state(&state).unwrap(); + let mut origin_rewards = v0::IntermediateRewards::load_from_state(&state).unwrap(); origin_rewards.add_quantity(address1, 1); origin_rewards.add_quantity(address2, 2); origin_rewards.save_to_state(&mut state).unwrap(); - let recovered_rewards = IntermediateRewards::load_from_state(&state).unwrap(); + let recovered_rewards = v0::IntermediateRewards::load_from_state(&state).unwrap(); assert_eq!(origin_rewards, recovered_rewards); } #[test] - fn drain() { + fn drain_v0() { let address1 = Address::random(); let address2 = Address::random(); let mut state = helpers::get_temp_state(); - let mut origin_rewards = IntermediateRewards::load_from_state(&state).unwrap(); + let mut origin_rewards = v0::IntermediateRewards::load_from_state(&state).unwrap(); origin_rewards.add_quantity(address1, 1); origin_rewards.add_quantity(address2, 2); origin_rewards.save_to_state(&mut state).unwrap(); - let mut recovered_rewards = IntermediateRewards::load_from_state(&state).unwrap(); + let mut recovered_rewards = v0::IntermediateRewards::load_from_state(&state).unwrap(); assert_eq!(origin_rewards, recovered_rewards); let _drained = recovered_rewards.drain_previous(); recovered_rewards.save_to_state(&mut state).unwrap(); - let mut final_rewards = IntermediateRewards::load_from_state(&state).unwrap(); + let mut final_rewards = v0::IntermediateRewards::load_from_state(&state).unwrap(); assert_eq!(BTreeMap::new(), final_rewards.previous); let current = final_rewards.current.clone(); final_rewards.move_current_to_previous(); @@ -1169,6 +1233,59 @@ mod tests { assert_eq!(current, final_rewards.previous); } + #[test] + fn save_v0_and_load_v1_intermediate_rewards() { + let address1 = Address::random(); + let address2 = 
Address::random(); + let mut state = helpers::get_temp_state(); + let mut origin_rewards = v0::IntermediateRewards::load_from_state(&state).unwrap(); + origin_rewards.add_quantity(address1, 1); + origin_rewards.add_quantity(address2, 2); + origin_rewards.save_to_state(&mut state).unwrap(); + let recovered_rewards = v1::IntermediateRewards::load_from_state(&state).unwrap(); + assert_eq!(origin_rewards.previous, recovered_rewards.current); + assert_eq!(origin_rewards.current, recovered_rewards.calculated); + } + + #[test] + fn load_and_save_intermediate_rewards_v1() { + let mut state = helpers::get_temp_state(); + let rewards = v1::IntermediateRewards::load_from_state(&state).unwrap(); + rewards.save_to_state(&mut state).unwrap(); + } + + #[test] + fn add_quantity_v1() { + let address1 = Address::random(); + let address2 = Address::random(); + let mut state = helpers::get_temp_state(); + let mut origin_rewards = v1::IntermediateRewards::load_from_state(&state).unwrap(); + origin_rewards.add_quantity(address1, 1); + origin_rewards.add_quantity(address2, 2); + origin_rewards.save_to_state(&mut state).unwrap(); + let recovered_rewards = v1::IntermediateRewards::load_from_state(&state).unwrap(); + assert_eq!(origin_rewards, recovered_rewards); + } + + #[test] + fn drain_v1() { + let address1 = Address::random(); + let address2 = Address::random(); + let mut state = helpers::get_temp_state(); + let mut origin_rewards = v1::IntermediateRewards::load_from_state(&state).unwrap(); + origin_rewards.add_quantity(address1, 1); + origin_rewards.add_quantity(address2, 2); + origin_rewards.save_to_state(&mut state).unwrap(); + let mut recovered_rewards = v1::IntermediateRewards::load_from_state(&state).unwrap(); + assert_eq!(origin_rewards, recovered_rewards); + recovered_rewards.drain_current(); + recovered_rewards.save_to_state(&mut state).unwrap(); + let mut final_rewards = v1::IntermediateRewards::load_from_state(&state).unwrap(); + assert_eq!(BTreeMap::new(), final_rewards.current); + final_rewards.drain_calculated(); + assert_eq!(BTreeMap::new(), final_rewards.calculated); + } + #[test] fn candidates_deposit_add() { let mut state = helpers::get_temp_state(); diff --git a/core/src/consensus/stake/mod.rs b/core/src/consensus/stake/mod.rs index 02789499fe..5134367362 100644 --- a/core/src/consensus/stake/mod.rs +++ b/core/src/consensus/stake/mod.rs @@ -34,7 +34,7 @@ use primitives::{Bytes, H256}; use rlp::{Decodable, Rlp}; pub use self::action_data::{Banned, Validator, Validators}; -use self::action_data::{Candidates, Delegation, IntermediateRewards, Jail, ReleaseResult, StakeAccount, Stakeholders}; +use self::action_data::{Candidates, Delegation, Jail, ReleaseResult, StakeAccount, Stakeholders}; pub use self::actions::Action; pub use self::distribute::fee_distribute; use super::ValidatorSet; @@ -321,24 +321,61 @@ pub fn get_validators(state: &TopLevelState) -> StateResult { Validators::load_from_state(state) } -pub fn add_intermediate_rewards(state: &mut TopLevelState, address: Address, reward: u64) -> StateResult<()> { - let mut rewards = IntermediateRewards::load_from_state(state)?; - rewards.add_quantity(address, reward); - rewards.save_to_state(state)?; - Ok(()) -} +pub mod v0 { + use super::action_data::v0::IntermediateRewards; + use super::*; + + pub fn add_intermediate_rewards(state: &mut TopLevelState, address: Address, reward: u64) -> StateResult<()> { + let mut rewards = IntermediateRewards::load_from_state(state)?; + rewards.add_quantity(address, reward); + rewards.save_to_state(state)?; + 
Ok(()) + } + + pub fn drain_previous_rewards(state: &mut TopLevelState) -> StateResult> { + let mut rewards = IntermediateRewards::load_from_state(state)?; + let drained = rewards.drain_previous(); + rewards.save_to_state(state)?; + Ok(drained) + } -pub fn drain_previous_rewards(state: &mut TopLevelState) -> StateResult> { - let mut rewards = IntermediateRewards::load_from_state(state)?; - let drained = rewards.drain_previous(); - rewards.save_to_state(state)?; - Ok(drained) + pub fn move_current_to_previous_intermediate_rewards(state: &mut TopLevelState) -> StateResult<()> { + let mut rewards = IntermediateRewards::load_from_state(state)?; + rewards.move_current_to_previous(); + rewards.save_to_state(state) + } } -pub fn move_current_to_previous_intermediate_rewards(state: &mut TopLevelState) -> StateResult<()> { - let mut rewards = IntermediateRewards::load_from_state(state)?; - rewards.move_current_to_previous(); - rewards.save_to_state(state) +pub mod v1 { + use super::action_data::v1::IntermediateRewards; + use super::*; + + pub fn add_intermediate_rewards(state: &mut TopLevelState, address: Address, reward: u64) -> StateResult<()> { + let mut rewards = IntermediateRewards::load_from_state(state)?; + rewards.add_quantity(address, reward); + rewards.save_to_state(state)?; + Ok(()) + } + + pub fn drain_current_rewards(state: &mut TopLevelState) -> StateResult> { + let mut rewards = IntermediateRewards::load_from_state(state)?; + let drained = rewards.drain_current(); + rewards.save_to_state(state)?; + Ok(drained) + } + + pub fn update_calculated_rewards(state: &mut TopLevelState, values: HashMap) -> StateResult<()> { + let mut rewards = IntermediateRewards::load_from_state(state)?; + rewards.update_calculated(values.into_iter().collect()); + rewards.save_to_state(state) + } + + pub fn drain_calculated_rewards(state: &mut TopLevelState) -> StateResult> { + let mut rewards = IntermediateRewards::load_from_state(state)?; + let drained = rewards.drain_calculated(); + rewards.save_to_state(state)?; + Ok(drained) + } } pub fn update_validator_weights(state: &mut TopLevelState, block_author: &Address) -> StateResult<()> { diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs index dd60909093..22af949ea7 100644 --- a/core/src/consensus/tendermint/engine.rs +++ b/core/src/consensus/tendermint/engine.rs @@ -136,9 +136,43 @@ impl ConsensusEngine for Tendermint { /// Block transformation functions, before the transactions. 
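+    /// From era 1 on, the rewards buffered during the term that just closed are
+    /// drained here and the previous term's payouts are precalculated, so the
+    /// reward state is settled before any transaction in the new term executes.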
fn on_open_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + let client = self.client().ok_or(EngineError::CannotOpenBlock)?; + + let block_number = block.header().number(); let metadata = block.state().metadata()?.expect("Metadata must exist"); - if block.header().number() == metadata.last_term_finished_block_num() + 1 { - // FIXME: on_term_open + let era = metadata.term_params().map_or(0, |p| p.era()); + if block_number == metadata.last_term_finished_block_num() + 1 { + match era { + 0 => {} + 1 => { + let rewards = stake::v1::drain_current_rewards(block.state_mut())?; + let start_of_the_current_term = block_number; + let start_of_the_previous_term = { + let end_of_the_two_level_previous_term = client + .last_term_finished_block_num((metadata.last_term_finished_block_num() - 1).into()) + .unwrap(); + + end_of_the_two_level_previous_term + 1 + }; + + let banned = stake::Banned::load_from_state(block.state())?; + let start_of_the_current_term_header = + encoded::Header::new(block.header().clone().rlp_bytes().to_vec()); + + let pending_rewards = calculate_pending_rewards_of_the_previous_term( + &*client, + &*self.validators, + rewards, + start_of_the_current_term, + start_of_the_current_term_header, + start_of_the_previous_term, + &banned, + )?; + + stake::v1::update_calculated_rewards(block.state_mut(), pending_rewards)?; + } + _ => unimplemented!(), + } } Ok(()) } @@ -174,6 +208,7 @@ impl ConsensusEngine for Tendermint { let block_author_reward = total_reward - total_min_fee + distributor.remaining_fee(); + let era = term_common_params.map_or(0, |p| p.era()); let metadata = block.state().metadata()?.expect("Metadata must exist"); let term = metadata.current_term_id(); let term_seconds = match term { @@ -187,7 +222,11 @@ impl ConsensusEngine for Tendermint { } _ => { stake::update_validator_weights(block.state_mut(), &author)?; - stake::add_intermediate_rewards(block.state_mut(), author, block_author_reward)?; + match era { + 0 => stake::v0::add_intermediate_rewards(block.state_mut(), author, block_author_reward)?, + 1 => stake::v1::add_intermediate_rewards(block.state_mut(), author, block_author_reward)?, + _ => unimplemented!(), + } } } @@ -195,10 +234,10 @@ impl ConsensusEngine for Tendermint { return Ok(()) } - let inactive_validators = match term { - 0 => Vec::new(), - _ => { - let rewards = stake::drain_previous_rewards(block.state_mut())?; + let inactive_validators = match (era, term) { + (0, 0) => Vec::new(), + (0, _) => { + let rewards = stake::v0::drain_previous_rewards(block.state_mut())?; let start_of_the_current_term = metadata.last_term_finished_block_num() + 1; if term > 1 { @@ -232,7 +271,7 @@ impl ConsensusEngine for Tendermint { } } - stake::move_current_to_previous_intermediate_rewards(block.state_mut())?; + stake::v0::move_current_to_previous_intermediate_rewards(block.state_mut())?; let validators = stake::Validators::load_from_state(block.state())? .into_iter() @@ -240,6 +279,19 @@ impl ConsensusEngine for Tendermint { .collect(); inactive_validators(&*client, start_of_the_current_term, block.header(), validators) } + (1, _) => { + for (address, reward) in stake::v1::drain_calculated_rewards(block.state_mut())? { + self.machine.add_balance(block, &address, reward)?; + } + + let start_of_the_current_term = metadata.last_term_finished_block_num() + 1; + let validators = stake::Validators::load_from_state(block.state())? 
+ .into_iter() + .map(|val| public_to_address(val.pubkey())) + .collect(); + inactive_validators(&*client, start_of_the_current_term, block.header(), validators) + } + _ => unimplemented!(), }; stake::on_term_close(block.state_mut(), block_number, &inactive_validators)?; From 105166582863451acbd233d3125f1386166c2ae4 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Thu, 31 Oct 2019 18:21:34 +0900 Subject: [PATCH 14/52] Remove outdated snapshot module --- codechain/run_node.rs | 15 +- sync/src/lib.rs | 2 - sync/src/snapshot/error.rs | 52 ----- sync/src/snapshot/mod.rs | 22 -- sync/src/snapshot/service.rs | 76 ------- sync/src/snapshot/snapshot.rs | 393 ---------------------------------- 6 files changed, 1 insertion(+), 559 deletions(-) delete mode 100644 sync/src/snapshot/error.rs delete mode 100644 sync/src/snapshot/mod.rs delete mode 100644 sync/src/snapshot/service.rs delete mode 100644 sync/src/snapshot/snapshot.rs diff --git a/codechain/run_node.rs b/codechain/run_node.rs index 85255e6e29..a02cb54e2f 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -30,7 +30,7 @@ use ckeystore::KeyStore; use clap::ArgMatches; use clogger::{self, EmailAlarm, LoggerConfig}; use cnetwork::{Filters, NetworkConfig, NetworkControl, NetworkService, RoutingTable, SocketAddr}; -use csync::{BlockSyncExtension, BlockSyncSender, SnapshotService, TransactionSyncExtension}; +use csync::{BlockSyncExtension, BlockSyncSender, TransactionSyncExtension}; use ctimer::TimerLoop; use ctrlc::CtrlC; use fdlimit::raise_fd_limit; @@ -359,19 +359,6 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { stratum_start(&config.stratum_config(), &miner, client.client())? } - let _snapshot_service = { - if !config.snapshot.disable.unwrap() { - // FIXME: Let's make it load snapshot period dynamically to support changing the period. - let client = client.client(); - let snapshot_period = client.common_params(BlockId::Latest).unwrap().snapshot_period(); - let service = SnapshotService::new(Arc::clone(&client), config.snapshot.path.unwrap(), snapshot_period); - client.add_notify(Arc::downgrade(&service) as Weak); - Some(service) - } else { - None - } - }; - // drop the scheme to free up genesis state. drop(scheme); client.client().engine().complete_register(); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index ff2fc0ab84..d67fc4c8c7 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -47,11 +47,9 @@ extern crate trie_standardmap; extern crate util_error; mod block; -mod snapshot; mod transaction; pub use crate::block::{BlockSyncEvent, BlockSyncExtension, BlockSyncSender}; -pub use crate::snapshot::SnapshotService; pub use crate::transaction::TransactionSyncExtension; #[cfg(test)] diff --git a/sync/src/snapshot/error.rs b/sync/src/snapshot/error.rs deleted file mode 100644 index dba9adc024..0000000000 --- a/sync/src/snapshot/error.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018 Kodebox, Inc. -// This file is part of CodeChain. -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. 
-// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -use std::fmt::{Display, Formatter, Result as FormatResult}; -use std::io::{Error as FileError, ErrorKind}; - -use primitives::H256; -use util_error::UtilError; - -#[derive(Debug)] -pub enum Error { - NodeNotFound(H256), - SyncError(String), - FileError(ErrorKind), - UtilError(UtilError), -} - -impl From for Error { - fn from(error: FileError) -> Self { - Error::FileError(error.kind()) - } -} - -impl From for Error { - fn from(error: UtilError) -> Self { - Error::UtilError(error) - } -} - -impl Display for Error { - fn fmt(&self, f: &mut Formatter) -> FormatResult { - match self { - Error::NodeNotFound(key) => write!(f, "State node not found: {:x}", key), - Error::SyncError(reason) => write!(f, "Sync error: {}", reason), - Error::FileError(kind) => write!(f, "File system error: {:?}", kind), - Error::UtilError(error) => write!(f, "Util error: {:?}", error), - } - } -} diff --git a/sync/src/snapshot/mod.rs b/sync/src/snapshot/mod.rs deleted file mode 100644 index f0e8c6bedc..0000000000 --- a/sync/src/snapshot/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 Kodebox, Inc. -// This file is part of CodeChain. -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -mod error; -mod service; -#[cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))] -mod snapshot; - -pub use self::service::Service as SnapshotService; diff --git a/sync/src/snapshot/service.rs b/sync/src/snapshot/service.rs deleted file mode 100644 index e076dc3661..0000000000 --- a/sync/src/snapshot/service.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 Kodebox, Inc. -// This file is part of CodeChain. -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -use std::io::ErrorKind; -use std::path::PathBuf; -use std::sync::Arc; -use std::thread::spawn; - -use ccore::{BlockChainClient, BlockChainTrait, BlockId, ChainNotify, Client, DatabaseClient}; -use ctypes::BlockHash; - -use super::error::Error; -use super::snapshot::{Snapshot, WriteSnapshot}; - -pub struct Service { - client: Arc, - /// Snapshot root directory - root_dir: String, - /// Snapshot creation period in unit of block numbers - period: u64, -} - -impl Service { - pub fn new(client: Arc, root_dir: String, period: u64) -> Arc { - Arc::new(Self { - client, - root_dir, - period, - }) - } -} - -impl ChainNotify for Service { - /// fires when chain has new blocks. - fn new_blocks( - &self, - _imported: Vec, - _invalid: Vec, - enacted: Vec, - _retracted: Vec, - _sealed: Vec, - ) { - let best_number = self.client.chain_info().best_block_number; - let is_checkpoint = enacted - .iter() - .map(|hash| self.client.block_number(&BlockId::Hash(*hash)).expect("Enacted block must exist")) - .any(|number| number % self.period == 0); - if is_checkpoint && best_number > self.period { - let number = (best_number / self.period - 1) * self.period; - let header = self.client.block_header(&BlockId::Number(number)).expect("Snapshot target must exist"); - - let db = self.client.database(); - let path: PathBuf = [self.root_dir.clone(), format!("{:x}", *header.hash())].iter().collect(); - let root = header.state_root(); - // FIXME: The db can be corrupted because the CodeChain doesn't wait child threads end on exit. - spawn(move || match Snapshot::try_new(path).map(|s| s.write_snapshot(db.as_ref(), &root)) { - Ok(_) => {} - Err(Error::FileError(ErrorKind::AlreadyExists)) => {} - Err(e) => cerror!(SNAPSHOT, "{}", e), - }); - } - } -} diff --git a/sync/src/snapshot/snapshot.rs b/sync/src/snapshot/snapshot.rs deleted file mode 100644 index f5a363540f..0000000000 --- a/sync/src/snapshot/snapshot.rs +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2018-2019 Kodebox, Inc. -// This file is part of CodeChain. -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -use std::collections::HashSet; -use std::convert::AsRef; -use std::fs::{create_dir_all, File}; -use std::io::{Read, Write}; -use std::iter::once; -use std::path::{Path, PathBuf}; -use std::sync::Arc; - -use ccore::COL_STATE; -use cmerkle::Node; -use journaldb::{self, Algorithm, JournalDB}; -use kvdb::KeyValueDB; -use primitives::H256; -use rlp::{Rlp, RlpStream}; -use snap; - -use super::error::Error; - -pub struct Snapshot { - path: PathBuf, -} - -impl Snapshot { - pub fn try_new
<P>
(path: P) -> Result - where - P: AsRef, { - create_dir_all(&path)?; - Ok(Snapshot { - path: path.as_ref().to_owned(), - }) - } -} - -impl Snapshot { - fn file_for(&self, root: &H256) -> PathBuf { - self.path.join(format!("{:x}", root)) - } - - fn write_nodes<'a, I>(&self, root: &H256, iter: I) -> Result<(), Error> - where - I: IntoIterator)>, { - let file = File::create(self.file_for(root))?; - let mut snappy = snap::Writer::new(file); - - let mut stream = RlpStream::new(); - stream.begin_unbounded_list(); - for (key, value) in iter { - stream.begin_list(2); - stream.append(key); - stream.append(value); - } - stream.complete_unbounded_list(); - - snappy.write_all(&stream.drain())?; - Ok(()) - } - - fn read_chunk(&self, backing: Arc, root: &H256) -> Result { - let file = File::open(self.file_for(root))?; - let mut buf = Vec::new(); - let mut snappy = snap::Reader::new(file); - snappy.read_to_end(&mut buf)?; - - let rlp = Rlp::new(&buf); - let mut journal = journaldb::new(backing, Algorithm::Archive, COL_STATE); - let mut inserted_keys = HashSet::new(); - let mut referenced_keys = HashSet::new(); - referenced_keys.insert(*root); - for rlp_pair in rlp.iter() { - if rlp_pair.item_count().unwrap() != 2 { - return Err(Error::SyncError("Chunk contains invalid size of pair".to_string())) - } - - let key = rlp_pair.val_at(0).unwrap(); - let value: Vec<_> = rlp_pair.val_at(1).unwrap(); - - let node = - Node::decoded(&value).ok_or_else(|| Error::SyncError("Chunk condtains an invalid node".to_string()))?; - - if journal.contains(&key) { - cwarn!(SNAPSHOT, "Chunk contains duplicated key: {}", key); - } - - if let Node::Branch(_, childs) = node { - for child in childs.iter() { - if let Some(child) = child { - referenced_keys.insert(*child); - } - } - } - - let hash_key = journal.insert(&value); - if hash_key != key { - return Err(Error::SyncError("Chunk contains an invalid key for a value".to_string())) - } - inserted_keys.insert(hash_key); - } - - let never_referenced_keys: Vec = - inserted_keys.iter().filter(|key| !referenced_keys.contains(key)).cloned().collect(); - - Ok(Chunk { - journal, - never_referenced_keys, - }) - } -} - -struct Chunk { - journal: Box, - never_referenced_keys: Vec, -} - -impl Chunk { - fn purge(&mut self) -> bool { - if self.never_referenced_keys.is_empty() { - return false - } - for key in &self.never_referenced_keys { - self.journal.remove(key); - } - self.never_referenced_keys.clear(); - true - } - - fn is_deeper_than(&self, root: &H256, max_depth: usize) -> bool { - let mut stack = Vec::new(); - stack.push((*root, 0)); - while let Some((key, depth)) = stack.pop() { - match self.journal.get(&key) { - None => continue, - Some(_) if depth >= max_depth => return false, - Some(value) => { - if let Some(Node::Branch(_, childs)) = Node::decoded(&value) { - for child in childs.iter() { - if let Some(child) = child { - stack.push((*child, depth + 1)); - } - } - } - } - } - } - false - } - - fn missing_keys(&self, root: &H256) -> Vec { - let mut result = Vec::new(); - let mut stack = Vec::new(); - stack.push(*root); - while let Some(key) = stack.pop() { - match self.journal.get(&key) { - None => { - result.push(key); - } - Some(value) => { - if let Some(Node::Branch(_, childs)) = Node::decoded(&value) { - for child in childs.iter() { - if let Some(child) = child { - stack.push(*child); - } - } - } - } - } - } - result - } -} - -pub trait WriteSnapshot { - fn write_snapshot(&self, db: &dyn KeyValueDB, root: &H256) -> Result<(), Error>; -} - -pub trait ReadSnapshot { - fn 
read_snapshot(&self, db: Arc, root: &H256) -> Result<(), Error>; -} - -impl WriteSnapshot for Snapshot { - fn write_snapshot(&self, db: &dyn KeyValueDB, root: &H256) -> Result<(), Error> { - let root_val = match db.get(COL_STATE, root) { - Ok(Some(value)) => value.to_vec(), - Ok(None) => return Err(Error::SyncError("Invalid state root, or the database is empty".to_string())), - Err(e) => return Err(e.into()), - }; - - let children = children_of(db, &root_val)?; - let mut grandchildren = Vec::new(); - for (_, value) in &children { - grandchildren.extend(children_of(db, value)?); - } - - self.write_nodes(root, once(&(*root, root_val)).chain(&children))?; - for (grandchild, _) in &grandchildren { - let nodes = enumerate_subtree(db, grandchild)?; - self.write_nodes(grandchild, &nodes)?; - } - - Ok(()) - } -} - -impl ReadSnapshot for Snapshot { - fn read_snapshot(&self, db: Arc, root: &H256) -> Result<(), Error> { - let head = { - let mut head = self.read_chunk(db.clone(), root)?; - if head.purge() { - cinfo!(SNAPSHOT, "Head chunk contains garbages"); - } - - if head.is_deeper_than(root, 2) { - return Err(Error::SyncError("Head chunk has an invalid shape".to_string())) - } - - let mut transaction = db.transaction(); - head.journal.inject(&mut transaction)?; - db.write_buffered(transaction); - head - }; - - for chunk_root in head.missing_keys(root) { - let mut chunk = self.read_chunk(db.clone(), &chunk_root)?; - if chunk.purge() { - cinfo!(SNAPSHOT, "Chunk contains garbages"); - } - - if !chunk.missing_keys(&chunk_root).is_empty() { - return Err(Error::SyncError("Chunk is an incomplete trie".to_string())) - } - - let mut transaction = db.transaction(); - chunk.journal.inject(&mut transaction)?; - db.write_buffered(transaction); - } - - Ok(()) - } -} - -fn get_node(db: &dyn KeyValueDB, key: &H256) -> Result, Error> { - match db.get(COL_STATE, key) { - Ok(Some(value)) => Ok(value.to_vec()), - Ok(None) => Err(Error::NodeNotFound(*key)), - Err(e) => Err(e.into()), - } -} - -fn children_of(db: &dyn KeyValueDB, node: &[u8]) -> Result)>, Error> { - let keys = match Node::decoded(node) { - None => Vec::new(), - Some(Node::Leaf(..)) => Vec::new(), - Some(Node::Branch(_, children)) => children.iter().filter_map(|child| *child).collect(), - }; - - let mut result = Vec::new(); - for key in keys { - result.push((key, get_node(db, &key)?)); - } - Ok(result) -} - -fn enumerate_subtree(db: &dyn KeyValueDB, root: &H256) -> Result)>, Error> { - let node = get_node(db, root)?; - let children = match Node::decoded(&node) { - None => Vec::new(), - Some(Node::Leaf(..)) => Vec::new(), - Some(Node::Branch(_, children)) => children.iter().filter_map(|child| *child).collect(), - }; - let mut result: Vec<_> = vec![(*root, node)]; - for child in children { - result.extend(enumerate_subtree(db, &child)?); - } - Ok(result) -} - -#[cfg(test)] -mod tests { - use std::collections::HashSet; - use std::sync::Arc; - - use ccore::COL_STATE; - - use cmerkle::{Trie, TrieDB, TrieDBMut, TrieMut}; - use journaldb; - use journaldb::Algorithm; - use kvdb_memorydb; - use primitives::H256; - use tempfile::tempdir; - use trie_standardmap::{Alphabet, StandardMap, ValueMode}; - - use super::{ReadSnapshot, Snapshot, WriteSnapshot}; - - #[test] - fn init() { - let snapshot_dir = tempdir().unwrap(); - let snapshot = Snapshot::try_new(&snapshot_dir).unwrap(); - let mut root = H256::new(); - - let kvdb = Arc::new(kvdb_memorydb::create(1)); - let mut jdb = journaldb::new(kvdb.clone(), Algorithm::Archive, COL_STATE); - { - let _ = 
TrieDBMut::new(jdb.as_hashdb_mut(), &mut root); - } - /* do nothing */ - let result = snapshot.write_snapshot(kvdb.as_ref(), &root); - - assert!(result.is_err()); - } - - fn random_insert_and_restore_with_count(count: usize) { - let mut seed = H256::new(); - let x = StandardMap { - alphabet: Alphabet::Custom(b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_".to_vec()), - min_key: 5, - journal_key: 0, - value_mode: ValueMode::Index, - count, - } - .make_with(&mut seed); - - let snapshot_dir = tempdir().unwrap(); - let snapshot = Snapshot::try_new(&snapshot_dir).unwrap(); - let mut root = H256::new(); - { - let kvdb = Arc::new(kvdb_memorydb::create(1)); - let mut jdb = journaldb::new(kvdb.clone(), Algorithm::Archive, COL_STATE); - { - let mut t = TrieDBMut::new(jdb.as_hashdb_mut(), &mut root); - let mut inserted_keys = HashSet::new(); - for &(ref key, ref value) in &x { - if !inserted_keys.insert(key) { - continue - } - assert!(t.insert(key, value).unwrap().is_none()); - assert_eq!(t.insert(key, value).unwrap(), Some(value.to_vec())); - } - } - { - let mut batch = jdb.backing().transaction(); - let _ = jdb.inject(&mut batch).unwrap(); - jdb.backing().write(batch).unwrap(); - } - - snapshot.write_snapshot(kvdb.as_ref(), &root).unwrap(); - } - - { - let kvdb = Arc::new(kvdb_memorydb::create(1)); - snapshot.read_snapshot(kvdb.clone(), &root).unwrap(); - - let mut jdb = journaldb::new(kvdb, Algorithm::Archive, COL_STATE); - let t = TrieDB::try_new(jdb.as_hashdb_mut(), &root).unwrap(); - let mut inserted_keys = HashSet::new(); - for &(ref key, ref value) in &x { - if !inserted_keys.insert(key) { - continue - } - assert_eq!(t.get(key).unwrap(), Some(value.to_vec())); - } - } - } - - #[test] - fn random_insert_and_restore_1() { - random_insert_and_restore_with_count(1); - } - - #[test] - fn random_insert_and_restore_100() { - random_insert_and_restore_with_count(100); - } - - #[test] - fn random_insert_and_restore_10000() { - random_insert_and_restore_with_count(10000); - } -} From f718e4a6fe859cd5f15019cc18b0a49f3704ccb2 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Wed, 6 Nov 2019 20:17:26 +0900 Subject: [PATCH 15/52] Add configuration option for the snapshot sync target --- codechain/codechain.yml | 10 ++++++++++ codechain/config/mod.rs | 15 +++++++++++++++ codechain/run_node.rs | 6 +++++- sync/src/block/extension.rs | 2 +- 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/codechain/codechain.yml b/codechain/codechain.yml index 0558880d58..ff0964615c 100644 --- a/codechain/codechain.yml +++ b/codechain/codechain.yml @@ -260,6 +260,16 @@ args: takes_value: true conflicts_with: - no-discovery + - snapshot-hash: + long: snapshot-hash + value_name: HASH + requires: snapshot-number + takes_value: true + - snapshot-number: + long: snapshot-number + value_name: NUM + requires: snapshot-hash + takes_value: true - no-snapshot: long: no-snapshot help: Disable snapshots diff --git a/codechain/config/mod.rs b/codechain/config/mod.rs index e4ea5538d6..f2bc321895 100644 --- a/codechain/config/mod.rs +++ b/codechain/config/mod.rs @@ -25,6 +25,7 @@ use cidr::IpCidr; use ckey::PlatformAddress; use clap; use cnetwork::{FilterEntry, NetworkConfig, SocketAddr}; +use primitives::H256; use toml; pub use self::chain_type::ChainType; @@ -274,6 +275,8 @@ pub struct Network { pub min_peers: Option, pub max_peers: Option, pub sync: Option, + pub snapshot_hash: Option, + pub snapshot_number: Option, pub transaction_relay: Option, pub discovery: Option, pub discovery_type: Option, @@ -575,6 +578,12 @@ impl Network { if 
other.sync.is_some() { self.sync = other.sync; } + if other.snapshot_hash.is_some() { + self.snapshot_hash = other.snapshot_hash; + } + if other.snapshot_number.is_some() { + self.snapshot_number = other.snapshot_number; + } if other.transaction_relay.is_some() { self.transaction_relay = other.transaction_relay; } @@ -627,6 +636,12 @@ impl Network { if matches.is_present("no-sync") { self.sync = Some(false); } + if let Some(snapshot_hash) = matches.value_of("snapshot-hash") { + self.snapshot_hash = Some(snapshot_hash.parse().map_err(|_| "Invalid snapshot-hash")?); + } + if let Some(snapshot_number) = matches.value_of("snapshot-number") { + self.snapshot_number = Some(snapshot_number.parse().map_err(|_| "Invalid snapshot-number")?); + } if matches.is_present("no-tx-relay") { self.transaction_relay = Some(false); } diff --git a/codechain/run_node.rs b/codechain/run_node.rs index a02cb54e2f..cd93744fe8 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -296,7 +296,11 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { if config.network.sync.unwrap() { let sync_sender = { let client = client.client(); - service.register_extension(move |api| BlockSyncExtension::new(client, api)) + let snapshot_target = match (config.network.snapshot_hash, config.network.snapshot_number) { + (Some(hash), Some(num)) => Some((hash, num)), + _ => None, + }; + service.register_extension(move |api| BlockSyncExtension::new(client, api, snapshot_target)) }; let sync = Arc::new(BlockSyncSender::from(sync_sender.clone())); client.client().add_notify(Arc::downgrade(&sync) as Weak); diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 0b40141598..3d18b34da7 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -69,7 +69,7 @@ pub struct Extension { } impl Extension { - pub fn new(client: Arc, api: Box) -> Extension { + pub fn new(client: Arc, api: Box, _snapshot_target: Option<(H256, u64)>) -> Extension { api.set_timer(SYNC_TIMER_TOKEN, Duration::from_millis(SYNC_TIMER_INTERVAL)).expect("Timer set succeeds"); let mut header = client.best_header(); From b3e22381d1a7a7a7219aa8c0f03385f321763f20 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Thu, 7 Nov 2019 15:31:15 +0900 Subject: [PATCH 16/52] Decide the sync extension's state with the snapshot target --- sync/Cargo.toml | 2 +- sync/src/block/extension.rs | 121 ++++++++++++++++++++++-------------- sync/src/lib.rs | 1 - 3 files changed, 77 insertions(+), 47 deletions(-) diff --git a/sync/Cargo.toml b/sync/Cargo.toml index c3ad555dc4..caf4846f4f 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -15,6 +15,7 @@ codechain-network = { path = "../network" } codechain-state = { path = "../state" } codechain-timer = { path = "../util/timer" } codechain-types = { path = "../types" } +hashdb = { path = "../util/hashdb" } journaldb = { path = "../util/journaldb" } kvdb = "0.1" log = "0.4.6" @@ -29,7 +30,6 @@ token-generator = "0.1.0" util-error = { path = "../util/error" } [dev-dependencies] -hashdb = { path = "../util/hashdb" } kvdb-memorydb = "0.1" tempfile = "3.0.4" trie-standardmap = { path = "../util/trie-standardmap" } diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 3d18b34da7..4a8763411a 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -24,12 +24,14 @@ use ccore::{ Block, BlockChainClient, BlockChainTrait, BlockId, BlockImportError, ChainNotify, Client, ImportBlock, ImportError, UnverifiedTransaction, }; +use cmerkle::TrieFactory; use 
cnetwork::{Api, EventSender, NetworkExtension, NodeId}; use cstate::FindActionHandler; use ctimer::TimerToken; use ctypes::header::{Header, Seal}; use ctypes::transaction::Action; use ctypes::{BlockHash, BlockNumber}; +use hashdb::AsHashDB; use primitives::{H256, U256}; use rand::prelude::SliceRandom; use rand::thread_rng; @@ -55,7 +57,14 @@ pub struct TokenInfo { request_id: Option, } +enum State { + SnapshotHeader(H256), + SnapshotChunk(H256), + Full, +} + pub struct Extension { + state: State, requests: HashMap>, connected_nodes: HashSet, header_downloaders: HashMap, @@ -69,9 +78,22 @@ pub struct Extension { } impl Extension { - pub fn new(client: Arc, api: Box, _snapshot_target: Option<(H256, u64)>) -> Extension { + pub fn new(client: Arc, api: Box, snapshot_target: Option<(H256, u64)>) -> Extension { api.set_timer(SYNC_TIMER_TOKEN, Duration::from_millis(SYNC_TIMER_INTERVAL)).expect("Timer set succeeds"); + let state = match snapshot_target { + Some((hash, num)) => match client.block_header(&BlockId::Number(num)) { + Some(ref header) if *header.hash() == hash => { + let state_db = client.state_db().read(); + match TrieFactory::readonly(state_db.as_hashdb(), &header.state_root()) { + Ok(ref trie) if trie.is_complete() => State::Full, + _ => State::SnapshotChunk(*header.hash()), + } + } + _ => State::SnapshotHeader(hash), + }, + None => State::Full, + }; let mut header = client.best_header(); let mut hollow_headers = vec![header.decode()]; while client.block_body(&BlockId::Hash(header.hash())).is_none() { @@ -90,6 +112,7 @@ impl Extension { } cinfo!(SYNC, "Sync extension initialized"); Extension { + state, requests: Default::default(), connected_nodes: Default::default(), header_downloaders: Default::default(), @@ -308,31 +331,35 @@ impl NetworkExtension for Extension { fn on_timeout(&mut self, token: TimerToken) { match token { - SYNC_TIMER_TOKEN => { - let best_proposal_score = self.client.chain_info().best_proposal_score; - let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); - peer_ids.shuffle(&mut thread_rng()); - - for id in &peer_ids { - let request = self.header_downloaders.get_mut(id).and_then(HeaderDownloader::create_request); - if let Some(request) = request { - self.send_header_request(id, request); - break + SYNC_TIMER_TOKEN => match self.state { + State::SnapshotHeader(..) => unimplemented!(), + State::SnapshotChunk(..) 
=> unimplemented!(), + State::Full => { + let best_proposal_score = self.client.chain_info().best_proposal_score; + let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); + peer_ids.shuffle(&mut thread_rng()); + + for id in &peer_ids { + let request = self.header_downloaders.get_mut(id).and_then(HeaderDownloader::create_request); + if let Some(request) = request { + self.send_header_request(id, request); + break + } } - } - for id in peer_ids { - let peer_score = if let Some(peer) = self.header_downloaders.get(&id) { - peer.total_score() - } else { - U256::zero() - }; + for id in peer_ids { + let peer_score = if let Some(peer) = self.header_downloaders.get(&id) { + peer.total_score() + } else { + U256::zero() + }; - if peer_score > best_proposal_score { - self.send_body_request(&id); + if peer_score > best_proposal_score { + self.send_body_request(&id); + } } } - } + }, SYNC_EXPIRE_TOKEN_BEGIN..=SYNC_EXPIRE_TOKEN_END => { self.check_sync_variable(); let (id, request_id) = { @@ -576,33 +603,37 @@ impl Extension { return } - match response { - ResponseMessage::Headers(headers) => { - self.dismiss_request(from, id); - self.on_header_response(from, &headers) - } - ResponseMessage::Bodies(bodies) => { - self.check_sync_variable(); - let hashes = match request { - RequestMessage::Bodies(hashes) => hashes, - _ => unreachable!(), - }; - assert_eq!(bodies.len(), hashes.len()); - if let Some(token) = self.tokens.get(from) { - if let Some(token_info) = self.tokens_info.get_mut(token) { - if token_info.request_id.is_none() { - ctrace!(SYNC, "Expired before handling response"); - return + match self.state { + State::SnapshotHeader(..) => unimplemented!(), + State::SnapshotChunk(..) => unimplemented!(), + State::Full => match response { + ResponseMessage::Headers(headers) => { + self.dismiss_request(from, id); + self.on_header_response(from, &headers) + } + ResponseMessage::Bodies(bodies) => { + self.check_sync_variable(); + let hashes = match request { + RequestMessage::Bodies(hashes) => hashes, + _ => unreachable!(), + }; + assert_eq!(bodies.len(), hashes.len()); + if let Some(token) = self.tokens.get(from) { + if let Some(token_info) = self.tokens_info.get_mut(token) { + if token_info.request_id.is_none() { + ctrace!(SYNC, "Expired before handling response"); + return + } + self.api.clear_timer(*token).expect("Timer clear succeed"); + token_info.request_id = None; } - self.api.clear_timer(*token).expect("Timer clear succeed"); - token_info.request_id = None; } + self.dismiss_request(from, id); + self.on_body_response(hashes, bodies); + self.check_sync_variable(); } - self.dismiss_request(from, id); - self.on_body_response(hashes, bodies); - self.check_sync_variable(); - } - _ => unimplemented!(), + _ => unimplemented!(), + }, } } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index d67fc4c8c7..b89deb036d 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -25,7 +25,6 @@ extern crate codechain_state as cstate; extern crate codechain_timer as ctimer; extern crate codechain_types as ctypes; -#[cfg(test)] extern crate hashdb; extern crate journaldb; extern crate kvdb; From 1b785d9a705fdd938e79b4b6898bb1b41f8902da Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Mon, 4 Nov 2019 11:16:37 +0900 Subject: [PATCH 17/52] Implement Snapshot & Restore modules --- Cargo.lock | 2 + util/merkle/Cargo.toml | 3 + util/merkle/src/lib.rs | 5 + util/merkle/src/nibbleslice.rs | 2 +- util/merkle/src/node.rs | 7 + util/merkle/src/snapshot/chunk.rs | 284 +++++++++++++++++++ 
util/merkle/src/snapshot/compress.rs | 119 ++++++++ util/merkle/src/snapshot/error.rs | 65 +++++ util/merkle/src/snapshot/mod.rs | 336 +++++++++++++++++++++++ util/merkle/src/snapshot/ordered_heap.rs | 76 +++++ util/merkle/src/triedbmut.rs | 107 ++++++++ 11 files changed, 1005 insertions(+), 1 deletion(-) create mode 100644 util/merkle/src/snapshot/chunk.rs create mode 100644 util/merkle/src/snapshot/compress.rs create mode 100644 util/merkle/src/snapshot/error.rs create mode 100644 util/merkle/src/snapshot/mod.rs create mode 100644 util/merkle/src/snapshot/ordered_heap.rs diff --git a/Cargo.lock b/Cargo.lock index c79806e732..bf7b0c295c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -461,6 +461,8 @@ dependencies = [ "primitives", "rand 0.6.1", "rlp", + "rlp_derive", + "snap", "tempfile", "trie-standardmap", ] diff --git a/util/merkle/Cargo.toml b/util/merkle/Cargo.toml index deb92626de..f95f3f8005 100644 --- a/util/merkle/Cargo.toml +++ b/util/merkle/Cargo.toml @@ -8,8 +8,11 @@ edition = "2018" rand = "0.6.1" hashdb = {path = "../hashdb" } codechain-crypto = { git = "https://github.com/CodeChain-io/rust-codechain-crypto.git", version = "0.1" } +memorydb = { path = "../memorydb" } primitives = { git = "https://github.com/CodeChain-io/rust-codechain-primitives.git", version = "0.4" } rlp = { git = "https://github.com/CodeChain-io/rlp.git", version = "0.4" } +rlp_derive = { path = "../rlp_derive" } +snap = "0.2" [dev-dependencies] journaldb = { path = "../journaldb" } diff --git a/util/merkle/src/lib.rs b/util/merkle/src/lib.rs index 80acf9399a..cf40cc9e46 100644 --- a/util/merkle/src/lib.rs +++ b/util/merkle/src/lib.rs @@ -20,6 +20,9 @@ extern crate hashdb; extern crate memorydb; extern crate primitives; extern crate rlp; +#[macro_use] +extern crate rlp_derive; +extern crate snap; #[cfg(test)] extern crate trie_standardmap as standardmap; @@ -33,6 +36,8 @@ use primitives::H256; mod nibbleslice; pub mod node; mod skewed; +#[allow(dead_code)] +pub mod snapshot; pub mod triedb; pub mod triedbmut; pub mod triehash; diff --git a/util/merkle/src/nibbleslice.rs b/util/merkle/src/nibbleslice.rs index d39714c21d..d979860809 100644 --- a/util/merkle/src/nibbleslice.rs +++ b/util/merkle/src/nibbleslice.rs @@ -17,7 +17,7 @@ use std::cmp::*; use std::fmt; -#[derive(Eq, Ord)] +#[derive(Eq, Ord, Copy, Clone)] pub struct NibbleSlice<'a> { pub data: &'a [u8], pub offset: usize, diff --git a/util/merkle/src/node.rs b/util/merkle/src/node.rs index 66f2704808..4d556860d6 100644 --- a/util/merkle/src/node.rs +++ b/util/merkle/src/node.rs @@ -112,4 +112,11 @@ impl<'a> Node<'a> { } } } + + pub fn mid(self, offset: usize) -> Self { + match self { + Node::Leaf(partial, value) => Node::Leaf(partial.mid(offset), value), + Node::Branch(partial, child) => Node::Branch(partial.mid(offset), child), + } + } } diff --git a/util/merkle/src/snapshot/chunk.rs b/util/merkle/src/snapshot/chunk.rs new file mode 100644 index 0000000000..40ee320c13 --- /dev/null +++ b/util/merkle/src/snapshot/chunk.rs @@ -0,0 +1,284 @@ +// Copyright 2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +use std::collections::VecDeque; +use std::convert::From; + +use ccrypto::BLAKE_NULL_RLP; +use hashdb::{DBValue, HashDB}; +use primitives::H256; + +use super::error::{ChunkError, Error}; +use super::{DecodedPathSlice, PathSlice, CHUNK_HEIGHT}; +use crate::nibbleslice::NibbleSlice; +use crate::{Node, TrieDBMut}; + +#[derive(RlpEncodable, RlpDecodable, Eq, PartialEq)] +pub struct TerminalNode { + // Relative path from the chunk root. + pub path_slice: PathSlice, + pub node_rlp: Vec, +} + +impl std::fmt::Debug for TerminalNode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + let path_slice = NibbleSlice::from_encoded(&self.path_slice); + f.debug_struct("TerminalNode") + .field("path_slice", &path_slice) + .field("node_rlp", &NodeDebugAdaptor { + rlp: &self.node_rlp, + }) + .finish() + } +} + +struct NodeDebugAdaptor<'a> { + rlp: &'a [u8], +} + +impl<'a> std::fmt::Debug for NodeDebugAdaptor<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + match Node::decoded(&self.rlp) { + Some(node) => write!(f, "{:?}", &node), + None => write!(f, "{:?}", self.rlp), + } + } +} + +/// An unverified chunk from the network +#[derive(Debug)] +pub struct RawChunk { + pub nodes: Vec, +} + +/// Fully recovered, and re-hydrated chunk. +pub struct RecoveredChunk { + pub(crate) root: H256, + /// contains all nodes including non-terminal nodes and terminal nodes. + /// You can blindly pour all items in `nodes` into `HashDB`. + pub(crate) nodes: Vec<(H256, DBValue)>, + /// Their path slices are relative to this chunk root. + pub(crate) unresolved_chunks: Vec, +} + +impl RawChunk { + /// Verify and recover the chunk + pub fn recover(&self, expected_chunk_root: H256) -> Result { + let mut memorydb = memorydb::MemoryDB::new(); + let mut chunk_root = H256::new(); + + { + let mut trie = TrieDBMut::new(&mut memorydb, &mut chunk_root); + for node in self.nodes.iter() { + let old_val = match Node::decoded(&node.node_rlp) { + Some(Node::Branch(slice, child)) => { + let encoded = DecodedPathSlice::from_encoded(&node.path_slice).with_slice(slice).encode(); + trie.insert_raw(Node::Branch(NibbleSlice::from_encoded(&encoded), child))? + } + Some(Node::Leaf(slice, data)) => { + let encoded = DecodedPathSlice::from_encoded(&node.path_slice).with_slice(slice).encode(); + trie.insert_raw(Node::Leaf(NibbleSlice::from_encoded(&encoded), data))? + } + None => return Err(ChunkError::InvalidContent.into()), + }; + + if let Some(old_val) = old_val { + if old_val != node.node_rlp.as_slice() { + return Err(ChunkError::InvalidContent.into()) + } + } + } + } + + // Some nodes in the chunk is different from the expected. 
+ if chunk_root != expected_chunk_root { + return Err(ChunkError::ChunkRootMismatch { + expected: expected_chunk_root, + actual: chunk_root, + } + .into()) + } + + let mut nodes = Vec::new(); + let mut unresolved_chunks = Vec::new(); + let mut queue: VecDeque = VecDeque::from(vec![NodePath::new(chunk_root)]); + while let Some(path) = queue.pop_front() { + let node = match memorydb.get(&path.key) { + Some(x) => x, + None => { + // all unresolved should depth == CHUNK_HEIGHT + 1 + if path.depth != CHUNK_HEIGHT + 1 { + return Err(ChunkError::InvalidHeight.into()) + } + + unresolved_chunks.push(UnresolvedChunk::from(path)); + continue + } + }; + + if path.depth > CHUNK_HEIGHT { + return Err(ChunkError::InvalidHeight.into()) + } + nodes.push((path.key, node.clone())); + + let node = Node::decoded(&node).expect("Chunk root was verified; Node can't be wrong"); + if let Node::Branch(slice, children) = node { + for (index, child) in children.iter().enumerate() { + if let Some(child) = child { + queue.push_back(path.with_slice_and_index(slice, index, *child)); + } + } + } + } + + Ok(RecoveredChunk { + root: expected_chunk_root, + nodes, + unresolved_chunks, + }) + } +} + +impl std::fmt::Debug for RecoveredChunk { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + struct Adapter<'a>(&'a [(H256, DBValue)]); + impl<'a> std::fmt::Debug for Adapter<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + f.debug_list() + .entries(self.0.iter().map(|(hash, rlp)| { + (hash, NodeDebugAdaptor { + rlp, + }) + })) + .finish() + } + } + + f.debug_struct("RecoveredChunk") + .field("root", &self.root) + .field("nodes", &Adapter(&self.nodes)) + .field("unresolved_chunks", &self.unresolved_chunks) + .finish() + } +} + +/// Chunk obtained from the state db. +#[derive(Debug)] +pub struct Chunk { + pub root: H256, + pub terminal_nodes: Vec, +} + +impl Chunk { + pub(crate) fn from_chunk_root(db: &dyn HashDB, chunk_root: H256) -> Chunk { + let mut unresolved: VecDeque = VecDeque::from(vec![NodePath::new(chunk_root)]); + let mut terminal_nodes: Vec = Vec::new(); + while let Some(path) = unresolved.pop_front() { + assert!(path.key != BLAKE_NULL_RLP, "Empty DB"); + assert!(path.depth <= CHUNK_HEIGHT); + let node = db.get(&path.key).expect("Can't find the node in a db. DB is inconsistent"); + let node_decoded = Node::decoded(&node).expect("Node cannot be decoded. DB is inconsistent"); + + match node_decoded { + // Continue to BFS + Node::Branch(slice, ref children) if path.depth < CHUNK_HEIGHT => { + for (i, hash) in children.iter().enumerate() { + if let Some(hash) = hash { + unresolved.push_back(path.with_slice_and_index(slice, i, *hash)); + } + } + } + // Reached the terminal node. 
Branch at path.depth == CHUNK_HEIGHT || Leaf + _ => terminal_nodes.push(TerminalNode { + path_slice: path.path_slice.encode(), + node_rlp: node.to_vec(), + }), + }; + } + Chunk { + root: chunk_root, + terminal_nodes, + } + } + + // Returns path slices to unresolved chunk roots relative to this chunk root + pub(crate) fn unresolved_chunks(&self) -> Vec { + let mut result = Vec::new(); + for node in self.terminal_nodes.iter() { + let decoded = Node::decoded(&node.node_rlp).expect("All terminal nodes should be valid"); + if let Node::Branch(slice, children) = decoded { + for (i, child) in children.iter().enumerate() { + if let Some(child) = child { + result.push(UnresolvedChunk { + path_slice: DecodedPathSlice::from_encoded(&node.path_slice).with_slice_and_index(slice, i), + chunk_root: *child, + }) + } + } + } + } + result + } + + #[cfg(test)] + pub(crate) fn into_raw_chunk(self) -> RawChunk { + RawChunk { + nodes: self.terminal_nodes, + } + } +} + +/// path slice to `chunk_root` is relative to the root of originating chunk. +#[derive(Debug)] +pub(crate) struct UnresolvedChunk { + pub path_slice: DecodedPathSlice, + pub chunk_root: H256, +} + +impl From for UnresolvedChunk { + fn from(path: NodePath) -> Self { + Self { + path_slice: path.path_slice, + chunk_root: path.key, + } + } +} + +#[derive(Debug)] +struct NodePath { + // path slice to the node relative to chunk_root + path_slice: DecodedPathSlice, + depth: usize, + key: H256, +} + +impl NodePath { + fn new(key: H256) -> NodePath { + NodePath { + path_slice: DecodedPathSlice::new(), + depth: 1, + key, + } + } + + fn with_slice_and_index(&self, slice: NibbleSlice, index: usize, key: H256) -> NodePath { + NodePath { + path_slice: self.path_slice.with_slice_and_index(slice, index), + depth: self.depth + 1, + key, + } + } +} diff --git a/util/merkle/src/snapshot/compress.rs b/util/merkle/src/snapshot/compress.rs new file mode 100644 index 0000000000..c03ea0cc08 --- /dev/null +++ b/util/merkle/src/snapshot/compress.rs @@ -0,0 +1,119 @@ +// Copyright 2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
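The pair of types below defines the chunk wire format: a chunk's terminal nodes are RLP-encoded as a single list, the payload is snappy-compressed, and decompression refuses lists longer than CHUNK_MAX_NODES. A minimal round-trip in the spirit of the unit test at the bottom of this file; it assumes module-private visibility (for `from_slice`), a `chunk: Chunk` value, and an enclosing function returning Result<(), Error>:

// Compress into an in-memory buffer, then recover the raw (unverified) chunk.
let mut buffer = Vec::new();
ChunkCompressor::new(&mut buffer).compress_chunk(&chunk)?;
let raw = ChunkDecompressor::from_slice(&buffer).decompress()?;
assert_eq!(chunk.terminal_nodes, raw.nodes);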
+ +use std::io::{Cursor, Read, Write}; + +use rlp::{Rlp, RlpStream}; + +use super::chunk::{Chunk, RawChunk}; +use super::error::{ChunkError, Error}; +use super::CHUNK_MAX_NODES; + +pub struct ChunkDecompressor { + read: R, +} + +impl ChunkDecompressor { + pub fn new(read: R) -> Self { + ChunkDecompressor { + read, + } + } +} + +impl<'a> ChunkDecompressor> { + fn from_slice(slice: &'a [u8]) -> Self { + ChunkDecompressor::new(Cursor::new(slice)) + } +} + +impl ChunkDecompressor +where + R: Read + Clone, +{ + pub fn decompress(self) -> Result { + let mut buf = Vec::new(); + + let mut snappy = snap::Reader::new(self.read); + snappy.read_to_end(&mut buf)?; + + let rlp = Rlp::new(&buf); + let len = rlp.item_count()?; + if len > CHUNK_MAX_NODES { + return Err(ChunkError::TooBig.into()) + } + + Ok(RawChunk { + nodes: rlp.as_list()?, + }) + } +} + +pub struct ChunkCompressor { + write: W, +} + +impl ChunkCompressor { + pub fn new(write: W) -> Self { + ChunkCompressor { + write, + } + } +} + +impl ChunkCompressor +where + W: Write, +{ + pub fn compress_chunk(self, chunk: &Chunk) -> Result<(), Error> { + let mut rlp = RlpStream::new_list(chunk.terminal_nodes.len()); + for node in chunk.terminal_nodes.iter() { + rlp.append(node); + } + let mut snappy = snap::Writer::new(self.write); + snappy.write_all(rlp.as_raw())?; + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::snapshot::chunk::{Chunk, TerminalNode}; + + #[test] + fn test_compress_decompress() { + let chunk = Chunk { + root: Default::default(), + terminal_nodes: vec![ + (TerminalNode { + path_slice: b"12345".to_vec(), + node_rlp: b"45678".to_vec(), + }), + (TerminalNode { + path_slice: b"56789".to_vec(), + node_rlp: b"123abc".to_vec(), + }), + ], + }; + + let mut buffer = Vec::new(); + ChunkCompressor::new(&mut buffer).compress_chunk(&chunk).unwrap(); + let decompressed = ChunkDecompressor::from_slice(&buffer).decompress().unwrap(); + + assert_eq!(chunk.terminal_nodes, decompressed.nodes); + } +} diff --git a/util/merkle/src/snapshot/error.rs b/util/merkle/src/snapshot/error.rs new file mode 100644 index 0000000000..19f6876b06 --- /dev/null +++ b/util/merkle/src/snapshot/error.rs @@ -0,0 +1,65 @@ +// Copyright 2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
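The module below folds every failure source on the snapshot path into one enum, so restore code can propagate snappy I/O, RLP decoding, trie, and chunk-validation failures with `?` alone. A hypothetical caller, assuming the sibling compress and chunk items are in scope (the From impls declared below perform the conversions):

// Illustrative only; not part of the patch.
fn recover_from_wire(bytes: &[u8], chunk_root: H256) -> Result<RecoveredChunk, Error> {
    let raw = ChunkDecompressor::from_slice(bytes).decompress()?; // IoError / RlpDecoderError / ChunkError::TooBig
    raw.recover(chunk_root) // TrieError / the remaining ChunkError variants
}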
+ +use std::io::Error as IoError; + +use primitives::H256; +use rlp::DecoderError as RlpDecoderError; + +use crate::TrieError; + +#[derive(Debug)] +pub enum Error { + IoError(IoError), + RlpDecoderError(RlpDecoderError), + TrieError(TrieError), + ChunkError(ChunkError), +} + +impl From for Error { + fn from(err: IoError) -> Self { + Error::IoError(err) + } +} + +impl From for Error { + fn from(err: RlpDecoderError) -> Self { + Error::RlpDecoderError(err) + } +} + +impl From for Error { + fn from(err: TrieError) -> Self { + Error::TrieError(err) + } +} + +impl From for Error { + fn from(err: ChunkError) -> Self { + Error::ChunkError(err) + } +} + +#[derive(Debug)] +pub enum ChunkError { + TooBig, + InvalidHeight, + ChunkRootMismatch { + expected: H256, + actual: H256, + }, + InvalidContent, +} diff --git a/util/merkle/src/snapshot/mod.rs b/util/merkle/src/snapshot/mod.rs new file mode 100644 index 0000000000..f6fd8a17d8 --- /dev/null +++ b/util/merkle/src/snapshot/mod.rs @@ -0,0 +1,336 @@ +// Copyright 2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +mod chunk; +mod compress; +mod error; +mod ordered_heap; + +use std::cmp::Ordering; + +use ccrypto::BLAKE_NULL_RLP; +use hashdb::HashDB; +use primitives::H256; + +use self::chunk::{Chunk, RecoveredChunk, UnresolvedChunk}; +use self::ordered_heap::OrderedHeap; +use crate::nibbleslice::NibbleSlice; + +const CHUNK_HEIGHT: usize = 3; +const CHUNK_MAX_NODES: usize = 256; // 16 ^ (CHUNK_HEIGHT-1) + +/// Example: +/// use codechain_merkle::snapshot::Restore; +/// let mut rm = Restore::new(db, root); +/// while let Some(root) = rm.next_to_feed() { +/// let raw_chunk = request(block_hash, root)?; +/// let chunk = raw_chunk.recover(root)?; +/// rm.feed(chunk); +/// } +pub struct Restore<'a> { + db: &'a mut dyn HashDB, + pending: Option, + unresolved: OrderedHeap>, +} + +impl<'a> Restore<'a> { + pub fn new(db: &'a mut dyn HashDB, merkle_root: H256) -> Self { + let mut result = Restore { + db, + pending: None, + unresolved: OrderedHeap::new(), + }; + if merkle_root != BLAKE_NULL_RLP { + result.unresolved.push(ChunkPathPrefix::new(merkle_root).into()); + } + result + } + + pub fn feed(&mut self, chunk: RecoveredChunk) { + let pending_path = self.pending.take().expect("feed() should be called after next()"); + assert_eq!(pending_path.chunk_root, chunk.root, "Unexpected chunk"); + + // Pour nodes into the DB + for (key, value) in chunk.nodes { + self.db.emplace(key, value); + } + + // Extend search paths + for unresolved in chunk.unresolved_chunks { + self.unresolved.push(pending_path.with_unresolved_chunk(&unresolved).into()); + } + + self.pending = None; + } + + pub fn next_to_feed(&mut self) -> Option { + if let Some(path) = self.unresolved.pop() { + assert!(self.pending.is_none(), "Previous feed() was failed"); + let chunk_root = path.chunk_root; + self.pending = Some(path.0); + + Some(chunk_root) + } else { 
+ None + } + } +} + +/// Example: +/// use std::fs::File; +/// use codechain_merkle::snapshot::Snapshot; +/// +/// for chunk in Snapshot::from_hashdb(db, root) { +/// let mut file = File::create(format!("{}/{}", block_id, chunk.root))?; +/// let mut compressor = ChunkCompressor::new(&mut file); +/// compressor.compress(chunk); +/// } +pub struct Snapshot<'a> { + db: &'a dyn HashDB, + remaining: OrderedHeap>, +} + +impl<'a> Snapshot<'a> { + pub fn from_hashdb(db: &'a dyn HashDB, chunk_root: H256) -> Self { + let mut result = Snapshot { + db, + remaining: OrderedHeap::new(), + }; + if chunk_root != BLAKE_NULL_RLP { + result.remaining.push(ChunkPathPrefix::new(chunk_root).into()); + } + result + } +} + +impl<'a> Iterator for Snapshot<'a> { + type Item = Chunk; + + fn next(&mut self) -> Option { + if let Some(path) = self.remaining.pop() { + let chunk = Chunk::from_chunk_root(self.db, path.chunk_root); + for unresolved in chunk.unresolved_chunks() { + self.remaining.push(path.with_unresolved_chunk(&unresolved).into()); + } + Some(chunk) + } else { + None + } + } +} + + +#[derive(Debug)] +struct ChunkPathPrefix { + // Absolute path prefix of the chunk root + path_prefix: DecodedPathSlice, + depth: usize, + chunk_root: H256, +} + +impl ChunkPathPrefix { + fn new(chunk_root: H256) -> ChunkPathPrefix { + ChunkPathPrefix { + path_prefix: DecodedPathSlice::new(), + depth: 1, + chunk_root, + } + } + + fn with_unresolved_chunk(&self, unresolved: &UnresolvedChunk) -> ChunkPathPrefix { + ChunkPathPrefix { + path_prefix: self.path_prefix.with_path_slice(&unresolved.path_slice), + depth: self.depth + 1, + chunk_root: unresolved.chunk_root, + } + } +} + +impl Ord for DepthFirst { + fn cmp(&self, other: &Self) -> Ordering { + self.0.depth.cmp(&other.0.depth) + } +} + +impl From for DepthFirst { + fn from(path: ChunkPathPrefix) -> Self { + DepthFirst(path) + } +} + +/// Encoded value by NibbleSlice::encoded() +pub type PathSlice = Vec; + +/// for item i, i in 0..16 +pub(crate) struct DecodedPathSlice(Vec); + +impl DecodedPathSlice { + fn new() -> DecodedPathSlice { + DecodedPathSlice(Vec::new()) + } + + fn from_encoded(slice: &[u8]) -> DecodedPathSlice { + DecodedPathSlice(NibbleSlice::from_encoded(slice).to_vec()) + } + + fn with_slice_and_index(&self, slice: NibbleSlice, i: usize) -> DecodedPathSlice { + assert!(i < 16); + let mut v = self.0.clone(); + v.append(&mut slice.to_vec()); + v.push(i as u8); + DecodedPathSlice(v) + } + + fn with_slice(&self, slice: NibbleSlice) -> DecodedPathSlice { + let mut v = self.0.clone(); + v.append(&mut slice.to_vec()); + DecodedPathSlice(v) + } + + fn with_path_slice(&self, path_slice: &DecodedPathSlice) -> DecodedPathSlice { + let mut v = self.0.clone(); + v.extend(path_slice.0.as_slice()); + DecodedPathSlice(v) + } + + fn encode(&self) -> PathSlice { + let (encoded, _) = NibbleSlice::from_vec(&self.0); + encoded.to_vec() + } +} + +impl std::fmt::Debug for DecodedPathSlice { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + let (encoded, _) = NibbleSlice::from_vec(&self.0); + let nibble_slice = NibbleSlice::from_encoded(&encoded); + writeln!(f, "{:?}", nibble_slice) + } +} + +#[derive(Debug)] +struct DepthFirst(T); + +impl PartialOrd for DepthFirst +where + Self: Ord, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(&other)) + } +} + +impl PartialEq for DepthFirst +where + Self: Ord, +{ + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } +} + +impl Eq for DepthFirst where Self: Ord {} + 
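+
+// Ordering note: `DepthFirst` compares `ChunkPathPrefix`es by `depth` alone,
+// and `OrderedHeap` (see ordered_heap.rs below) is a max-heap with a FIFO
+// tie-break, so `Snapshot` and `Restore` both pop the deepest pending chunk
+// root first, and roots of equal depth in the order they were discovered.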
+impl std::ops::Deref for DepthFirst { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::collections::HashMap; + use std::iter::FromIterator; + + use memorydb::MemoryDB; + use primitives::{Bytes, H256}; + use standardmap::{Alphabet, StandardMap, ValueMode}; + + use super::chunk::RawChunk; + use crate::{Trie, TrieDB, TrieDBMut, TrieMut}; + + fn random_insert_and_restore_with_count(count: usize) { + let standard_map = StandardMap { + alphabet: Alphabet::Custom(b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_".to_vec()), + min_key: 5, + journal_key: 0, + value_mode: ValueMode::Index, + count, + } + .make_with(&mut H256::new()); + // Unique standard map + let unique_map: HashMap = HashMap::from_iter(standard_map.into_iter()); + + let mut root = H256::new(); + let chunks: HashMap = { + // We will throw out `db` after snapshot. + let mut db = MemoryDB::new(); + let mut trie = TrieDBMut::new(&mut db, &mut root); + for (key, value) in &unique_map { + trie.insert(key, value).unwrap(); + } + + Snapshot::from_hashdb(&db, root).map(|chunk| (chunk.root, chunk.into_raw_chunk())).collect() + }; + dbg!(chunks.len()); + + let mut db = MemoryDB::new(); + let mut recover = Restore::new(&mut db, root); + while let Some(chunk_root) = recover.next_to_feed() { + let recovered = chunks[&chunk_root].recover(chunk_root).unwrap(); + recover.feed(recovered); + } + + let trie = TrieDB::try_new(&db, &root).unwrap(); + for (key, value) in &unique_map { + assert_eq!(trie.get(key).unwrap().as_ref(), Some(value)); + } + } + + #[test] + fn random_insert_and_restore_0() { + random_insert_and_restore_with_count(0); + } + + #[test] + fn random_insert_and_restore_1() { + random_insert_and_restore_with_count(1); + } + + #[test] + fn random_insert_and_restore_2() { + random_insert_and_restore_with_count(2); + } + + #[test] + fn random_insert_and_restore_100() { + random_insert_and_restore_with_count(100); + } + + #[test] + fn random_insert_and_restore_10000() { + random_insert_and_restore_with_count(10_000); + } + + #[test] + #[ignore] + fn random_insert_and_restore_100000() { + random_insert_and_restore_with_count(100_000); + } +} diff --git a/util/merkle/src/snapshot/ordered_heap.rs b/util/merkle/src/snapshot/ordered_heap.rs new file mode 100644 index 0000000000..d83efd77c1 --- /dev/null +++ b/util/merkle/src/snapshot/ordered_heap.rs @@ -0,0 +1,76 @@ +// Copyright 2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
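The heap below is a thin wrapper over std's BinaryHeap that adds a sequence number, so that elements comparing equal pop in insertion order. A quick behavioral sketch with plain integers (our own example, not part of the patch):

let mut heap = OrderedHeap::new();
for v in [1, 3, 3, 2].iter().cloned() {
    heap.push(v);
}
assert_eq!(heap.pop(), Some(3)); // max value first; the earlier-pushed 3 wins the tie
assert_eq!(heap.pop(), Some(3));
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(1));
assert_eq!(heap.pop(), None);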
+ +use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; +use std::collections::BinaryHeap; + +pub struct OrderedHeap { + heap: BinaryHeap>, + seq: usize, +} + +impl OrderedHeap { + pub fn new() -> OrderedHeap { + OrderedHeap { + heap: BinaryHeap::new(), + seq: 0, + } + } + + pub fn push(&mut self, value: T) { + self.heap.push(OrderedHeapEntry { + seq: self.seq, + value, + }); + self.seq += 1; + } + + pub fn pop(&mut self) -> Option { + self.heap.pop().map(|x| x.value) + } +} + +#[derive(Debug, Clone)] +struct OrderedHeapEntry { + seq: usize, + value: T, +} + +impl Ord for OrderedHeapEntry { + fn cmp(&self, other: &Self) -> Ordering { + self.value.cmp(&other.value).then(self.seq.cmp(&other.seq).reverse()) + } +} + +impl PartialOrd for OrderedHeapEntry +where + Self: Ord, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(&other)) + } +} + +impl PartialEq for OrderedHeapEntry +where + Self: Ord, +{ + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } +} + +impl Eq for OrderedHeapEntry where Self: Ord {} diff --git a/util/merkle/src/triedbmut.rs b/util/merkle/src/triedbmut.rs index 5684ae0556..6bb89adedc 100644 --- a/util/merkle/src/triedbmut.rs +++ b/util/merkle/src/triedbmut.rs @@ -170,6 +170,113 @@ impl<'a> TrieDBMut<'a> { } } + pub(crate) fn insert_raw(&mut self, node: RlpNode) -> crate::Result> { + let mut old_val = None; + let cur_hash = *self.root; + *self.root = self.insert_raw_aux(node, Some(cur_hash), &mut old_val)?; + + Ok(old_val) + } + + fn insert_raw_aux( + &mut self, + node: RlpNode, + cur_node_hash: Option, + old_val: &mut Option, + ) -> crate::Result { + let path = match &node { + RlpNode::Leaf(slice, _) | RlpNode::Branch(slice, _) => slice, + }; + + match cur_node_hash { + Some(hash) => { + let existing_node_rlp = self.db.get(&hash).ok_or_else(|| TrieError::IncompleteDatabase(hash))?; + match RlpNode::decoded(&existing_node_rlp) { + Some(RlpNode::Leaf(partial, value)) => { + // Renew the Leaf + if &partial == path { + let hash = self.db.insert(&RlpNode::encoded(node)); + *old_val = Some(existing_node_rlp); + Ok(hash) + } else { + // Make branch node and insert Leaves + let common = partial.common_prefix(&path); + let mut new_child = empty_children(); + let new_partial = partial.mid(common); + let new_path = path.mid(common); + new_child[new_partial.at(0) as usize] = Some(self.insert_aux( + new_partial.mid(1), + value, + new_child[new_partial.at(0) as usize], + old_val, + )?); + new_child[new_path.at(0) as usize] = Some(self.insert_raw_aux( + node.mid(common + 1), + new_child[new_path.at(0) as usize], + old_val, + )?); + + let hash = self + .db + .insert(&RlpNode::encoded_until(RlpNode::Branch(partial, new_child.into()), common)); + + Ok(hash) + } + } + Some(RlpNode::Branch(partial, mut children)) => { + let common = partial.common_prefix(&path); + + // Make new branch node and insert leaf and branch with new path + if common < partial.len() { + let mut new_child = empty_children(); + let new_partial = partial.mid(common); + let new_path = path.mid(common); + let o_branch = RlpNode::Branch(new_partial.mid(1), children); + + let b_hash = self.db.insert(&RlpNode::encoded(o_branch)); + + new_child[new_partial.at(0) as usize] = Some(b_hash); + new_child[new_path.at(0) as usize] = Some(self.insert_raw_aux( + node.mid(common + 1), + new_child[new_path.at(0) as usize], + old_val, + )?); + + let hash = self + .db + .insert(&RlpNode::encoded_until(RlpNode::Branch(partial, new_child.into()), common)); + + Ok(hash) + } else { + // Insert 
leaf into the branch node + let new_path = path.mid(common); + + children[new_path.at(0) as usize] = Some(self.insert_raw_aux( + node.mid(common + 1), + children[new_path.at(0) as usize], + old_val, + )?); + + let new_branch = RlpNode::Branch(partial, children); + let node_rlp = RlpNode::encoded(new_branch); + let hash = self.db.insert(&node_rlp); + + Ok(hash) + } + } + None => { + let hash = self.db.insert(&RlpNode::encoded(node)); + Ok(hash) + } + } + } + None => { + let hash = self.db.insert(&RlpNode::encoded(node)); + Ok(hash) + } + } + } + /// Remove auxiliary fn remove_aux( &mut self, From 403b519902788768648ba7871ff8d1b60a516aae Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Tue, 12 Nov 2019 18:00:02 +0900 Subject: [PATCH 18/52] Fetch snapshot header from peers --- sync/src/block/extension.rs | 284 ++++++++++++++++++++++-------------- 1 file changed, 172 insertions(+), 112 deletions(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 4a8763411a..006d2d4637 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -57,8 +57,9 @@ pub struct TokenInfo { request_id: Option, } +#[derive(Debug)] enum State { - SnapshotHeader(H256), + SnapshotHeader(BlockHash, u64), SnapshotChunk(H256), Full, } @@ -90,10 +91,11 @@ impl Extension { _ => State::SnapshotChunk(*header.hash()), } } - _ => State::SnapshotHeader(hash), + _ => State::SnapshotHeader(hash.into(), num), }, None => State::Full, }; + cdebug!(SYNC, "Initial state is {:?}", state); let mut header = client.best_header(); let mut hollow_headers = vec![header.decode()]; while client.block_body(&BlockId::Hash(header.hash())).is_none() { @@ -331,35 +333,45 @@ impl NetworkExtension for Extension { fn on_timeout(&mut self, token: TimerToken) { match token { - SYNC_TIMER_TOKEN => match self.state { - State::SnapshotHeader(..) => unimplemented!(), - State::SnapshotChunk(..) => unimplemented!(), - State::Full => { - let best_proposal_score = self.client.chain_info().best_proposal_score; - let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); - peer_ids.shuffle(&mut thread_rng()); - - for id in &peer_ids { - let request = self.header_downloaders.get_mut(id).and_then(HeaderDownloader::create_request); - if let Some(request) = request { - self.send_header_request(id, request); - break + SYNC_TIMER_TOKEN => { + let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); + peer_ids.shuffle(&mut thread_rng()); + + match self.state { + State::SnapshotHeader(_, num) => { + for id in &peer_ids { + self.send_header_request(id, RequestMessage::Headers { + start_number: num, + max_count: 1, + }); } } + State::SnapshotChunk(..) 
=> unimplemented!(), + State::Full => { + let best_proposal_score = self.client.chain_info().best_proposal_score; + for id in &peer_ids { + let request = + self.header_downloaders.get_mut(id).and_then(HeaderDownloader::create_request); + if let Some(request) = request { + self.send_header_request(id, request); + break + } + } - for id in peer_ids { - let peer_score = if let Some(peer) = self.header_downloaders.get(&id) { - peer.total_score() - } else { - U256::zero() - }; + for id in peer_ids { + let peer_score = if let Some(peer) = self.header_downloaders.get(&id) { + peer.total_score() + } else { + U256::zero() + }; - if peer_score > best_proposal_score { - self.send_body_request(&id); + if peer_score > best_proposal_score { + self.send_body_request(&id); + } } } } - }, + } SYNC_EXPIRE_TOKEN_BEGIN..=SYNC_EXPIRE_TOKEN_END => { self.check_sync_variable(); let (id, request_id) = { @@ -435,37 +447,70 @@ pub enum Event { impl Extension { fn new_headers(&mut self, imported: Vec, enacted: Vec, retracted: Vec) { - for peer in self.header_downloaders.values_mut() { - peer.mark_as_imported(imported.clone()); - } - let mut headers_to_download: Vec<_> = enacted - .into_iter() - .map(|hash| self.client.block_header(&BlockId::Hash(hash)).expect("Enacted header must exist")) - .collect(); - headers_to_download.sort_unstable_by_key(EncodedHeader::number); - #[allow(clippy::redundant_closure)] - // False alarm. https://github.com/rust-lang/rust-clippy/issues/1439 - headers_to_download.dedup_by_key(|h| h.hash()); - - let headers: Vec<_> = headers_to_download - .into_iter() - .filter(|header| self.client.block_body(&BlockId::Hash(header.hash())).is_none()) - .collect(); // FIXME: No need to collect here if self is not borrowed. - for header in headers { - let parent = self - .client - .block_header(&BlockId::Hash(header.parent_hash())) - .expect("Enacted header must have parent"); - let is_empty = header.transactions_root() == parent.transactions_root(); - self.body_downloader.add_target(&header.decode(), is_empty); + if let Some(next_state) = match self.state { + State::SnapshotHeader(hash, ..) => { + if imported.contains(&hash) { + let header = self.client.block_header(&BlockId::Hash(hash)).expect("Imported header must exist"); + Some(State::SnapshotChunk(header.state_root())) + } else { + None + } + } + State::SnapshotChunk(..) => unimplemented!(), + State::Full => { + for peer in self.header_downloaders.values_mut() { + peer.mark_as_imported(imported.clone()); + } + let mut headers_to_download: Vec<_> = enacted + .into_iter() + .map(|hash| self.client.block_header(&BlockId::Hash(hash)).expect("Enacted header must exist")) + .collect(); + headers_to_download.sort_unstable_by_key(EncodedHeader::number); + #[allow(clippy::redundant_closure)] + // False alarm. https://github.com/rust-lang/rust-clippy/issues/1439 + headers_to_download.dedup_by_key(|h| h.hash()); + + let headers: Vec<_> = headers_to_download + .into_iter() + .filter(|header| self.client.block_body(&BlockId::Hash(header.hash())).is_none()) + .collect(); // FIXME: No need to collect here if self is not borrowed. 
+ for header in headers { + let parent = self + .client + .block_header(&BlockId::Hash(header.parent_hash())) + .expect("Enacted header must have parent"); + let is_empty = header.transactions_root() == parent.transactions_root(); + self.body_downloader.add_target(&header.decode(), is_empty); + } + self.body_downloader.remove_target(&retracted); + None + } + } { + cdebug!(SYNC, "Transitioning state to {:?}", next_state); + self.state = next_state; } - self.body_downloader.remove_target(&retracted); } fn new_blocks(&mut self, imported: Vec, invalid: Vec) { - self.body_downloader.remove_target(&imported); - self.body_downloader.remove_target(&invalid); - + if let Some(next_state) = match self.state { + State::SnapshotHeader(hash, ..) => { + if imported.contains(&hash) { + let header = self.client.block_header(&BlockId::Hash(hash)).expect("Imported header must exist"); + Some(State::SnapshotChunk(header.state_root())) + } else { + None + } + } + State::SnapshotChunk(..) => None, + State::Full => { + self.body_downloader.remove_target(&imported); + self.body_downloader.remove_target(&invalid); + None + } + } { + cdebug!(SYNC, "Transitioning state to {:?}", next_state); + self.state = next_state; + } self.send_status_broadcast(); } } @@ -603,37 +648,33 @@ impl Extension { return } - match self.state { - State::SnapshotHeader(..) => unimplemented!(), - State::SnapshotChunk(..) => unimplemented!(), - State::Full => match response { - ResponseMessage::Headers(headers) => { - self.dismiss_request(from, id); - self.on_header_response(from, &headers) - } - ResponseMessage::Bodies(bodies) => { - self.check_sync_variable(); - let hashes = match request { - RequestMessage::Bodies(hashes) => hashes, - _ => unreachable!(), - }; - assert_eq!(bodies.len(), hashes.len()); - if let Some(token) = self.tokens.get(from) { - if let Some(token_info) = self.tokens_info.get_mut(token) { - if token_info.request_id.is_none() { - ctrace!(SYNC, "Expired before handling response"); - return - } - self.api.clear_timer(*token).expect("Timer clear succeed"); - token_info.request_id = None; + match response { + ResponseMessage::Headers(headers) => { + self.dismiss_request(from, id); + self.on_header_response(from, &headers) + } + ResponseMessage::Bodies(bodies) => { + self.check_sync_variable(); + let hashes = match request { + RequestMessage::Bodies(hashes) => hashes, + _ => unreachable!(), + }; + assert_eq!(bodies.len(), hashes.len()); + if let Some(token) = self.tokens.get(from) { + if let Some(token_info) = self.tokens_info.get_mut(token) { + if token_info.request_id.is_none() { + ctrace!(SYNC, "Expired before handling response"); + return } + self.api.clear_timer(*token).expect("Timer clear succeed"); + token_info.request_id = None; } - self.dismiss_request(from, id); - self.on_body_response(hashes, bodies); - self.check_sync_variable(); } - _ => unimplemented!(), - }, + self.dismiss_request(from, id); + self.on_body_response(hashes, bodies); + self.check_sync_variable(); + } + _ => unimplemented!(), } } } @@ -703,42 +744,61 @@ impl Extension { fn on_header_response(&mut self, from: &NodeId, headers: &[Header]) { ctrace!(SYNC, "Received header response from({}) with length({})", from, headers.len()); - let (mut completed, pivot_score_changed) = if let Some(peer) = self.header_downloaders.get_mut(from) { - let before_pivot_score = peer.pivot_score(); - let encoded: Vec<_> = headers.iter().map(|h| EncodedHeader::new(h.rlp_bytes().to_vec())).collect(); - peer.import_headers(&encoded); - let after_pivot_score = 
peer.pivot_score(); - (peer.downloaded(), before_pivot_score != after_pivot_score) - } else { - (Vec::new(), false) - }; - completed.sort_unstable_by_key(EncodedHeader::number); - - let mut exists = Vec::new(); - let mut queued = Vec::new(); - - for header in completed { - let hash = header.hash(); - match self.client.import_header(header.clone().into_inner()) { - Err(BlockImportError::Import(ImportError::AlreadyInChain)) => exists.push(hash), - Err(BlockImportError::Import(ImportError::AlreadyQueued)) => queued.push(hash), - // FIXME: handle import errors - Err(err) => { - cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err); - break + match self.state { + State::SnapshotHeader(..) => { + for header in headers { + match self.client.import_header(header.rlp_bytes().to_vec()) { + Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {} + Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} + // FIXME: handle import errors + Err(err) => { + cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err); + break + } + _ => {} + } } - _ => {} } - } + State::SnapshotChunk(..) => {} + State::Full => { + let (mut completed, pivot_score_changed) = if let Some(peer) = self.header_downloaders.get_mut(from) { + let before_pivot_score = peer.pivot_score(); + let encoded: Vec<_> = headers.iter().map(|h| EncodedHeader::new(h.rlp_bytes().to_vec())).collect(); + peer.import_headers(&encoded); + let after_pivot_score = peer.pivot_score(); + (peer.downloaded(), before_pivot_score != after_pivot_score) + } else { + (Vec::new(), false) + }; + completed.sort_unstable_by_key(EncodedHeader::number); + + let mut exists = Vec::new(); + let mut queued = Vec::new(); + + for header in completed { + let hash = header.hash(); + match self.client.import_header(header.clone().into_inner()) { + Err(BlockImportError::Import(ImportError::AlreadyInChain)) => exists.push(hash), + Err(BlockImportError::Import(ImportError::AlreadyQueued)) => queued.push(hash), + // FIXME: handle import errors + Err(err) => { + cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err); + break + } + _ => {} + } + } - let request = self.header_downloaders.get_mut(from).and_then(|peer| { - peer.mark_as_queued(queued); - peer.mark_as_imported(exists); - peer.create_request() - }); - if pivot_score_changed { - if let Some(request) = request { - self.send_header_request(from, request); + let request = self.header_downloaders.get_mut(from).and_then(|peer| { + peer.mark_as_queued(queued); + peer.mark_as_imported(exists); + peer.create_request() + }); + if pivot_score_changed { + if let Some(request) = request { + self.send_header_request(from, request); + } + } } } } From f4af59869aaa7e7a4f949cb240013b3eaa91ed16 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Thu, 14 Nov 2019 11:49:55 +0900 Subject: [PATCH 19/52] Make the snapshot header importable without parent --- codechain/run_node.rs | 2 +- core/src/blockchain/blockchain.rs | 12 +++++++++ core/src/blockchain/headerchain.rs | 42 ++++++++++++++++++++++++++++++ core/src/client/client.rs | 11 +++++++- core/src/client/importer.rs | 19 +++++++++++++- core/src/client/mod.rs | 6 ++++- core/src/client/test_client.rs | 4 +++ sync/src/block/extension.rs | 12 +++++---- 8 files changed, 99 insertions(+), 9 deletions(-) diff --git a/codechain/run_node.rs b/codechain/run_node.rs index cd93744fe8..2fb2a4b9d1 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -283,7 +283,7 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { let 
network_config = config.network_config()?; // XXX: What should we do if the network id has been changed. let c = client.client(); - let network_id = c.common_params(BlockId::Latest).unwrap().network_id(); + let network_id = c.common_params(BlockId::Number(0)).unwrap().network_id(); let routing_table = RoutingTable::new(); let service = network_start(network_id, timer_loop, &network_config, Arc::clone(&routing_table))?; diff --git a/core/src/blockchain/blockchain.rs b/core/src/blockchain/blockchain.rs index 0ed70027f2..fcdce4fdee 100644 --- a/core/src/blockchain/blockchain.rs +++ b/core/src/blockchain/blockchain.rs @@ -98,6 +98,18 @@ impl BlockChain { } } + pub fn insert_bootstrap_header(&self, batch: &mut DBTransaction, header: &HeaderView) { + self.headerchain.insert_bootstrap_header(batch, header); + + let hash = header.hash(); + + *self.pending_best_block_hash.write() = Some(hash); + batch.put(db::COL_EXTRA, BEST_BLOCK_KEY, &hash); + + *self.pending_best_proposal_block_hash.write() = Some(hash); + batch.put(db::COL_EXTRA, BEST_PROPOSAL_BLOCK_KEY, &hash); + } + pub fn insert_header( &self, batch: &mut DBTransaction, diff --git a/core/src/blockchain/headerchain.rs b/core/src/blockchain/headerchain.rs index 10700264a5..9629188c23 100644 --- a/core/src/blockchain/headerchain.rs +++ b/core/src/blockchain/headerchain.rs @@ -115,6 +115,48 @@ impl HeaderChain { } } + /// Inserts a bootstrap header into backing cache database. + /// Makes the imported header the best header. + /// Expects the header to be valid and already verified. + /// If the header is already known, does nothing. + // FIXME: Find better return type. Returning `None` at duplication is not natural + pub fn insert_bootstrap_header(&self, batch: &mut DBTransaction, header: &HeaderView) { + let hash = header.hash(); + + ctrace!(HEADERCHAIN, "Inserting bootstrap block header #{}({}) to the headerchain.", header.number(), hash); + + if self.is_known_header(&hash) { + ctrace!(HEADERCHAIN, "Block header #{}({}) is already known.", header.number(), hash); + return + } + + assert!(self.pending_best_header_hash.read().is_none()); + assert!(self.pending_best_proposal_block_hash.read().is_none()); + + let compressed_header = compress(header.rlp().as_raw(), blocks_swapper()); + batch.put(db::COL_HEADERS, &hash, &compressed_header); + + let mut new_hashes = HashMap::new(); + new_hashes.insert(header.number(), hash); + let mut new_details = HashMap::new(); + new_details.insert(hash, BlockDetails { + number: header.number(), + total_score: 0.into(), + parent: header.parent_hash(), + }); + + batch.put(db::COL_EXTRA, BEST_HEADER_KEY, &hash); + *self.pending_best_header_hash.write() = Some(hash); + batch.put(db::COL_EXTRA, BEST_PROPOSAL_HEADER_KEY, &hash); + *self.pending_best_proposal_block_hash.write() = Some(hash); + + let mut pending_hashes = self.pending_hashes.write(); + let mut pending_details = self.pending_details.write(); + + batch.extend_with_cache(db::COL_EXTRA, &mut *pending_details, new_details, CacheUpdatePolicy::Overwrite); + batch.extend_with_cache(db::COL_EXTRA, &mut *pending_hashes, new_hashes, CacheUpdatePolicy::Overwrite); + } + /// Inserts the header into backing cache database. /// Expects the header to be valid and already verified. /// If the header is already known, does nothing. 
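(Usage note: `insert_bootstrap_header` only stages writes into the batch and the pending caches; they become visible when the batch is flushed and the chain commits. Condensed below from the importer hunk that follows, with `chain`, `client`, and `header` in scope as in that hunk:)

```rust
// Condensed from Importer::import_bootstrap_header in the next file's diff.
let mut batch = DBTransaction::new();
chain.insert_bootstrap_header(&mut batch, &HeaderView::new(&header.rlp_bytes()));
client.db().write_buffered(batch);
chain.commit(); // promotes the pending best-header/best-proposal hashes written above
```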
diff --git a/core/src/client/client.rs b/core/src/client/client.rs index aaa50d431f..ccc690b22a 100644 --- a/core/src/client/client.rs +++ b/core/src/client/client.rs @@ -27,7 +27,7 @@ use cstate::{ }; use ctimer::{TimeoutHandler, TimerApi, TimerScheduleError, TimerToken}; use ctypes::transaction::{AssetTransferInput, PartialHashing, ShardTransaction}; -use ctypes::{BlockHash, BlockNumber, CommonParams, ShardId, Tracker, TxHash}; +use ctypes::{BlockHash, BlockNumber, CommonParams, Header, ShardId, Tracker, TxHash}; use cvm::{decode, execute, ChainTimeInfo, ScriptResult, VMConfig}; use hashdb::AsHashDB; use journaldb; @@ -655,6 +655,15 @@ impl ImportBlock for Client { Ok(self.importer.header_queue.import(unverified)?) } + fn import_bootstrap_header(&self, header: &Header) -> Result { + if self.block_chain().is_known_header(&header.hash()) { + return Err(BlockImportError::Import(ImportError::AlreadyInChain)) + } + let import_lock = self.importer.import_lock.lock(); + self.importer.import_bootstrap_header(header, self, &import_lock); + Ok(header.hash()) + } + fn import_sealed_block(&self, block: &SealedBlock) -> ImportResult { let h = block.header().hash(); let route = { diff --git a/core/src/client/importer.rs b/core/src/client/importer.rs index 8400fc7c36..4bd9ac606e 100644 --- a/core/src/client/importer.rs +++ b/core/src/client/importer.rs @@ -100,7 +100,7 @@ impl Importer { } { - let headers: Vec<&Header> = blocks.iter().map(|block| &block.header).collect(); + let headers: Vec<_> = blocks.iter().map(|block| &block.header).collect(); self.import_headers(headers, client, &import_lock); } @@ -362,6 +362,23 @@ impl Importer { imported.len() } + pub fn import_bootstrap_header<'a>(&'a self, header: &'a Header, client: &Client, _importer_lock: &MutexGuard<()>) { + let hash = header.hash(); + ctrace!(CLIENT, "Importing bootstrap header {}-{:?}", header.number(), hash); + + { + let chain = client.block_chain(); + let mut batch = DBTransaction::new(); + chain.insert_bootstrap_header(&mut batch, &HeaderView::new(&header.rlp_bytes())); + client.db().write_buffered(batch); + chain.commit(); + } + + client.new_headers(&[hash], &[], &[hash], &[], &[], Some(hash)); + + client.db().flush().expect("DB flush failed."); + } + fn check_header(&self, header: &Header, parent: &Header) -> bool { // FIXME: self.verifier.verify_block_family if let Err(e) = self.engine.verify_block_family(&header, &parent) { diff --git a/core/src/client/mod.rs b/core/src/client/mod.rs index edb8abc76f..8c6ca29dd1 100644 --- a/core/src/client/mod.rs +++ b/core/src/client/mod.rs @@ -37,7 +37,7 @@ use cmerkle::Result as TrieResult; use cnetwork::NodeId; use cstate::{AssetScheme, FindActionHandler, OwnedAsset, StateResult, Text, TopLevelState, TopStateView}; use ctypes::transaction::{AssetTransferInput, PartialHashing, ShardTransaction}; -use ctypes::{BlockHash, BlockNumber, CommonParams, ShardId, Tracker, TxHash}; +use ctypes::{BlockHash, BlockNumber, CommonParams, Header, ShardId, Tracker, TxHash}; use cvm::ChainTimeInfo; use kvdb::KeyValueDB; use primitives::{Bytes, H160, H256, U256}; @@ -201,6 +201,10 @@ pub trait ImportBlock { /// Import a header into the blockchain fn import_header(&self, bytes: Bytes) -> Result; + /// Import a trusted bootstrap header into the blockchain + /// Bootstrap headers don't execute any verifications + fn import_bootstrap_header(&self, bytes: &Header) -> Result; + /// Import sealed block. Skips all verifications. 
fn import_sealed_block(&self, block: &SealedBlock) -> ImportResult; diff --git a/core/src/client/test_client.rs b/core/src/client/test_client.rs index f14c2bf80a..ddb4455241 100644 --- a/core/src/client/test_client.rs +++ b/core/src/client/test_client.rs @@ -509,6 +509,10 @@ impl ImportBlock for TestBlockChainClient { unimplemented!() } + fn import_bootstrap_header(&self, _header: &BlockHeader) -> Result { + unimplemented!() + } + fn import_sealed_block(&self, _block: &SealedBlock) -> ImportResult { Ok(H256::default().into()) } diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 006d2d4637..8371fa7933 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -99,10 +99,12 @@ impl Extension { let mut header = client.best_header(); let mut hollow_headers = vec![header.decode()]; while client.block_body(&BlockId::Hash(header.hash())).is_none() { - header = client - .block_header(&BlockId::Hash(header.parent_hash())) - .expect("Every imported header must have parent"); - hollow_headers.push(header.decode()); + if let Some(h) = client.block_header(&BlockId::Hash(header.parent_hash())) { + header = h; + hollow_headers.push(header.decode()); + } else { + break + } } let mut body_downloader = BodyDownloader::default(); for neighbors in hollow_headers.windows(2).rev() { @@ -747,7 +749,7 @@ impl Extension { match self.state { State::SnapshotHeader(..) => { for header in headers { - match self.client.import_header(header.rlp_bytes().to_vec()) { + match self.client.import_bootstrap_header(&header) { Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {} Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} // FIXME: handle import errors From be3591491a6006e1da01e9778da5da9e6905c9df Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Tue, 5 Nov 2019 15:09:50 +0900 Subject: [PATCH 20/52] Update snapshot message format --- spec/Block-Synchronization-Extension.md | 50 +++++-------------------- sync/src/block/extension.rs | 33 +++------------- sync/src/block/message/mod.rs | 21 +++-------- sync/src/block/message/request.rs | 44 +++------------------- sync/src/block/message/response.rs | 40 ++------------------ util/merkle/src/snapshot/mod.rs | 2 +- 6 files changed, 32 insertions(+), 158 deletions(-) diff --git a/spec/Block-Synchronization-Extension.md b/spec/Block-Synchronization-Extension.md index fb5a106f75..dac2cfd8f6 100644 --- a/spec/Block-Synchronization-Extension.md +++ b/spec/Block-Synchronization-Extension.md @@ -53,32 +53,17 @@ Request corresponding bodies for each hash. * Restriction: * MUST include at least one item - -### GetStateHead - -``` -GetStateHead(block_hash) -``` - -Request corresponding state head for block of `block_hash`. - -* Identifier: 0x06 -* Restriction: Block number of requested block MUST be multiple of 214. - - ### GetStateChunk ``` -GetStateChunk(block_hash, tree_root) +GetStateChunk(block_hash, [...chunk_roots]) ``` -Request entire subtree starting from `tree_root`. +Request corresponding snapshot chunk for each `chunk_root`. -* Identifier: 0x08 +* Identifier: 0x0a * Restriction: - * Block number of requested block MUST be multiple of 214. - * `tree_root` MUST be included in requested block’s state trie. - * Depth of `tree_root` inside state trie MUST be equal to 2. (Depth of state root is 0) + * All values in `[...chunk_roots]` MUST be included in requested block’s state trie. ## Response messages @@ -113,30 +98,15 @@ Response to `GetBodies` message. Snappy algorithm is used to compress content. 
* If received body is zero-length array, it means either body value is [], or sender doesn’t have body for requested hash -### StateHead - -``` -StateHead(compressed((key_0, value_0), …) | []) -``` - -Response to `GetStateHead` message. Key and value included in this messages are raw value stored in state trie. Snappy algorithm is used for compression of content. - -* Identifier: 0x07 -* Restriction: - * State root of requested block MUST be included - * For all nodes with depth of less than 2 included in this message, all of its child MUST also be included. - * Content MUST be empty array if sender didn’t have requested data - - ### StateChunk ``` -StateChunk(compressed((key_0, value_0), …) | []) +StateChunk([compressed([terminal_0, …] | []), ...]) ``` -Response to `GetStateChunk` message. Details of message is same as `StateHead` message. +Response to `GetStateChunk` message. Snappy algorithm is used for compression of content. -* Identifier: 0x09 +* Identifier: 0x0b * Restriction: - * Node corresponding to tree_root in request MUST be included - * Every nodes included in message MUST have all of its child in same message. - * Content MUST be empty array if sender didn’t have requested data + * Number and order of chunks included in this message MUST be equal to request information. + * Node corresponding to `chunk_root` in request MUST be included + * If sender doesn’t have a chunk for the requested hash, corresponding chunk MUST be compressed([]), not omitted. diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 8371fa7933..65497575e1 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -49,8 +49,6 @@ const SYNC_EXPIRE_TOKEN_END: TimerToken = SYNC_EXPIRE_TOKEN_BEGIN + SYNC_EXPIRE_ const SYNC_TIMER_INTERVAL: u64 = 1000; const SYNC_EXPIRE_REQUEST_INTERVAL: u64 = 15000; -const SNAPSHOT_PERIOD: u64 = (1 << 14); - #[derive(Debug, PartialEq)] pub struct TokenInfo { node_id: NodeId, @@ -563,11 +561,9 @@ impl Extension { ctrace!(SYNC, "Received body request from {}", from); self.create_bodies_response(hashes) } - RequestMessage::StateHead(hash) => self.create_state_head_response(hash), - RequestMessage::StateChunk { - block_hash, - tree_root, - } => self.create_state_chunk_response(block_hash, tree_root), + RequestMessage::StateChunk(block_hash, chunk_root) => { + self.create_state_chunk_response(block_hash, chunk_root) + } }; self.api.send(from, Arc::new(Message::Response(id, response).rlp_bytes())); @@ -579,21 +575,9 @@ impl Extension { .. } => true, RequestMessage::Bodies(hashes) => !hashes.is_empty(), - RequestMessage::StateHead(hash) => match self.client.block_number(&BlockId::Hash(*hash)) { - Some(number) if number % SNAPSHOT_PERIOD == 0 => true, - _ => false, - }, RequestMessage::StateChunk { - block_hash, .. 
- } => { - let _is_checkpoint = match self.client.block_number(&BlockId::Hash(*block_hash)) { - Some(number) if number % SNAPSHOT_PERIOD == 0 => true, - _ => false, - }; - // FIXME: check tree_root - unimplemented!() - } + } => unimplemented!(), } } @@ -631,11 +615,7 @@ impl Extension { ResponseMessage::Bodies(bodies) } - fn create_state_head_response(&self, _hash: BlockHash) -> ResponseMessage { - unimplemented!() - } - - fn create_state_chunk_response(&self, _hash: BlockHash, _tree_root: H256) -> ResponseMessage { + fn create_state_chunk_response(&self, _hash: BlockHash, _tree_root: Vec) -> ResponseMessage { unimplemented!() } @@ -676,7 +656,7 @@ impl Extension { self.on_body_response(hashes, bodies); self.check_sync_variable(); } - _ => unimplemented!(), + ResponseMessage::StateChunk(..) => unimplemented!(), } } } @@ -730,7 +710,6 @@ impl Extension { } true } - (RequestMessage::StateHead(..), ResponseMessage::StateHead(..)) => unimplemented!(), ( RequestMessage::StateChunk { .. diff --git a/sync/src/block/message/mod.rs b/sync/src/block/message/mod.rs index 42b9654981..733a052a90 100644 --- a/sync/src/block/message/mod.rs +++ b/sync/src/block/message/mod.rs @@ -29,10 +29,8 @@ const MESSAGE_ID_GET_HEADERS: u8 = 0x02; const MESSAGE_ID_HEADERS: u8 = 0x03; const MESSAGE_ID_GET_BODIES: u8 = 0x04; const MESSAGE_ID_BODIES: u8 = 0x05; -const MESSAGE_ID_GET_STATE_HEAD: u8 = 0x06; -const MESSAGE_ID_STATE_HEAD: u8 = 0x07; -const MESSAGE_ID_GET_STATE_CHUNK: u8 = 0x08; -const MESSAGE_ID_STATE_CHUNK: u8 = 0x09; +const MESSAGE_ID_GET_STATE_CHUNK: u8 = 0x0a; +const MESSAGE_ID_STATE_CHUNK: u8 = 0x0b; #[derive(Debug, PartialEq)] pub enum Message { @@ -114,11 +112,10 @@ impl Decodable for Message { let request_id = rlp.val_at(1)?; let message = rlp.at(2)?; match id { - MESSAGE_ID_GET_HEADERS - | MESSAGE_ID_GET_BODIES - | MESSAGE_ID_GET_STATE_HEAD - | MESSAGE_ID_GET_STATE_CHUNK => Ok(Message::Request(request_id, RequestMessage::decode(id, &message)?)), - MESSAGE_ID_HEADERS | MESSAGE_ID_BODIES | MESSAGE_ID_STATE_HEAD | MESSAGE_ID_STATE_CHUNK => { + MESSAGE_ID_GET_HEADERS | MESSAGE_ID_GET_BODIES | MESSAGE_ID_GET_STATE_CHUNK => { + Ok(Message::Request(request_id, RequestMessage::decode(id, &message)?)) + } + MESSAGE_ID_HEADERS | MESSAGE_ID_BODIES | MESSAGE_ID_STATE_CHUNK => { Ok(Message::Response(request_id, ResponseMessage::decode(id, &message)?)) } _ => Err(DecoderError::Custom("Unknown message id detected")), @@ -148,10 +145,4 @@ mod tests { let request_id = 10; rlp_encode_and_decode_test!(Message::Request(request_id, RequestMessage::Bodies(vec![]))); } - - #[test] - fn request_state_head_rlp() { - let request_id = 10; - rlp_encode_and_decode_test!(Message::Request(request_id, RequestMessage::StateHead(H256::random().into()))); - } } diff --git a/sync/src/block/message/request.rs b/sync/src/block/message/request.rs index 6c7ef53010..ddc11ecf85 100644 --- a/sync/src/block/message/request.rs +++ b/sync/src/block/message/request.rs @@ -25,11 +25,7 @@ pub enum RequestMessage { max_count: u64, }, Bodies(Vec), - StateHead(BlockHash), - StateChunk { - block_hash: BlockHash, - tree_root: H256, - }, + StateChunk(BlockHash, Vec), } impl Encodable for RequestMessage { @@ -46,17 +42,10 @@ impl Encodable for RequestMessage { RequestMessage::Bodies(hashes) => { s.append_list(hashes); } - RequestMessage::StateHead(block_hash) => { - s.begin_list(1); - s.append(block_hash); - } - RequestMessage::StateChunk { - block_hash, - tree_root, - } => { + RequestMessage::StateChunk(block_hash, merkle_roots) => { s.begin_list(2); 
s.append(block_hash); - s.append(tree_root); + s.append_list(merkle_roots); } }; } @@ -69,7 +58,6 @@ impl RequestMessage { .. } => super::MESSAGE_ID_GET_HEADERS, RequestMessage::Bodies(..) => super::MESSAGE_ID_GET_BODIES, - RequestMessage::StateHead(..) => super::MESSAGE_ID_GET_STATE_HEAD, RequestMessage::StateChunk { .. } => super::MESSAGE_ID_GET_STATE_CHUNK, @@ -92,16 +80,6 @@ impl RequestMessage { } } super::MESSAGE_ID_GET_BODIES => RequestMessage::Bodies(rlp.as_list()?), - super::MESSAGE_ID_GET_STATE_HEAD => { - let item_count = rlp.item_count()?; - if item_count != 1 { - return Err(DecoderError::RlpIncorrectListLen { - got: item_count, - expected: 1, - }) - } - RequestMessage::StateHead(rlp.val_at(0)?) - } super::MESSAGE_ID_GET_STATE_CHUNK => { let item_count = rlp.item_count()?; if item_count != 2 { @@ -110,10 +88,7 @@ impl RequestMessage { expected: 2, }) } - RequestMessage::StateChunk { - block_hash: rlp.val_at(0)?, - tree_root: rlp.val_at(1)?, - } + RequestMessage::StateChunk(rlp.val_at(0)?, rlp.list_at(1)?) } _ => return Err(DecoderError::Custom("Unknown message id detected")), }; @@ -149,18 +124,9 @@ mod tests { assert_eq!(message, decode_bytes(message.message_id(), message.rlp_bytes().as_ref())); } - #[test] - fn request_state_head_message_rlp() { - let message = RequestMessage::StateHead(H256::default().into()); - assert_eq!(message, decode_bytes(message.message_id(), message.rlp_bytes().as_ref())); - } - #[test] fn request_state_chunk_message_rlp() { - let message = RequestMessage::StateChunk { - block_hash: H256::default().into(), - tree_root: H256::default(), - }; + let message = RequestMessage::StateChunk(H256::default().into(), vec![H256::default()]); assert_eq!(message, decode_bytes(message.message_id(), message.rlp_bytes().as_ref())); } } diff --git a/sync/src/block/message/response.rs b/sync/src/block/message/response.rs index f6a16f2f97..823f26d490 100644 --- a/sync/src/block/message/response.rs +++ b/sync/src/block/message/response.rs @@ -24,8 +24,7 @@ use ctypes::Header; pub enum ResponseMessage { Headers(Vec
<Header>
), Bodies(Vec>), - StateHead(Vec), - StateChunk(Vec), + StateChunk(Vec>), } impl Encodable for ResponseMessage { @@ -53,13 +52,8 @@ impl Encodable for ResponseMessage { s.append(&compressed); } - ResponseMessage::StateHead(bytes) => { - s.begin_list(1); - s.append(bytes); - } - ResponseMessage::StateChunk(bytes) => { - s.begin_list(1); - s.append(bytes); + ResponseMessage::StateChunk(chunks) => { + s.append_list::, Vec>(chunks); } }; } @@ -72,7 +66,6 @@ impl ResponseMessage { .. } => super::MESSAGE_ID_HEADERS, ResponseMessage::Bodies(..) => super::MESSAGE_ID_BODIES, - ResponseMessage::StateHead(..) => super::MESSAGE_ID_STATE_HEAD, ResponseMessage::StateChunk { .. } => super::MESSAGE_ID_STATE_CHUNK, @@ -109,26 +102,7 @@ impl ResponseMessage { } ResponseMessage::Bodies(bodies) } - super::MESSAGE_ID_STATE_HEAD => { - let item_count = rlp.item_count()?; - if item_count != 1 { - return Err(DecoderError::RlpIncorrectListLen { - got: item_count, - expected: 1, - }) - } - ResponseMessage::StateHead(rlp.val_at(0)?) - } - super::MESSAGE_ID_STATE_CHUNK => { - let item_count = rlp.item_count()?; - if item_count != 1 { - return Err(DecoderError::RlpIncorrectListLen { - got: item_count, - expected: 1, - }) - } - ResponseMessage::StateChunk(rlp.val_at(0)?) - } + super::MESSAGE_ID_STATE_CHUNK => ResponseMessage::StateChunk(rlp.as_list()?), _ => return Err(DecoderError::Custom("Unknown message id detected")), }; @@ -184,12 +158,6 @@ mod tests { assert_eq!(message, decode_bytes(message.message_id(), message.rlp_bytes().as_ref())); } - #[test] - fn state_head_message_rlp() { - let message = ResponseMessage::StateHead(vec![]); - assert_eq!(message, decode_bytes(message.message_id(), message.rlp_bytes().as_ref())); - } - #[test] fn state_chunk_message_rlp() { let message = ResponseMessage::StateChunk(vec![]); diff --git a/util/merkle/src/snapshot/mod.rs b/util/merkle/src/snapshot/mod.rs index f6fd8a17d8..e07d7de7fb 100644 --- a/util/merkle/src/snapshot/mod.rs +++ b/util/merkle/src/snapshot/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -mod chunk; +pub mod chunk; mod compress; mod error; mod ordered_heap; From a8627c208fd69bd954b0c4c632f8db13c157f314 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Fri, 15 Nov 2019 16:29:56 +0900 Subject: [PATCH 21/52] Import only desired bootstrap headers --- sync/src/block/extension.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 65497575e1..3b890005aa 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -726,20 +726,28 @@ impl Extension { fn on_header_response(&mut self, from: &NodeId, headers: &[Header]) { ctrace!(SYNC, "Received header response from({}) with length({})", from, headers.len()); match self.state { - State::SnapshotHeader(..) 
=> {
-                for header in headers {
+            State::SnapshotHeader(hash, _) => match headers {
+                [header] if header.hash() == hash => {
                    match self.client.import_bootstrap_header(&header) {
-                        Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {}
+                        Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
+                            self.state = State::SnapshotChunk(*header.state_root());
+                        }
                        Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {}
                        // FIXME: handle import errors
                        Err(err) => {
                            cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err);
-                            break
                        }
                        _ => {}
                    }
                }
-            }
+                _ => cdebug!(
+                    SYNC,
+                    "Peer {} responded with an invalid response. requested hash: {}, response length: {}",
+                    from,
+                    hash,
+                    headers.len()
+                ),
+            },
            State::SnapshotChunk(..) => {}
            State::Full => {
                let (mut completed, pivot_score_changed) = if let Some(peer) = self.header_downloaders.get_mut(from) {

From ecdaa3ad8133e8f4755dfb32b7495d14b70c00bb Mon Sep 17 00:00:00 2001
From: SeongChan Lee
Date: Fri, 6 Dec 2019 14:50:16 +0900
Subject: [PATCH 22/52] Implement snapshot service

---
 codechain/run_node.rs              |  14 ++++
 core/src/client/mod.rs             |   1 +
 core/src/client/snapshot_notify.rs |  81 +++++++++++++++++++++++
 core/src/consensus/mod.rs          |   3 +
 core/src/lib.rs                    |   1 +
 sync/src/lib.rs                    |   1 +
 sync/src/snapshot/mod.rs           | 103 +++++++++++++++++++++++++++++
 util/merkle/src/snapshot/error.rs  |  26 ++++++++
 util/merkle/src/snapshot/mod.rs    |   2 +
 9 files changed, 232 insertions(+)
 create mode 100644 core/src/client/snapshot_notify.rs
 create mode 100644 sync/src/snapshot/mod.rs

diff --git a/codechain/run_node.rs b/codechain/run_node.rs
index 2fb2a4b9d1..ddf1c892fd 100644
--- a/codechain/run_node.rs
+++ b/codechain/run_node.rs
@@ -19,6 +19,7 @@ use std::path::Path;
 use std::sync::{Arc, Weak};
 use std::time::{SystemTime, UNIX_EPOCH};
 
+use ccore::snapshot_notify;
 use ccore::{
     AccountProvider, AccountProviderError, BlockId, ChainNotify, Client, ClientConfig, ClientService, EngineInfo,
     EngineType, Miner, MinerService, Scheme, Stratum, StratumConfig, StratumError, NUM_COLUMNS,
@@ -30,6 +31,7 @@ use ckeystore::KeyStore;
 use clap::ArgMatches;
 use clogger::{self, EmailAlarm, LoggerConfig};
 use cnetwork::{Filters, NetworkConfig, NetworkControl, NetworkService, RoutingTable, SocketAddr};
+use csync::snapshot::Service as SnapshotService;
 use csync::{BlockSyncExtension, BlockSyncSender, TransactionSyncExtension};
 use ctimer::TimerLoop;
 use ctrlc::CtrlC;
@@ -363,6 +365,18 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> {
         stratum_start(&config.stratum_config(), &miner, client.client())?
     }
 
+    let _snapshot_service = {
+        if !config.snapshot.disable.unwrap() {
+            let client = client.client();
+            let (tx, rx) = snapshot_notify::create();
+            client.engine().register_snapshot_notify_sender(tx);
+            let service = Arc::new(SnapshotService::new(client, rx, config.snapshot.path.unwrap()));
+            Some(service)
+        } else {
+            None
+        }
+    };
+
     // drop the scheme to free up genesis state.
 drop(scheme);
 
 client.client().engine().complete_register();

diff --git a/core/src/client/mod.rs b/core/src/client/mod.rs
index 8c6ca29dd1..c9bac13215 100644
--- a/core/src/client/mod.rs
+++ b/core/src/client/mod.rs
@@ -20,6 +20,7 @@ mod client;
 mod config;
 mod error;
 mod importer;
+pub mod snapshot_notify;
 mod test_client;
 
 pub use self::chain_notify::ChainNotify;

diff --git a/core/src/client/snapshot_notify.rs b/core/src/client/snapshot_notify.rs
new file mode 100644
index 0000000000..8e0a372cbb
--- /dev/null
+++ b/core/src/client/snapshot_notify.rs
@@ -0,0 +1,81 @@
+use ctypes::BlockHash;
+
+use parking_lot::RwLock;
+use std::sync::mpsc::{sync_channel, Receiver, RecvError, SyncSender};
+use std::sync::{Arc, Weak};
+
+pub fn create() -> (NotifySender, NotifyReceiverSource) {
+    let (tx, rx) = sync_channel(1);
+    let tx = Arc::new(RwLock::new(Some(tx)));
+    let tx_weak = Arc::downgrade(&tx);
+    (
+        NotifySender {
+            tx,
+        },
+        NotifyReceiverSource(
+            ReceiverCanceller {
+                tx: tx_weak,
+            },
+            NotifyReceiver {
+                rx,
+            },
+        ),
+    )
+}
+
+pub struct NotifySender {
+    tx: Arc<RwLock<Option<SyncSender<BlockHash>>>>,
+}
+
+impl NotifySender {
+    pub fn notify(&self, block_hash: BlockHash) {
+        let guard = self.tx.read();
+        if let Some(tx) = guard.as_ref() {
+            // TODO: Ignore the error. The receiver thread might be terminated or congested.
+            let _ = tx.try_send(block_hash);
+        } else {
+            // ReceiverCanceller is dropped.
+        }
+    }
+}
+
+pub struct NotifyReceiverSource(pub ReceiverCanceller, pub NotifyReceiver);
+
+/// Dropping this stops the receiver.
+///
+/// The `recv()` method of the `Receiver` stops and returns `RecvError` when the corresponding `Sender` is dropped.
+/// This is an inherited behaviour of `std::sync::mpsc::{Sender, Receiver}`.
+/// However, we need another way to stop the `Receiver`, since the `Sender` is usually shared throughout our code.
+/// We can't collect them all and destroy them one by one. We need a kill switch.
+///
+/// `ReceiverCanceller` holds a weak reference to the `Sender`, so it doesn't prevent the default behaviour.
+/// We can upgrade the weak reference to get a shared reference to the `Sender` itself, and manually drop it with this.
+pub struct ReceiverCanceller {
+    tx: Weak<RwLock<Option<SyncSender<BlockHash>>>>,
+}
+
+impl Drop for ReceiverCanceller {
+    fn drop(&mut self) {
+        if let Some(tx) = self.tx.upgrade() {
+            let mut guard = tx.write();
+            if let Some(sender) = guard.take() {
+                drop(sender)
+            }
+        } else {
+            // All NotifySenders are dropped. No droppable Sender.
+        }
+    }
+}
+
+/// Receiver is dropped when
+/// 1. There are no NotifySenders out there.
+/// 2. ReceiverCanceller is dropped. See the comment of `ReceiverCanceller`.
+pub struct NotifyReceiver { + rx: Receiver, +} + +impl NotifyReceiver { + pub fn recv(&self) -> Result { + self.rx.recv() + } +} diff --git a/core/src/consensus/mod.rs b/core/src/consensus/mod.rs index de08a49a7c..4a88bc73a8 100644 --- a/core/src/consensus/mod.rs +++ b/core/src/consensus/mod.rs @@ -51,6 +51,7 @@ use primitives::{Bytes, U256}; use self::bit_set::BitSet; use crate::account_provider::AccountProvider; use crate::block::{ExecutedBlock, SealedBlock}; +use crate::client::snapshot_notify::NotifySender as SnapshotNotifySender; use crate::client::ConsensusClient; use crate::codechain_machine::CodeChainMachine; use crate::error::Error; @@ -265,6 +266,8 @@ pub trait ConsensusEngine: Sync + Send { fn register_chain_notify(&self, _: &Client) {} + fn register_snapshot_notify_sender(&self, _sender: SnapshotNotifySender) {} + fn complete_register(&self) {} fn get_best_block_from_best_proposal_header(&self, header: &HeaderView) -> BlockHash { diff --git a/core/src/lib.rs b/core/src/lib.rs index db2d091932..0585b963b3 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -84,6 +84,7 @@ mod tests; pub use crate::account_provider::{AccountProvider, Error as AccountProviderError}; pub use crate::block::Block; +pub use crate::client::snapshot_notify; pub use crate::client::{ AccountData, AssetClient, BlockChainClient, BlockChainTrait, ChainNotify, Client, ClientConfig, DatabaseClient, EngineClient, EngineInfo, ExecuteClient, ImportBlock, MiningBlockChainClient, Shard, StateInfo, TermInfo, diff --git a/sync/src/lib.rs b/sync/src/lib.rs index b89deb036d..5a0b1c5910 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -46,6 +46,7 @@ extern crate trie_standardmap; extern crate util_error; mod block; +pub mod snapshot; mod transaction; pub use crate::block::{BlockSyncEvent, BlockSyncExtension, BlockSyncSender}; diff --git a/sync/src/snapshot/mod.rs b/sync/src/snapshot/mod.rs new file mode 100644 index 0000000000..3c27edfb11 --- /dev/null +++ b/sync/src/snapshot/mod.rs @@ -0,0 +1,103 @@ +// Copyright 2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
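(A hedged end-to-end sketch of the `snapshot_notify` channel defined above; the variable names are illustrative, and in the real service the three parts live on different threads.)

```rust
// Illustrative wiring of the snapshot_notify API above.
let (sender, NotifyReceiverSource(canceller, receiver)) = snapshot_notify::create();

// Engine side: fire-and-forget. try_send drops the hash if the receiver
// is congested or already gone. `block_hash` is whichever BlockHash the
// engine wants snapshotted.
sender.notify(block_hash);

// Worker side: block until a hash arrives; Err(RecvError) means the last
// sender is gone or the canceller fired.
if let Ok(hash) = receiver.recv() {
    // take a snapshot of the state at `hash`
}

// Kill switch: dropping the canceller takes the inner SyncSender out of the
// shared slot and drops it, which terminates a blocked recv() above.
drop(canceller);
```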
+
+use std::fs::{create_dir_all, File};
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::thread::{spawn, JoinHandle};
+
+use ccore::snapshot_notify::{NotifyReceiverSource, ReceiverCanceller};
+use ccore::{BlockChainTrait, BlockId, Client};
+use cmerkle::snapshot::{ChunkCompressor, Error as SnapshotError, Snapshot};
+use ctypes::BlockHash;
+use hashdb::{AsHashDB, HashDB};
+use primitives::H256;
+use std::ops::Deref;
+
+pub struct Service {
+    join_handle: Option<JoinHandle<()>>,
+    canceller: Option<ReceiverCanceller>,
+}
+
+impl Service {
+    pub fn new(client: Arc<Client>, notify_receiver_source: NotifyReceiverSource, root_dir: String) -> Self {
+        let NotifyReceiverSource(canceller, receiver) = notify_receiver_source;
+        let join_handle = spawn(move || {
+            cinfo!(SYNC, "Snapshot service is on");
+            while let Ok(block_hash) = receiver.recv() {
+                cinfo!(SYNC, "Snapshot is requested for block: {}", block_hash);
+                let state_root = if let Some(header) = client.block_header(&BlockId::Hash(block_hash)) {
+                    header.state_root()
+                } else {
+                    cerror!(SYNC, "There isn't a corresponding header for the requested block hash: {}", block_hash);
+                    continue
+                };
+                let db_lock = client.state_db().read();
+                if let Err(err) = snapshot(db_lock.as_hashdb(), block_hash, state_root, &root_dir) {
+                    cerror!(
+                        SYNC,
+                        "Snapshot request failed for block: {}, chunk_root: {}, err: {}",
+                        block_hash,
+                        state_root,
+                        err
+                    );
+                } else {
+                    cinfo!(SYNC, "Snapshot is ready for block: {}", block_hash)
+                }
+            }
+            cinfo!(SYNC, "Snapshot service is stopped")
+        });
+
+        Self {
+            canceller: Some(canceller),
+            join_handle: Some(join_handle),
+        }
+    }
+}
+
+fn snapshot(db: &dyn HashDB, block_hash: BlockHash, chunk_root: H256, root_dir: &str) -> Result<(), SnapshotError> {
+    let snapshot_dir = {
+        let mut res = PathBuf::new();
+        res.push(root_dir);
+        res.push(format!("{:x}", block_hash.deref()));
+        res
+    };
+    create_dir_all(&snapshot_dir)?;
+
+    for chunk in Snapshot::from_hashdb(db, chunk_root) {
+        let mut chunk_path = snapshot_dir.clone();
+        chunk_path.push(format!("{:x}", chunk.root));
+        let chunk_file = File::create(chunk_path)?;
+        let compressor = ChunkCompressor::new(chunk_file);
+        compressor.compress_chunk(&chunk)?;
+    }
+
+    Ok(())
+}
+
+impl Drop for Service {
+    fn drop(&mut self) {
+        if let Some(canceller) = self.canceller.take() {
+            // The thread behind `self.join_handle` waits until `self.canceller` is dropped.
+            // The canceller must be dropped first to avoid a deadlock at `handle.join()`.
+ drop(canceller); + } + + if let Some(handle) = self.join_handle.take() { + handle.join().expect("Snapshot service thread shouldn't panic"); + } + } +} diff --git a/util/merkle/src/snapshot/error.rs b/util/merkle/src/snapshot/error.rs index 19f6876b06..5077312818 100644 --- a/util/merkle/src/snapshot/error.rs +++ b/util/merkle/src/snapshot/error.rs @@ -20,6 +20,7 @@ use primitives::H256; use rlp::DecoderError as RlpDecoderError; use crate::TrieError; +use std::fmt::{Display, Formatter}; #[derive(Debug)] pub enum Error { @@ -53,6 +54,17 @@ impl From for Error { } } +impl Display for Error { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + match self { + Error::IoError(err) => write!(f, "IoError: {}", err), + Error::RlpDecoderError(err) => write!(f, "RlpDecoderError: {}", err), + Error::TrieError(err) => write!(f, "TrieError: {}", err), + Error::ChunkError(err) => write!(f, "ChunkError: {}", err), + } + } +} + #[derive(Debug)] pub enum ChunkError { TooBig, @@ -63,3 +75,17 @@ pub enum ChunkError { }, InvalidContent, } + +impl Display for ChunkError { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + match self { + ChunkError::TooBig => write!(f, "Chunk has too many elements"), + ChunkError::InvalidHeight => write!(f, "Chunk height is unexpected height"), + ChunkError::ChunkRootMismatch { + expected, + actual, + } => write!(f, "Chunk root is different from expected. expected: {}, actual: {}", expected, actual), + ChunkError::InvalidContent => write!(f, "Chunk content is invalid"), + } + } +} diff --git a/util/merkle/src/snapshot/mod.rs b/util/merkle/src/snapshot/mod.rs index e07d7de7fb..159bcfa823 100644 --- a/util/merkle/src/snapshot/mod.rs +++ b/util/merkle/src/snapshot/mod.rs @@ -26,6 +26,8 @@ use hashdb::HashDB; use primitives::H256; use self::chunk::{Chunk, RecoveredChunk, UnresolvedChunk}; +pub use self::compress::{ChunkCompressor, ChunkDecompressor}; +pub use self::error::Error; use self::ordered_heap::OrderedHeap; use crate::nibbleslice::NibbleSlice; From 27e3c4e173dd06f40a453289637c1938eea444b7 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Fri, 15 Nov 2019 20:41:04 +0900 Subject: [PATCH 23/52] Add RPC devel_snapshot --- core/src/client/client.rs | 10 +++++++++- core/src/client/mod.rs | 4 ++++ core/src/consensus/mod.rs | 2 ++ core/src/consensus/solo/mod.rs | 17 ++++++++++++++++- core/src/lib.rs | 4 ++-- rpc/src/v1/impls/devel.rs | 11 ++++++++--- rpc/src/v1/traits/devel.rs | 4 ++++ spec/JSON-RPC.md | 28 ++++++++++++++++++++++++++++ 8 files changed, 73 insertions(+), 7 deletions(-) diff --git a/core/src/client/client.rs b/core/src/client/client.rs index ccc690b22a..5c1f96789f 100644 --- a/core/src/client/client.rs +++ b/core/src/client/client.rs @@ -44,7 +44,7 @@ use super::{ }; use crate::block::{ClosedBlock, IsBlock, OpenBlock, SealedBlock}; use crate::blockchain::{BlockChain, BlockProvider, BodyProvider, HeaderProvider, InvoiceProvider, TransactionAddress}; -use crate::client::{ConsensusClient, TermInfo}; +use crate::client::{ConsensusClient, SnapshotClient, TermInfo}; use crate::consensus::{CodeChainEngine, EngineError}; use crate::encoded; use crate::error::{BlockImportError, Error, ImportError, SchemeError}; @@ -952,3 +952,11 @@ impl FindActionHandler for Client { self.engine.find_action_handler_for(id) } } + +impl SnapshotClient for Client { + fn notify_snapshot(&self, id: BlockId) { + if let Some(header) = self.block_header(&id) { + self.engine.send_snapshot_notify(header.hash()) + } + } +} diff --git a/core/src/client/mod.rs 
b/core/src/client/mod.rs index c9bac13215..c85dce75e5 100644 --- a/core/src/client/mod.rs +++ b/core/src/client/mod.rs @@ -348,3 +348,7 @@ pub trait StateInfo { /// is unknown. fn state_at(&self, id: BlockId) -> Option; } + +pub trait SnapshotClient { + fn notify_snapshot(&self, id: BlockId); +} diff --git a/core/src/consensus/mod.rs b/core/src/consensus/mod.rs index 4a88bc73a8..c20b6c886b 100644 --- a/core/src/consensus/mod.rs +++ b/core/src/consensus/mod.rs @@ -270,6 +270,8 @@ pub trait ConsensusEngine: Sync + Send { fn complete_register(&self) {} + fn send_snapshot_notify(&self, _block_hash: BlockHash) {} + fn get_best_block_from_best_proposal_header(&self, header: &HeaderView) -> BlockHash { header.hash() } diff --git a/core/src/consensus/solo/mod.rs b/core/src/consensus/solo/mod.rs index 77a41dd9bb..0c7632e54e 100644 --- a/core/src/consensus/solo/mod.rs +++ b/core/src/consensus/solo/mod.rs @@ -20,13 +20,14 @@ use std::sync::{Arc, Weak}; use ckey::Address; use cstate::{ActionHandler, HitHandler}; -use ctypes::{CommonParams, Header}; +use ctypes::{BlockHash, CommonParams, Header}; use parking_lot::RwLock; use self::params::SoloParams; use super::stake; use super::{ConsensusEngine, Seal}; use crate::block::{ExecutedBlock, IsBlock}; +use crate::client::snapshot_notify::NotifySender; use crate::client::ConsensusClient; use crate::codechain_machine::CodeChainMachine; use crate::consensus::{EngineError, EngineType}; @@ -38,6 +39,7 @@ pub struct Solo { params: SoloParams, machine: CodeChainMachine, action_handlers: Vec>, + snapshot_notify_sender: Arc>>, } impl Solo { @@ -54,6 +56,7 @@ impl Solo { params, machine, action_handlers, + snapshot_notify_sender: Arc::new(RwLock::new(None)), } } @@ -150,6 +153,18 @@ impl ConsensusEngine for Solo { 1 } + fn register_snapshot_notify_sender(&self, sender: NotifySender) { + let mut guard = self.snapshot_notify_sender.write(); + assert!(guard.is_none(), "snapshot_notify_sender is registered twice"); + *guard = Some(sender); + } + + fn send_snapshot_notify(&self, block_hash: BlockHash) { + if let Some(sender) = self.snapshot_notify_sender.read().as_ref() { + sender.notify(block_hash) + } + } + fn action_handlers(&self) -> &[Arc] { &self.action_handlers } diff --git a/core/src/lib.rs b/core/src/lib.rs index 0585b963b3..7ce513ea83 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -87,8 +87,8 @@ pub use crate::block::Block; pub use crate::client::snapshot_notify; pub use crate::client::{ AccountData, AssetClient, BlockChainClient, BlockChainTrait, ChainNotify, Client, ClientConfig, DatabaseClient, - EngineClient, EngineInfo, ExecuteClient, ImportBlock, MiningBlockChainClient, Shard, StateInfo, TermInfo, - TestBlockChainClient, TextClient, + EngineClient, EngineInfo, ExecuteClient, ImportBlock, MiningBlockChainClient, Shard, SnapshotClient, StateInfo, + TermInfo, TestBlockChainClient, TextClient, }; pub use crate::consensus::{EngineType, TimeGapParams}; pub use crate::db::{COL_STATE, NUM_COLUMNS}; diff --git a/rpc/src/v1/impls/devel.rs b/rpc/src/v1/impls/devel.rs index 588b4f8f83..75af7f3ffe 100644 --- a/rpc/src/v1/impls/devel.rs +++ b/rpc/src/v1/impls/devel.rs @@ -23,7 +23,7 @@ use std::vec::Vec; use ccore::{ BlockId, DatabaseClient, EngineClient, EngineInfo, MinerService, MiningBlockChainClient, SignedTransaction, - TermInfo, COL_STATE, + SnapshotClient, TermInfo, COL_STATE, }; use ccrypto::Blake; use cjson::bytes::Bytes; @@ -33,7 +33,7 @@ use csync::BlockSyncEvent; use ctypes::transaction::{ Action, AssetMintOutput, AssetOutPoint, AssetTransferInput, 
AssetTransferOutput, Transaction, }; -use ctypes::{Tracker, TxHash}; +use ctypes::{BlockHash, Tracker, TxHash}; use jsonrpc_core::Result; use kvdb::KeyValueDB; use primitives::{H160, H256}; @@ -70,7 +70,7 @@ where impl Devel for DevelClient where - C: DatabaseClient + EngineInfo + EngineClient + MiningBlockChainClient + TermInfo + 'static, + C: DatabaseClient + EngineInfo + EngineClient + MiningBlockChainClient + TermInfo + SnapshotClient + 'static, M: MinerService + 'static, { fn get_state_trie_keys(&self, offset: usize, limit: usize) -> Result> { @@ -108,6 +108,11 @@ where } } + fn snapshot(&self, block_hash: BlockHash) -> Result<()> { + self.client.notify_snapshot(BlockId::Hash(block_hash)); + Ok(()) + } + fn test_tps(&self, setting: TPSTestSetting) -> Result { let common_params = self.client.common_params(BlockId::Latest).unwrap(); let mint_fee = common_params.min_asset_mint_cost(); diff --git a/rpc/src/v1/traits/devel.rs b/rpc/src/v1/traits/devel.rs index e8604e910e..565a331976 100644 --- a/rpc/src/v1/traits/devel.rs +++ b/rpc/src/v1/traits/devel.rs @@ -17,6 +17,7 @@ use std::net::SocketAddr; use cjson::bytes::Bytes; +use ctypes::BlockHash; use jsonrpc_core::Result; use primitives::H256; @@ -39,6 +40,9 @@ pub trait Devel { #[rpc(name = "devel_getBlockSyncPeers")] fn get_block_sync_peers(&self) -> Result>; + #[rpc(name = "devel_snapshot")] + fn snapshot(&self, hash: BlockHash) -> Result<()>; + #[rpc(name = "devel_testTPS")] fn test_tps(&self, setting: TPSTestSetting) -> Result; } diff --git a/spec/JSON-RPC.md b/spec/JSON-RPC.md index cefee266dc..870192c22b 100644 --- a/spec/JSON-RPC.md +++ b/spec/JSON-RPC.md @@ -367,6 +367,7 @@ When `Transaction` is included in any response, there will be an additional fiel *** * [devel_getStateTrieKeys](#devel_getstatetriekeys) * [devel_getStateTrieValue](#devel_getstatetrievalue) + * [devel_snapshot](#devel_snapshot) * [devel_startSealing](#devel_startsealing) * [devel_stopSealing](#devel_stopsealing) * [devel_getBlockSyncPeers](#devel_getblocksyncpeers) @@ -2979,6 +2980,33 @@ Gets the value of the state trie with the given key. [Back to **List of methods**](#list-of-methods) +## devel_snapshot +Snapshot the state of the given block hash. + +### Params + 1. key: `H256` + +### Returns + +### Request Example +``` + curl \ + -H 'Content-Type: application/json' \ + -d '{"jsonrpc": "2.0", "method": "devel_snapshot", "params": ["0xfc196ede542b03b55aee9f106004e7e3d7ea6a9600692e964b4735a260356b50"], "id": null}' \ + localhost:8080 +``` + +### Response Example +``` +{ + "jsonrpc":"2.0", + "result":[], + "id":null +} +``` + +[Back to **List of methods**](#list-of-methods) + ## devel_startSealing Starts and enables sealing blocks by the miner. From ef1a176aa9e79dcac7bd90f7a0e9d690fe21e887 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Mon, 18 Nov 2019 14:52:26 +0900 Subject: [PATCH 24/52] Add basic e2e test for snapshot --- test/src/e2e/snapshot.test.ts | 66 +++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 test/src/e2e/snapshot.test.ts diff --git a/test/src/e2e/snapshot.test.ts b/test/src/e2e/snapshot.test.ts new file mode 100644 index 0000000000..d0b81f614d --- /dev/null +++ b/test/src/e2e/snapshot.test.ts @@ -0,0 +1,66 @@ +// Copyright 2018-2019 Kodebox, Inc. +// This file is part of CodeChain. 
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+import { expect } from "chai";
+import * as fs from "fs";
+import "mocha";
+import * as path from "path";
+
+import { aliceAddress } from "../helper/constants";
+import CodeChain from "../helper/spawn";
+
+const SNAPSHOT_PATH = `${__dirname}/../../../snapshot/`;
+
+describe("Snapshot", async function() {
+    let node: CodeChain;
+    before(async function() {
+        node = new CodeChain({
+            argv: ["--snapshot-path", SNAPSHOT_PATH]
+        });
+        await node.start();
+    });
+
+    it("can make a snapshot when it is requested with devel rpc", async function() {
+        const pay = await node.sendPayTx({
+            quantity: 100,
+            recipient: aliceAddress
+        });
+
+        const blockHash = (await node.sdk.rpc.chain.getTransaction(pay.hash()))!
+            .blockHash!;
+        await node.sdk.rpc.sendRpcRequest("devel_snapshot", [
+            blockHash.toJSON()
+        ]);
+        // Wait for 1 second
+        await new Promise(resolve => setTimeout(resolve, 1000));
+
+        const stateRoot = (await node.sdk.rpc.chain.getBlock(blockHash))!
+            .stateRoot;
+        expect(
+            path.join(SNAPSHOT_PATH, blockHash.toString(), stateRoot.toString())
+        ).to.satisfies(fs.existsSync);
+    });
+
+    afterEach(function() {
+        if (this.currentTest!.state === "failed") {
+            node.keepLogs();
+        }
+    });
+
+    after(async function() {
+        await node.clean();
+    });
+});

From b3cc277dde37815d55a403db03b03d6d490e78d5 Mon Sep 17 00:00:00 2001
From: Joonmo Yang
Date: Thu, 14 Nov 2019 19:09:51 +0900
Subject: [PATCH 25/52] Rename total_score to nonce in sync message

When CodeChain is synchronized by snapshot sync, `total_score` doesn't
mean the accumulated score from the genesis block. To handle this, the
sync extension is updated to not rely on `total_score` for deciding
whether a peer is leading or not, and to use the value only as a
monotonically increasing nonce.
---
 core/src/lib.rs                          |  2 +-
 spec/Block-Synchronization-Extension.md  |  5 +-
 sync/src/block/downloader/header.rs      | 81 ++++++++----------------
 sync/src/block/extension.rs              | 64 ++++++++-----------
 sync/src/block/message/mod.rs            | 10 +--
 test/src/helper/mock/blockSyncMessage.ts |  4 +-
 6 files changed, 68 insertions(+), 98 deletions(-)

diff --git a/core/src/lib.rs b/core/src/lib.rs
index 7ce513ea83..c0ada218b2 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -99,4 +99,4 @@ pub use crate::service::ClientService;
 pub use crate::transaction::{
     LocalizedTransaction, PendingSignedTransactions, SignedTransaction, UnverifiedTransaction,
 };
-pub use crate::types::{BlockId, TransactionId};
+pub use crate::types::{BlockId, BlockStatus, TransactionId};

diff --git a/spec/Block-Synchronization-Extension.md b/spec/Block-Synchronization-Extension.md
index dac2cfd8f6..2a70ed0e8c 100644
--- a/spec/Block-Synchronization-Extension.md
+++ b/spec/Block-Synchronization-Extension.md
@@ -19,13 +19,14 @@ Message :=
 ### Status
 
 ```
-Status(total_score, best_hash, genesis_hash)
+Status(nonce, best_hash, genesis_hash)
 ```
 
 Send current chain status to peer.
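As the sync-extension hunk later in this patch implements it, the nonce is a plain per-node counter bumped after every `Status` send; it orders a peer's status updates without claiming anything about chain weight. A condensed sender-side sketch:

```rust
// Condensed from the extension hunk in this patch; self.nonce is a local
// counter owned by the extension, incremented after each Status message.
let message = Message::Status {
    nonce: U256::from(self.nonce),
    best_hash: chain_info.best_proposal_block_hash,
    genesis_hash: chain_info.genesis_hash,
};
self.api.send(id, Arc::new(message.rlp_bytes()));
self.nonce += 1;
```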
* Identifier: 0x01 -* Restriction: None +* Restriction: + * `nonce` SHOULD be monotonically increasing every time the message is sent. ## Request messages diff --git a/sync/src/block/downloader/header.rs b/sync/src/block/downloader/header.rs index 963c2135e5..1422c1acf9 100644 --- a/sync/src/block/downloader/header.rs +++ b/sync/src/block/downloader/header.rs @@ -31,21 +31,14 @@ const MAX_HEADER_QUEUE_LENGTH: usize = 1024; const MAX_RETRY: usize = 3; const MAX_WAIT: u64 = 15; -#[derive(Clone)] -struct Pivot { - hash: BlockHash, - total_score: U256, -} - #[derive(Clone)] pub struct HeaderDownloader { // NOTE: Use this member as minimum as possible. client: Arc, - total_score: U256, + nonce: U256, best_hash: BlockHash, - - pivot: Pivot, + pivot: BlockHash, request_time: Option, downloaded: HashMap, queued: HashMap, @@ -53,24 +46,15 @@ pub struct HeaderDownloader { } impl HeaderDownloader { - pub fn total_score(&self) -> U256 { - self.total_score - } - - pub fn new(client: Arc, total_score: U256, best_hash: BlockHash) -> Self { + pub fn new(client: Arc, nonce: U256, best_hash: BlockHash) -> Self { let best_header_hash = client.best_block_header().hash(); - let best_score = client.block_total_score(&BlockId::Latest).expect("Best block always exist"); Self { client, - total_score, + nonce, best_hash, - - pivot: Pivot { - hash: best_header_hash, - total_score: best_score, - }, + pivot: best_header_hash, request_time: None, downloaded: HashMap::new(), queued: HashMap::new(), @@ -78,18 +62,19 @@ impl HeaderDownloader { } } - pub fn update(&mut self, total_score: U256, best_hash: BlockHash) -> bool { - match self.total_score.cmp(&total_score) { + pub fn best_hash(&self) -> BlockHash { + self.best_hash + } + + pub fn update(&mut self, nonce: U256, best_hash: BlockHash) -> bool { + match self.nonce.cmp(&nonce) { Ordering::Equal => true, Ordering::Less => { - self.total_score = total_score; + self.nonce = nonce; self.best_hash = best_hash; if self.client.block_header(&BlockId::Hash(best_hash)).is_some() { - self.pivot = Pivot { - hash: best_hash, - total_score, - } + self.pivot = best_hash; } true } @@ -108,25 +93,25 @@ impl HeaderDownloader { /// Find header from queued headers, downloaded cache and then from blockchain /// Panics if header dosn't exist fn pivot_header(&self) -> Header { - match self.queued.get(&self.pivot.hash) { + match self.queued.get(&self.pivot) { Some(header) => header.clone(), - None => match self.downloaded.get(&self.pivot.hash) { + None => match self.downloaded.get(&self.pivot) { Some(header) => header.clone(), - None => self.client.block_header(&BlockId::Hash(self.pivot.hash)).unwrap(), + None => self.client.block_header(&BlockId::Hash(self.pivot)).unwrap(), }, } } - pub fn pivot_score(&self) -> U256 { - self.pivot.total_score - } - pub fn is_idle(&self) -> bool { - let can_request = self.request_time.is_none() && self.total_score > self.pivot.total_score; + let can_request = self.request_time.is_none() && self.best_hash != self.pivot; self.is_valid() && (can_request || self.is_expired()) } + pub fn is_caught_up(&self) -> bool { + self.pivot == self.best_hash + } + pub fn create_request(&mut self) -> Option { if !self.is_idle() { return None @@ -154,19 +139,15 @@ impl HeaderDownloader { let pivot_header = self.pivot_header(); // This happens when best_hash is imported by other peer. 
- if self.best_hash == self.pivot.hash { + if self.best_hash == self.pivot { ctrace!(SYNC, "Ignore received headers, pivot already reached the best hash"); - } else if first_header_hash == self.pivot.hash { + } else if first_header_hash == self.pivot { for header in headers.iter() { self.downloaded.insert(header.hash(), header.clone()); } // FIXME: skip known headers - let new_scores = headers[1..].iter().fold(U256::zero(), |acc, header| acc + header.score()); - self.pivot = Pivot { - hash: headers.last().expect("Last downloaded header must exist").hash(), - total_score: self.pivot.total_score + new_scores, - } + self.pivot = headers.last().expect("Last downloaded header must exist").hash(); } else if first_header_number < pivot_header.number() { ctrace!( SYNC, @@ -174,17 +155,14 @@ impl HeaderDownloader { ); } else if first_header_number == pivot_header.number() { if pivot_header.number() != 0 { - self.pivot = Pivot { - hash: pivot_header.parent_hash(), - total_score: self.pivot.total_score - pivot_header.score(), - } + self.pivot = pivot_header.parent_hash(); } } else { cerror!( SYNC, - "Invalid header update state. best_hash: {}, self.pivot.hash: {}, first_header_hash: {}", + "Invalid header update state. best_hash: {}, self.pivot: {}, first_header_hash: {}", self.best_hash, - self.pivot.hash, + self.pivot, first_header_hash ); } @@ -203,10 +181,7 @@ impl HeaderDownloader { self.downloaded.remove(&hash); if self.best_hash == hash { - self.pivot = Pivot { - hash, - total_score: self.total_score, - } + self.pivot = hash; } } self.queued.shrink_to_fit(); diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 3b890005aa..fb42f566ab 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -21,8 +21,8 @@ use std::time::Duration; use ccore::encoded::Header as EncodedHeader; use ccore::{ - Block, BlockChainClient, BlockChainTrait, BlockId, BlockImportError, ChainNotify, Client, ImportBlock, ImportError, - UnverifiedTransaction, + Block, BlockChainClient, BlockChainTrait, BlockId, BlockImportError, BlockStatus, ChainNotify, Client, ImportBlock, + ImportError, UnverifiedTransaction, }; use cmerkle::TrieFactory; use cnetwork::{Api, EventSender, NetworkExtension, NodeId}; @@ -74,6 +74,7 @@ pub struct Extension { client: Arc, api: Box, last_request: u64, + nonce: u64, } impl Extension { @@ -125,6 +126,7 @@ impl Extension { client, api, last_request: Default::default(), + nonce: Default::default(), } } @@ -140,13 +142,14 @@ impl Extension { id, Arc::new( Message::Status { - total_score: chain_info.best_proposal_score, + nonce: U256::from(self.nonce), best_hash: chain_info.best_proposal_block_hash, genesis_hash: chain_info.genesis_hash, } .rlp_bytes(), ), ); + self.nonce += 1; } fn send_status_broadcast(&mut self) { @@ -156,13 +159,14 @@ impl Extension { id, Arc::new( Message::Status { - total_score: chain_info.best_proposal_score, + nonce: U256::from(self.nonce), best_hash: chain_info.best_proposal_block_hash, genesis_hash: chain_info.genesis_hash, } .rlp_bytes(), ), ); + self.nonce += 1; } } @@ -177,6 +181,14 @@ impl Extension { } fn send_body_request(&mut self, id: &NodeId) { + if let Some(downloader) = self.header_downloaders.get(&id) { + if self.client.block_status(&BlockId::Hash(downloader.best_hash())) == BlockStatus::InChain { + // Peer is lagging behind the local blockchain. 
+ // We don't need to request block bodies to this peer + return + } + } + self.check_sync_variable(); if let Some(requests) = self.requests.get_mut(id) { let have_body_request = { @@ -319,10 +331,10 @@ impl NetworkExtension for Extension { if let Ok(received_message) = Rlp::new(data).as_val() { match received_message { Message::Status { - total_score, + nonce, best_hash, genesis_hash, - } => self.on_peer_status(id, total_score, best_hash, genesis_hash), + } => self.on_peer_status(id, nonce, best_hash, genesis_hash), Message::Request(request_id, request) => self.on_peer_request(id, request_id, request), Message::Response(request_id, response) => self.on_peer_response(id, request_id, response), } @@ -348,7 +360,6 @@ impl NetworkExtension for Extension { } State::SnapshotChunk(..) => unimplemented!(), State::Full => { - let best_proposal_score = self.client.chain_info().best_proposal_score; for id in &peer_ids { let request = self.header_downloaders.get_mut(id).and_then(HeaderDownloader::create_request); @@ -359,15 +370,7 @@ impl NetworkExtension for Extension { } for id in peer_ids { - let peer_score = if let Some(peer) = self.header_downloaders.get(&id) { - peer.total_score() - } else { - U256::zero() - }; - - if peer_score > best_proposal_score { - self.send_body_request(&id); - } + self.send_body_request(&id); } } } @@ -516,7 +519,7 @@ impl Extension { } impl Extension { - fn on_peer_status(&mut self, from: &NodeId, total_score: U256, best_hash: BlockHash, genesis_hash: BlockHash) { + fn on_peer_status(&mut self, from: &NodeId, nonce: U256, best_hash: BlockHash, genesis_hash: BlockHash) { // Validity check if genesis_hash != self.client.chain_info().genesis_hash { cinfo!(SYNC, "Genesis hash mismatch with peer {}", from); @@ -525,17 +528,17 @@ impl Extension { match self.header_downloaders.entry(*from) { Entry::Occupied(mut peer) => { - if !peer.get_mut().update(total_score, best_hash) { + if !peer.get_mut().update(nonce, best_hash) { // FIXME: It should be an error level if the consensus is PoW. cdebug!(SYNC, "Peer #{} status updated but score is less than before", from); return } } Entry::Vacant(e) => { - e.insert(HeaderDownloader::new(self.client.clone(), total_score, best_hash)); + e.insert(HeaderDownloader::new(self.client.clone(), nonce, best_hash)); } } - cinfo!(SYNC, "Peer #{} status update: total_score: {}, best_hash: {}", from, total_score, best_hash); + cinfo!(SYNC, "Peer #{} status update: nonce: {}, best_hash: {}", from, nonce, best_hash); } fn on_peer_request(&self, from: &NodeId, id: u64, request: RequestMessage) { @@ -750,14 +753,12 @@ impl Extension { }, State::SnapshotChunk(..) 
=> {} State::Full => { - let (mut completed, pivot_score_changed) = if let Some(peer) = self.header_downloaders.get_mut(from) { - let before_pivot_score = peer.pivot_score(); + let (mut completed, peer_is_caught_up) = if let Some(peer) = self.header_downloaders.get_mut(from) { let encoded: Vec<_> = headers.iter().map(|h| EncodedHeader::new(h.rlp_bytes().to_vec())).collect(); peer.import_headers(&encoded); - let after_pivot_score = peer.pivot_score(); - (peer.downloaded(), before_pivot_score != after_pivot_score) + (peer.downloaded(), peer.is_caught_up()) } else { - (Vec::new(), false) + (Vec::new(), true) }; completed.sort_unstable_by_key(EncodedHeader::number); @@ -783,7 +784,7 @@ impl Extension { peer.mark_as_imported(exists); peer.create_request() }); - if pivot_score_changed { + if !peer_is_caught_up { if let Some(request) = request { self.send_header_request(from, request); } @@ -825,20 +826,11 @@ impl Extension { } } - let total_score = self.client.chain_info().best_proposal_score; let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); peer_ids.shuffle(&mut thread_rng()); for id in peer_ids { - let peer_score = if let Some(peer) = self.header_downloaders.get(&id) { - peer.total_score() - } else { - U256::zero() - }; - - if peer_score > total_score { - self.send_body_request(&id); - } + self.send_body_request(&id); } } } diff --git a/sync/src/block/message/mod.rs b/sync/src/block/message/mod.rs index 733a052a90..04b3236f2a 100644 --- a/sync/src/block/message/mod.rs +++ b/sync/src/block/message/mod.rs @@ -35,7 +35,7 @@ const MESSAGE_ID_STATE_CHUNK: u8 = 0x0b; #[derive(Debug, PartialEq)] pub enum Message { Status { - total_score: U256, + nonce: U256, best_hash: BlockHash, genesis_hash: BlockHash, }, @@ -47,7 +47,7 @@ impl Encodable for Message { fn rlp_append(&self, s: &mut RlpStream) { match self { Message::Status { - total_score, + nonce, best_hash, genesis_hash, } => { @@ -55,7 +55,7 @@ impl Encodable for Message { s.append(&MESSAGE_ID_STATUS); s.begin_list(3); - s.append(total_score); + s.append(nonce); s.append(best_hash); s.append(genesis_hash); } @@ -97,7 +97,7 @@ impl Decodable for Message { } Ok(Message::Status { - total_score: message.val_at(0)?, + nonce: message.val_at(0)?, best_hash: message.val_at(1)?, genesis_hash: message.val_at(2)?, }) @@ -134,7 +134,7 @@ mod tests { #[test] fn status_message_rlp() { rlp_encode_and_decode_test!(Message::Status { - total_score: U256::default(), + nonce: U256::zero(), best_hash: H256::default().into(), genesis_hash: H256::default().into(), }); diff --git a/test/src/helper/mock/blockSyncMessage.ts b/test/src/helper/mock/blockSyncMessage.ts index d064d825b4..19385edd0d 100644 --- a/test/src/helper/mock/blockSyncMessage.ts +++ b/test/src/helper/mock/blockSyncMessage.ts @@ -64,7 +64,9 @@ export class BlockSyncMessage { if (msgId === MessageType.MESSAGE_ID_STATUS) { Emitter.emit("status"); const msg = decodedmsg[1]; - const totalScore = new U256(parseInt(msg[0].toString("hex"), 16)); + const totalScore = new U256( + parseInt(msg[0].toString("hex"), 16) || 0 + ); const bestHash = new H256(msg[1].toString("hex")); const genesisHash = new H256(msg[2].toString("hex")); return new BlockSyncMessage({ From 443a17848232f6cf59a961abb20e426db3d28bae Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Mon, 18 Nov 2019 20:13:39 +0900 Subject: [PATCH 26/52] Add SnapshotNotifySender to Tendermint worker --- codechain/run_node.rs | 7 +++--- core/src/consensus/tendermint/engine.rs | 5 ++++ core/src/consensus/tendermint/mod.rs | 13 
+++++++++-- core/src/consensus/tendermint/worker.rs | 31 +++++++++++++++++++++++-- 4 files changed, 49 insertions(+), 7 deletions(-) diff --git a/codechain/run_node.rs b/codechain/run_node.rs index ddf1c892fd..7667cbeb3b 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -366,10 +366,11 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { } let _snapshot_service = { + let client = client.client(); + let (tx, rx) = snapshot_notify::create(); + client.engine().register_snapshot_notify_sender(tx); + if !config.snapshot.disable.unwrap() { - let client = client.client(); - let (tx, rx) = snapshot_notify::create(); - client.engine().register_snapshot_notify_sender(tx); let service = Arc::new(SnapshotService::new(client, rx, config.snapshot.path.unwrap())); Some(service) } else { diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs index 22af949ea7..0216f38ffd 100644 --- a/core/src/consensus/tendermint/engine.rs +++ b/core/src/consensus/tendermint/engine.rs @@ -37,6 +37,7 @@ use super::worker; use super::{ChainNotify, Tendermint, SEAL_FIELDS}; use crate::account_provider::AccountProvider; use crate::block::*; +use crate::client::snapshot_notify::NotifySender as SnapshotNotifySender; use crate::client::{Client, ConsensusClient}; use crate::codechain_machine::CodeChainMachine; use crate::consensus::tendermint::params::TimeGapParams; @@ -359,6 +360,10 @@ impl ConsensusEngine for Tendermint { client.add_notify(Arc::downgrade(&self.chain_notify) as Weak); } + fn register_snapshot_notify_sender(&self, sender: SnapshotNotifySender) { + self.snapshot_notify_sender_initializer.send(sender).unwrap(); + } + fn complete_register(&self) { let (result, receiver) = crossbeam::bounded(1); self.inner.send(worker::Event::Restore(result)).unwrap(); diff --git a/core/src/consensus/tendermint/mod.rs b/core/src/consensus/tendermint/mod.rs index 721ce1cb34..13cd8a4e15 100644 --- a/core/src/consensus/tendermint/mod.rs +++ b/core/src/consensus/tendermint/mod.rs @@ -41,6 +41,7 @@ pub use self::types::{Height, Step, View}; use super::{stake, ValidatorSet}; use crate::client::ConsensusClient; use crate::codechain_machine::CodeChainMachine; +use crate::snapshot_notify::NotifySender as SnapshotNotifySender; use crate::ChainNotify; /// Timer token representing the consensus step timeouts. 
@@ -58,6 +59,7 @@ pub struct Tendermint { client: RwLock>>, external_params_initializer: crossbeam::Sender, extension_initializer: crossbeam::Sender<(crossbeam::Sender, Weak)>, + snapshot_notify_sender_initializer: crossbeam::Sender, timeouts: TimeoutParams, join: Option>, quit_tendermint: crossbeam::Sender<()>, @@ -93,8 +95,14 @@ impl Tendermint { let timeouts = our_params.timeouts; let machine = Arc::new(machine); - let (join, external_params_initializer, extension_initializer, inner, quit_tendermint) = - worker::spawn(our_params.validators); + let ( + join, + external_params_initializer, + extension_initializer, + snapshot_notify_sender_initializer, + inner, + quit_tendermint, + ) = worker::spawn(our_params.validators); let action_handlers: Vec> = vec![stake.clone()]; let chain_notify = Arc::new(TendermintChainNotify::new(inner.clone())); @@ -102,6 +110,7 @@ impl Tendermint { client: Default::default(), external_params_initializer, extension_initializer, + snapshot_notify_sender_initializer, timeouts, join: Some(join), quit_tendermint, diff --git a/core/src/consensus/tendermint/worker.rs b/core/src/consensus/tendermint/worker.rs index aec882b908..f26048136f 100644 --- a/core/src/consensus/tendermint/worker.rs +++ b/core/src/consensus/tendermint/worker.rs @@ -50,6 +50,7 @@ use crate::consensus::validator_set::{DynamicValidator, ValidatorSet}; use crate::consensus::{EngineError, Seal}; use crate::encoded; use crate::error::{BlockError, Error}; +use crate::snapshot_notify::NotifySender as SnapshotNotifySender; use crate::transaction::{SignedTransaction, UnverifiedTransaction}; use crate::views::BlockView; use crate::BlockId; @@ -59,6 +60,7 @@ type SpawnResult = ( JoinHandle<()>, crossbeam::Sender, crossbeam::Sender<(crossbeam::Sender, Weak)>, + crossbeam::Sender, crossbeam::Sender, crossbeam::Sender<()>, ); @@ -97,6 +99,7 @@ struct Worker { time_gap_params: TimeGapParams, timeout_token_nonce: usize, vote_regression_checker: VoteRegressionChecker, + snapshot_notify_sender: SnapshotNotifySender, } pub enum Event { @@ -180,6 +183,7 @@ impl Worker { extension: EventSender, client: Weak, time_gap_params: TimeGapParams, + snapshot_notify_sender: SnapshotNotifySender, ) -> Self { Worker { client, @@ -198,6 +202,7 @@ impl Worker { time_gap_params, timeout_token_nonce: ENGINE_TIMEOUT_TOKEN_NONCE_BASE, vote_regression_checker: VoteRegressionChecker::new(), + snapshot_notify_sender, } } @@ -206,6 +211,7 @@ impl Worker { let (quit, quit_receiver) = crossbeam::bounded(1); let (external_params_initializer, external_params_receiver) = crossbeam::bounded(1); let (extension_initializer, extension_receiver) = crossbeam::bounded(1); + let (snapshot_notify_sender_initializer, snapshot_notify_sender_receiver) = crossbeam::bounded(1); let join = Builder::new() .name("tendermint".to_string()) .spawn(move || { @@ -249,8 +255,29 @@ impl Worker { return } }; + // TODO: Make initialization steps to order insensitive. + let snapshot_notify_sender = crossbeam::select! 
{
+                    recv(snapshot_notify_sender_receiver) -> msg => {
+                        match msg {
+                            Ok(sender) => sender,
+                            Err(crossbeam::RecvError) => {
+                                cerror!(ENGINE, "The snapshot notify sender is not initialized.");
+                                return
+                            }
+                        }
+                    }
+                    recv(quit_receiver) -> msg => {
+                        match msg {
+                            Ok(()) => {},
+                            Err(crossbeam::RecvError) => {
+                                cerror!(ENGINE, "The quit channel for tendermint thread had been closed.");
+                            }
+                        }
+                        return
+                    }
+                };
                 validators.register_client(Weak::clone(&client));
-                let mut inner = Self::new(validators, extension, client, time_gap_params);
+                let mut inner = Self::new(validators, extension, client, time_gap_params, snapshot_notify_sender);
                 loop {
                     crossbeam::select! {
                         recv(receiver) -> msg => {
@@ -374,7 +401,7 @@ impl Worker {
             }
         })
         .unwrap();
-        (join, external_params_initializer, extension_initializer, sender, quit)
+        (join, external_params_initializer, extension_initializer, snapshot_notify_sender_initializer, sender, quit)
     }
 
     /// The client is a thread-safe struct. Using it in multi-threads is safe.

From 4e5ba6b176c2c155a6c0d21aa0670bea6f82269b Mon Sep 17 00:00:00 2001
From: SeongChan Lee 
Date: Tue, 19 Nov 2019 13:55:01 +0900
Subject: [PATCH 27/52] Implement snapshot on term end

---
 core/src/consensus/tendermint/engine.rs |  2 +-
 core/src/consensus/tendermint/worker.rs | 22 ++++++++++++++++++++++
 sync/src/snapshot/mod.rs                |  1 +
 3 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs
index 0216f38ffd..4404cdaef2 100644
--- a/core/src/consensus/tendermint/engine.rs
+++ b/core/src/consensus/tendermint/engine.rs
@@ -404,7 +404,7 @@ impl ConsensusEngine for Tendermint {
     }
 }
 
-fn is_term_changed(header: &Header, parent: &Header, term_seconds: u64) -> bool {
+pub(crate) fn is_term_changed(header: &Header, parent: &Header, term_seconds: u64) -> bool {
     if term_seconds == 0 {
         return false
     }
diff --git a/core/src/consensus/tendermint/worker.rs b/core/src/consensus/tendermint/worker.rs
index f26048136f..749b5252e1 100644
--- a/core/src/consensus/tendermint/worker.rs
+++ b/core/src/consensus/tendermint/worker.rs
@@ -1660,6 +1660,28 @@ impl Worker {
             }
         }
 
+        let mut last_term_end = None;
+        for block_hash in &enacted {
+            let header = c.block_header(&BlockId::Hash(*block_hash)).expect("Block is enacted").decode();
+            if header.number() == 0 {
+                continue
+            }
+            let parent_header =
+                c.block_header(&BlockId::Hash(*header.parent_hash())).expect("Parent block should be enacted").decode();
+            let term_seconds = if let Some(p) = c.term_common_params(parent_header.hash().into()) {
+                p.term_seconds()
+            } else {
+                continue
+            };
+            if super::engine::is_term_changed(&header, &parent_header, term_seconds) {
+                last_term_end = Some(*block_hash);
+            }
+        }
+        if let Some(last_term_end) = last_term_end {
+            // TODO: Reduce the snapshot frequency.
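+            // Hand the hash of the last term-closing block to the snapshot service,
+            // which takes a state snapshot at that block.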
+ self.snapshot_notify_sender.notify(last_term_end); + } + if let Some((last, rest)) = imported.split_last() { let (imported, last_proposal_header) = { let header = diff --git a/sync/src/snapshot/mod.rs b/sync/src/snapshot/mod.rs index 3c27edfb11..80d0835da2 100644 --- a/sync/src/snapshot/mod.rs +++ b/sync/src/snapshot/mod.rs @@ -57,6 +57,7 @@ impl Service { } else { cinfo!(SYNC, "Snapshot is ready for block: {}", block_hash) } + // TODO: Prune old snapshots } cinfo!(SYNC, "Snapshot service is stopped") }); From 7490d25a888e04b6e0e7e3640354397f8e696975 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Fri, 6 Dec 2019 14:53:13 +0900 Subject: [PATCH 28/52] Add snapshot sync test with Tendermint dynamic validator --- test/src/e2e.dynval/2/snapshot.test.ts | 92 ++++++++++++++++++++++ test/src/e2e.dynval/setup.ts | 37 ++++++--- test/tendermint.dynval/snapshot-config.yml | 19 +++++ 3 files changed, 138 insertions(+), 10 deletions(-) create mode 100644 test/src/e2e.dynval/2/snapshot.test.ts create mode 100644 test/tendermint.dynval/snapshot-config.yml diff --git a/test/src/e2e.dynval/2/snapshot.test.ts b/test/src/e2e.dynval/2/snapshot.test.ts new file mode 100644 index 0000000000..32374c992d --- /dev/null +++ b/test/src/e2e.dynval/2/snapshot.test.ts @@ -0,0 +1,92 @@ +// Copyright 2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
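+
+// This test boots a three-validator Tendermint network with snapshots enabled
+// and checks that a snapshot for the last block of a finished term appears on disk.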
+
+import * as chai from "chai";
+import { expect } from "chai";
+import * as chaiAsPromised from "chai-as-promised";
+import * as fs from "fs";
+import "mocha";
+import * as path from "path";
+
+import mkdirp = require("mkdirp");
+import { validators } from "../../../tendermint.dynval/constants";
+import { PromiseExpect } from "../../helper/promise";
+import { setTermTestTimeout, withNodes } from "../setup";
+
+chai.use(chaiAsPromised);
+
+const SNAPSHOT_CONFIG = `${__dirname}/../../../tendermint.dynval/snapshot-config.yml`;
+const SNAPSHOT_PATH = `${__dirname}/../../../../snapshot/`;
+
+describe("Snapshot for Tendermint with Dynamic Validator", function() {
+    const promiseExpect = new PromiseExpect();
+    const snapshotValidators = validators.slice(0, 3);
+    const { nodes } = withNodes(this, {
+        promiseExpect,
+        overrideParams: {
+            maxNumOfValidators: 3
+        },
+        validators: snapshotValidators.map((signer, index) => ({
+            signer,
+            delegation: 5000,
+            deposit: 10_000_000 - index // tie-breaker
+        })),
+        modify: () => {
+            mkdirp.sync(SNAPSHOT_PATH);
+            const snapshotPath = fs.mkdtempSync(SNAPSHOT_PATH);
+            return {
+                additionalArgv: [
+                    "--snapshot-path",
+                    snapshotPath,
+                    "--config",
+                    SNAPSHOT_CONFIG
+                ],
+                nodeAdditionalProperties: {
+                    snapshotPath
+                }
+            };
+        }
+    });
+
+    it("should exist after some time", async function() {
+        const termWaiter = setTermTestTimeout(this, {
+            terms: 1
+        });
+        const termMetadata = await termWaiter.waitNodeUntilTerm(nodes[0], {
+            target: 2,
+            termPeriods: 1
+        });
+
+        const blockHash = (await nodes[0].sdk.rpc.chain.getBlockHash(
+            termMetadata.lastTermFinishedBlockNumber
+        ))!;
+        const stateRoot = (await nodes[0].sdk.rpc.chain.getBlock(blockHash))!
+            .stateRoot;
+        expect(
+            fs.existsSync(
+                path.join(
+                    nodes[0].snapshotPath,
+                    blockHash.toString(),
+                    stateRoot.toString()
+                )
+            )
+        ).to.be.true;
+    });
+
+    afterEach(async function() {
+        promiseExpect.checkFulfilled();
+    });
+});
diff --git a/test/src/e2e.dynval/setup.ts b/test/src/e2e.dynval/setup.ts
index 2c4aefa645..6cc695e2fb 100644
--- a/test/src/e2e.dynval/setup.ts
+++ b/test/src/e2e.dynval/setup.ts
@@ -39,17 +39,29 @@ interface ValidatorConfig {
     delegation?: U64Value;
 }
 
-export function withNodes(
+interface NodePropertyModifier {
+    additionalArgv: string[];
+    nodeAdditionalProperties: T;
+}
+
+export function withNodes(
     suite: Suite,
     options: {
         promiseExpect: PromiseExpect;
        validators: ValidatorConfig[];
         overrideParams?: Partial;
         onBeforeEnable?: (nodes: CodeChain[]) => Promise;
+        modify?: (signer: Signer, index: number) => NodePropertyModifier;
     }
 ) {
-    const nodes: CodeChain[] = [];
-    const { overrideParams = {} } = options;
+    const nodes: (CodeChain & T)[] = [];
+    const {
+        overrideParams = {},
+        modify = () => ({
+            additionalArgv: [],
+            nodeAdditionalProperties: {} as T
+        })
+    } = options;
     const initialParams = {
         ...defaultParams,
         ...overrideParams
@@ -62,7 +74,8 @@
         nodes.length = 0;
         const newNodes = await createNodes({
             ...options,
-            initialParams
+            initialParams,
+            modify
         });
         nodes.push(...newNodes);
     });
@@ -95,14 +108,15 @@ export function findNode(nodes: CodeChain[], signer: Signer) {
     );
 }
 
-async function createNodes(options: {
+async function createNodes(options: {
     promiseExpect: PromiseExpect;
     validators: ValidatorConfig[];
     initialParams: CommonParams;
     onBeforeEnable?: (nodes: CodeChain[]) => Promise;
-}): Promise {
+    modify: (signer: Signer, index: number) => NodePropertyModifier;
+}): Promise<(CodeChain & T)[]> {
     const chain =
`${__dirname}/../scheme/tendermint-dynval.json`; - const { promiseExpect, validators, initialParams } = options; + const { promiseExpect, validators, initialParams, modify } = options; const initialNodes: CodeChain[] = []; const initialValidators = [ @@ -124,20 +138,23 @@ async function createNodes(options: { }); } - const nodes: CodeChain[] = []; + const nodes: (CodeChain & T)[] = []; for (let i = 0; i < validators.length; i++) { const { signer: validator } = validators[i]; - nodes[i] = new CodeChain({ + const modifier = modify(validator, i); + const node = new CodeChain({ chain, argv: [ "--engine-signer", validator.platformAddress.value, "--password-path", `test/tendermint.dynval/${validator.platformAddress.value}/password.json`, - "--force-sealing" + "--force-sealing", + ...modifier.additionalArgv ], additionalKeysPath: `tendermint.dynval/${validator.platformAddress.value}/keys` }); + nodes[i] = Object.assign(node, modifier.nodeAdditionalProperties); nodes[i].signer = validator; } let bootstrapFailed = false; diff --git a/test/tendermint.dynval/snapshot-config.yml b/test/tendermint.dynval/snapshot-config.yml new file mode 100644 index 0000000000..9f5c890280 --- /dev/null +++ b/test/tendermint.dynval/snapshot-config.yml @@ -0,0 +1,19 @@ +[codechain] + +[mining] + +[network] + +[rpc] + +[ipc] + +[ws] + +[snapshot] +disable = false + +[stratum] + +[email_alarm] + From 6d5af4de7be89d1a5e839a47454a777bb3591311 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Mon, 18 Nov 2019 16:53:58 +0900 Subject: [PATCH 29/52] Send and receive snapshot chunk requests --- spec/Block-Synchronization-Extension.md | 2 +- sync/src/block/extension.rs | 190 +++++++++++++++++++++--- util/merkle/src/snapshot/compress.rs | 2 +- util/merkle/src/snapshot/mod.rs | 31 ++-- 4 files changed, 189 insertions(+), 36 deletions(-) diff --git a/spec/Block-Synchronization-Extension.md b/spec/Block-Synchronization-Extension.md index 2a70ed0e8c..aa6f3f0372 100644 --- a/spec/Block-Synchronization-Extension.md +++ b/spec/Block-Synchronization-Extension.md @@ -110,4 +110,4 @@ Response to `GetStateChunk` message. Snappy algorithm is used for compression of * Restriction: * Number and order of chunks included in this message MUST be equal to request information. * Node corresponding to `chunk_root` in request MUST be included - * If sender doesn’t have a chunk for the requested hash, corresponding chunk MUST be compressed([]), not omitted. + * If sender doesn’t have a chunk for the requested hash, corresponding chunk MUST be `[]`(uncompressed), not omitted. 
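
The `[]` rule keeps the response aligned with the request: the requester can
pair chunk roots and chunks by index and retry only the missing roots against
another peer. A minimal consumer-side sketch for one entry of the response,
using the `cmerkle::snapshot` API introduced in this series (the string errors
are illustrative; the real handling lives in `on_chunk_response` below):

    use cmerkle::snapshot::{ChunkDecompressor, RecoveredChunk};
    use primitives::H256;

    fn apply_chunk(compressed: &[u8], root: H256) -> Result<RecoveredChunk, String> {
        if compressed.is_empty() {
            // The peer does not have this chunk; retry `root` against another peer.
            return Err(format!("missing chunk {}", root))
        }
        let raw = ChunkDecompressor::from_slice(compressed)
            .decompress()
            .map_err(|e| format!("broken chunk compression: {}", e))?;
        // recover() re-hashes every node and fails unless the result matches `root`.
        raw.recover(root).map_err(|e| format!("invalid chunk: {}", e))
    }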
diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index fb42f566ab..d4e29dcbda 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -24,6 +24,8 @@ use ccore::{ Block, BlockChainClient, BlockChainTrait, BlockId, BlockImportError, BlockStatus, ChainNotify, Client, ImportBlock, ImportError, UnverifiedTransaction, }; +use cmerkle::snapshot::ChunkDecompressor; +use cmerkle::snapshot::Restore as SnapshotRestore; use cmerkle::TrieFactory; use cnetwork::{Api, EventSender, NetworkExtension, NodeId}; use cstate::FindActionHandler; @@ -32,6 +34,7 @@ use ctypes::header::{Header, Seal}; use ctypes::transaction::Action; use ctypes::{BlockHash, BlockNumber}; use hashdb::AsHashDB; +use kvdb::DBTransaction; use primitives::{H256, U256}; use rand::prelude::SliceRandom; use rand::thread_rng; @@ -58,7 +61,10 @@ pub struct TokenInfo { #[derive(Debug)] enum State { SnapshotHeader(BlockHash, u64), - SnapshotChunk(H256), + SnapshotChunk { + block: BlockHash, + restore: SnapshotRestore, + }, Full, } @@ -85,9 +91,13 @@ impl Extension { Some((hash, num)) => match client.block_header(&BlockId::Number(num)) { Some(ref header) if *header.hash() == hash => { let state_db = client.state_db().read(); - match TrieFactory::readonly(state_db.as_hashdb(), &header.state_root()) { + let state_root = header.state_root(); + match TrieFactory::readonly(state_db.as_hashdb(), &state_root) { Ok(ref trie) if trie.is_complete() => State::Full, - _ => State::SnapshotChunk(*header.hash()), + _ => State::SnapshotChunk { + block: hash.into(), + restore: SnapshotRestore::new(state_root), + }, } } _ => State::SnapshotHeader(hash.into(), num), @@ -222,6 +232,37 @@ impl Extension { self.check_sync_variable(); } + fn send_chunk_request(&mut self, block: &BlockHash, root: &H256) { + let have_chunk_request = self.requests.values().flatten().any(|r| match r { + (_, RequestMessage::StateChunk(..)) => true, + _ => false, + }); + + if !have_chunk_request { + let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); + peer_ids.shuffle(&mut thread_rng()); + if let Some(id) = peer_ids.first() { + if let Some(requests) = self.requests.get_mut(&id) { + let req = RequestMessage::StateChunk(*block, vec![*root]); + cdebug!(SYNC, "Request chunk to {} {:?}", id, req); + let request_id = self.last_request; + self.last_request += 1; + requests.push((request_id, req.clone())); + self.api.send(id, Arc::new(Message::Request(request_id, req).rlp_bytes())); + + let token = &self.tokens[id]; + let token_info = self.tokens_info.get_mut(token).unwrap(); + + let _ = self.api.clear_timer(*token); + self.api + .set_timer_once(*token, Duration::from_millis(SYNC_EXPIRE_REQUEST_INTERVAL)) + .expect("Timer set succeeds"); + token_info.request_id = Some(request_id); + } + } + } + } + fn check_sync_variable(&self) { let mut has_error = false; for id in self.header_downloaders.keys() { @@ -238,6 +279,14 @@ impl Extension { }) .collect(); + let chunk_requests: Vec = requests + .iter() + .filter_map(|r| match r { + (_, RequestMessage::StateChunk(..)) => Some(r.1.clone()), + _ => None, + }) + .collect(); + if body_requests.len() > 1 { cerror!(SYNC, "Body request length {} > 1, body_requests: {:?}", body_requests.len(), body_requests); has_error = true; @@ -246,16 +295,18 @@ impl Extension { let token = &self.tokens[id]; let token_info = &self.tokens_info[token]; - match (token_info.request_id, body_requests.len()) { + match (token_info.request_id, body_requests.len() + chunk_requests.len()) { (Some(_), 1) => {} (None, 
0) => {} _ => { cerror!( SYNC, - "request_id: {:?}, body_requests.len(): {}, body_requests: {:?}", + "request_id: {:?}, body_requests.len(): {}, body_requests: {:?}, chunk_requests.len(): {}, chunk_requests: {:?}", token_info.request_id, body_requests.len(), - body_requests + body_requests, + chunk_requests.len(), + chunk_requests ); has_error = true; } @@ -358,7 +409,17 @@ impl NetworkExtension for Extension { }); } } - State::SnapshotChunk(..) => unimplemented!(), + State::SnapshotChunk { + block, + ref mut restore, + } => { + if let Some(root) = restore.next_to_feed() { + self.send_chunk_request(&block, &root); + } else { + cdebug!(SYNC, "Transitioning state to {:?}", State::Full); + self.state = State::Full; + } + } State::Full => { for id in &peer_ids { let request = @@ -454,12 +515,17 @@ impl Extension { State::SnapshotHeader(hash, ..) => { if imported.contains(&hash) { let header = self.client.block_header(&BlockId::Hash(hash)).expect("Imported header must exist"); - Some(State::SnapshotChunk(header.state_root())) + Some(State::SnapshotChunk { + block: hash, + restore: SnapshotRestore::new(header.state_root()), + }) } else { None } } - State::SnapshotChunk(..) => unimplemented!(), + State::SnapshotChunk { + .. + } => None, State::Full => { for peer in self.header_downloaders.values_mut() { peer.mark_as_imported(imported.clone()); @@ -499,12 +565,17 @@ impl Extension { State::SnapshotHeader(hash, ..) => { if imported.contains(&hash) { let header = self.client.block_header(&BlockId::Hash(hash)).expect("Imported header must exist"); - Some(State::SnapshotChunk(header.state_root())) + Some(State::SnapshotChunk { + block: hash, + restore: SnapshotRestore::new(header.state_root()), + }) } else { None } } - State::SnapshotChunk(..) => None, + State::SnapshotChunk { + .. + } => None, State::Full => { self.body_downloader.remove_target(&imported); self.body_downloader.remove_target(&invalid); @@ -580,7 +651,7 @@ impl Extension { RequestMessage::Bodies(hashes) => !hashes.is_empty(), RequestMessage::StateChunk { .. - } => unimplemented!(), + } => true, } } @@ -659,7 +730,24 @@ impl Extension { self.on_body_response(hashes, bodies); self.check_sync_variable(); } - ResponseMessage::StateChunk(..) => unimplemented!(), + ResponseMessage::StateChunk(chunks) => { + let roots = match request { + RequestMessage::StateChunk(_, roots) => roots, + _ => unreachable!(), + }; + if let Some(token) = self.tokens.get(from) { + if let Some(token_info) = self.tokens_info.get_mut(token) { + if token_info.request_id.is_none() { + ctrace!(SYNC, "Expired before handling response"); + return + } + self.api.clear_timer(*token).expect("Timer clear succeed"); + token_info.request_id = None; + } + } + self.dismiss_request(from, id); + self.on_chunk_response(from, &roots, &chunks); + } } } } @@ -713,12 +801,10 @@ impl Extension { } true } - ( - RequestMessage::StateChunk { - .. 
- }, - ResponseMessage::StateChunk(..), - ) => unimplemented!(), + (RequestMessage::StateChunk(_, roots), ResponseMessage::StateChunk(chunks)) => { + // Check length + roots.len() == chunks.len() + } _ => { cwarn!(SYNC, "Invalid response type"); false @@ -733,7 +819,10 @@ impl Extension { [header] if header.hash() == hash => { match self.client.import_bootstrap_header(&header) { Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { - self.state = State::SnapshotChunk(*header.state_root()); + self.state = State::SnapshotChunk { + block: hash, + restore: SnapshotRestore::new(*header.state_root()), + }; } Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} // FIXME: handle import errors @@ -751,7 +840,9 @@ impl Extension { headers.len() ), }, - State::SnapshotChunk(..) => {} + State::SnapshotChunk { + .. + } => {} State::Full => { let (mut completed, peer_is_caught_up) = if let Some(peer) = self.header_downloaders.get_mut(from) { let encoded: Vec<_> = headers.iter().map(|h| EncodedHeader::new(h.rlp_bytes().to_vec())).collect(); @@ -833,6 +924,63 @@ impl Extension { self.send_body_request(&id); } } + + fn on_chunk_response(&mut self, from: &NodeId, roots: &[H256], chunks: &[Vec]) { + if let State::SnapshotChunk { + block, + ref mut restore, + } = self.state + { + for (r, c) in roots.iter().zip(chunks) { + if c.is_empty() { + cdebug!(SYNC, "Peer {} sent empty response for chunk request {}", from, r); + continue + } + let decompressor = ChunkDecompressor::from_slice(c); + let raw_chunk = match decompressor.decompress() { + Ok(chunk) => chunk, + Err(e) => { + cwarn!(SYNC, "Decode failed for chunk response from peer {}: {}", from, e); + continue + } + }; + let recovered = match raw_chunk.recover(*r) { + Ok(chunk) => chunk, + Err(e) => { + cwarn!(SYNC, "Invalid chunk response from peer {}: {}", from, e); + continue + } + }; + + let batch = { + let mut state_db = self.client.state_db().write(); + let hash_db = state_db.as_hashdb_mut(); + restore.feed(hash_db, recovered); + + let mut batch = DBTransaction::new(); + match state_db.journal_under(&mut batch, 0, H256::zero()) { + Ok(_) => batch, + Err(e) => { + cwarn!(SYNC, "Failed to write state chunk to database: {}", e); + continue + } + } + }; + self.client.db().write_buffered(batch); + match self.client.db().flush() { + Ok(_) => cdebug!(SYNC, "Wrote state chunk to database: {}", r), + Err(e) => cwarn!(SYNC, "Failed to flush database: {}", e), + } + } + + if let Some(root) = restore.next_to_feed() { + self.send_chunk_request(&block, &root); + } else { + cdebug!(SYNC, "Transitioning state to {:?}", State::Full); + self.state = State::Full; + } + } + } } pub struct BlockSyncSender(EventSender); diff --git a/util/merkle/src/snapshot/compress.rs b/util/merkle/src/snapshot/compress.rs index c03ea0cc08..e5733baf2a 100644 --- a/util/merkle/src/snapshot/compress.rs +++ b/util/merkle/src/snapshot/compress.rs @@ -35,7 +35,7 @@ impl ChunkDecompressor { } impl<'a> ChunkDecompressor> { - fn from_slice(slice: &'a [u8]) -> Self { + pub fn from_slice(slice: &'a [u8]) -> Self { ChunkDecompressor::new(Cursor::new(slice)) } } diff --git a/util/merkle/src/snapshot/mod.rs b/util/merkle/src/snapshot/mod.rs index 159bcfa823..860fc71d94 100644 --- a/util/merkle/src/snapshot/mod.rs +++ b/util/merkle/src/snapshot/mod.rs @@ -36,22 +36,20 @@ const CHUNK_MAX_NODES: usize = 256; // 16 ^ (CHUNK_HEIGHT-1) /// Example: /// use codechain_merkle::snapshot::Restore; -/// let mut rm = Restore::new(db, root); +/// let mut rm = Restore::new(root); /// while let 
Some(root) = rm.next_to_feed() { /// let raw_chunk = request(block_hash, root)?; /// let chunk = raw_chunk.recover(root)?; -/// rm.feed(chunk); +/// rm.feed(db, chunk); /// } -pub struct Restore<'a> { - db: &'a mut dyn HashDB, +pub struct Restore { pending: Option, unresolved: OrderedHeap>, } -impl<'a> Restore<'a> { - pub fn new(db: &'a mut dyn HashDB, merkle_root: H256) -> Self { +impl Restore { + pub fn new(merkle_root: H256) -> Self { let mut result = Restore { - db, pending: None, unresolved: OrderedHeap::new(), }; @@ -61,13 +59,13 @@ impl<'a> Restore<'a> { result } - pub fn feed(&mut self, chunk: RecoveredChunk) { + pub fn feed(&mut self, db: &mut dyn HashDB, chunk: RecoveredChunk) { let pending_path = self.pending.take().expect("feed() should be called after next()"); assert_eq!(pending_path.chunk_root, chunk.root, "Unexpected chunk"); // Pour nodes into the DB for (key, value) in chunk.nodes { - self.db.emplace(key, value); + db.emplace(key, value); } // Extend search paths @@ -79,8 +77,9 @@ impl<'a> Restore<'a> { } pub fn next_to_feed(&mut self) -> Option { - if let Some(path) = self.unresolved.pop() { - assert!(self.pending.is_none(), "Previous feed() was failed"); + if let Some(pending) = &self.pending { + Some(pending.chunk_root) + } else if let Some(path) = self.unresolved.pop() { let chunk_root = path.chunk_root; self.pending = Some(path.0); @@ -91,6 +90,12 @@ impl<'a> Restore<'a> { } } +impl std::fmt::Debug for Restore { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + f.debug_struct("Restore").field("pending", &self.pending).field("unresolved", &"<...>".to_string()).finish() + } +} + /// Example: /// use std::fs::File; /// use codechain_merkle::snapshot::Snapshot; @@ -293,10 +298,10 @@ mod tests { dbg!(chunks.len()); let mut db = MemoryDB::new(); - let mut recover = Restore::new(&mut db, root); + let mut recover = Restore::new(root); while let Some(chunk_root) = recover.next_to_feed() { let recovered = chunks[&chunk_root].recover(chunk_root).unwrap(); - recover.feed(recovered); + recover.feed(&mut db, recovered); } let trie = TrieDB::try_new(&db, &root).unwrap(); From fe9c7852e0a813ad069ce263bbdf7879091a4bb7 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Mon, 18 Nov 2019 19:02:52 +0900 Subject: [PATCH 30/52] Serve snapshot responses to peers --- codechain/run_node.rs | 5 ++++- sync/src/block/extension.rs | 25 ++++++++++++++++++++++--- sync/src/snapshot/mod.rs | 25 ++++++++++++++++--------- 3 files changed, 42 insertions(+), 13 deletions(-) diff --git a/codechain/run_node.rs b/codechain/run_node.rs index 7667cbeb3b..a4b2d157bb 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -302,7 +302,10 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { (Some(hash), Some(num)) => Some((hash, num)), _ => None, }; - service.register_extension(move |api| BlockSyncExtension::new(client, api, snapshot_target)) + let snapshot_dir = config.snapshot.path.clone(); + service.register_extension(move |api| { + BlockSyncExtension::new(client, api, snapshot_target, snapshot_dir) + }) }; let sync = Arc::new(BlockSyncSender::from(sync_sender.clone())); client.client().add_notify(Arc::downgrade(&sync) as Weak); diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index d4e29dcbda..b3582f2fea 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -16,6 +16,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::fs; use std::sync::Arc; use std::time::Duration; 
@@ -43,6 +44,7 @@ use token_generator::TokenGenerator; use super::downloader::{BodyDownloader, HeaderDownloader}; use super::message::{Message, RequestMessage, ResponseMessage}; +use crate::snapshot::snapshot_path; const SYNC_TIMER_TOKEN: TimerToken = 0; const SYNC_EXPIRE_TOKEN_BEGIN: TimerToken = SYNC_TIMER_TOKEN + 1; @@ -81,10 +83,16 @@ pub struct Extension { api: Box, last_request: u64, nonce: u64, + snapshot_dir: Option, } impl Extension { - pub fn new(client: Arc, api: Box, snapshot_target: Option<(H256, u64)>) -> Extension { + pub fn new( + client: Arc, + api: Box, + snapshot_target: Option<(H256, u64)>, + snapshot_dir: Option, + ) -> Extension { api.set_timer(SYNC_TIMER_TOKEN, Duration::from_millis(SYNC_TIMER_INTERVAL)).expect("Timer set succeeds"); let state = match snapshot_target { @@ -137,6 +145,7 @@ impl Extension { api, last_request: Default::default(), nonce: Default::default(), + snapshot_dir, } } @@ -689,8 +698,18 @@ impl Extension { ResponseMessage::Bodies(bodies) } - fn create_state_chunk_response(&self, _hash: BlockHash, _tree_root: Vec) -> ResponseMessage { - unimplemented!() + fn create_state_chunk_response(&self, hash: BlockHash, chunk_roots: Vec) -> ResponseMessage { + let mut result = Vec::new(); + for root in chunk_roots { + if let Some(dir) = &self.snapshot_dir { + let chunk_path = snapshot_path(&dir, &hash, &root); + match fs::read(chunk_path) { + Ok(chunk) => result.push(chunk), + _ => result.push(Vec::new()), + } + } + } + ResponseMessage::StateChunk(result) } fn on_peer_response(&mut self, from: &NodeId, id: u64, mut response: ResponseMessage) { diff --git a/sync/src/snapshot/mod.rs b/sync/src/snapshot/mod.rs index 80d0835da2..760c4469ae 100644 --- a/sync/src/snapshot/mod.rs +++ b/sync/src/snapshot/mod.rs @@ -32,6 +32,19 @@ pub struct Service { canceller: Option, } +pub fn snapshot_dir(root_dir: &str, block: &BlockHash) -> PathBuf { + let mut path = PathBuf::new(); + path.push(root_dir); + path.push(format!("{:x}", block.deref())); + path +} + +pub fn snapshot_path(root_dir: &str, block: &BlockHash, chunk_root: &H256) -> PathBuf { + let mut path = snapshot_dir(root_dir, block); + path.push(format!("{:x}", chunk_root)); + path +} + impl Service { pub fn new(client: Arc, notify_receiver_source: NotifyReceiverSource, root_dir: String) -> Self { let NotifyReceiverSource(canceller, receiver) = notify_receiver_source; @@ -70,17 +83,11 @@ impl Service { } fn snapshot(db: &dyn HashDB, block_hash: BlockHash, chunk_root: H256, root_dir: &str) -> Result<(), SnapshotError> { - let snapshot_dir = { - let mut res = PathBuf::new(); - res.push(root_dir); - res.push(format!("{:x}", block_hash.deref())); - res - }; - create_dir_all(&snapshot_dir)?; + let snapshot_dir = snapshot_dir(root_dir, &block_hash); + create_dir_all(snapshot_dir)?; for chunk in Snapshot::from_hashdb(db, chunk_root) { - let mut chunk_path = snapshot_dir.clone(); - chunk_path.push(format!("{:x}", chunk.root)); + let chunk_path = snapshot_path(root_dir, &block_hash, &chunk.root); let chunk_file = File::create(chunk_path)?; let compressor = ChunkCompressor::new(chunk_file); compressor.compress_chunk(&chunk)?; From ef3ad4fc3de804c730c56e11fd71c2f5c4f37c41 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Tue, 19 Nov 2019 20:21:22 +0900 Subject: [PATCH 31/52] Use binding for match clause --- sync/src/block/extension.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index b3582f2fea..9ff1ca59d3 100644 --- 
a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -283,7 +283,7 @@ impl Extension { let body_requests: Vec = requests .iter() .filter_map(|r| match r { - (_, RequestMessage::Bodies(..)) => Some(r.1.clone()), + (_, msg @ RequestMessage::Bodies(..)) => Some(msg.clone()), _ => None, }) .collect(); @@ -291,7 +291,7 @@ impl Extension { let chunk_requests: Vec = requests .iter() .filter_map(|r| match r { - (_, RequestMessage::StateChunk(..)) => Some(r.1.clone()), + (_, msg @ RequestMessage::StateChunk(..)) => Some(msg.clone()), _ => None, }) .collect(); From cbe8322b3f6c5ef930464368837ce20fd79b6d66 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Wed, 20 Nov 2019 12:40:36 +0900 Subject: [PATCH 32/52] Add RPC snapshot_getList --- codechain/rpc_apis.rs | 1 + rpc/src/v1/errors.rs | 8 ++++ rpc/src/v1/impls/mod.rs | 2 + rpc/src/v1/impls/snapshot.rs | 88 +++++++++++++++++++++++++++++++++++ rpc/src/v1/traits/mod.rs | 2 + rpc/src/v1/traits/snapshot.rs | 26 +++++++++++ 6 files changed, 127 insertions(+) create mode 100644 rpc/src/v1/impls/snapshot.rs create mode 100644 rpc/src/v1/traits/snapshot.rs diff --git a/codechain/rpc_apis.rs b/codechain/rpc_apis.rs index a447b43b42..bc701275df 100644 --- a/codechain/rpc_apis.rs +++ b/codechain/rpc_apis.rs @@ -37,6 +37,7 @@ impl ApiDependencies { use crpc::v1::*; handler.extend_with(ChainClient::new(Arc::clone(&self.client)).to_delegate()); handler.extend_with(MempoolClient::new(Arc::clone(&self.client)).to_delegate()); + handler.extend_with(SnapshotClient::new(Arc::clone(&self.client), config.snapshot.path.clone()).to_delegate()); if config.rpc.enable_devel_api { handler.extend_with( DevelClient::new(Arc::clone(&self.client), Arc::clone(&self.miner), self.block_sync.clone()) diff --git a/rpc/src/v1/errors.rs b/rpc/src/v1/errors.rs index 2dc961786b..eaf98cf5d4 100644 --- a/rpc/src/v1/errors.rs +++ b/rpc/src/v1/errors.rs @@ -294,6 +294,14 @@ pub fn invalid_custom_action(err: String) -> Error { } } +pub fn io(error: std::io::Error) -> Error { + Error { + code: ErrorCode::InternalError, + message: format!("{}", error), + data: None, + } +} + /// Internal error signifying a logic error in code. /// Should not be used when function can just fail /// because of invalid parameters or incomplete node state. diff --git a/rpc/src/v1/impls/mod.rs b/rpc/src/v1/impls/mod.rs index 45e7678459..3360f5682e 100644 --- a/rpc/src/v1/impls/mod.rs +++ b/rpc/src/v1/impls/mod.rs @@ -21,6 +21,7 @@ mod engine; mod mempool; mod miner; mod net; +mod snapshot; pub use self::account::AccountClient; pub use self::chain::ChainClient; @@ -29,3 +30,4 @@ pub use self::engine::EngineClient; pub use self::mempool::MempoolClient; pub use self::miner::MinerClient; pub use self::net::NetClient; +pub use self::snapshot::SnapshotClient; diff --git a/rpc/src/v1/impls/snapshot.rs b/rpc/src/v1/impls/snapshot.rs new file mode 100644 index 0000000000..f030c9bbd3 --- /dev/null +++ b/rpc/src/v1/impls/snapshot.rs @@ -0,0 +1,88 @@ +// Copyright 2018-2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +use std::fs; +use std::str::FromStr; +use std::sync::Arc; + +use ccore::{BlockChainClient, BlockId}; +use ctypes::BlockHash; +use primitives::H256; + +use jsonrpc_core::Result; + +use super::super::errors; +use super::super::traits::Snapshot; +use super::super::types::BlockNumberAndHash; + +pub struct SnapshotClient +where + C: BlockChainClient, { + client: Arc, + snapshot_path: Option, +} + +impl SnapshotClient +where + C: BlockChainClient, +{ + pub fn new(client: Arc, snapshot_path: Option) -> Self { + SnapshotClient { + client, + snapshot_path, + } + } +} + +impl Snapshot for SnapshotClient +where + C: BlockChainClient + 'static, +{ + fn get_snapshot_list(&self) -> Result> { + if let Some(snapshot_path) = &self.snapshot_path { + let mut result = Vec::new(); + for entry in fs::read_dir(snapshot_path).map_err(errors::io)? { + let entry = entry.map_err(errors::io)?; + + // Check if the entry is a directory + let file_type = entry.file_type().map_err(errors::io)?; + if !file_type.is_dir() { + continue + } + + let path = entry.path(); + let name = match path.file_name().expect("Directories always have file name").to_str() { + Some(n) => n, + None => continue, + }; + let hash = match H256::from_str(name) { + Ok(h) => BlockHash::from(h), + Err(_) => continue, + }; + if let Some(number) = self.client.block_number(&BlockId::Hash(hash)) { + result.push(BlockNumberAndHash { + number, + hash, + }); + } + } + result.sort_unstable_by(|a, b| b.number.cmp(&a.number)); + Ok(result) + } else { + Ok(Vec::new()) + } + } +} diff --git a/rpc/src/v1/traits/mod.rs b/rpc/src/v1/traits/mod.rs index 719f186e49..7f2bd04599 100644 --- a/rpc/src/v1/traits/mod.rs +++ b/rpc/src/v1/traits/mod.rs @@ -21,6 +21,7 @@ mod engine; mod mempool; mod miner; mod net; +mod snapshot; pub use self::account::Account; pub use self::chain::Chain; @@ -29,3 +30,4 @@ pub use self::engine::Engine; pub use self::mempool::Mempool; pub use self::miner::Miner; pub use self::net::Net; +pub use self::snapshot::Snapshot; diff --git a/rpc/src/v1/traits/snapshot.rs b/rpc/src/v1/traits/snapshot.rs new file mode 100644 index 0000000000..0fd9c18366 --- /dev/null +++ b/rpc/src/v1/traits/snapshot.rs @@ -0,0 +1,26 @@ +// Copyright 2018-2019 Kodebox, Inc. +// This file is part of CodeChain. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +use jsonrpc_core::Result; + +use super::super::types::BlockNumberAndHash; + +#[rpc(server)] +pub trait Snapshot { + /// Gets list of block numbers and block hashes of the snapshots. 
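+    ///
+    /// Example request (values are illustrative):
+    /// `{"jsonrpc": "2.0", "method": "snapshot_getList", "params": [], "id": 1}`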
+ #[rpc(name = "snapshot_getList")] + fn get_snapshot_list(&self) -> Result>; +} From af90610e7c632f92eadf008f5d7069a718e09d1d Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Wed, 20 Nov 2019 02:46:02 +0900 Subject: [PATCH 33/52] Add autoremoving expired snapshot feature --- codechain/config/mod.rs | 5 + codechain/config/presets/config.dev.toml | 1 + codechain/config/presets/config.prod.toml | 1 + codechain/run_node.rs | 3 +- sync/src/snapshot/mod.rs | 113 ++++++++++++++++++---- 5 files changed, 103 insertions(+), 20 deletions(-) diff --git a/codechain/config/mod.rs b/codechain/config/mod.rs index f2bc321895..e5dac23f3b 100644 --- a/codechain/config/mod.rs +++ b/codechain/config/mod.rs @@ -316,6 +316,8 @@ fn default_enable_devel_api() -> bool { pub struct Snapshot { pub disable: Option, pub path: Option, + // Snapshot's age in blocks + pub expiration: Option, } #[derive(Deserialize)] @@ -754,6 +756,9 @@ impl Snapshot { if other.path.is_some() { self.path = other.path.clone(); } + if other.expiration.is_some() { + self.expiration = other.expiration; + } } pub fn overwrite_with(&mut self, matches: &clap::ArgMatches) -> Result<(), String> { diff --git a/codechain/config/presets/config.dev.toml b/codechain/config/presets/config.dev.toml index 2b8890b4f7..279704592a 100644 --- a/codechain/config/presets/config.dev.toml +++ b/codechain/config/presets/config.dev.toml @@ -52,6 +52,7 @@ max_connections = 100 [snapshot] disable = false path = "snapshot" +expiration = 100000 # blocks. About a week [stratum] disable = false diff --git a/codechain/config/presets/config.prod.toml b/codechain/config/presets/config.prod.toml index b67e2746bb..013883673d 100644 --- a/codechain/config/presets/config.prod.toml +++ b/codechain/config/presets/config.prod.toml @@ -52,6 +52,7 @@ max_connections = 100 [snapshot] disable = true path = "snapshot" +expiration = 100000 # blocks. About a week [stratum] disable = true diff --git a/codechain/run_node.rs b/codechain/run_node.rs index a4b2d157bb..1bfa40e569 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -374,7 +374,8 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { client.engine().register_snapshot_notify_sender(tx); if !config.snapshot.disable.unwrap() { - let service = Arc::new(SnapshotService::new(client, rx, config.snapshot.path.unwrap())); + let service = + Arc::new(SnapshotService::new(client, rx, config.snapshot.path.unwrap(), config.snapshot.expiration)); Some(service) } else { None diff --git a/sync/src/snapshot/mod.rs b/sync/src/snapshot/mod.rs index 760c4469ae..7a075284a2 100644 --- a/sync/src/snapshot/mod.rs +++ b/sync/src/snapshot/mod.rs @@ -14,18 +14,19 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-use std::fs::{create_dir_all, File}; + +use std::fs; use std::path::PathBuf; +use std::str::FromStr; use std::sync::Arc; use std::thread::{spawn, JoinHandle}; use ccore::snapshot_notify::{NotifyReceiverSource, ReceiverCanceller}; -use ccore::{BlockChainTrait, BlockId, Client}; +use ccore::{BlockChainClient, BlockChainTrait, BlockId, Client}; use cmerkle::snapshot::{ChunkCompressor, Error as SnapshotError, Snapshot}; use ctypes::BlockHash; use hashdb::{AsHashDB, HashDB}; use primitives::H256; -use std::ops::Deref; pub struct Service { join_handle: Option>, @@ -35,7 +36,7 @@ pub struct Service { pub fn snapshot_dir(root_dir: &str, block: &BlockHash) -> PathBuf { let mut path = PathBuf::new(); path.push(root_dir); - path.push(format!("{:x}", block.deref())); + path.push(format!("{:x}", **block)); path } @@ -46,7 +47,12 @@ pub fn snapshot_path(root_dir: &str, block: &BlockHash, chunk_root: &H256) -> Pa } impl Service { - pub fn new(client: Arc, notify_receiver_source: NotifyReceiverSource, root_dir: String) -> Self { + pub fn new( + client: Arc, + notify_receiver_source: NotifyReceiverSource, + root_dir: String, + expiration: Option, + ) -> Self { let NotifyReceiverSource(canceller, receiver) = notify_receiver_source; let join_handle = spawn(move || { cinfo!(SYNC, "Snapshot service is on"); @@ -58,19 +64,26 @@ impl Service { cerror!(SYNC, "There isn't corresponding header for the requested block hash: {}", block_hash,); continue }; - let db_lock = client.state_db().read(); - if let Some(err) = snapshot(db_lock.as_hashdb(), block_hash, state_root, &root_dir).err() { - cerror!( - SYNC, - "Snapshot request failed for block: {}, chunk_root: {}, err: {}", - block_hash, - state_root, - err - ); - } else { - cinfo!(SYNC, "Snapshot is ready for block: {}", block_hash) + { + let db_lock = client.state_db().read(); + if let Err(err) = snapshot(db_lock.as_hashdb(), block_hash, state_root, &root_dir) { + cerror!( + SYNC, + "Snapshot request failed for block: {}, chunk_root: {}, err: {}", + block_hash, + state_root, + err + ); + } else { + cinfo!(SYNC, "Snapshot is ready for block: {}", block_hash) + } + } + + if let Some(expiration) = expiration { + if let Err(err) = cleanup_expired(&client, &root_dir, expiration) { + cerror!(SYNC, "Snapshot cleanup error after block hash {}, err: {}", block_hash, err); + } } - // TODO: Prune old snapshots } cinfo!(SYNC, "Snapshot service is stopped") }); @@ -84,11 +97,11 @@ impl Service { fn snapshot(db: &dyn HashDB, block_hash: BlockHash, chunk_root: H256, root_dir: &str) -> Result<(), SnapshotError> { let snapshot_dir = snapshot_dir(root_dir, &block_hash); - create_dir_all(snapshot_dir)?; + fs::create_dir_all(snapshot_dir)?; for chunk in Snapshot::from_hashdb(db, chunk_root) { let chunk_path = snapshot_path(root_dir, &block_hash, &chunk.root); - let chunk_file = File::create(chunk_path)?; + let chunk_file = fs::File::create(chunk_path)?; let compressor = ChunkCompressor::new(chunk_file); compressor.compress_chunk(&chunk)?; } @@ -96,6 +109,68 @@ fn snapshot(db: &dyn HashDB, block_hash: BlockHash, chunk_root: H256, root_dir: Ok(()) } +fn cleanup_expired(client: &Client, root_dir: &str, expiration: u64) -> Result<(), SnapshotError> { + for entry in fs::read_dir(root_dir)? { + let entry = match entry { + Ok(entry) => entry, + Err(err) => { + cerror!(SYNC, "Snapshot cleanup can't retrieve entry. 
err: {}", err); + continue + } + }; + let path = entry.path(); + + match entry.file_type().map(|x| x.is_dir()) { + Ok(true) => {} + Ok(false) => continue, + Err(err) => { + cerror!(SYNC, "Snapshot cleanup can't retrieve file info: {}, err: {}", path.to_string_lossy(), err); + continue + } + } + + let name = match path.file_name().expect("Directories always have file name").to_str() { + Some(n) => n, + None => continue, + }; + let hash = match H256::from_str(name) { + Ok(h) => BlockHash::from(h), + Err(_) => continue, + }; + let number = if let Some(number) = client.block_number(&BlockId::Hash(hash)) { + number + } else { + cerror!(SYNC, "Snapshot cleanup can't retrieve block number for block_hash: {}", hash); + continue + }; + + if number + expiration < client.best_block_header().number() { + cleanup_snapshot(root_dir, hash) + } + } + Ok(()) +} + +/// Remove all files in `root_dir/block_hash` +fn cleanup_snapshot(root_dir: &str, block_hash: BlockHash) { + let path = snapshot_dir(root_dir, &block_hash); + let rename_to = PathBuf::from(root_dir).join(format!("{:x}.old", *block_hash)); + // It is okay to ignore errors. We just wanted them to be removed. + match fs::rename(path, &rename_to) { + Ok(()) => {} + Err(err) => { + cerror!(SYNC, "Snapshot cleanup: renaming {} failed, reason: {}", block_hash, err); + } + } + // Ignore the error. Cleanup failure is not a critical error. + match fs::remove_dir_all(rename_to) { + Ok(()) => {} + Err(err) => { + cerror!(SYNC, "Snapshot cleanup: removing {} failed, reason: {}", block_hash, err); + } + } +} + impl Drop for Service { fn drop(&mut self) { if let Some(canceller) = self.canceller.take() { From a96282de61253f90a3eb9d1a8f7b9c7b8788559f Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Fri, 22 Nov 2019 09:31:52 +0900 Subject: [PATCH 34/52] Detect bootstrap header in tendermint worker Tendermint worker skips some process for the genesis block since it doesn't have a parent, and this should be applied to the bootstrap header too. 
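
In short, instead of special-casing block number 0, the worker now returns
early for any proposal whose parent is unknown to the chain. A condensed view
of the change (the full diff follows):

    // Before: only the genesis block was skipped.
    if proposal.number() < 1 {
        return
    }

    // After: the bootstrap (snapshot target) header has no parent in the chain either.
    if self.client().block_status(&BlockId::Hash(*proposal.parent_hash())) == BlockStatus::Unknown {
        return
    }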
--- core/src/consensus/tendermint/worker.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/core/src/consensus/tendermint/worker.rs b/core/src/consensus/tendermint/worker.rs index 749b5252e1..5329fa342f 100644 --- a/core/src/consensus/tendermint/worker.rs +++ b/core/src/consensus/tendermint/worker.rs @@ -52,6 +52,7 @@ use crate::encoded; use crate::error::{BlockError, Error}; use crate::snapshot_notify::NotifySender as SnapshotNotifySender; use crate::transaction::{SignedTransaction, UnverifiedTransaction}; +use crate::types::BlockStatus; use crate::views::BlockView; use crate::BlockId; use std::cell::Cell; @@ -961,7 +962,8 @@ impl Worker { } fn on_imported_proposal(&mut self, proposal: &Header) { - if proposal.number() < 1 { + // NOTE: Only the genesis block and the snapshot target don't have the parent in the blockchain + if self.client().block_status(&BlockId::Hash(*proposal.parent_hash())) == BlockStatus::Unknown { return } @@ -1663,11 +1665,11 @@ impl Worker { let mut last_term_end = None; for block_hash in &enacted { let header = c.block_header(&BlockId::Hash(*block_hash)).expect("Block is enacted").decode(); - if header.number() == 0 { - continue - } - let parent_header = - c.block_header(&BlockId::Hash(*header.parent_hash())).expect("Parent block should be enacted").decode(); + let parent_header = match c.block_header(&BlockId::Hash(*header.parent_hash())) { + Some(h) => h.decode(), + // NOTE: Only the genesis block and the snapshot target don't have the parent in the blockchain + None => continue, + }; let term_seconds = if let Some(p) = c.term_common_params(parent_header.hash().into()) { p.term_seconds() } else { From cc236fd936dd352449aa4d15746e24faee43cac5 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Thu, 21 Nov 2019 18:07:45 +0900 Subject: [PATCH 35/52] Add sync state SnapshotBody --- codechain/run_node.rs | 3 +- sync/src/block/extension.rs | 66 +++++++++++++++++++++++++++---------- 2 files changed, 50 insertions(+), 19 deletions(-) diff --git a/codechain/run_node.rs b/codechain/run_node.rs index 1bfa40e569..1e93f1a9d6 100644 --- a/codechain/run_node.rs +++ b/codechain/run_node.rs @@ -35,6 +35,7 @@ use csync::snapshot::Service as SnapshotService; use csync::{BlockSyncExtension, BlockSyncSender, TransactionSyncExtension}; use ctimer::TimerLoop; use ctrlc::CtrlC; +use ctypes::BlockHash; use fdlimit::raise_fd_limit; use kvdb::KeyValueDB; use kvdb_rocksdb::{Database, DatabaseConfig}; @@ -299,7 +300,7 @@ pub fn run_node(matches: &ArgMatches) -> Result<(), String> { let sync_sender = { let client = client.client(); let snapshot_target = match (config.network.snapshot_hash, config.network.snapshot_number) { - (Some(hash), Some(num)) => Some((hash, num)), + (Some(hash), Some(num)) => Some((BlockHash::from(hash), num)), _ => None, }; let snapshot_dir = config.snapshot.path.clone(); diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 9ff1ca59d3..397bb9ed6a 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -63,6 +63,10 @@ pub struct TokenInfo { #[derive(Debug)] enum State { SnapshotHeader(BlockHash, u64), + SnapshotBody { + block: BlockHash, + prev_root: H256, + }, SnapshotChunk { block: BlockHash, restore: SnapshotRestore, @@ -90,28 +94,12 @@ impl Extension { pub fn new( client: Arc, api: Box, - snapshot_target: Option<(H256, u64)>, + snapshot_target: Option<(BlockHash, u64)>, snapshot_dir: Option, ) -> Extension { api.set_timer(SYNC_TIMER_TOKEN, 
Duration::from_millis(SYNC_TIMER_INTERVAL)).expect("Timer set succeeds"); - let state = match snapshot_target { - Some((hash, num)) => match client.block_header(&BlockId::Number(num)) { - Some(ref header) if *header.hash() == hash => { - let state_db = client.state_db().read(); - let state_root = header.state_root(); - match TrieFactory::readonly(state_db.as_hashdb(), &state_root) { - Ok(ref trie) if trie.is_complete() => State::Full, - _ => State::SnapshotChunk { - block: hash.into(), - restore: SnapshotRestore::new(state_root), - }, - } - } - _ => State::SnapshotHeader(hash.into(), num), - }, - None => State::Full, - }; + let state = Extension::initial_state(client.clone(), snapshot_target); cdebug!(SYNC, "Initial state is {:?}", state); let mut header = client.best_header(); let mut hollow_headers = vec![header.decode()]; @@ -149,6 +137,36 @@ impl Extension { } } + fn initial_state(client: Arc, snapshot_target: Option<(BlockHash, u64)>) -> State { + let (hash, num) = match snapshot_target { + Some(target) => target, + None => return State::Full, + }; + let header = match client.block_header(&num.into()) { + Some(ref h) if h.hash() == hash => h.clone(), + _ => return State::SnapshotHeader(hash, num), + }; + if client.block_body(&hash.into()).is_none() { + let parent_hash = header.parent_hash(); + let parent = + client.block_header(&parent_hash.into()).expect("Parent header of the snapshot header must exist"); + return State::SnapshotBody { + block: hash, + prev_root: parent.state_root(), + } + } + + let state_db = client.state_db().read(); + let state_root = header.state_root(); + match TrieFactory::readonly(state_db.as_hashdb(), &state_root) { + Ok(ref trie) if trie.is_complete() => State::Full, + _ => State::SnapshotChunk { + block: hash, + restore: SnapshotRestore::new(state_root), + }, + } + } + fn dismiss_request(&mut self, id: &NodeId, request_id: u64) { if let Some(requests) = self.requests.get_mut(id) { requests.retain(|(i, _)| *i != request_id); @@ -418,6 +436,9 @@ impl NetworkExtension for Extension { }); } } + State::SnapshotBody { + .. + } => unimplemented!(), State::SnapshotChunk { block, ref mut restore, @@ -532,6 +553,9 @@ impl Extension { None } } + State::SnapshotBody { + .. + } => None, State::SnapshotChunk { .. } => None, @@ -582,6 +606,9 @@ impl Extension { None } } + State::SnapshotBody { + .. + } => unimplemented!(), State::SnapshotChunk { .. } => None, @@ -859,6 +886,9 @@ impl Extension { headers.len() ), }, + State::SnapshotBody { + .. + } => {} State::SnapshotChunk { .. 
} => {} From 0ba5323e86ee0db264c005166a81eb85f8a449f7 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Thu, 21 Nov 2019 18:30:55 +0900 Subject: [PATCH 36/52] Move to SnapshotBody state after downloading the snapshot header --- sync/src/block/extension.rs | 116 +++++++++++------------------------- 1 file changed, 34 insertions(+), 82 deletions(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 397bb9ed6a..e972ee9af0 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -152,7 +152,7 @@ impl Extension { client.block_header(&parent_hash.into()).expect("Parent header of the snapshot header must exist"); return State::SnapshotBody { block: hash, - prev_root: parent.state_root(), + prev_root: parent.transactions_root(), } } @@ -431,8 +431,8 @@ impl NetworkExtension for Extension { State::SnapshotHeader(_, num) => { for id in &peer_ids { self.send_header_request(id, RequestMessage::Headers { - start_number: num, - max_count: 1, + start_number: num - 1, + max_count: 2, }); } } @@ -541,86 +541,38 @@ pub enum Event { impl Extension { fn new_headers(&mut self, imported: Vec, enacted: Vec, retracted: Vec) { - if let Some(next_state) = match self.state { - State::SnapshotHeader(hash, ..) => { - if imported.contains(&hash) { - let header = self.client.block_header(&BlockId::Hash(hash)).expect("Imported header must exist"); - Some(State::SnapshotChunk { - block: hash, - restore: SnapshotRestore::new(header.state_root()), - }) - } else { - None - } + if let State::Full = self.state { + for peer in self.header_downloaders.values_mut() { + peer.mark_as_imported(imported.clone()); } - State::SnapshotBody { - .. - } => None, - State::SnapshotChunk { - .. - } => None, - State::Full => { - for peer in self.header_downloaders.values_mut() { - peer.mark_as_imported(imported.clone()); - } - let mut headers_to_download: Vec<_> = enacted - .into_iter() - .map(|hash| self.client.block_header(&BlockId::Hash(hash)).expect("Enacted header must exist")) - .collect(); - headers_to_download.sort_unstable_by_key(EncodedHeader::number); - #[allow(clippy::redundant_closure)] - // False alarm. https://github.com/rust-lang/rust-clippy/issues/1439 - headers_to_download.dedup_by_key(|h| h.hash()); - - let headers: Vec<_> = headers_to_download - .into_iter() - .filter(|header| self.client.block_body(&BlockId::Hash(header.hash())).is_none()) - .collect(); // FIXME: No need to collect here if self is not borrowed. - for header in headers { - let parent = self - .client - .block_header(&BlockId::Hash(header.parent_hash())) - .expect("Enacted header must have parent"); - let is_empty = header.transactions_root() == parent.transactions_root(); - self.body_downloader.add_target(&header.decode(), is_empty); - } - self.body_downloader.remove_target(&retracted); - None + let mut headers_to_download: Vec<_> = enacted + .into_iter() + .map(|hash| self.client.block_header(&BlockId::Hash(hash)).expect("Enacted header must exist")) + .collect(); + headers_to_download.sort_unstable_by_key(EncodedHeader::number); + #[allow(clippy::redundant_closure)] + // False alarm. https://github.com/rust-lang/rust-clippy/issues/1439 + headers_to_download.dedup_by_key(|h| h.hash()); + + let headers: Vec<_> = headers_to_download + .into_iter() + .filter(|header| self.client.block_body(&BlockId::Hash(header.hash())).is_none()) + .collect(); // FIXME: No need to collect here if self is not borrowed.
+ for header in headers { + let parent = self + .client + .block_header(&BlockId::Hash(header.parent_hash())) + .expect("Enacted header must have parent"); + let is_empty = header.transactions_root() == parent.transactions_root(); + self.body_downloader.add_target(&header.decode(), is_empty); } - } { - cdebug!(SYNC, "Transitioning state to {:?}", next_state); - self.state = next_state; + self.body_downloader.remove_target(&retracted); } } fn new_blocks(&mut self, imported: Vec, invalid: Vec) { - if let Some(next_state) = match self.state { - State::SnapshotHeader(hash, ..) => { - if imported.contains(&hash) { - let header = self.client.block_header(&BlockId::Hash(hash)).expect("Imported header must exist"); - Some(State::SnapshotChunk { - block: hash, - restore: SnapshotRestore::new(header.state_root()), - }) - } else { - None - } - } - State::SnapshotBody { - .. - } => unimplemented!(), - State::SnapshotChunk { - .. - } => None, - State::Full => { - self.body_downloader.remove_target(&imported); - self.body_downloader.remove_target(&invalid); - None - } - } { - cdebug!(SYNC, "Transitioning state to {:?}", next_state); - self.state = next_state; - } + self.body_downloader.remove_target(&imported); + self.body_downloader.remove_target(&invalid); self.send_status_broadcast(); } } @@ -862,20 +814,20 @@ impl Extension { ctrace!(SYNC, "Received header response from({}) with length({})", from, headers.len()); match self.state { State::SnapshotHeader(hash, _) => match headers { - [header] if header.hash() == hash => { + [parent, header] if header.hash() == hash => { match self.client.import_bootstrap_header(&header) { - Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { - self.state = State::SnapshotChunk { + Ok(_) | Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { + self.state = State::SnapshotBody { block: hash, - restore: SnapshotRestore::new(*header.state_root()), + prev_root: *parent.transactions_root(), }; + cdebug!(SYNC, "Transitioning state to {:?}", self.state); } Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} // FIXME: handle import errors Err(err) => { cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err); } - _ => {} } } _ => cdebug!( From abe60a324665e9c07b8fadb3fd5dbba96026c30e Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Thu, 21 Nov 2019 20:10:48 +0900 Subject: [PATCH 37/52] Import snapshot block with body --- core/src/blockchain/blockchain.rs | 40 ++++++--- core/src/client/client.rs | 12 +-- core/src/client/importer.rs | 14 +-- core/src/client/mod.rs | 6 +- core/src/client/test_client.rs | 4 +- sync/src/block/extension.rs | 141 ++++++++++++++++++++---------- 6 files changed, 140 insertions(+), 77 deletions(-) diff --git a/core/src/blockchain/blockchain.rs b/core/src/blockchain/blockchain.rs index fcdce4fdee..12f3ccc358 100644 --- a/core/src/blockchain/blockchain.rs +++ b/core/src/blockchain/blockchain.rs @@ -98,18 +98,6 @@ impl BlockChain { } } - pub fn insert_bootstrap_header(&self, batch: &mut DBTransaction, header: &HeaderView) { - self.headerchain.insert_bootstrap_header(batch, header); - - let hash = header.hash(); - - *self.pending_best_block_hash.write() = Some(hash); - batch.put(db::COL_EXTRA, BEST_BLOCK_KEY, &hash); - - *self.pending_best_proposal_block_hash.write() = Some(hash); - batch.put(db::COL_EXTRA, BEST_PROPOSAL_BLOCK_KEY, &hash); - } - pub fn insert_header( &self, batch: &mut DBTransaction, @@ -122,6 +110,34 @@ impl BlockChain { } } + pub fn insert_bootstrap_block(&self, batch: &mut DBTransaction, 
bytes: &[u8]) { + let block = BlockView::new(bytes); + let header = block.header_view(); + let hash = header.hash(); + + ctrace!(BLOCKCHAIN, "Inserting bootstrap block #{}({}) to the blockchain.", header.number(), hash); + + if self.is_known(&hash) { + cdebug!(BLOCKCHAIN, "Block #{}({}) is already known.", header.number(), hash); + return + } + + assert!(self.pending_best_block_hash.read().is_none()); + assert!(self.pending_best_proposal_block_hash.read().is_none()); + + self.headerchain.insert_bootstrap_header(batch, &header); + self.body_db.insert_body(batch, &block); + self.body_db.update_best_block(batch, &BestBlockChanged::CanonChainAppended { + best_block: bytes.to_vec(), + }); + + *self.pending_best_block_hash.write() = Some(hash); + batch.put(db::COL_EXTRA, BEST_BLOCK_KEY, &hash); + + *self.pending_best_proposal_block_hash.write() = Some(hash); + batch.put(db::COL_EXTRA, BEST_PROPOSAL_BLOCK_KEY, &hash); + } + /// Inserts the block into backing cache database. /// Expects the block to be valid and already verified. /// If the block is already known, does nothing. diff --git a/core/src/client/client.rs b/core/src/client/client.rs index 5c1f96789f..f0e96a4d75 100644 --- a/core/src/client/client.rs +++ b/core/src/client/client.rs @@ -27,7 +27,7 @@ use cstate::{ }; use ctimer::{TimeoutHandler, TimerApi, TimerScheduleError, TimerToken}; use ctypes::transaction::{AssetTransferInput, PartialHashing, ShardTransaction}; -use ctypes::{BlockHash, BlockNumber, CommonParams, Header, ShardId, Tracker, TxHash}; +use ctypes::{BlockHash, BlockNumber, CommonParams, ShardId, Tracker, TxHash}; use cvm::{decode, execute, ChainTimeInfo, ScriptResult, VMConfig}; use hashdb::AsHashDB; use journaldb; @@ -42,7 +42,7 @@ use super::{ ClientConfig, DatabaseClient, EngineClient, EngineInfo, Error as ClientError, ExecuteClient, ImportBlock, ImportResult, MiningBlockChainClient, Shard, StateInfo, StateOrBlock, TextClient, }; -use crate::block::{ClosedBlock, IsBlock, OpenBlock, SealedBlock}; +use crate::block::{Block, ClosedBlock, IsBlock, OpenBlock, SealedBlock}; use crate::blockchain::{BlockChain, BlockProvider, BodyProvider, HeaderProvider, InvoiceProvider, TransactionAddress}; use crate::client::{ConsensusClient, SnapshotClient, TermInfo}; use crate::consensus::{CodeChainEngine, EngineError}; @@ -655,13 +655,13 @@ impl ImportBlock for Client { Ok(self.importer.header_queue.import(unverified)?) 
} - fn import_bootstrap_header(&self, header: &Header) -> Result { - if self.block_chain().is_known_header(&header.hash()) { + fn import_bootstrap_block(&self, block: &Block) -> Result { + if self.block_chain().is_known(&block.header.hash()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)) } let import_lock = self.importer.import_lock.lock(); - self.importer.import_bootstrap_header(header, self, &import_lock); - Ok(header.hash()) + self.importer.import_bootstrap_block(block, self, &import_lock); + Ok(block.header.hash()) } fn import_sealed_block(&self, block: &SealedBlock) -> ImportResult { diff --git a/core/src/client/importer.rs b/core/src/client/importer.rs index 4bd9ac606e..bc8161b4d4 100644 --- a/core/src/client/importer.rs +++ b/core/src/client/importer.rs @@ -19,14 +19,14 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use cio::IoChannel; -use ctypes::header::Header; +use ctypes::header::{Header, Seal}; use ctypes::BlockHash; use kvdb::DBTransaction; use parking_lot::{Mutex, MutexGuard}; use rlp::Encodable; use super::{BlockChainTrait, Client, ClientConfig}; -use crate::block::{enact, IsBlock, LockedBlock}; +use crate::block::{enact, Block, IsBlock, LockedBlock}; use crate::blockchain::{BodyProvider, HeaderProvider, ImportRoute}; use crate::client::EngineInfo; use crate::consensus::CodeChainEngine; @@ -362,19 +362,21 @@ impl Importer { imported.len() } - pub fn import_bootstrap_header<'a>(&'a self, header: &'a Header, client: &Client, _importer_lock: &MutexGuard<()>) { + pub fn import_bootstrap_block<'a>(&'a self, block: &'a Block, client: &Client, _importer_lock: &MutexGuard<()>) { + let header = &block.header; let hash = header.hash(); - ctrace!(CLIENT, "Importing bootstrap header {}-{:?}", header.number(), hash); + ctrace!(CLIENT, "Importing bootstrap block #{}-{:?}", header.number(), hash); { let chain = client.block_chain(); let mut batch = DBTransaction::new(); - chain.insert_bootstrap_header(&mut batch, &HeaderView::new(&header.rlp_bytes())); + chain.insert_bootstrap_block(&mut batch, &block.rlp_bytes(&Seal::With)); client.db().write_buffered(batch); chain.commit(); } - client.new_headers(&[hash], &[], &[hash], &[], &[], Some(hash)); + self.miner.chain_new_blocks(client, &[hash], &[], &[hash], &[]); + client.new_blocks(&[hash], &[], &[hash], &[], &[]); client.db().flush().expect("DB flush failed."); } diff --git a/core/src/client/mod.rs b/core/src/client/mod.rs index c85dce75e5..a48c62da84 100644 --- a/core/src/client/mod.rs +++ b/core/src/client/mod.rs @@ -38,12 +38,12 @@ use cmerkle::Result as TrieResult; use cnetwork::NodeId; use cstate::{AssetScheme, FindActionHandler, OwnedAsset, StateResult, Text, TopLevelState, TopStateView}; use ctypes::transaction::{AssetTransferInput, PartialHashing, ShardTransaction}; -use ctypes::{BlockHash, BlockNumber, CommonParams, Header, ShardId, Tracker, TxHash}; +use ctypes::{BlockHash, BlockNumber, CommonParams, ShardId, Tracker, TxHash}; use cvm::ChainTimeInfo; use kvdb::KeyValueDB; use primitives::{Bytes, H160, H256, U256}; -use crate::block::{ClosedBlock, OpenBlock, SealedBlock}; +use crate::block::{Block, ClosedBlock, OpenBlock, SealedBlock}; use crate::blockchain_info::BlockChainInfo; use crate::consensus::EngineError; use crate::encoded; @@ -204,7 +204,7 @@ pub trait ImportBlock { /// Import a trusted bootstrap header into the blockchain /// Bootstrap headers don't execute any verifications - fn import_bootstrap_header(&self, bytes: &Header) -> Result; + fn import_bootstrap_block(&self, bytes: 
&Block) -> Result; /// Import sealed block. Skips all verifications. fn import_sealed_block(&self, block: &SealedBlock) -> ImportResult; diff --git a/core/src/client/test_client.rs b/core/src/client/test_client.rs index ddb4455241..3e4cd3425d 100644 --- a/core/src/client/test_client.rs +++ b/core/src/client/test_client.rs @@ -52,7 +52,7 @@ use parking_lot::RwLock; use primitives::{Bytes, H256, U256}; use rlp::*; -use crate::block::{ClosedBlock, OpenBlock, SealedBlock}; +use crate::block::{Block, ClosedBlock, OpenBlock, SealedBlock}; use crate::blockchain_info::BlockChainInfo; use crate::client::{ AccountData, BlockChainClient, BlockChainTrait, BlockProducer, BlockStatus, ConsensusClient, EngineInfo, @@ -509,7 +509,7 @@ impl ImportBlock for TestBlockChainClient { unimplemented!() } - fn import_bootstrap_header(&self, _header: &BlockHeader) -> Result { + fn import_bootstrap_block(&self, _header: &Block) -> Result { unimplemented!() } diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index e972ee9af0..bfbe900c36 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -27,7 +27,7 @@ use ccore::{ }; use cmerkle::snapshot::ChunkDecompressor; use cmerkle::snapshot::Restore as SnapshotRestore; -use cmerkle::TrieFactory; +use cmerkle::{skewed_merkle_root, TrieFactory}; use cnetwork::{Api, EventSender, NetworkExtension, NodeId}; use cstate::FindActionHandler; use ctimer::TimerToken; @@ -64,7 +64,7 @@ pub struct TokenInfo { enum State { SnapshotHeader(BlockHash, u64), SnapshotBody { - block: BlockHash, + header: EncodedHeader, prev_root: H256, }, SnapshotChunk { @@ -151,7 +151,7 @@ impl Extension { let parent = client.block_header(&parent_hash.into()).expect("Parent header of the snapshot header must exist"); return State::SnapshotBody { - block: hash, + header, prev_root: parent.transactions_root(), } } @@ -437,8 +437,29 @@ impl NetworkExtension for Extension { } } State::SnapshotBody { + ref header, .. 
- } => unimplemented!(), + } => { + for id in &peer_ids { + if let Some(requests) = self.requests.get_mut(id) { + ctrace!(SYNC, "Send snapshot body request to {}", id); + let request = RequestMessage::Bodies(vec![header.hash()]); + let request_id = self.last_request; + self.last_request += 1; + requests.push((request_id, request.clone())); + self.api.send(id, Arc::new(Message::Request(request_id, request).rlp_bytes())); + + let token = &self.tokens[id]; + let token_info = self.tokens_info.get_mut(token).unwrap(); + + let _ = self.api.clear_timer(*token); + self.api + .set_timer_once(*token, Duration::from_millis(SYNC_EXPIRE_REQUEST_INTERVAL)) + .expect("Timer set succeeds"); + token_info.request_id = Some(request_id); + } + } + } State::SnapshotChunk { block, ref mut restore, @@ -815,20 +836,11 @@ impl Extension { match self.state { State::SnapshotHeader(hash, _) => match headers { [parent, header] if header.hash() == hash => { - match self.client.import_bootstrap_header(&header) { - Ok(_) | Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { - self.state = State::SnapshotBody { - block: hash, - prev_root: *parent.transactions_root(), - }; - cdebug!(SYNC, "Transitioning state to {:?}", self.state); - } - Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} - // FIXME: handle import errors - Err(err) => { - cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err); - } - } + self.state = State::SnapshotBody { + header: EncodedHeader::new(header.rlp_bytes().to_vec()), + prev_root: *parent.transactions_root(), + }; + cdebug!(SYNC, "Transitioning state to {:?}", self.state); } _ => cdebug!( SYNC, @@ -887,42 +899,75 @@ impl Extension { fn on_body_response(&mut self, hashes: Vec, bodies: Vec>) { ctrace!(SYNC, "Received body response with lenth({}) {:?}", hashes.len(), hashes); - { - self.body_downloader.import_bodies(hashes, bodies); - let completed = self.body_downloader.drain(); - for (hash, transactions) in completed { - let header = self - .client - .block_header(&BlockId::Hash(hash)) - .expect("Downloaded body's header must exist") - .decode(); - let block = Block { - header, - transactions, - }; - cdebug!(SYNC, "Body download completed for #{}({})", block.header.number(), hash); - match self.client.import_block(block.rlp_bytes(&Seal::With)) { - Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { - cwarn!(SYNC, "Downloaded already existing block({})", hash) - } - Err(BlockImportError::Import(ImportError::AlreadyQueued)) => { - cwarn!(SYNC, "Downloaded already queued in the verification queue({})", hash) - } - Err(err) => { + + match self.state { + State::SnapshotBody { + ref header, + prev_root, + } => { + let body = bodies.first().expect("Body response in SnapshotBody state has only one body"); + let new_root = skewed_merkle_root(prev_root, body.iter().map(Encodable::rlp_bytes)); + if header.transactions_root() == new_root { + let block = Block { + header: header.decode(), + transactions: body.clone(), + }; + match self.client.import_bootstrap_block(&block) { + Ok(_) | Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { + self.state = State::SnapshotChunk { + block: header.hash(), + restore: SnapshotRestore::new(header.state_root()), + }; + cdebug!(SYNC, "Transitioning state to {:?}", self.state); + } + Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} // FIXME: handle import errors - cwarn!(SYNC, "Cannot import block({}): {:?}", hash, err); - break + Err(err) => { + cwarn!(SYNC, "Cannot import header({}): {:?}", 
header.hash(), err); + } } - _ => {} } } - } + State::Full => { + { + self.body_downloader.import_bodies(hashes, bodies); + let completed = self.body_downloader.drain(); + for (hash, transactions) in completed { + let header = self + .client + .block_header(&BlockId::Hash(hash)) + .expect("Downloaded body's header must exist") + .decode(); + let block = Block { + header, + transactions, + }; + cdebug!(SYNC, "Body download completed for #{}({})", block.header.number(), hash); + match self.client.import_block(block.rlp_bytes(&Seal::With)) { + Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { + cwarn!(SYNC, "Downloaded already existing block({})", hash) + } + Err(BlockImportError::Import(ImportError::AlreadyQueued)) => { + cwarn!(SYNC, "Downloaded already queued in the verification queue({})", hash) + } + Err(err) => { + // FIXME: handle import errors + cwarn!(SYNC, "Cannot import block({}): {:?}", hash, err); + break + } + _ => {} + } + } + } - let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); - peer_ids.shuffle(&mut thread_rng()); + let mut peer_ids: Vec<_> = self.header_downloaders.keys().cloned().collect(); + peer_ids.shuffle(&mut thread_rng()); - for id in peer_ids { - self.send_body_request(&id); + for id in peer_ids { + self.send_body_request(&id); + } + } + _ => {} } } From 9354d8d37dc983a86bafbc08d25e4bc86d6413f1 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Mon, 25 Nov 2019 12:12:22 +0900 Subject: [PATCH 38/52] Update the best block after importing snapshot chunks --- core/src/blockchain/blockchain.rs | 29 ++++++++++++++++------- core/src/blockchain/headerchain.rs | 30 ++++++++++++----------- core/src/client/client.rs | 18 ++++++++++++-- core/src/client/importer.rs | 38 +++++++++++++++++++++++++----- core/src/client/mod.rs | 14 ++++++++--- core/src/client/test_client.rs | 11 ++++++++- sync/src/block/extension.rs | 24 +++++++++++++++++-- 7 files changed, 127 insertions(+), 37 deletions(-) diff --git a/core/src/blockchain/blockchain.rs b/core/src/blockchain/blockchain.rs index 12f3ccc358..bbf028afe3 100644 --- a/core/src/blockchain/blockchain.rs +++ b/core/src/blockchain/blockchain.rs @@ -110,7 +110,11 @@ impl BlockChain { } } - pub fn insert_bootstrap_block(&self, batch: &mut DBTransaction, bytes: &[u8]) { + pub fn insert_floating_header(&self, batch: &mut DBTransaction, header: &HeaderView) { + self.headerchain.insert_floating_header(batch, header); + } + + pub fn insert_floating_block(&self, batch: &mut DBTransaction, bytes: &[u8]) { let block = BlockView::new(bytes); let header = block.header_view(); let hash = header.hash(); @@ -122,20 +126,27 @@ impl BlockChain { return } + self.insert_floating_header(batch, &header); + self.body_db.insert_body(batch, &block); + } + + pub fn force_update_best_block(&self, batch: &mut DBTransaction, hash: &BlockHash) { + ctrace!(BLOCKCHAIN, "Forcefully updating the best block to {}", hash); + + assert!(self.is_known(hash)); assert!(self.pending_best_block_hash.read().is_none()); assert!(self.pending_best_proposal_block_hash.read().is_none()); - self.headerchain.insert_bootstrap_header(batch, &header); - self.body_db.insert_body(batch, &block); + let block = self.block(hash).expect("Target block is known"); + self.headerchain.force_update_best_header(batch, hash); self.body_db.update_best_block(batch, &BestBlockChanged::CanonChainAppended { - best_block: bytes.to_vec(), + best_block: block.into_inner(), }); - *self.pending_best_block_hash.write() = Some(hash); - batch.put(db::COL_EXTRA, BEST_BLOCK_KEY, 
&hash); - - *self.pending_best_proposal_block_hash.write() = Some(hash); - batch.put(db::COL_EXTRA, BEST_PROPOSAL_BLOCK_KEY, &hash); + batch.put(db::COL_EXTRA, BEST_BLOCK_KEY, hash); + *self.pending_best_block_hash.write() = Some(*hash); + batch.put(db::COL_EXTRA, BEST_PROPOSAL_BLOCK_KEY, hash); + *self.pending_best_proposal_block_hash.write() = Some(*hash); } /// Inserts the block into backing cache database. diff --git a/core/src/blockchain/headerchain.rs b/core/src/blockchain/headerchain.rs index 9629188c23..3aa3d3ec74 100644 --- a/core/src/blockchain/headerchain.rs +++ b/core/src/blockchain/headerchain.rs @@ -115,24 +115,19 @@ impl HeaderChain { } } - /// Inserts a bootstrap header into backing cache database. - /// Makes the imported header the best header. - /// Expects the header to be valid and already verified. + /// Inserts a floating header into backing cache database. + /// Expects the header to be valid. /// If the header is already known, does nothing. - // FIXME: Find better return type. Returning `None` at duplication is not natural - pub fn insert_bootstrap_header(&self, batch: &mut DBTransaction, header: &HeaderView) { + pub fn insert_floating_header(&self, batch: &mut DBTransaction, header: &HeaderView) { let hash = header.hash(); - ctrace!(HEADERCHAIN, "Inserting bootstrap block header #{}({}) to the headerchain.", header.number(), hash); + ctrace!(HEADERCHAIN, "Inserting a floating block header #{}({}) to the headerchain.", header.number(), hash); if self.is_known_header(&hash) { ctrace!(HEADERCHAIN, "Block header #{}({}) is already known.", header.number(), hash); return } - assert!(self.pending_best_header_hash.read().is_none()); - assert!(self.pending_best_proposal_block_hash.read().is_none()); - let compressed_header = compress(header.rlp().as_raw(), blocks_swapper()); batch.put(db::COL_HEADERS, &hash, &compressed_header); @@ -145,11 +140,6 @@ impl HeaderChain { parent: header.parent_hash(), }); - batch.put(db::COL_EXTRA, BEST_HEADER_KEY, &hash); - *self.pending_best_header_hash.write() = Some(hash); - batch.put(db::COL_EXTRA, BEST_PROPOSAL_HEADER_KEY, &hash); - *self.pending_best_proposal_block_hash.write() = Some(hash); - let mut pending_hashes = self.pending_hashes.write(); let mut pending_details = self.pending_details.write(); @@ -157,6 +147,18 @@ impl HeaderChain { batch.extend_with_cache(db::COL_EXTRA, &mut *pending_hashes, new_hashes, CacheUpdatePolicy::Overwrite); } + pub fn force_update_best_header(&self, batch: &mut DBTransaction, hash: &BlockHash) { + ctrace!(HEADERCHAIN, "Forcefully updating the best header to {}", hash); + assert!(self.is_known_header(hash)); + assert!(self.pending_best_header_hash.read().is_none()); + assert!(self.pending_best_proposal_block_hash.read().is_none()); + + batch.put(db::COL_EXTRA, BEST_HEADER_KEY, hash); + *self.pending_best_header_hash.write() = Some(*hash); + batch.put(db::COL_EXTRA, BEST_PROPOSAL_HEADER_KEY, hash); + *self.pending_best_proposal_block_hash.write() = Some(*hash); + } + /// Inserts the header into backing cache database. /// Expects the header to be valid and already verified. /// If the header is already known, does nothing. 
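Taken together, the floating insert functions and the force_update functions let snapshot sync stage a header and body in the database without moving the best-block pointers until the whole snapshot state has been restored. Below is a minimal, self-contained sketch of that order of operations, assuming toy stand-ins for the hash type and the header chain; none of these names are the real CodeChain API.

use std::collections::HashMap;

// Stand-in for ctypes::BlockHash; the real type is a 256-bit hash.
type BlockHash = u64;

#[derive(Default)]
struct HeaderChainSketch {
    known: HashMap<BlockHash, Vec<u8>>, // hash -> encoded header
    best_header: Option<BlockHash>,
}

impl HeaderChainSketch {
    // Like insert_floating_header: store a header without touching the
    // best-header pointer, so it "floats" until explicitly promoted.
    fn insert_floating_header(&mut self, hash: BlockHash, encoded: &[u8]) {
        self.known.entry(hash).or_insert_with(|| encoded.to_vec());
    }

    // Like force_update_best_header: promote an already-known header.
    fn force_update_best_header(&mut self, hash: BlockHash) {
        assert!(self.known.contains_key(&hash), "header must be known first");
        self.best_header = Some(hash);
    }
}

fn main() {
    let mut chain = HeaderChainSketch::default();
    // 1. The snapshot header and its parent are imported as floating headers.
    chain.insert_floating_header(100, b"parent header");
    chain.insert_floating_header(101, b"snapshot header");
    // 2. Only after the body and all state chunks have arrived is the best
    //    pointer forced to the snapshot block.
    chain.force_update_best_header(101);
    assert_eq!(chain.best_header, Some(101));
}

As in the real force_update functions, promoting a hash that was never inserted is a programming error, hence the assert.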
diff --git a/core/src/client/client.rs b/core/src/client/client.rs index f0e96a4d75..22c6192684 100644 --- a/core/src/client/client.rs +++ b/core/src/client/client.rs @@ -26,6 +26,7 @@ use cstate::{ ActionHandler, AssetScheme, FindActionHandler, OwnedAsset, StateDB, StateResult, Text, TopLevelState, TopStateView, }; use ctimer::{TimeoutHandler, TimerApi, TimerScheduleError, TimerToken}; +use ctypes::header::Header; use ctypes::transaction::{AssetTransferInput, PartialHashing, ShardTransaction}; use ctypes::{BlockHash, BlockNumber, CommonParams, ShardId, Tracker, TxHash}; use cvm::{decode, execute, ChainTimeInfo, ScriptResult, VMConfig}; @@ -655,15 +656,28 @@ impl ImportBlock for Client { Ok(self.importer.header_queue.import(unverified)?) } - fn import_bootstrap_block(&self, block: &Block) -> Result { + fn import_trusted_header(&self, header: &Header) -> Result { + if self.block_chain().is_known_header(&header.hash()) { + return Err(BlockImportError::Import(ImportError::AlreadyInChain)) + } + let import_lock = self.importer.import_lock.lock(); + self.importer.import_trusted_header(header, self, &import_lock); + Ok(header.hash()) + } + + fn import_trusted_block(&self, block: &Block) -> Result { if self.block_chain().is_known(&block.header.hash()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)) } let import_lock = self.importer.import_lock.lock(); - self.importer.import_bootstrap_block(block, self, &import_lock); + self.importer.import_trusted_block(block, self, &import_lock); Ok(block.header.hash()) } + fn force_update_best_block(&self, hash: &BlockHash) { + self.importer.force_update_best_block(hash, self) + } + fn import_sealed_block(&self, block: &SealedBlock) -> ImportResult { let h = block.header().hash(); let route = { diff --git a/core/src/client/importer.rs b/core/src/client/importer.rs index bc8161b4d4..4290ee7ab9 100644 --- a/core/src/client/importer.rs +++ b/core/src/client/importer.rs @@ -362,21 +362,47 @@ impl Importer { imported.len() } - pub fn import_bootstrap_block<'a>(&'a self, block: &'a Block, client: &Client, _importer_lock: &MutexGuard<()>) { + pub fn import_trusted_header<'a>(&'a self, header: &'a Header, client: &Client, _importer_lock: &MutexGuard<()>) { + let hash = header.hash(); + ctrace!(CLIENT, "Importing trusted header #{}-{:?}", header.number(), hash); + + { + let chain = client.block_chain(); + let mut batch = DBTransaction::new(); + chain.insert_floating_header(&mut batch, &HeaderView::new(&header.rlp_bytes())); + client.db().write_buffered(batch); + chain.commit(); + } + client.new_headers(&[hash], &[], &[], &[], &[], None); + + client.db().flush().expect("DB flush failed."); + } + + pub fn import_trusted_block<'a>(&'a self, block: &'a Block, client: &Client, importer_lock: &MutexGuard<()>) { let header = &block.header; let hash = header.hash(); - ctrace!(CLIENT, "Importing bootstrap block #{}-{:?}", header.number(), hash); + ctrace!(CLIENT, "Importing trusted block #{}-{:?}", header.number(), hash); + self.import_trusted_header(header, client, importer_lock); { let chain = client.block_chain(); let mut batch = DBTransaction::new(); - chain.insert_bootstrap_block(&mut batch, &block.rlp_bytes(&Seal::With)); + chain.insert_floating_block(&mut batch, &block.rlp_bytes(&Seal::With)); client.db().write_buffered(batch); chain.commit(); } - client.new_headers(&[hash], &[], &[hash], &[], &[], Some(hash)); - self.miner.chain_new_blocks(client, &[hash], &[], &[hash], &[]); - client.new_blocks(&[hash], &[], &[hash], &[], &[]); + 
self.miner.chain_new_blocks(client, &[hash], &[], &[], &[]); + client.new_blocks(&[hash], &[], &[], &[], &[]); + + client.db().flush().expect("DB flush failed."); + } + + pub fn force_update_best_block(&self, hash: &BlockHash, client: &Client) { + let chain = client.block_chain(); + let mut batch = DBTransaction::new(); + chain.force_update_best_block(&mut batch, hash); + client.db().write_buffered(batch); + chain.commit(); client.db().flush().expect("DB flush failed."); } diff --git a/core/src/client/mod.rs b/core/src/client/mod.rs index a48c62da84..02c71223f4 100644 --- a/core/src/client/mod.rs +++ b/core/src/client/mod.rs @@ -37,6 +37,7 @@ use ckey::{Address, NetworkId, PlatformAddress, Public}; use cmerkle::Result as TrieResult; use cnetwork::NodeId; use cstate::{AssetScheme, FindActionHandler, OwnedAsset, StateResult, Text, TopLevelState, TopStateView}; +use ctypes::header::Header; use ctypes::transaction::{AssetTransferInput, PartialHashing, ShardTransaction}; use ctypes::{BlockHash, BlockNumber, CommonParams, ShardId, Tracker, TxHash}; use cvm::ChainTimeInfo; @@ -202,9 +203,16 @@ pub trait ImportBlock { /// Import a header into the blockchain fn import_header(&self, bytes: Bytes) -> Result; - /// Import a trusted bootstrap header into the blockchain - /// Bootstrap headers don't execute any verifications - fn import_bootstrap_block(&self, bytes: &Block) -> Result; + /// Import a trusted header into the blockchain + /// Trusted header doesn't go through any verifications and doesn't update the best header + fn import_trusted_header(&self, header: &Header) -> Result; + + /// Import a trusted block into the blockchain + /// Trusted block doesn't go through any verifications and doesn't update the best block + fn import_trusted_block(&self, block: &Block) -> Result; + + /// Forcefully update the best block + fn force_update_best_block(&self, hash: &BlockHash); /// Import sealed block. Skips all verifications. 
fn import_sealed_block(&self, block: &SealedBlock) -> ImportResult; diff --git a/core/src/client/test_client.rs b/core/src/client/test_client.rs index 3e4cd3425d..b2b0514778 100644 --- a/core/src/client/test_client.rs +++ b/core/src/client/test_client.rs @@ -42,6 +42,7 @@ use cnetwork::NodeId; use cstate::tests::helpers::empty_top_state; use cstate::{FindActionHandler, StateDB, TopLevelState}; use ctimer::{TimeoutHandler, TimerToken}; +use ctypes::header::Header; use ctypes::transaction::{Action, Transaction}; use ctypes::{BlockHash, BlockNumber, CommonParams, Header as BlockHeader, Tracker, TxHash}; use cvm::ChainTimeInfo; @@ -509,7 +510,15 @@ impl ImportBlock for TestBlockChainClient { unimplemented!() } - fn import_bootstrap_block(&self, _header: &Block) -> Result { + fn import_trusted_header(&self, _header: &Header) -> Result { + unimplemented!() + } + + fn import_trusted_block(&self, _block: &Block) -> Result { + unimplemented!() + } + + fn force_update_best_block(&self, _hash: &BlockHash) { unimplemented!() } diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index bfbe900c36..d6240e68e0 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -468,6 +468,7 @@ impl NetworkExtension for Extension { self.send_chunk_request(&block, &root); } else { cdebug!(SYNC, "Transitioning state to {:?}", State::Full); + self.client.force_update_best_block(&block); self.state = State::Full; } } @@ -836,6 +837,24 @@ impl Extension { match self.state { State::SnapshotHeader(hash, _) => match headers { [parent, header] if header.hash() == hash => { + match self.client.import_trusted_header(parent) { + Ok(_) + | Err(BlockImportError::Import(ImportError::AlreadyInChain)) + | Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} + Err(err) => { + cwarn!(SYNC, "Cannot import header({}): {:?}", parent.hash(), err); + return + } + } + match self.client.import_trusted_header(header) { + Ok(_) + | Err(BlockImportError::Import(ImportError::AlreadyInChain)) + | Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} + Err(err) => { + cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err); + return + } + } self.state = State::SnapshotBody { header: EncodedHeader::new(header.rlp_bytes().to_vec()), prev_root: *parent.transactions_root(), @@ -912,7 +931,7 @@ impl Extension { header: header.decode(), transactions: body.clone(), }; - match self.client.import_bootstrap_block(&block) { + match self.client.import_trusted_block(&block) { Ok(_) | Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { self.state = State::SnapshotChunk { block: header.hash(), restore: SnapshotRestore::new(header.state_root()), }; cdebug!(SYNC, "Transitioning state to {:?}", self.state); } Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {} // FIXME: handle import errors Err(err) => { - cwarn!(SYNC, "Cannot import header({}): {:?}", header.hash(), err); + cwarn!(SYNC, "Cannot import block({}): {:?}", header.hash(), err); } } } @@ -1023,6 +1042,7 @@ impl Extension { self.send_chunk_request(&block, &root); } else { cdebug!(SYNC, "Transitioning state to {:?}", State::Full); + self.client.force_update_best_block(&block); self.state = State::Full; } } From eb6b204ce82c402df094a09dd1815ea5b0555403 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Wed, 4 Dec 2019 19:18:28 +0900 Subject: [PATCH 39/52] Refactor block::Extension::transition_to_full() --- sync/src/block/extension.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index
d6240e68e0..c14650fe19 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -467,9 +467,8 @@ impl NetworkExtension for Extension { if let Some(root) = restore.next_to_feed() { self.send_chunk_request(&block, &root); } else { - cdebug!(SYNC, "Transitioning state to {:?}", State::Full); self.client.force_update_best_block(&block); - self.state = State::Full; + self.transition_to_full(); } } State::Full => { @@ -1041,12 +1040,16 @@ impl Extension { if let Some(root) = restore.next_to_feed() { self.send_chunk_request(&block, &root); } else { - cdebug!(SYNC, "Transitioning state to {:?}", State::Full); self.client.force_update_best_block(&block); - self.state = State::Full; + self.transition_to_full(); } } } + + fn transition_to_full(&mut self) { + cdebug!(SYNC, "Transitioning state to {:?}", State::Full); + self.state = State::Full; + } } pub struct BlockSyncSender(EventSender); From afcf587b8516d5f4b86048e7b53e59db79cc9fca Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Wed, 4 Dec 2019 19:19:38 +0900 Subject: [PATCH 40/52] Change sync extension to update pivot after transitioning to Full --- sync/src/block/downloader/header.rs | 4 ++++ sync/src/block/extension.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/sync/src/block/downloader/header.rs b/sync/src/block/downloader/header.rs index 1422c1acf9..68857da46e 100644 --- a/sync/src/block/downloader/header.rs +++ b/sync/src/block/downloader/header.rs @@ -62,6 +62,10 @@ impl HeaderDownloader { } } + pub fn update_pivot(&mut self, hash: BlockHash) { + self.pivot = hash; + } + pub fn best_hash(&self) -> BlockHash { self.best_hash } diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index c14650fe19..795a07876f 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -1048,6 +1048,10 @@ impl Extension { fn transition_to_full(&mut self) { cdebug!(SYNC, "Transitioning state to {:?}", State::Full); + let best_hash = self.client.best_block_header().hash(); + for downloader in self.header_downloaders.values_mut() { + downloader.update_pivot(best_hash); + } self.state = State::Full; } } From 0c598ced1e75c191393fec2a901ead6adc1db2c3 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 3 Dec 2019 16:38:46 +0900 Subject: [PATCH 41/52] Fix to receive peer requests from just-connected nodes

All `self.header_downloaders.keys()` are in `connected_nodes`; `self.header_downloaders` keeps track of the subset of connected nodes that have sent at least one peer status. The following commit will prohibit a node from sending peer requests before it finishes snapshot sync, so a node may send peer requests before sharing its peer status. This change therefore makes the node accept requests from peers that have not sent any peer status yet.
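Below is a minimal sketch of the membership check this patch relaxes, with toy node and peer containers standing in for the real extension state; the type and function names are illustrative only, not the actual API.

use std::collections::{HashMap, HashSet};

type NodeId = u32;

struct ExtensionSketch {
    connected_nodes: HashSet<NodeId>,
    // A downloader exists only for peers that have sent a status message, so
    // header_downloaders.keys() is always a subset of connected_nodes.
    header_downloaders: HashMap<NodeId, ()>,
}

impl ExtensionSketch {
    // Old check: a connected peer that had not yet sent its status (for
    // example, one still doing snapshot sync) was rejected.
    fn accepts_request_old(&self, from: &NodeId) -> bool {
        self.header_downloaders.contains_key(from)
    }

    // New check: any connected peer may request data.
    fn accepts_request_new(&self, from: &NodeId) -> bool {
        self.connected_nodes.contains(from)
    }
}

fn main() {
    let ext = ExtensionSketch {
        connected_nodes: [1, 2].iter().copied().collect(),
        header_downloaders: std::iter::once((1, ())).collect(),
    };
    // Peer 2 is connected but has not sent any status yet.
    assert!(!ext.accepts_request_old(&2));
    assert!(ext.accepts_request_new(&2));
}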
--- sync/src/block/extension.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index 795a07876f..db219e7a28 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -622,7 +622,7 @@ impl Extension { } fn on_peer_request(&self, from: &NodeId, id: u64, request: RequestMessage) { - if !self.header_downloaders.contains_key(from) { + if !self.connected_nodes.contains(from) { cinfo!(SYNC, "Request from invalid peer #{} received", from); return } From 28776858b6dd07df1fac906b78df4751b5f3cfff Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Tue, 3 Dec 2019 16:41:54 +0900 Subject: [PATCH 42/52] Defer sending peer status after transitioning to full sync mode --- sync/src/block/extension.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs index db219e7a28..0899560367 100644 --- a/sync/src/block/extension.rs +++ b/sync/src/block/extension.rs @@ -17,6 +17,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::fs; +use std::mem::discriminant; use std::sync::Arc; use std::time::Duration; @@ -174,6 +175,10 @@ impl Extension { } fn send_status(&mut self, id: &NodeId) { + if discriminant(&self.state) != discriminant(&State::Full) { + return + } + let chain_info = self.client.chain_info(); self.api.send( id, @@ -190,6 +195,10 @@ impl Extension { } fn send_status_broadcast(&mut self) { + if discriminant(&self.state) != discriminant(&State::Full) { + return + } + let chain_info = self.client.chain_info(); for id in self.connected_nodes.iter() { self.api.send( @@ -1053,6 +1062,7 @@ impl Extension { downloader.update_pivot(best_hash); } self.state = State::Full; + self.send_status_broadcast(); } } From d8368c5f62d3bb9f27b052f4f577055efe76a218 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Mon, 2 Dec 2019 14:16:03 +0900 Subject: [PATCH 43/52] Factor out snapshot notify --- core/src/consensus/tendermint/worker.rs | 47 ++++++++++++++----------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/core/src/consensus/tendermint/worker.rs b/core/src/consensus/tendermint/worker.rs index 5329fa342f..e314c561fc 100644 --- a/core/src/consensus/tendermint/worker.rs +++ b/core/src/consensus/tendermint/worker.rs @@ -1662,27 +1662,7 @@ impl Worker { } } - let mut last_term_end = None; - for block_hash in &enacted { - let header = c.block_header(&BlockId::Hash(*block_hash)).expect("Block is enacted").decode(); - let parent_header = match c.block_header(&BlockId::Hash(*header.parent_hash())) { - Some(h) => h.decode(), - // NOTE: Only the genesis block and the snapshot target don't have the parent in the blockchain - None => continue, - }; - let term_seconds = if let Some(p) = c.term_common_params(parent_header.hash().into()) { - p.term_seconds() - } else { - continue - }; - if super::engine::is_term_changed(&header, &parent_header, term_seconds) { - last_term_end = Some(*block_hash); - } - } - if let Some(last_term_end) = last_term_end { - // TODO: Reduce the snapshot frequency. - self.snapshot_notify_sender.notify(last_term_end); - } + self.send_snapshot_notify(c.as_ref(), enacted.as_slice()); if let Some((last, rest)) = imported.split_last() { let (imported, last_proposal_header) = { @@ -1718,6 +1698,31 @@ impl Worker { } } + // Notify once for the latest block even if multiple blocks have been enacted. 
+ fn send_snapshot_notify(&mut self, c: &dyn ConsensusClient, enacted: &[BlockHash]) { + let mut last_term_end = None; + for block_hash in enacted { + let header = c.block_header(&BlockId::Hash(*block_hash)).expect("Block is enacted").decode(); + let parent_header = match c.block_header(&BlockId::Hash(*header.parent_hash())) { + Some(h) => h.decode(), + // NOTE: Only the genesis block and the snapshot target don't have the parent in the blockchain + None => continue, + }; + let term_seconds = if let Some(p) = c.term_common_params(parent_header.hash().into()) { + p.term_seconds() + } else { + continue + }; + if super::engine::is_term_changed(&header, &parent_header, term_seconds) { + last_term_end = Some(*block_hash); + } + } + if let Some(last_term_end) = last_term_end { + // TODO: Reduce the snapshot frequency. + self.snapshot_notify_sender.notify(last_term_end); + } + } + fn send_proposal_block( &self, signature: SchnorrSignature, From 9ce70ad3175d8cacbbd097d4e88d2e193377e5f9 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Wed, 4 Dec 2019 19:46:04 +0900 Subject: [PATCH 44/52] Move send_snapshot_notify() before the early return --- core/src/consensus/tendermint/worker.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/consensus/tendermint/worker.rs b/core/src/consensus/tendermint/worker.rs index e314c561fc..144aa8d248 100644 --- a/core/src/consensus/tendermint/worker.rs +++ b/core/src/consensus/tendermint/worker.rs @@ -1645,6 +1645,8 @@ impl Worker { } }; + self.send_snapshot_notify(c.as_ref(), enacted.as_slice()); + if self.step.is_commit() && (imported.len() + enacted.len() == 1) { let (_, committed_block_hash) = self.step.committed().expect("Commit state always has block_hash"); if imported.first() == Some(&committed_block_hash) { @@ -1662,8 +1664,6 @@ impl Worker { } } - self.send_snapshot_notify(c.as_ref(), enacted.as_slice()); - if let Some((last, rest)) = imported.split_last() { let (imported, last_proposal_header) = { let header = From ed78493d74900d242efcc94c7b6a59fa4d4843ee Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Mon, 2 Dec 2019 14:47:19 +0900 Subject: [PATCH 45/52] Defer snapshot timing by one block in Tendermint Also make tendermint::Worker::send_snapshot_notify use only block number --- core/src/consensus/tendermint/worker.rs | 29 ++++++++++--------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/core/src/consensus/tendermint/worker.rs b/core/src/consensus/tendermint/worker.rs index 144aa8d248..f9abe27922 100644 --- a/core/src/consensus/tendermint/worker.rs +++ b/core/src/consensus/tendermint/worker.rs @@ -1700,26 +1700,21 @@ impl Worker { // Notify once for the latest block even if multiple blocks have been enacted. 
fn send_snapshot_notify(&mut self, c: &dyn ConsensusClient, enacted: &[BlockHash]) { - let mut last_term_end = None; - for block_hash in enacted { - let header = c.block_header(&BlockId::Hash(*block_hash)).expect("Block is enacted").decode(); - let parent_header = match c.block_header(&BlockId::Hash(*header.parent_hash())) { - Some(h) => h.decode(), - // NOTE: Only the genesis block and the snapshot target don't have the parent in the blockchain - None => continue, - }; - let term_seconds = if let Some(p) = c.term_common_params(parent_header.hash().into()) { - p.term_seconds() - } else { - continue - }; - if super::engine::is_term_changed(&header, &parent_header, term_seconds) { - last_term_end = Some(*block_hash); + let mut last_snapshot_point = None; + for block_hash in enacted.iter().rev() { + let block_id = BlockId::Hash(*block_hash); + let last_term_finished_block_num = c.last_term_finished_block_num(block_id).expect("Block is enacted"); + let block_number = c.block_number(&block_id).expect("Block number should exist for enacted block"); + + if let Some(params) = c.term_common_params(block_id) { + if params.era() == 1 && (last_term_finished_block_num + 1 == block_number) { + last_snapshot_point = Some(block_hash); + } } } - if let Some(last_term_end) = last_term_end { + if let Some(last_snapshot_point) = last_snapshot_point { // TODO: Reduce the snapshot frequency. - self.snapshot_notify_sender.notify(last_term_end); + self.snapshot_notify_sender.notify(*last_snapshot_point); } } From d925fec3bcced18d2d3dc73e195589e52af21288 Mon Sep 17 00:00:00 2001 From: SeongChan Lee Date: Wed, 4 Dec 2019 18:05:08 +0900 Subject: [PATCH 46/52] Fix snapshot block estimation in test --- test/src/e2e.dynval/2/snapshot.test.ts | 35 +++++++++++++++----------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/test/src/e2e.dynval/2/snapshot.test.ts b/test/src/e2e.dynval/2/snapshot.test.ts index 32374c992d..bb548d4aef 100644 --- a/test/src/e2e.dynval/2/snapshot.test.ts +++ b/test/src/e2e.dynval/2/snapshot.test.ts @@ -17,6 +17,7 @@ import * as chai from "chai"; import { expect } from "chai"; import * as chaiAsPromised from "chai-as-promised"; +import * as stake from "codechain-stakeholder-sdk"; import * as fs from "fs"; import "mocha"; import * as path from "path"; @@ -24,6 +25,7 @@ import * as path from "path"; import mkdirp = require("mkdirp"); import { validators } from "../../../tendermint.dynval/constants"; import { PromiseExpect } from "../../helper/promise"; +import CodeChain from "../../helper/spawn"; import { setTermTestTimeout, withNodes } from "../setup"; chai.use(chaiAsPromised); @@ -37,7 +39,8 @@ describe("Snapshot for Tendermint with Dynamic Validator", function() { const { nodes } = withNodes(this, { promiseExpect, overrideParams: { - maxNumOfValidators: 3 + maxNumOfValidators: 3, + era: 1 }, validators: snapshotValidators.map((signer, index) => ({ signer, @@ -63,30 +66,32 @@ describe("Snapshot for Tendermint with Dynamic Validator", function() { it("should be exist after some time", async function() { const termWaiter = setTermTestTimeout(this, { - terms: 1 + terms: 2 }); const termMetadata = await termWaiter.waitNodeUntilTerm(nodes[0], { target: 2, termPeriods: 1 }); - - const blockHash = (await nodes[0].sdk.rpc.chain.getBlockHash( - termMetadata.lastTermFinishedBlockNumber - ))!; - const stateRoot = (await nodes[0].sdk.rpc.chain.getBlock(blockHash))! 
- .stateRoot; + const snapshotBlock = await getSnapshotBlock(nodes[0], termMetadata); expect( - fs.existsSync( - path.join( - nodes[0].snapshotPath, - blockHash.toString(), - stateRoot.toString() - ) + path.join( + nodes[0].snapshotPath, + snapshotBlock.hash.toString(), + snapshotBlock.stateRoot.toString() ) - ).to.be.true; + ).to.satisfy(fs.existsSync); }); afterEach(async function() { promiseExpect.checkFulfilled(); }); }); + +async function getSnapshotBlock( + node: CodeChain, + termMetadata: stake.TermMetadata +) { + const blockNumber = termMetadata.lastTermFinishedBlockNumber + 1; + await node.waitBlockNumber(blockNumber); + return (await node.sdk.rpc.chain.getBlock(blockNumber))!; +} From 18f8fa778f4e315d9229d5b896760981e8543179 Mon Sep 17 00:00:00 2001 From: Joonmo Yang Date: Fri, 6 Dec 2019 16:17:12 +0900 Subject: [PATCH 47/52] Introduce CurrentValidators

CurrentValidators represents the list of validators for the current block. Its value is the same as the NextValidators of the previous block's state; a minimal sketch of this hand-off appears after the diffs below. --- core/src/client/test_client.rs | 10 ++-- core/src/consensus/stake/action_data.rs | 61 +++++++++++++++++++------ core/src/consensus/stake/mod.rs | 14 +++--- core/src/consensus/tendermint/engine.rs | 15 +++++- 4 files changed, 73 insertions(+), 27 deletions(-) diff --git a/core/src/client/test_client.rs b/core/src/client/test_client.rs index b2b0514778..6ccfb1676c 100644 --- a/core/src/client/test_client.rs +++ b/core/src/client/test_client.rs @@ -59,7 +59,7 @@ use crate::client::{ AccountData, BlockChainClient, BlockChainTrait, BlockProducer, BlockStatus, ConsensusClient, EngineInfo, ImportBlock, ImportResult, MiningBlockChainClient, StateInfo, StateOrBlock, TermInfo, }; -use crate::consensus::stake::{Validator, Validators}; +use crate::consensus::stake::{NextValidators, Validator}; use crate::consensus::EngineError; use crate::db::{COL_STATE, NUM_COLUMNS}; use crate::encoded; @@ -106,7 +106,7 @@ pub struct TestBlockChainClient { /// Fixed validator keys pub validator_keys: RwLock>, /// Fixed validators - pub validators: Validators, + pub validators: NextValidators, } impl Default for TestBlockChainClient { @@ -160,7 +160,7 @@ impl TestBlockChainClient { history: RwLock::new(None), term_id: Some(1), validator_keys: RwLock::new(HashMap::new()), - validators: Validators::from_vector_to_test(vec![]), + validators: NextValidators::from_vector_to_test(vec![]), }; // insert genesis hash. @@ -325,14 +325,14 @@ impl TestBlockChainClient { self.validator_keys.write().insert(*key_pair.public(), *key_pair.private()); pubkeys.push(*key_pair.public()); } - let fixed_validators: Validators = Validators::from_vector_to_test( + let fixed_validators: NextValidators = NextValidators::from_vector_to_test( pubkeys.into_iter().map(|pubkey| Validator::new_for_test(0, 0, pubkey)).collect(), ); self.validators = fixed_validators; } - pub fn get_validators(&self) -> &Validators { + pub fn get_validators(&self) -> &NextValidators { &self.validators } } diff --git a/core/src/consensus/stake/action_data.rs b/core/src/consensus/stake/action_data.rs index af14c8b33e..14b1138634 100644 --- a/core/src/consensus/stake/action_data.rs +++ b/core/src/consensus/stake/action_data.rs @@ -41,8 +41,10 @@ lazy_static!
{ pub static ref JAIL_KEY: H256 = ActionDataKeyBuilder::new(CUSTOM_ACTION_HANDLER_ID, 1).append(&"Jail").into_key(); pub static ref BANNED_KEY: H256 = ActionDataKeyBuilder::new(CUSTOM_ACTION_HANDLER_ID, 1).append(&"Banned").into_key(); - pub static ref VALIDATORS_KEY: H256 = + pub static ref NEXT_VALIDATORS_KEY: H256 = ActionDataKeyBuilder::new(CUSTOM_ACTION_HANDLER_ID, 1).append(&"Validators").into_key(); + pub static ref CURRENT_VALIDATORS_KEY: H256 = + ActionDataKeyBuilder::new(CUSTOM_ACTION_HANDLER_ID, 1).append(&"CurrentValidators").into_key(); } pub fn get_delegation_key(address: &Address) -> H256 { @@ -274,17 +276,17 @@ impl Validator { } #[derive(Debug)] -pub struct Validators(Vec); -impl Validators { +pub struct NextValidators(Vec); +impl NextValidators { pub fn from_vector_to_test(vec: Vec) -> Self { - Validators(vec) + Self(vec) } pub fn load_from_state(state: &TopLevelState) -> StateResult { - let key = &*VALIDATORS_KEY; + let key = &*NEXT_VALIDATORS_KEY; let validators = state.action_data(&key)?.map(|data| decode_list(&data)).unwrap_or_default(); - Ok(Validators(validators)) + Ok(Self(validators)) } pub fn elect(state: &TopLevelState) -> StateResult { @@ -335,7 +337,7 @@ impl Validators { pub fn save_to_state(&self, state: &mut TopLevelState) -> StateResult<()> { - let key = &*VALIDATORS_KEY; + let key = &*NEXT_VALIDATORS_KEY; if !self.is_empty() { state.update_action_data(&key, encode_list(&self.0).to_vec())?; } else { @@ -384,7 +386,7 @@ impl Validators { } } -impl Deref for Validators { +impl Deref for NextValidators { type Target = Vec; fn deref(&self) -> &Self::Target { @@ -392,13 +394,13 @@ impl Deref for Validators { } } -impl From for Vec { - fn from(val: Validators) -> Self { +impl From for Vec { + fn from(val: NextValidators) -> Self { val.0 } } -impl IntoIterator for Validators { +impl IntoIterator for NextValidators { type Item = Validator; type IntoIter = vec::IntoIter; @@ -407,6 +409,39 @@ impl IntoIterator for Validators { } } +#[derive(Debug)] +pub struct CurrentValidators(Vec); +impl CurrentValidators { + pub fn load_from_state(state: &TopLevelState) -> StateResult { + let key = &*CURRENT_VALIDATORS_KEY; + let validators = state.action_data(&key)?.map(|data| decode_list(&data)).unwrap_or_default(); + + Ok(Self(validators)) + } + + pub fn save_to_state(&self, state: &mut TopLevelState) -> StateResult<()> { + let key = &*CURRENT_VALIDATORS_KEY; + if !self.is_empty() { + state.update_action_data(&key, encode_list(&self.0).to_vec())?; + } else { + state.remove_action_data(&key); + } + Ok(()) + } + + pub fn update(&mut self, validators: Vec) { + self.0 = validators; + } +} + +impl Deref for CurrentValidators { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + pub mod v0 { use std::mem; @@ -603,7 +638,7 @@ impl Candidates { pub fn renew_candidates( &mut self, - validators: &Validators, + validators: &NextValidators, nomination_ends_at: u64, inactive_validators: &[Address], banned: &Banned, @@ -1868,7 +1903,7 @@ mod tests { } candidates.save_to_state(&mut state).unwrap(); - let dummy_validators = Validators( + let dummy_validators = NextValidators( pubkeys[0..5] .iter() .map(|pubkey| Validator { diff --git a/core/src/consensus/stake/mod.rs b/core/src/consensus/stake/mod.rs index 5134367362..771013c7eb 100644 --- a/core/src/consensus/stake/mod.rs +++ b/core/src/consensus/stake/mod.rs @@ -33,7 +33,7 @@ use parking_lot::RwLock; use primitives::{Bytes, H256}; use rlp::{Decodable, Rlp}; -pub use self::action_data::{Banned, Validator, 
Validators}; +pub use self::action_data::{Banned, CurrentValidators, NextValidators, Validator}; use self::action_data::{Candidates, Delegation, Jail, ReleaseResult, StakeAccount, Stakeholders}; pub use self::actions::Action; pub use self::distribute::fee_distribute; @@ -317,8 +317,8 @@ pub fn get_stakes(state: &TopLevelState) -> StateResult> { Ok(result) } -pub fn get_validators(state: &TopLevelState) -> StateResult { - Validators::load_from_state(state) +pub fn get_validators(state: &TopLevelState) -> StateResult { + NextValidators::load_from_state(state) } pub mod v0 { @@ -379,7 +379,7 @@ pub mod v1 { } pub fn update_validator_weights(state: &mut TopLevelState, block_author: &Address) -> StateResult<()> { - let mut validators = Validators::load_from_state(state)?; + let mut validators = NextValidators::load_from_state(state)?; validators.update_weight(block_author); validators.save_to_state(state) } @@ -451,7 +451,7 @@ pub fn on_term_close( jail(state, inactive_validators, custody_until, kick_at)?; - let validators = Validators::elect(state)?; + let validators = NextValidators::elect(state)?; validators.save_to_state(state)?; state.increase_term_id(last_term_finished_block_num)?; @@ -469,7 +469,7 @@ fn update_candidates( let mut candidates = Candidates::load_from_state(state)?; let nomination_ends_at = current_term + nomination_expiration; - let current_validators = Validators::load_from_state(state)?; + let current_validators = NextValidators::load_from_state(state)?; candidates.renew_candidates(¤t_validators, nomination_ends_at, &inactive_validators, &banned); let expired = candidates.drain_expired_candidates(current_term); @@ -519,7 +519,7 @@ pub fn ban(state: &mut TopLevelState, informant: &Public, criminal: Address) -> let mut candidates = Candidates::load_from_state(state)?; let mut jailed = Jail::load_from_state(state)?; - let mut validators = Validators::load_from_state(state)?; + let mut validators = NextValidators::load_from_state(state)?; let deposit = match (candidates.remove(&criminal), jailed.remove(&criminal)) { (Some(_), Some(_)) => unreachable!("A candidate that are jailed cannot exist"), diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs index 4404cdaef2..76d42ec846 100644 --- a/core/src/consensus/tendermint/engine.rs +++ b/core/src/consensus/tendermint/engine.rs @@ -142,6 +142,17 @@ impl ConsensusEngine for Tendermint { let block_number = block.header().number(); let metadata = block.state().metadata()?.expect("Metadata must exist"); let era = metadata.term_params().map_or(0, |p| p.era()); + + match era { + 0 => {} + 1 => { + let mut validators = stake::CurrentValidators::load_from_state(block.state())?; + validators.update(stake::NextValidators::load_from_state(block.state())?.clone()); + validators.save_to_state(block.state_mut())?; + } + _ => unimplemented!(), + } + if block_number == metadata.last_term_finished_block_num() + 1 { match era { 0 => {} @@ -274,7 +285,7 @@ impl ConsensusEngine for Tendermint { stake::v0::move_current_to_previous_intermediate_rewards(block.state_mut())?; - let validators = stake::Validators::load_from_state(block.state())? + let validators = stake::NextValidators::load_from_state(block.state())? .into_iter() .map(|val| public_to_address(val.pubkey())) .collect(); @@ -286,7 +297,7 @@ impl ConsensusEngine for Tendermint { } let start_of_the_current_term = metadata.last_term_finished_block_num() + 1; - let validators = stake::Validators::load_from_state(block.state())? 
@@ -274,7 +285,7 @@ impl ConsensusEngine for Tendermint {
 
         stake::v0::move_current_to_previous_intermediate_rewards(block.state_mut())?;
 
-        let validators = stake::Validators::load_from_state(block.state())?
+        let validators = stake::NextValidators::load_from_state(block.state())?
             .into_iter()
             .map(|val| public_to_address(val.pubkey()))
             .collect();
@@ -286,7 +297,7 @@ impl ConsensusEngine for Tendermint {
         }
 
         let start_of_the_current_term = metadata.last_term_finished_block_num() + 1;
-        let validators = stake::Validators::load_from_state(block.state())?
+        let validators = stake::NextValidators::load_from_state(block.state())?
             .into_iter()
             .map(|val| public_to_address(val.pubkey()))
             .collect();

From 275a671dd8c7981fbb50c5560cb8a61db28b71a6 Mon Sep 17 00:00:00 2001
From: Joonmo Yang
Date: Fri, 6 Dec 2019 19:11:47 +0900
Subject: [PATCH 48/52] Get tendermint validators with CurrentValidators

---
 core/src/consensus/stake/action_data.rs       |  10 ++
 core/src/consensus/tendermint/engine.rs       |  39 ++++++-
 core/src/consensus/tendermint/worker.rs       |  22 ++--
 .../validator_set/dynamic_validator.rs        | 103 +++++++++++++-----
 4 files changed, 130 insertions(+), 44 deletions(-)

diff --git a/core/src/consensus/stake/action_data.rs b/core/src/consensus/stake/action_data.rs
index 14b1138634..6272b0559a 100644
--- a/core/src/consensus/stake/action_data.rs
+++ b/core/src/consensus/stake/action_data.rs
@@ -432,6 +432,16 @@ impl CurrentValidators {
     pub fn update(&mut self, validators: Vec<Validator>) {
         self.0 = validators;
     }
+
+    pub fn addresses(&self) -> Vec<Address> {
+        self.0.iter().rev().map(|v| public_to_address(&v.pubkey)).collect()
+    }
+
+    pub fn get_validator(&self, index: usize) -> &Validator {
+        let len = self.0.len();
+        // NOTE: validator list is reversed when reading a validator by index
+        self.0.iter().nth_back(index % len).unwrap()
+    }
 }
 
 impl Deref for CurrentValidators {
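The NOTE in `get_validator` above is load-bearing: seal bitset indices count from the end of the stored list, which is why `addresses` reverses and `get_validator` uses `nth_back`. A standalone check of the identity the two accessors rely on, with plain strings standing in for validators:

    // Illustration only: iter().nth_back(i % len) indexes the list as if it
    // were reversed, so get_validator(i) lines up with addresses()[i].
    fn main() {
        let vals = vec!["v0", "v1", "v2"];
        let len = vals.len();
        let reversed: Vec<_> = vals.iter().rev().cloned().collect();
        for i in 0..len {
            assert_eq!(vals.iter().nth_back(i % len).unwrap(), &reversed[i]);
        }
    }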
diff --git a/core/src/consensus/tendermint/engine.rs b/core/src/consensus/tendermint/engine.rs
index 76d42ec846..69bc34b565 100644
--- a/core/src/consensus/tendermint/engine.rs
+++ b/core/src/consensus/tendermint/engine.rs
@@ -459,10 +459,30 @@ fn calculate_pending_rewards_of_the_previous_term(
     let mut missed_signatures = HashMap::<Address, usize>::with_capacity(MAX_NUM_OF_VALIDATORS);
     let mut signed_blocks = HashMap::<Address, usize>::with_capacity(MAX_NUM_OF_VALIDATORS);
 
+    let era = {
+        let end_of_the_current_term_header = chain
+            .block_header(&start_of_the_current_term_header.parent_hash().into())
+            .expect("The parent of the term end block must exist");
+        let state = chain
+            .state_at(end_of_the_current_term_header.parent_hash().into())
+            .expect("The state at parent of the term end block must exist");
+        let metadata = state.metadata()?.expect("Metadata of the term end block should exist");
+        metadata.term_params().map_or(0, |p| p.era())
+    };
+
     let mut header = start_of_the_current_term_header;
     let mut parent_validators = {
-        let grand_parent_header = chain.block_header(&header.parent_hash().into()).unwrap();
-        validators.addresses(&grand_parent_header.parent_hash())
+        match era {
+            0 => {
+                let grand_parent_header = chain.block_header(&header.parent_hash().into()).unwrap();
+                validators.addresses(&grand_parent_header.parent_hash())
+            }
+            1 => {
+                let state = chain.state_at(header.parent_hash().into()).expect("The block's state must exist");
+                stake::CurrentValidators::load_from_state(&state)?.addresses()
+            }
+            _ => unimplemented!(),
+        }
     };
     while start_of_the_previous_term != header.number() {
         for index in TendermintSealView::new(&header.seal()).bitset()?.true_index_iter() {
@@ -472,10 +492,17 @@ fn calculate_pending_rewards_of_the_previous_term(
 
         header = chain.block_header(&header.parent_hash().into()).unwrap();
 
         parent_validators = {
-            // The seal of the current block has the signatures of the parent block.
-            // It needs the hash of the grand parent block to find the validators of the parent block.
-            let grand_parent_header = chain.block_header(&header.parent_hash().into()).unwrap();
-            validators.addresses(&grand_parent_header.parent_hash())
+            match era {
+                0 => {
+                    let grand_parent_header = chain.block_header(&header.parent_hash().into()).unwrap();
+                    validators.addresses(&grand_parent_header.parent_hash())
+                }
+                1 => {
+                    let state = chain.state_at(header.hash().into()).expect("The block's state must exist");
+                    stake::CurrentValidators::load_from_state(&state)?.addresses()
+                }
+                _ => unimplemented!(),
+            }
         };
 
         let author = header.author();
diff --git a/core/src/consensus/tendermint/worker.rs b/core/src/consensus/tendermint/worker.rs
index f9abe27922..627d0a77d8 100644
--- a/core/src/consensus/tendermint/worker.rs
+++ b/core/src/consensus/tendermint/worker.rs
@@ -35,7 +35,7 @@ use super::backup::{backup, restore, BackupView};
 use super::message::*;
 use super::network;
 use super::params::TimeGapParams;
-use super::stake::CUSTOM_ACTION_HANDLER_ID;
+use super::stake::{CurrentValidators, CUSTOM_ACTION_HANDLER_ID};
 use super::types::{Height, Proposal, Step, TendermintSealView, TendermintState, TwoThirdsMajority, View};
 use super::vote_collector::{DoubleVote, VoteCollector};
 use super::vote_regression_checker::VoteRegressionChecker;
@@ -1244,13 +1244,19 @@ impl Worker {
         };
 
         let mut voted_validators = BitSet::new();
-        let grand_parent_hash = self
-            .client()
-            .block_header(&(*header.parent_hash()).into())
-            .expect("The parent block must exist")
-            .parent_hash();
+        let parent = self.client().block_header(&(*header.parent_hash()).into()).expect("The parent block must exist");
+        let grand_parent_hash = parent.parent_hash();
         for (bitset_index, signature) in seal_view.signatures()? {
-            let public = self.validators.get(&grand_parent_hash, bitset_index);
+            let public = {
+                let state = self.client().state_at(parent.hash().into()).expect("The parent state must exist");
+                let validators = CurrentValidators::load_from_state(&state)?;
+                // This happens when era == 0
+                if validators.is_empty() {
+                    self.validators.get(&grand_parent_hash, bitset_index)
+                } else {
+                    *validators.get_validator(bitset_index).pubkey()
+                }
+            };
             if !verify_schnorr(&public, &signature, &precommit_vote_on.hash())? {
                 let address = public_to_address(&public);
                 return Err(EngineError::BlockNotAuthorized(address.to_owned()).into())
@@ -1263,7 +1269,7 @@ impl Worker {
         if header.number() == 1 {
             return Ok(())
         }
-        self.validators.check_enough_votes(&grand_parent_hash, &voted_validators)?;
+        self.validators.check_enough_votes_with_header(&parent.decode(), &voted_validators)?;
         Ok(())
     }
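The worker change above picks the seal signer's public key from one of two places, keyed off whether the persisted list exists. Condensed into a standalone sketch; the enum and function are ours, for illustration:

    // Illustration only, not part of the patch.
    #[derive(Debug, PartialEq)]
    enum KeySource {
        CurrentValidatorsInState, // era 1: list persisted under CURRENT_VALIDATORS_KEY
        GrandParentHashLookup,    // era 0: legacy lookup through the ValidatorSet
    }

    fn key_source(current_validators_is_empty: bool) -> KeySource {
        if current_validators_is_empty {
            KeySource::GrandParentHashLookup
        } else {
            KeySource::CurrentValidatorsInState
        }
    }

    fn main() {
        assert_eq!(key_source(true), KeySource::GrandParentHashLookup);
        assert_eq!(key_source(false), KeySource::CurrentValidatorsInState);
    }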
diff --git a/core/src/consensus/validator_set/dynamic_validator.rs b/core/src/consensus/validator_set/dynamic_validator.rs
index 829e9cd5df..1bcaacf13c 100644
--- a/core/src/consensus/validator_set/dynamic_validator.rs
+++ b/core/src/consensus/validator_set/dynamic_validator.rs
@@ -18,13 +18,13 @@ use std::sync::{Arc, Weak};
 
 use ckey::{public_to_address, Address, Public};
 use ctypes::util::unexpected::OutOfBounds;
-use ctypes::BlockHash;
+use ctypes::{BlockHash, Header};
 use parking_lot::RwLock;
 
 use super::{RoundRobinValidator, ValidatorSet};
 use crate::client::ConsensusClient;
 use crate::consensus::bit_set::BitSet;
-use crate::consensus::stake::{get_validators, Validator};
+use crate::consensus::stake::{get_validators, CurrentValidators, Validator};
 use crate::consensus::EngineError;
 
 /// Validator set containing a known set of public keys.
@@ -41,7 +41,7 @@ impl DynamicValidator {
         }
     }
 
-    fn validators(&self, parent: BlockHash) -> Option<Vec<Validator>> {
+    fn next_validators(&self, parent: BlockHash) -> Option<Vec<Validator>> {
         let client: Arc<dyn ConsensusClient> =
             self.client.read().as_ref().and_then(Weak::upgrade).expect("Client is not initialized");
         let block_id = parent.into();
@@ -64,12 +64,35 @@ impl DynamicValidator {
         }
     }
 
+    fn current_validators(&self, hash: BlockHash) -> Option<Vec<Validator>> {
+        let client: Arc<dyn ConsensusClient> =
+            self.client.read().as_ref().and_then(Weak::upgrade).expect("Client is not initialized");
+        let block_id = hash.into();
+        let term_id = client.current_term_id(block_id).expect(
+            "validators() is called when creating a block or verifying a block.
+            Miner creates a block only when the parent block is imported.
+            The n'th block is verified only when the parent block is imported.",
+        );
+        if term_id == 0 {
+            return None
+        }
+        let state = client.state_at(block_id)?;
+        let validators = CurrentValidators::load_from_state(&state).unwrap();
+        if validators.is_empty() {
+            None
+        } else {
+            let mut validators: Vec<_> = (*validators).clone();
+            validators.reverse();
+            Some(validators)
+        }
+    }
+
     fn validators_pubkey(&self, parent: BlockHash) -> Option<Vec<Public>> {
-        self.validators(parent).map(|validators| validators.into_iter().map(|val| *val.pubkey()).collect())
+        self.next_validators(parent).map(|validators| validators.into_iter().map(|val| *val.pubkey()).collect())
     }
 
     pub fn proposer_index(&self, parent: BlockHash, prev_proposer_index: usize, proposed_view: usize) -> usize {
-        if let Some(validators) = self.validators(parent) {
+        if let Some(validators) = self.next_validators(parent) {
             let num_validators = validators.len();
             proposed_view % num_validators
         } else {
@@ -77,6 +100,48 @@ impl DynamicValidator {
             (prev_proposer_index + proposed_view + 1) % num_validators
         }
     }
+
+    pub fn check_enough_votes_with_validators(
+        &self,
+        validators: &[Validator],
+        votes: &BitSet,
+    ) -> Result<(), EngineError> {
+        let mut voted_delegation = 0u64;
+        let n_validators = validators.len();
+        for index in votes.true_index_iter() {
+            assert!(index < n_validators);
+            let validator = validators.get(index).ok_or_else(|| {
+                EngineError::ValidatorNotExist {
+                    height: 0, // FIXME
+                    index,
+                }
+            })?;
+            voted_delegation += validator.delegation();
+        }
+        let total_delegation: u64 = validators.iter().map(Validator::delegation).sum();
+        if voted_delegation * 3 > total_delegation * 2 {
+            Ok(())
+        } else {
+            let threshold = total_delegation as usize * 2 / 3;
+            Err(EngineError::BadSealFieldSize(OutOfBounds {
+                min: Some(threshold),
+                max: Some(total_delegation as usize),
+                found: voted_delegation as usize,
+            }))
+        }
+    }
+
+    pub fn check_enough_votes_with_header(&self, header: &Header, votes: &BitSet) -> Result<(), EngineError> {
+        let hash = header.hash();
+        let parent = *header.parent_hash();
+        let validators = self.current_validators(hash).or_else(move || self.next_validators(parent));
+
+        if let Some(validators) = validators {
+            self.check_enough_votes_with_validators(&validators, votes)
+        } else {
+            self.initial_list.check_enough_votes(header.parent_hash(), votes)
+        }
+    }
 }
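`check_enough_votes_with_validators` above weights each vote by its delegation and keeps the 2/3 quorum rule in integer arithmetic (`voted * 3 > total * 2`) rather than computing a fractional threshold. A self-contained worked example with invented delegation values:

    // Illustration only, not part of the patch.
    fn main() {
        let delegations = [50u64, 30, 20]; // total delegation = 100
        let total: u64 = delegations.iter().sum();

        let voted: u64 = delegations[..2].iter().sum(); // 80 delegation voted
        assert!(voted * 3 > total * 2); // 240 > 200: quorum reached

        let voted: u64 = delegations[..1].iter().sum(); // only 50 voted
        assert!(voted * 3 <= total * 2); // 150 <= 200: not enough
    }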
 impl ValidatorSet for DynamicValidator {
@@ -136,7 +201,7 @@ impl ValidatorSet for DynamicValidator {
     }
 
     fn count(&self, parent: &BlockHash) -> usize {
-        if let Some(validators) = self.validators(*parent) {
+        if let Some(validators) = self.next_validators(*parent) {
             validators.len()
         } else {
             self.initial_list.count(parent)
         }
     }
 
     fn check_enough_votes(&self, parent: &BlockHash, votes: &BitSet) -> Result<(), EngineError> {
-        if let Some(validators) = self.validators(*parent) {
-            let mut voted_delegation = 0u64;
-            let n_validators = validators.len();
-            for index in votes.true_index_iter() {
-                assert!(index < n_validators);
-                let validator = validators.get(index).ok_or_else(|| {
-                    EngineError::ValidatorNotExist {
-                        height: 0, // FIXME
-                        index,
-                    }
-                })?;
-                voted_delegation += validator.delegation();
-            }
-            let total_delegation: u64 = validators.iter().map(Validator::delegation).sum();
-            if voted_delegation * 3 > total_delegation * 2 {
-                Ok(())
-            } else {
-                let threshold = total_delegation as usize * 2 / 3;
-                Err(EngineError::BadSealFieldSize(OutOfBounds {
-                    min: Some(threshold),
-                    max: Some(total_delegation as usize),
-                    found: voted_delegation as usize,
-                }))
-            }
+        if let Some(validators) = self.next_validators(*parent) {
+            self.check_enough_votes_with_validators(&validators, votes)
         } else {
             self.initial_list.check_enough_votes(parent, votes)
         }

From f192340bf32e8c2b0a3c60ac7121255da19a68c9 Mon Sep 17 00:00:00 2001
From: Joonmo Yang
Date: Thu, 12 Dec 2019 12:28:30 +0900
Subject: [PATCH 49/52] Generate snapshot chunks for shard level state

---
 sync/src/snapshot/mod.rs | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/sync/src/snapshot/mod.rs b/sync/src/snapshot/mod.rs
index 7a075284a2..7d3aade1b0 100644
--- a/sync/src/snapshot/mod.rs
+++ b/sync/src/snapshot/mod.rs
@@ -24,6 +24,7 @@ use std::thread::{spawn, JoinHandle};
 use ccore::snapshot_notify::{NotifyReceiverSource, ReceiverCanceller};
 use ccore::{BlockChainClient, BlockChainTrait, BlockId, Client};
 use cmerkle::snapshot::{ChunkCompressor, Error as SnapshotError, Snapshot};
+use cstate::{StateDB, TopLevelState, TopStateView};
 use ctypes::BlockHash;
 use hashdb::{AsHashDB, HashDB};
 use primitives::H256;
@@ -66,7 +67,7 @@ impl Service {
                 };
                 {
                     let db_lock = client.state_db().read();
-                    if let Err(err) = snapshot(db_lock.as_hashdb(), block_hash, state_root, &root_dir) {
+                    if let Err(err) = snapshot(&db_lock, block_hash, state_root, &root_dir) {
                         cerror!(
                             SYNC,
                             "Snapshot request failed for block: {}, chunk_root: {}, err: {}",
@@ -94,12 +95,26 @@ impl Service {
     }
 }
 
+fn snapshot(db: &StateDB, block_hash: BlockHash, root: H256, dir: &str) -> Result<(), SnapshotError> {
+    snapshot_trie(db.as_hashdb(), block_hash, root, dir)?;
+
+    let top_state = TopLevelState::from_existing(db.clone(&root), root)?;
+    let shard_roots = {
+        let metadata = top_state.metadata()?.expect("Metadata must exist for snapshot block");
+        let shard_num = *metadata.number_of_shards();
+        (0..shard_num).map(|n| top_state.shard_root(n))
+    };
+    for sr in shard_roots {
+        snapshot_trie(db.as_hashdb(), block_hash, sr?.expect("Shard root must exist"), dir)?;
+    }
+    Ok(())
+}
 
-fn snapshot(db: &dyn HashDB, block_hash: BlockHash, chunk_root: H256, root_dir: &str) -> Result<(), SnapshotError> {
+fn snapshot_trie(db: &dyn HashDB, block_hash: BlockHash, root: H256, root_dir: &str) -> Result<(), SnapshotError> {
     let snapshot_dir = snapshot_dir(root_dir, &block_hash);
     fs::create_dir_all(snapshot_dir)?;
-    for chunk in Snapshot::from_hashdb(db, chunk_root) {
+    for chunk in Snapshot::from_hashdb(db, root) {
         let chunk_path = snapshot_path(root_dir, &block_hash, &chunk.root);
         let chunk_file = fs::File::create(chunk_path)?;
         let compressor = ChunkCompressor::new(chunk_file);
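After this patch a snapshot directory holds chunk files for the top-level trie and for every shard trie, all flat under the same block-hash directory and named by chunk root (`snapshot_path(root_dir, &block_hash, &chunk.root)`). A sketch of the resulting layout, assuming one snapshot block with two shards; the file names are invented:

    snapshot/<block_hash>/<top_chunk_root_a>
    snapshot/<block_hash>/<top_chunk_root_b>
    snapshot/<block_hash>/<shard0_chunk_root_a>
    snapshot/<block_hash>/<shard1_chunk_root_a>

Since every chunk is addressed by its own root, the sync side can request top-level and shard chunks through the same code path (see PATCH 52).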
From 8a428cbf523d32e99e795363d5f4323b11abf827 Mon Sep 17 00:00:00 2001
From: Joonmo Yang
Date: Fri, 13 Dec 2019 13:59:24 +0900
Subject: [PATCH 50/52] Refactor on_chunk_response

---
 sync/src/block/extension.rs | 91 +++++++++++++++++++------------------
 1 file changed, 46 insertions(+), 45 deletions(-)

diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs
index 0899560367..02ee658f09 100644
--- a/sync/src/block/extension.rs
+++ b/sync/src/block/extension.rs
@@ -999,59 +999,60 @@ impl Extension {
     }
 
     fn on_chunk_response(&mut self, from: &NodeId, roots: &[H256], chunks: &[Vec<u8>]) {
-        if let State::SnapshotChunk {
-            block,
-            ref mut restore,
-        } = self.state
-        {
-            for (r, c) in roots.iter().zip(chunks) {
-                if c.is_empty() {
-                    cdebug!(SYNC, "Peer {} sent empty response for chunk request {}", from, r);
+        let (block, restore) = match self.state {
+            State::SnapshotChunk {
+                block,
+                ref mut restore,
+            } => (block, restore),
+            _ => return,
+        };
+        for (r, c) in roots.iter().zip(chunks) {
+            if c.is_empty() {
+                cdebug!(SYNC, "Peer {} sent empty response for chunk request {}", from, r);
+                continue
+            }
+            let decompressor = ChunkDecompressor::from_slice(c);
+            let raw_chunk = match decompressor.decompress() {
+                Ok(chunk) => chunk,
+                Err(e) => {
+                    cwarn!(SYNC, "Decode failed for chunk response from peer {}: {}", from, e);
                     continue
                 }
-                let decompressor = ChunkDecompressor::from_slice(c);
-                let raw_chunk = match decompressor.decompress() {
-                    Ok(chunk) => chunk,
-                    Err(e) => {
-                        cwarn!(SYNC, "Decode failed for chunk response from peer {}: {}", from, e);
-                        continue
-                    }
-                };
-                let recovered = match raw_chunk.recover(*r) {
-                    Ok(chunk) => chunk,
+            };
+            let recovered = match raw_chunk.recover(*r) {
+                Ok(chunk) => chunk,
+                Err(e) => {
+                    cwarn!(SYNC, "Invalid chunk response from peer {}: {}", from, e);
+                    continue
+                }
+            };
+
+            let batch = {
+                let mut state_db = self.client.state_db().write();
+                let hash_db = state_db.as_hashdb_mut();
+                restore.feed(hash_db, recovered);
+
+                let mut batch = DBTransaction::new();
+                match state_db.journal_under(&mut batch, 0, H256::zero()) {
+                    Ok(_) => batch,
                     Err(e) => {
-                        cwarn!(SYNC, "Invalid chunk response from peer {}: {}", from, e);
+                        cwarn!(SYNC, "Failed to write state chunk to database: {}", e);
                         continue
                     }
-                };
-
-                let batch = {
-                    let mut state_db = self.client.state_db().write();
-                    let hash_db = state_db.as_hashdb_mut();
-                    restore.feed(hash_db, recovered);
-
-                    let mut batch = DBTransaction::new();
-                    match state_db.journal_under(&mut batch, 0, H256::zero()) {
-                        Ok(_) => batch,
-                        Err(e) => {
-                            cwarn!(SYNC, "Failed to write state chunk to database: {}", e);
-                            continue
-                        }
-                    }
-                };
-                self.client.db().write_buffered(batch);
-                match self.client.db().flush() {
-                    Ok(_) => cdebug!(SYNC, "Wrote state chunk to database: {}", r),
-                    Err(e) => cwarn!(SYNC, "Failed to flush database: {}", e),
                 }
+            };
+            self.client.db().write_buffered(batch);
+            match self.client.db().flush() {
+                Ok(_) => cdebug!(SYNC, "Wrote state chunk to database: {}", r),
+                Err(e) => cwarn!(SYNC, "Failed to flush database: {}", e),
             }
+        }
 
-            if let Some(root) = restore.next_to_feed() {
-                self.send_chunk_request(&block, &root);
-            } else {
-                self.client.force_update_best_block(&block);
-                self.transition_to_full();
-            }
+        if let Some(root) = restore.next_to_feed() {
+            self.send_chunk_request(&block, &root);
+        } else {
+            self.client.force_update_best_block(&block);
+            self.transition_to_full();
         }
     }

From f6283507a60fd67c4343f3726eaa5358f099fcfe Mon Sep 17 00:00:00 2001
From: Joonmo Yang
Date: Fri, 13 Dec 2019 15:37:17 +0900
Subject: [PATCH 51/52] Refactor sync state transition

---
 sync/src/block/extension.rs | 135 ++++++++++++++++++++++--------------
 1 file changed, 84 insertions(+), 51 deletions(-)
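The refactor below replaces the scattered `self.state = ...` assignments with one transition function: `State::initial` inspects what is already on disk to pick the starting stage, and `move_state`/`State::next` advance exactly one stage at a time. The resulting progression is linear:

    SnapshotHeader(hash, num)          -- snapshot header imported -->
    SnapshotBody { header, prev_root } -- snapshot body imported -->
    SnapshotChunk { block, restore }   -- state trie complete -->
    Full

Because `State::initial` fast-forwards past stages whose data is already present, a restarted node resumes from wherever the previous run stopped.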
diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs
index 02ee658f09..6fd05ca332 100644
--- a/sync/src/block/extension.rs
+++ b/sync/src/block/extension.rs
@@ -75,6 +75,64 @@ enum State {
     Full,
 }
 
+impl State {
+    fn initial(client: &Client, snapshot_target: Option<(BlockHash, u64)>) -> Self {
+        let (hash, num) = match snapshot_target {
+            Some(target) => target,
+            None => return State::Full,
+        };
+        let header = match client.block_header(&num.into()) {
+            Some(ref h) if h.hash() == hash => h.clone(),
+            _ => return State::SnapshotHeader(hash, num),
+        };
+        if client.block_body(&hash.into()).is_none() {
+            let parent_hash = header.parent_hash();
+            let parent =
+                client.block_header(&parent_hash.into()).expect("Parent header of the snapshot header must exist");
+            return State::SnapshotBody {
+                header,
+                prev_root: parent.transactions_root(),
+            }
+        }
+
+        let state_db = client.state_db().read();
+        let state_root = header.state_root();
+        match TrieFactory::readonly(state_db.as_hashdb(), &state_root) {
+            Ok(ref trie) if trie.is_complete() => State::Full,
+            _ => State::SnapshotChunk {
+                block: hash,
+                restore: SnapshotRestore::new(state_root),
+            },
+        }
+    }
+
+    fn next(&self, client: &Client) -> Self {
+        match self {
+            State::SnapshotHeader(hash, _) => {
+                let header = client.block_header(&(*hash).into()).expect("Snapshot header is imported");
+                let parent = client
+                    .block_header(&header.parent_hash().into())
+                    .expect("Parent of the snapshot header must be imported");
+                State::SnapshotBody {
+                    header,
+                    prev_root: parent.transactions_root(),
+                }
+            }
+            State::SnapshotBody {
+                header,
+                ..
+            } => State::SnapshotChunk {
+                block: header.hash(),
+                restore: SnapshotRestore::new(header.state_root()),
+            },
+            State::SnapshotChunk {
+                ..
+            } => State::Full,
+            State::Full => State::Full,
+        }
+    }
+}
+
 pub struct Extension {
     state: State,
     requests: HashMap<NodeId, Vec<(u64, RequestMessage)>>,
@@ -100,7 +158,7 @@ impl Extension {
     ) -> Extension {
         api.set_timer(SYNC_TIMER_TOKEN, Duration::from_millis(SYNC_TIMER_INTERVAL)).expect("Timer set succeeds");
 
-        let state = Extension::initial_state(client.clone(), snapshot_target);
+        let state = State::initial(&client, snapshot_target);
         cdebug!(SYNC, "Initial state is {:?}", state);
         let mut header = client.best_header();
         let mut hollow_headers = vec![header.decode()];
@@ -138,34 +196,29 @@ impl Extension {
         }
     }
 
-    fn initial_state(client: Arc<Client>, snapshot_target: Option<(BlockHash, u64)>) -> State {
-        let (hash, num) = match snapshot_target {
-            Some(target) => target,
-            None => return State::Full,
-        };
-        let header = match client.block_header(&num.into()) {
-            Some(ref h) if h.hash() == hash => h.clone(),
-            _ => return State::SnapshotHeader(hash, num),
-        };
-        if client.block_body(&hash.into()).is_none() {
-            let parent_hash = header.parent_hash();
-            let parent =
-                client.block_header(&parent_hash.into()).expect("Parent header of the snapshot header must exist");
-            return State::SnapshotBody {
-                header,
-                prev_root: parent.transactions_root(),
+    fn move_state(&mut self) {
+        let next_state = self.state.next(&self.client);
+        cdebug!(SYNC, "Transitioning the state to {:?}", next_state);
+        if let State::Full = next_state {
+            let best_hash = match &self.state {
+                State::SnapshotHeader(hash, _) => *hash,
+                State::SnapshotBody {
+                    header,
+                    ..
+                } => header.hash(),
+                State::SnapshotChunk {
+                    block,
+                    ..
+                } => *block,
+                State::Full => unreachable!("Trying to transition state from State::Full"),
+            };
+            self.client.force_update_best_block(&best_hash);
+            for downloader in self.header_downloaders.values_mut() {
+                downloader.update_pivot(best_hash);
             }
+            self.send_status_broadcast();
         }
-
-        let state_db = client.state_db().read();
-        let state_root = header.state_root();
-        match TrieFactory::readonly(state_db.as_hashdb(), &state_root) {
-            Ok(ref trie) if trie.is_complete() => State::Full,
-            _ => State::SnapshotChunk {
-                block: hash,
-                restore: SnapshotRestore::new(state_root),
-            },
-        }
+        self.state = next_state;
     }
 
     fn dismiss_request(&mut self, id: &NodeId, request_id: u64) {
@@ -476,8 +529,7 @@ impl NetworkExtension for Extension {
             if let Some(root) = restore.next_to_feed() {
                 self.send_chunk_request(&block, &root);
             } else {
-                self.client.force_update_best_block(&block);
-                self.transition_to_full();
+                self.move_state();
             }
         }
         State::Full => {
@@ -863,11 +915,7 @@ impl Extension {
                     return
                 }
             }
-            self.state = State::SnapshotBody {
-                header: EncodedHeader::new(header.rlp_bytes().to_vec()),
-                prev_root: *parent.transactions_root(),
-            };
-            cdebug!(SYNC, "Transitioning state to {:?}", self.state);
+            self.move_state();
         }
         _ => cdebug!(
             SYNC,
@@ -941,11 +989,7 @@ impl Extension {
         };
         match self.client.import_trusted_block(&block) {
             Ok(_) | Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
-                self.state = State::SnapshotChunk {
-                    block: header.hash(),
-                    restore: SnapshotRestore::new(header.state_root()),
-                };
-                cdebug!(SYNC, "Transitioning state to {:?}", self.state);
+                self.move_state();
             }
             Err(BlockImportError::Import(ImportError::AlreadyQueued)) => {}
             // FIXME: handle import errors
@@ -1051,20 +1095,9 @@ impl Extension {
         if let Some(root) = restore.next_to_feed() {
             self.send_chunk_request(&block, &root);
         } else {
-            self.client.force_update_best_block(&block);
-            self.transition_to_full();
+            self.move_state();
         }
     }
-
-    fn transition_to_full(&mut self) {
-        cdebug!(SYNC, "Transitioning state to {:?}", State::Full);
-        let best_hash = self.client.best_block_header().hash();
-        for downloader in self.header_downloaders.values_mut() {
-            downloader.update_pivot(best_hash);
-        }
-        self.state = State::Full;
-        self.send_status_broadcast();
-    }
 }
 
 pub struct BlockSyncSender(EventSender<Event>);

From 364fa1897a304c34cd38c21bf40263aa1a1efce2 Mon Sep 17 00:00:00 2001
From: Joonmo Yang
Date: Fri, 13 Dec 2019 16:31:31 +0900
Subject: [PATCH 52/52] Import shard trie chunks in sync extension

---
 sync/src/block/extension.rs | 122 +++++++++++++++++++++++++++++-------
 1 file changed, 100 insertions(+), 22 deletions(-)
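Building on PATCH 49, the patch below splits the chunk-restore stage in two: the top-level trie is restored first (its root comes from the snapshot header), then each shard trie in shard-id order (their roots are read out of the restored top-level state). The extended progression:

    SnapshotTopChunk { block }                  -- top trie complete -->
    SnapshotShardChunk { block, shard_id: 0 }   -- shard 0 complete -->
    ...
    SnapshotShardChunk { block, shard_id: n-1 } -- last shard complete -->
    Full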
diff --git a/sync/src/block/extension.rs b/sync/src/block/extension.rs
index 6fd05ca332..44586e51dd 100644
--- a/sync/src/block/extension.rs
+++ b/sync/src/block/extension.rs
@@ -24,17 +24,17 @@ use std::time::Duration;
 use ccore::encoded::Header as EncodedHeader;
 use ccore::{
     Block, BlockChainClient, BlockChainTrait, BlockId, BlockImportError, BlockStatus, ChainNotify, Client, ImportBlock,
-    ImportError, UnverifiedTransaction,
+    ImportError, StateInfo, UnverifiedTransaction,
 };
 use cmerkle::snapshot::ChunkDecompressor;
 use cmerkle::snapshot::Restore as SnapshotRestore;
 use cmerkle::{skewed_merkle_root, TrieFactory};
 use cnetwork::{Api, EventSender, NetworkExtension, NodeId};
-use cstate::FindActionHandler;
+use cstate::{FindActionHandler, TopLevelState, TopStateView};
 use ctimer::TimerToken;
 use ctypes::header::{Header, Seal};
 use ctypes::transaction::Action;
-use ctypes::{BlockHash, BlockNumber};
+use ctypes::{BlockHash, BlockNumber, ShardId};
 use hashdb::AsHashDB;
 use kvdb::DBTransaction;
 use primitives::{H256, U256};
@@ -68,10 +68,15 @@ enum State {
         header: EncodedHeader,
         prev_root: H256,
     },
-    SnapshotChunk {
+    SnapshotTopChunk {
         block: BlockHash,
         restore: SnapshotRestore,
     },
+    SnapshotShardChunk {
+        block: BlockHash,
+        shard_id: ShardId,
+        restore: SnapshotRestore,
+    },
     Full,
 }
 
@@ -82,7 +87,7 @@ impl State {
             None => return State::Full,
         };
         let header = match client.block_header(&num.into()) {
-            Some(ref h) if h.hash() == hash => h.clone(),
+            Some(h) if h.hash() == hash => h,
             _ => return State::SnapshotHeader(hash, num),
         };
         if client.block_body(&hash.into()).is_none() {
@@ -97,13 +102,35 @@ impl State {
 
         let state_db = client.state_db().read();
         let state_root = header.state_root();
-        match TrieFactory::readonly(state_db.as_hashdb(), &state_root) {
-            Ok(ref trie) if trie.is_complete() => State::Full,
-            _ => State::SnapshotChunk {
+        let top_trie = TrieFactory::readonly(state_db.as_hashdb(), &state_root);
+        if !top_trie.map(|t| t.is_complete()).unwrap_or(false) {
+            return State::SnapshotTopChunk {
                 block: hash,
                 restore: SnapshotRestore::new(state_root),
-            },
+            }
+        }
+
+        let top_state = client.state_at(hash.into()).expect("Top level state at the snapshot header exists");
+        let metadata = top_state.metadata().unwrap().expect("Metadata must exist for the snapshot block");
+        let shard_num = *metadata.number_of_shards();
+        let empty_shard = (0..shard_num).find_map(|n| {
+            let shard_root = top_state.shard_root(n).unwrap().expect("Shard root must exist");
+            let trie = TrieFactory::readonly(state_db.as_hashdb(), &shard_root);
+            if !trie.map(|t| t.is_complete()).unwrap_or(false) {
+                Some((n, shard_root))
+            } else {
+                None
+            }
+        });
+        if let Some((shard_id, shard_root)) = empty_shard {
+            return State::SnapshotShardChunk {
+                block: hash,
+                shard_id,
+                restore: SnapshotRestore::new(shard_root),
+            }
+        }
+
+        State::Full
     }
 
     fn next(&self, client: &Client) -> Self {
@@ -121,13 +148,48 @@ impl State {
             State::SnapshotBody {
                 header,
                 ..
-            } => State::SnapshotChunk {
+            } => State::SnapshotTopChunk {
                 block: header.hash(),
                 restore: SnapshotRestore::new(header.state_root()),
             },
-            State::SnapshotChunk {
+            State::SnapshotTopChunk {
+                block,
                 ..
-            } => State::Full,
+            } => {
+                let header = client.block_header(&(*block).into()).expect("Snapshot header must exist");
+                let state_root = header.state_root();
+                let state_db = client.state_db().read();
+                let top_state = TopLevelState::from_existing(state_db.clone(&state_root), state_root).unwrap();
+                let shard_root = top_state.shard_root(0).unwrap().expect("Shard 0 always exists");
+                State::SnapshotShardChunk {
+                    block: *block,
+                    shard_id: 0,
+                    restore: SnapshotRestore::new(shard_root),
+                }
+            }
+            State::SnapshotShardChunk {
+                block,
+                shard_id,
+                ..
+            } => {
+                let top_state = client.state_at((*block).into()).expect("State at the snapshot header must exist");
+                let metadata = top_state.metadata().unwrap().expect("Metadata must exist for snapshot block");
+                let shard_num = *metadata.number_of_shards();
+                if shard_id + 1 == shard_num {
+                    State::Full
+                } else {
+                    let next_shard = shard_id + 1;
+                    let shard_root = top_state
+                        .shard_root(next_shard)
+                        .expect("Top level state must be valid")
+                        .expect("Shard root must exist");
+                    State::SnapshotShardChunk {
+                        block: *block,
+                        shard_id: next_shard,
+                        restore: SnapshotRestore::new(shard_root),
+                    }
+                }
+            }
             State::Full => State::Full,
         }
     }
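`State::initial` above also carries the resume logic for restarts: scan the top trie, then the shard tries, and enter the chunk stage for the first trie that is not yet complete. The scan boils down to finding the first incomplete entry, which a standalone sketch can pin down (completeness flags invented for the example):

    // Illustration only, not part of the patch.
    fn first_incomplete(complete: &[bool]) -> Option<usize> {
        (0..complete.len()).find(|&i| !complete[i])
    }

    fn main() {
        // Shards 0 and 2 are complete, shard 1 still misses nodes:
        assert_eq!(first_incomplete(&[true, false, true]), Some(1));
        // Everything is complete: go straight to State::Full.
        assert_eq!(first_incomplete(&[true, true, true]), None);
    }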
@@ -206,11 +268,15 @@ impl Extension {
                     header,
                     ..
                 } => header.hash(),
-                State::SnapshotChunk {
+                State::SnapshotTopChunk {
+                    block,
+                    ..
+                } => *block,
+                State::SnapshotShardChunk {
                     block,
                     ..
                 } => *block,
-                State::Full => unreachable!("Trying to transition state from State::Full"),
+                State::Full => panic!("Trying to transition the state from State::Full"),
             };
             self.client.force_update_best_block(&best_hash);
             for downloader in self.header_downloaders.values_mut() {
@@ -522,9 +588,20 @@ impl NetworkExtension for Extension {
                     }
                 }
             }
-            State::SnapshotChunk {
+            State::SnapshotTopChunk {
+                block,
+                ref mut restore,
+            } => {
+                if let Some(root) = restore.next_to_feed() {
+                    self.send_chunk_request(&block, &root);
+                } else {
+                    self.move_state();
+                }
+            }
+            State::SnapshotShardChunk {
                 block,
                 ref mut restore,
+                ..
             } => {
                 if let Some(root) = restore.next_to_feed() {
                     self.send_chunk_request(&block, &root);
@@ -925,12 +1002,6 @@ impl Extension {
                     headers.len()
                 ),
             },
-            State::SnapshotBody {
-                ..
-            } => {}
-            State::SnapshotChunk {
-                ..
-            } => {}
             State::Full => {
                 let (mut completed, peer_is_caught_up) = if let Some(peer) = self.header_downloaders.get_mut(from) {
                     let encoded: Vec<_> = headers.iter().map(|h| EncodedHeader::new(h.rlp_bytes().to_vec())).collect();
@@ -969,6 +1040,7 @@ impl Extension {
                 }
             }
         }
+        _ => {}
     }
 }
 
@@ -1044,12 +1116,18 @@ impl Extension {
 
     fn on_chunk_response(&mut self, from: &NodeId, roots: &[H256], chunks: &[Vec<u8>]) {
         let (block, restore) = match self.state {
-            State::SnapshotChunk {
+            State::SnapshotTopChunk {
                 block,
                 ref mut restore,
             } => (block, restore),
+            State::SnapshotShardChunk {
+                block,
+                ref mut restore,
+                ..
+            } => (block, restore),
             _ => return,
         };
+        assert_eq!(roots.len(), chunks.len());
         for (r, c) in roots.iter().zip(chunks) {
             if c.is_empty() {
                 cdebug!(SYNC, "Peer {} sent empty response for chunk request {}", from, r);