Commit

Fast sync (#8884)
* State sync

* Importing state fixes

* Bugfixes

* Sync with proof

* Status reporting

* Unsafe sync mode

* Sync test

* Cleanup

* Apply suggestions from code review

Co-authored-by: cheme <emericchevalier.pro@gmail.com>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>

* set_genesis_storage

* Extract keys from range proof

* Detect iter completion

* Download and import bodies with fast sync

* Replaced meta updates tuple with a struct

* Fixed reverting finalized state

* Reverted timeout

* Typo

* Doc

* Doc

* Fixed light client test

* Fixed error handling

* Tweaks

* More UpdateMeta changes

* Rename convert_transaction

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Code review suggestions

* Fixed count handling

Co-authored-by: cheme <emericchevalier.pro@gmail.com>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
4 people authored and athei committed Jun 25, 2021
1 parent 631ceb4 commit 668eb63
Showing 54 changed files with 2,120 additions and 371 deletions.
5 changes: 3 additions & 2 deletions Cargo.lock


5 changes: 5 additions & 0 deletions client/api/src/backend.rs
@@ -41,6 +41,7 @@ use sp_consensus::BlockOrigin;
use parking_lot::RwLock;

pub use sp_state_machine::Backend as StateBackend;
pub use sp_consensus::ImportedState;
use std::marker::PhantomData;

/// Extracts the state backend type for the given backend.
@@ -161,6 +162,10 @@ pub trait BlockImportOperation<Block: BlockT> {
update: TransactionForSB<Self::State, Block>,
) -> sp_blockchain::Result<()>;

/// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written
/// to the database.
fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result<Block::Hash>;

/// Inject storage data into the database replacing any existing data.
fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result<Block::Hash>;

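A minimal sketch of how a caller might choose between the two genesis paths above, assuming the trait is reachable as `sc_client_api::backend::BlockImportOperation`; the `init_genesis` helper and its `fast_sync` flag are illustrative names, not part of this PR:

use sc_client_api::backend::BlockImportOperation;
use sp_runtime::traits::Block as BlockT;
use sp_storage::Storage;

fn init_genesis<Block: BlockT, Op: BlockImportOperation<Block>>(
	op: &mut Op,
	genesis: Storage,
	fast_sync: bool,
) -> sp_blockchain::Result<Block::Hash> {
	if fast_sync {
		// Compute the genesis state root (needed for the genesis header) without
		// committing the state to the database; the latest state is downloaded later.
		op.set_genesis_state(genesis, false)
	} else {
		// Regular path: write the genesis state as usual.
		op.set_genesis_state(genesis, true)
	}
}
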
57 changes: 38 additions & 19 deletions client/api/src/in_mem.rs
@@ -347,6 +347,11 @@ impl<Block: BlockT> HeaderBackend<Block> for Blockchain<Block> {
genesis_hash: storage.genesis_hash,
finalized_hash: storage.finalized_hash,
finalized_number: storage.finalized_number,
finalized_state: if storage.finalized_hash != Default::default() {
Some((storage.finalized_hash.clone(), storage.finalized_number))
} else {
None
},
number_leaves: storage.leaves.count()
}
}
@@ -528,6 +533,32 @@ pub struct BlockImportOperation<Block: BlockT> {
set_head: Option<BlockId<Block>>,
}

impl<Block: BlockT> BlockImportOperation<Block> where
Block::Hash: Ord,
{
fn apply_storage(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result<Block::Hash> {
check_genesis_storage(&storage)?;

let child_delta = storage.children_default.iter()
.map(|(_storage_key, child_content)|
(
&child_content.child_info,
child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref())))
)
);

let (root, transaction) = self.old_state.full_storage_root(
storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
child_delta,
);

if commit {
self.new_state = Some(transaction);
}
Ok(root)
}
}

impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperation<Block> where
Block::Hash: Ord,
{
@@ -569,24 +600,12 @@ impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperatio
Ok(())
}

-	fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result<Block::Hash> {
-		check_genesis_storage(&storage)?;
-
-		let child_delta = storage.children_default.iter()
-			.map(|(_storage_key, child_content)|
-				(
-					&child_content.child_info,
-					child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref())))
-				)
-			);
-
-		let (root, transaction) = self.old_state.full_storage_root(
-			storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
-			child_delta,
-		);
+	fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result<Block::Hash> {
+		self.apply_storage(storage, commit)
+	}

-		self.new_state = Some(transaction);
-		Ok(root)
+	fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result<Block::Hash> {
+		self.apply_storage(storage, true)
	}

fn insert_aux<I>(&mut self, ops: I) -> sp_blockchain::Result<()>
@@ -806,12 +825,12 @@ impl<Block: BlockT> backend::RemoteBackend<Block> for Backend<Block> where Block
/// Check that genesis storage is valid.
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> {
if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
return Err(sp_blockchain::Error::GenesisInvalid.into());
return Err(sp_blockchain::Error::InvalidState.into());
}

if storage.children_default.keys()
.any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) {
return Err(sp_blockchain::Error::GenesisInvalid.into());
return Err(sp_blockchain::Error::InvalidState.into());
}

Ok(())
1 change: 1 addition & 0 deletions client/api/src/lib.rs
@@ -41,6 +41,7 @@ pub use proof_provider::*;
pub use sp_blockchain::HeaderBackend;

pub use sp_state_machine::{StorageProof, ExecutionStrategy};
pub use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo};

/// Usage Information Provider interface
///
27 changes: 27 additions & 0 deletions client/api/src/proof_provider.rs
@@ -70,4 +70,31 @@ pub trait ProofProvider<Block: BlockT> {
storage_key: Option<&PrefixedStorageKey>,
key: &StorageKey,
) -> sp_blockchain::Result<ChangesProof<Block::Header>>;

/// Given a `BlockId`, iterate over all storage values starting at `start_key` (exclusive),
/// building proofs until the size limit is reached. Returns the combined proof and the number of collected keys.
fn read_proof_collection(
&self,
id: &BlockId<Block>,
start_key: &[u8],
size_limit: usize,
) -> sp_blockchain::Result<(StorageProof, u32)>;

/// Given a `BlockId`, iterate over all storage values starting at `start_key`.
/// Returns the collected keys and values.
fn storage_collection(
&self,
id: &BlockId<Block>,
start_key: &[u8],
size_limit: usize,
) -> sp_blockchain::Result<Vec<(Vec<u8>, Vec<u8>)>>;

/// Verify read storage proof for a set of keys.
/// Returns collected key-value pairs and a flag indicating if iteration is complete.
fn verify_range_proof(
&self,
root: Block::Hash,
proof: StorageProof,
start_key: &[u8],
) -> sp_blockchain::Result<(Vec<(Vec<u8>, Vec<u8>)>, bool)>;
}
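A hedged sketch of how a fast-syncing client could drive these two calls in a loop; `provider`, `remote_root`, and the 64 KiB chunk size are placeholders, and in the real protocol the proof is produced by the serving peer rather than locally:

use sc_client_api::ProofProvider;
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

fn download_state<Block: BlockT, P: ProofProvider<Block>>(
	provider: &P,
	at: &BlockId<Block>,
	remote_root: Block::Hash,
) -> sp_blockchain::Result<Vec<(Vec<u8>, Vec<u8>)>> {
	let mut state = Vec::new();
	let mut start_key = Vec::new();
	loop {
		// Serving side: prove the next chunk of key/value pairs after `start_key`.
		let (proof, _key_count) = provider.read_proof_collection(at, &start_key, 64 * 1024)?;
		// Syncing side: verify the chunk against the target state root.
		let (mut entries, complete) = provider.verify_range_proof(remote_root, proof, &start_key)?;
		if let Some((last_key, _)) = entries.last() {
			start_key = last_key.clone();
		}
		state.append(&mut entries);
		if complete {
			return Ok(state);
		}
	}
}
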
1 change: 1 addition & 0 deletions client/authority-discovery/src/worker/tests.rs
@@ -69,6 +69,7 @@ impl<Block: BlockT> HeaderBackend<Block> for TestApi {
finalized_number: Zero::zero(),
genesis_hash: Default::default(),
number_leaves: Default::default(),
finalized_state: None,
}
}

24 changes: 24 additions & 0 deletions client/cli/src/arg_enums.rs
@@ -232,6 +232,30 @@ arg_enum! {
}
}

arg_enum! {
/// Syncing mode.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy)]
pub enum SyncMode {
// Full sync. Download and verify all blocks.
Full,
// Download blocks without executing them. Download latest state with proofs.
Fast,
// Download blocks without executing them. Download latest state without proofs.
FastUnsafe,
}
}

impl Into<sc_network::config::SyncMode> for SyncMode {
fn into(self) -> sc_network::config::SyncMode {
match self {
SyncMode::Full => sc_network::config::SyncMode::Full,
SyncMode::Fast => sc_network::config::SyncMode::Fast { skip_proofs: false },
SyncMode::FastUnsafe => sc_network::config::SyncMode::Fast { skip_proofs: true },
}
}
}

/// Default value for the `--execution-syncing` parameter.
pub const DEFAULT_EXECUTION_SYNCING: ExecutionStrategy = ExecutionStrategy::NativeElseWasm;
/// Default value for the `--execution-import-block` parameter.
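A small usage sketch of the `SyncMode` conversion above; it assumes the enum is reachable as `sc_cli::arg_enums::SyncMode`:

use sc_cli::arg_enums::SyncMode;

fn main() {
	// What `--sync FastUnsafe` ends up as at the network layer.
	let net_mode: sc_network::config::SyncMode = SyncMode::FastUnsafe.into();
	assert!(matches!(net_mode, sc_network::config::SyncMode::Fast { skip_proofs: true }));
}
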
9 changes: 9 additions & 0 deletions client/cli/src/params/network_params.rs
@@ -17,6 +17,7 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.

use crate::params::node_key_params::NodeKeyParams;
use crate::arg_enums::SyncMode;
use sc_network::{
config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig},
multiaddr::Protocol,
@@ -125,6 +126,13 @@ pub struct NetworkParams {
/// Join the IPFS network and serve transactions over bitswap protocol.
#[structopt(long)]
pub ipfs_server: bool,

/// Blockchain syncing mode.
/// Full - Download and validate full blockchain history (Default).
/// Fast - Download blocks and the latest state only.
/// FastUnsafe - Same as Fast, but skips downloading state proofs.
#[structopt(long, default_value = "Full")]
pub sync: SyncMode,
}

impl NetworkParams {
Expand Down Expand Up @@ -218,6 +226,7 @@ impl NetworkParams {
kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths,
yamux_window_size: None,
ipfs_server: self.ipfs_server,
sync_mode: self.sync.into(),
}
}
}
6 changes: 4 additions & 2 deletions client/consensus/aura/src/lib.rs
@@ -42,7 +42,7 @@ use codec::{Encode, Decode, Codec};

use sp_consensus::{
BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams,
BlockOrigin, Error as ConsensusError, SelectChain,
BlockOrigin, Error as ConsensusError, SelectChain, StateAction,
};
use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider};
use sp_blockchain::{Result as CResult, ProvideCache, HeaderBackend};
@@ -421,7 +421,9 @@
let mut import_block = BlockImportParams::new(BlockOrigin::Own, header);
import_block.post_digests.push(signature_digest_item);
import_block.body = Some(body);
import_block.storage_changes = Some(storage_changes);
import_block.state_action = StateAction::ApplyChanges(
sp_consensus::StorageChanges::Changes(storage_changes)
);
import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain);

Ok(import_block)
12 changes: 10 additions & 2 deletions client/consensus/babe/src/lib.rs
@@ -101,6 +101,7 @@ use sp_consensus::{
import_queue::{BasicQueue, CacheKeyId, DefaultImportQueue, Verifier},
BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Environment,
Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, SlotData,
StateAction,
};
use sp_consensus_babe::inherents::BabeInherentData;
use sp_consensus_slots::Slot;
@@ -790,7 +791,9 @@
let mut import_block = BlockImportParams::new(BlockOrigin::Own, header);
import_block.post_digests.push(digest_item);
import_block.body = Some(body);
import_block.storage_changes = Some(storage_changes);
import_block.state_action = StateAction::ApplyChanges(
sp_consensus::StorageChanges::Changes(storage_changes)
);
import_block.intermediates.insert(
Cow::from(INTERMEDIATE_KEY),
Box::new(BabeIntermediate::<B> { epoch_descriptor }) as Box<_>,
@@ -1295,7 +1298,12 @@ impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client,
// early exit if block already in chain, otherwise the check for
// epoch changes will error when trying to re-import an epoch change
match self.client.status(BlockId::Hash(hash)) {
Ok(sp_blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
Ok(sp_blockchain::BlockStatus::InChain) => {
// When re-importing existing block strip away intermediates.
let _ = block.take_intermediate::<BabeIntermediate<Block>>(INTERMEDIATE_KEY)?;
block.fork_choice = Some(ForkChoiceStrategy::Custom(false));
return self.inner.import_block(block, new_cache).await.map_err(Into::into)
},
Ok(sp_blockchain::BlockStatus::Unknown) => {},
Err(e) => return Err(ConsensusError::ClientImport(e.to_string())),
}
6 changes: 4 additions & 2 deletions client/consensus/manual-seal/src/seal_block.rs
@@ -28,7 +28,7 @@ use futures::prelude::*;
use sc_transaction_pool::txpool;
use sp_consensus::{
self, BlockImport, Environment, Proposer, ForkChoiceStrategy,
BlockImportParams, BlockOrigin, ImportResult, SelectChain,
BlockImportParams, BlockOrigin, ImportResult, SelectChain, StateAction,
};
use sp_blockchain::HeaderBackend;
use std::collections::HashMap;
@@ -145,7 +145,9 @@ pub async fn seal_block<B, BI, SC, C, E, P, CIDP>(
params.body = Some(body);
params.finalized = finalize;
params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
params.storage_changes = Some(proposal.storage_changes);
params.state_action = StateAction::ApplyChanges(
sp_consensus::StorageChanges::Changes(proposal.storage_changes)
);

if let Some(digest_provider) = digest_provider {
digest_provider.append_block_import(&parent, &mut params, &inherent_data)?;
7 changes: 5 additions & 2 deletions client/consensus/pow/src/worker.rs
@@ -18,7 +18,8 @@

use std::{pin::Pin, time::Duration, collections::HashMap, borrow::Cow};
use sc_client_api::ImportNotifications;
use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, import_queue::BoxBlockImport};
use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, StorageChanges,
StateAction, import_queue::BoxBlockImport};
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT},
@@ -136,7 +137,9 @@
let mut import_block = BlockImportParams::new(BlockOrigin::Own, header);
import_block.post_digests.push(seal);
import_block.body = Some(body);
import_block.storage_changes = Some(build.proposal.storage_changes);
import_block.state_action = StateAction::ApplyChanges(
StorageChanges::Changes(build.proposal.storage_changes)
);

let intermediate = PowIntermediate::<Algorithm::Difficulty> {
difficulty: Some(build.metadata.difficulty),
12 changes: 12 additions & 0 deletions client/db/src/bench.rs
@@ -373,6 +373,18 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
}
}

fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.state.borrow().as_ref().ok_or_else(state_err)?
.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
}

fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
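A hedged sketch of how the new iterator hook might be used to collect one bounded chunk of key/value pairs; `collect_chunk` is an illustrative helper, and the assumption is that returning `false` from the closure stops the walk while the returned `bool` reports whether the end of the trie was reached:

use sp_state_machine::Backend as StateBackend;

fn collect_chunk<H: sp_core::Hasher, S: StateBackend<H>>(
	state: &S,
	resume_from: &[u8],
) -> Result<(Vec<(Vec<u8>, Vec<u8>)>, bool), S::Error> {
	let mut chunk = Vec::new();
	let complete = state.apply_to_key_values_while(
		None,              // top trie: no child trie info
		None,              // no key prefix filter
		Some(resume_from), // resume after the last key of the previous chunk
		|key, value| {
			chunk.push((key, value));
			chunk.len() < 1_000 // keep iterating while the chunk is small enough
		},
		false,             // treat missing trie nodes as an error
	)?;
	Ok((chunk, complete))
}
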