Changed consensus to manual-seal (#42)
* Changed consensus to manual-seal

* clean code according to PR comments
PierreOssun authored Mar 18, 2022
1 parent 42a2cc6 commit 9d140d5
Showing 5 changed files with 52 additions and 292 deletions.
5 changes: 1 addition & 4 deletions node/Cargo.toml
@@ -29,12 +29,9 @@ sc-keystore = { git = "/~https://github.com/paritytech/substrate", package = "sc-k
sp-inherents = { git = "/~https://github.com/paritytech/substrate", package = "sp-inherents" }
sc-transaction-pool = { git = "/~https://github.com/paritytech/substrate", package = "sc-transaction-pool" }
sc-transaction-pool-api = { git = "/~https://github.com/paritytech/substrate", package = "sc-transaction-pool-api" }
sc-consensus-aura = { git = "/~https://github.com/paritytech/substrate", package = "sc-consensus-aura" }
sp-consensus-aura = { git = "/~https://github.com/paritytech/substrate", package = "sp-consensus-aura" }
sp-consensus = { git = "/~https://github.com/paritytech/substrate", package = "sp-consensus" }
sc-consensus = { git = "/~https://github.com/paritytech/substrate", package = "sc-consensus" }
sc-finality-grandpa = { git = "/~https://github.com/paritytech/substrate", package = "sc-finality-grandpa" }
sp-finality-grandpa = { git = "/~https://github.com/paritytech/substrate", package = "sp-finality-grandpa" }
sc-consensus-manual-seal = { git = "/~https://github.com/paritytech/substrate", package = "sc-consensus-manual-seal" }
sc-client-api = { git = "/~https://github.com/paritytech/substrate", package = "sc-client-api" }
sp-runtime = { git = "/~https://github.com/paritytech/substrate", package = "sp-runtime" }
sp-timestamp = { git = "/~https://github.com/paritytech/substrate", package = "sp-timestamp" }
21 changes: 1 addition & 20 deletions node/src/chain_spec.rs
@@ -1,11 +1,8 @@
use contracts_node_runtime::{
AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, Signature, SudoConfig,
SystemConfig, WASM_BINARY,
AccountId, BalancesConfig, GenesisConfig, Signature, SudoConfig, SystemConfig, WASM_BINARY,
};
use sc_service::ChainType;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_core::{sr25519, Pair, Public};
use sp_finality_grandpa::AuthorityId as GrandpaId;
use sp_runtime::traits::{IdentifyAccount, Verify};

// The URL for the telemetry server.
@@ -31,11 +28,6 @@ where
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}

/// Generate an Aura authority key.
pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) {
(get_from_seed::<AuraId>(s), get_from_seed::<GrandpaId>(s))
}

pub fn development_config() -> Result<ChainSpec, String> {
let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;

@@ -48,8 +40,6 @@ pub fn development_config() -> Result<ChainSpec, String> {
move || {
testnet_genesis(
wasm_binary,
// Initial PoA authorities
vec![authority_keys_from_seed("Alice")],
// Sudo account
get_account_id_from_seed::<sr25519::Public>("Alice"),
// Pre-funded accounts
@@ -97,8 +87,6 @@ pub fn local_testnet_config() -> Result<ChainSpec, String> {
move || {
testnet_genesis(
wasm_binary,
// Initial PoA authorities
vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")],
// Sudo account
get_account_id_from_seed::<sr25519::Public>("Alice"),
// Pre-funded accounts
@@ -137,7 +125,6 @@ pub fn local_testnet_config() -> Result<ChainSpec, String> {
/// Configure initial storage state for FRAME modules.
fn testnet_genesis(
wasm_binary: &[u8],
initial_authorities: Vec<(AuraId, GrandpaId)>,
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
_enable_println: bool,
@@ -151,12 +138,6 @@ fn testnet_genesis(
// Configure endowed accounts with initial balance of 1 << 60.
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(),
},
aura: AuraConfig {
authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(),
},
grandpa: GrandpaConfig {
authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(),
},
sudo: SudoConfig {
// Assign network admin rights.
key: Some(root_key),
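
For reference, the genesis builder that results from this change only has to populate the system, balances, and sudo pallets; the Aura and GRANDPA authority sets disappear entirely. A minimal sketch of the trimmed function, assuming the usual SystemConfig code field that sits outside the visible hunk and eliding any other pallet genesis fields the runtime may declare:

fn testnet_genesis(
    wasm_binary: &[u8],
    root_key: AccountId,
    endowed_accounts: Vec<AccountId>,
    _enable_println: bool,
) -> GenesisConfig {
    GenesisConfig {
        system: SystemConfig {
            // Store the compiled Wasm runtime in genesis storage (outside the hunk shown above).
            code: wasm_binary.to_vec(),
        },
        balances: BalancesConfig {
            // Configure endowed accounts with initial balance of 1 << 60.
            balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(),
        },
        sudo: SudoConfig {
            // Assign network admin rights.
            key: Some(root_key),
        },
    }
}

The callers (development_config and local_testnet_config) drop the vec![authority_keys_from_seed(...)] argument accordingly, as the hunks above show.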
201 changes: 36 additions & 165 deletions node/src/service.rs
@@ -1,15 +1,11 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use contracts_node_runtime::{self, opaque::Block, RuntimeApi};
use sc_client_api::{BlockBackend, ExecutorProvider};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
pub use sc_executor::NativeElseWasmExecutor;
use sc_finality_grandpa::SharedVoterState;
use sc_keystore::LocalKeystore;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::{sync::Arc, time::Duration};
use std::sync::Arc;

// Our native executor instance.
pub struct ExecutorDispatch;
@@ -45,16 +41,7 @@ pub fn new_partial(
FullSelectChain,
sc_consensus::DefaultImportQueue<Block, FullClient>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
sc_finality_grandpa::GrandpaBlockImport<
FullBackend,
Block,
FullClient,
FullSelectChain,
>,
sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
Option<Telemetry>,
),
(Option<Telemetry>,),
>,
ServiceError,
> {
@@ -88,12 +75,18 @@ pub fn new_partial(
)?;
let client = Arc::new(client);

let select_chain = sc_consensus::LongestChain::new(backend.clone());

let telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", None, worker.run());
telemetry
});

let select_chain = sc_consensus::LongestChain::new(backend.clone());
let import_queue = sc_consensus_manual_seal::import_queue(
Box::new(client.clone()),
&task_manager.spawn_essential_handle(),
config.prometheus_registry(),
);

let transaction_pool = sc_transaction_pool::BasicPool::new_full(
config.transaction_pool.clone(),
@@ -103,40 +96,6 @@ pub fn new_partial(
client.clone(),
);

let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
client.clone(),
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;

let slot_duration = sc_consensus_aura::slot_duration(&*client)?;

let import_queue =
sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import.clone())),
client: client.clone(),
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);

Ok((timestamp, slot))
},
spawner: &task_manager.spawn_essential_handle(),
can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(
client.executor().clone(),
),
registry: config.prometheus_registry(),
check_for_equivocation: Default::default(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
})?;

Ok(sc_service::PartialComponents {
client,
backend,
@@ -145,7 +104,7 @@ pub fn new_partial(
keystore_container,
select_chain,
transaction_pool,
other: (grandpa_block_import, grandpa_link, telemetry),
other: (telemetry,),
})
}
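
Pulled out of the hunks above: the whole verification side of consensus in new_partial now reduces to the manual-seal import queue, since there are no slots to check and no justifications to import. A sketch, assuming the client, task_manager, and config bindings already in scope inside new_partial:

// A passthrough import queue: every well-formed block is accepted,
// which is all manual/instant seal needs.
let import_queue = sc_consensus_manual_seal::import_queue(
    Box::new(client.clone()),
    &task_manager.spawn_essential_handle(),
    config.prometheus_registry(),
);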

@@ -157,7 +116,7 @@ fn remote_keystore(_url: &String) -> Result<Arc<LocalKeystore>, &'static str> {
}

/// Builds a new service for a full client.
pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
let sc_service::PartialComponents {
client,
backend,
@@ -166,7 +125,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
mut keystore_container,
select_chain,
transaction_pool,
other: (block_import, grandpa_link, mut telemetry),
other: (mut telemetry,),
} = new_partial(&config)?;

if let Some(url) = &config.keystore_remote {
@@ -179,20 +138,6 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
))),
};
}
let grandpa_protocol_name = sc_finality_grandpa::protocol_standard_name(
&client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
&config.chain_spec,
);

config
.network
.extra_sets
.push(sc_finality_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()));
let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new(
backend.clone(),
grandpa_link.shared_authority_set().clone(),
Vec::default(),
));

let (network, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
@@ -202,7 +147,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
spawn_handle: task_manager.spawn_handle(),
import_queue,
block_announce_validator_builder: None,
warp_sync: Some(warp_sync),
warp_sync: None,
})?;

if config.offchain_worker.enabled {
@@ -214,13 +159,6 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
);
}

let role = config.role.clone();
let force_authoring = config.force_authoring;
let backoff_authoring_blocks: Option<()> = None;
let name = config.network.node_name.clone();
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();

let rpc_extensions_builder = {
let client = client.clone();
let pool = transaction_pool.clone();
@@ -232,9 +170,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
Ok(crate::rpc::create_full(deps))
})
};
let prometheus_registry = config.prometheus_registry().cloned();

let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network: network.clone(),
network,
client: client.clone(),
keystore: keystore_container.sync_keystore(),
task_manager: &mut task_manager,
@@ -246,99 +185,31 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
telemetry: telemetry.as_mut(),
})?;

if role.is_authority() {
let proposer_factory = sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
client.clone(),
transaction_pool,
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);

let can_author_with =
sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());

let slot_duration = sc_consensus_aura::slot_duration(&*client)?;

let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(
StartAuraParams {
slot_duration,
client: client.clone(),
select_chain,
block_import,
proposer_factory,
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);

Ok((timestamp, slot))
},
force_authoring,
backoff_authoring_blocks,
keystore: keystore_container.sync_keystore(),
can_author_with,
sync_oracle: network.clone(),
justification_sync_link: network.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
max_block_proposal_slot_portion: None,
telemetry: telemetry.as_ref().map(|x| x.handle()),
},
)?;

// the AURA authoring task is considered essential, i.e. if it
// fails we take down the service with it.
task_manager
.spawn_essential_handle()
.spawn_blocking("aura", Some("block-authoring"), aura);
}

// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore =
if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };
let proposer = sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
client.clone(),
transaction_pool.clone(),
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);

let grandpa_config = sc_finality_grandpa::Config {
// FIXME #1578 make this available through chainspec
gossip_duration: Duration::from_millis(333),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore,
local_role: role,
telemetry: telemetry.as_ref().map(|x| x.handle()),
protocol_name: grandpa_protocol_name.clone(),
let params = sc_consensus_manual_seal::InstantSealParams {
block_import: client.clone(),
env: proposer,
client,
pool: transaction_pool,
select_chain,
consensus_data_provider: None,
create_inherent_data_providers: move |_, ()| async move {
Ok(sp_timestamp::InherentDataProvider::from_system_time())
},
};

if enable_grandpa {
// start the full GRANDPA voter
// NOTE: non-authorities could run the GRANDPA observer protocol, but at
// this point the full voter should provide better guarantees of block
// and vote data availability than the observer. The observer has not
// been tested extensively yet and having most nodes in a network run it
// could lead to finality stalls.
let grandpa_config = sc_finality_grandpa::GrandpaParams {
config: grandpa_config,
link: grandpa_link,
network,
voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
prometheus_registry,
shared_voter_state: SharedVoterState::empty(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
let authorship_future = sc_consensus_manual_seal::run_instant_seal(params);

// the GRANDPA voter task is considered infallible, i.e.
// if it fails we take down the service with it.
task_manager.spawn_essential_handle().spawn_blocking(
"grandpa-voter",
None,
sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
);
}
task_manager
.spawn_essential_handle()
.spawn_blocking("instant-seal", None, authorship_future);

network_starter.start_network();
Ok(task_manager)
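
Taken together, the block-authoring half of new_full collapses to the instant-seal task below; a consolidated sketch assembled from the added lines above, assuming the client, transaction_pool, select_chain, task_manager, prometheus_registry, and telemetry bindings already present in the function:

// Every node authors blocks under instant seal, so no authority-role check is needed.
let proposer = sc_basic_authorship::ProposerFactory::new(
    task_manager.spawn_handle(),
    client.clone(),
    transaction_pool.clone(),
    prometheus_registry.as_ref(),
    telemetry.as_ref().map(|x| x.handle()),
);

let params = sc_consensus_manual_seal::InstantSealParams {
    // The client itself acts as the block import; there is no GRANDPA wrapper any more.
    block_import: client.clone(),
    env: proposer,
    client,
    pool: transaction_pool,
    select_chain,
    // No Aura/Babe digests are injected into authored blocks.
    consensus_data_provider: None,
    // Timestamp is the only inherent the runtime still requires.
    create_inherent_data_providers: move |_, ()| async move {
        Ok(sp_timestamp::InherentDataProvider::from_system_time())
    },
};

// run_instant_seal authors and imports a new block as soon as a transaction enters the pool.
let authorship_future = sc_consensus_manual_seal::run_instant_seal(params);

// Block authoring is essential: if it stops, the whole service is taken down.
task_manager
    .spawn_essential_handle()
    .spawn_blocking("instant-seal", None, authorship_future);

Net effect: the Aura authoring task, the GRANDPA voter, and the keystore/role checks that previously surrounded this section are gone; every node authors, and blocks are produced on demand rather than on a slot timer.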