#![deny(unused_results)]

pub mod benchmarking;
pub mod chain_spec;

#[cfg(feature = "full-node")]
use {
    grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider},
    sc_client_api::BlockBackend,
    tx_pool_api::OffchainTransactionPoolFactory,
    sp_blockchain::HeaderBackend,
    sc_service::{KeystoreContainer, RpcHandlers},
    telemetry::{Telemetry, TelemetryWorkerHandle},
};

use std::{sync::Arc, time::Duration};
use telemetry::TelemetryWorker;
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};

pub use chain_spec::GenericChainSpec;
pub use consensus_common::{Proposal, SelectChain};
pub use primitives::{Block, BlockId, BlockNumber, Hash};
pub use sc_client_api::{Backend, CallExecutor};
pub use sc_consensus::BlockImport;
pub use sc_executor::NativeExecutionDispatch;
pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi};
pub use sc_service::{
    config::{DatabaseSource, PrometheusConfig},
    ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role,
    RuntimeGenesis, TFullBackend, TFullCallExecutor, TFullClient, TaskManager,
    TransactionPoolOptions,
};
pub use sp_runtime::{
    generic,
    traits::{
        self as runtime_traits, BlakeTwo256, Block as BlockT, Header as HeaderT,
        NumberFor,
    },
};

#[cfg(feature = "casper-native")]
pub use chain_spec::CasperChainSpec;
#[cfg(feature = "casper-native")]
pub use {casper_runtime, casper_runtime_constants};
#[cfg(feature = "casper-native")]
use casper_runtime::RuntimeApi;

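/// The database-backed client backend type used by the full node.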
#[cfg(feature = "full-node")]
|
||
|
pub type FullBackend = sc_service::TFullBackend<Block>;
|
||
|
|
||
|
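/// The full client type used by the node, executing the runtime through the
/// Wasm executor with the Substrate and benchmarking host functions.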
#[cfg(feature = "full-node")]
|
||
|
pub type FullClient = sc_service::TFullClient<
|
||
|
Block,
|
||
|
RuntimeApi,
|
||
|
WasmExecutor<(
|
||
|
sp_io::SubstrateHostFunctions,
|
||
|
frame_benchmarking::benchmarking::HostFunctions,
|
||
|
)>,
|
||
|
>;
|
||
|
|
||
|
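/// The interval, in blocks, at which GRANDPA justifications are generated.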
const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512;

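/// Read-only access to block headers: look up a header or a block number by
/// block hash.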
pub trait HeaderProvider<Block, Error = sp_blockchain::Error>: Send + Sync + 'static
where
    Block: BlockT,
    Error: std::fmt::Debug + Send + Sync + 'static,
{
    fn header(
        &self,
        hash: <Block as BlockT>::Hash,
    ) -> Result<Option<<Block as BlockT>::Header>, Error>;

    fn number(
        &self,
        hash: <Block as BlockT>::Hash,
    ) -> Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>, Error>;
}

impl<Block, T> HeaderProvider<Block> for T
where
    Block: BlockT,
    T: sp_blockchain::HeaderBackend<Block> + 'static,
{
    fn header(
        &self,
        hash: Block::Hash,
    ) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
        <Self as sp_blockchain::HeaderBackend<Block>>::header(self, hash)
    }

    fn number(
        &self,
        hash: Block::Hash,
    ) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
        <Self as sp_blockchain::HeaderBackend<Block>>::number(self, hash)
    }
}

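/// Something that can hand out a [`HeaderProvider`]; implemented below for
/// every client backend by exposing its blockchain storage.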
pub trait HeaderProviderProvider<Block>: Send + Sync + 'static
where
    Block: BlockT,
{
    type Provider: HeaderProvider<Block> + 'static;

    fn header_provider(&self) -> &Self::Provider;
}

impl<Block, T> HeaderProviderProvider<Block> for T
where
    Block: BlockT,
    T: sc_client_api::Backend<Block> + 'static,
{
    type Provider = <T as sc_client_api::Backend<Block>>::Blockchain;

    fn header_provider(&self) -> &Self::Provider {
        self.blockchain()
    }
}

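/// Errors that can occur while assembling or running the node services.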
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error(transparent)]
    Io(#[from] std::io::Error),

    #[error(transparent)]
    AddrFormatInvalid(#[from] std::net::AddrParseError),

    #[error(transparent)]
    Sub(#[from] SubstrateServiceError),

    #[error(transparent)]
    Blockchain(#[from] sp_blockchain::Error),

    #[error(transparent)]
    Consensus(#[from] consensus_common::Error),

    #[error(transparent)]
    Prometheus(#[from] prometheus_endpoint::PrometheusError),

    #[error(transparent)]
    Telemetry(#[from] telemetry::Error),

    #[cfg(feature = "full-node")]
    #[error("Creating a custom database is required for validators")]
    DatabasePathRequired,

    #[cfg(feature = "full-node")]
    #[error("Expected at least one of ghost or casper runtime feature")]
    NoRuntime,
}

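/// The chain variant identified from a chain spec.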
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Chain {
    Ghost,
    Casper,
    Unknown,
}

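/// Identifies the chain variant from a chain spec's identifier string.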
pub trait IdentifyVariant {
    fn is_ghost(&self) -> bool;
    fn is_casper(&self) -> bool;
    fn is_dev(&self) -> bool;
    fn identify_chain(&self) -> Chain;
}

impl IdentifyVariant for Box<dyn ChainSpec> {
    fn is_ghost(&self) -> bool {
        self.id().starts_with("ghost")
    }

    fn is_casper(&self) -> bool {
        self.id().starts_with("casper")
    }

    fn is_dev(&self) -> bool {
        self.id().ends_with("dev")
    }

    fn identify_chain(&self) -> Chain {
        if self.is_ghost() {
            Chain::Ghost
        } else if self.is_casper() {
            Chain::Casper
        } else {
            Chain::Unknown
        }
    }
}

#[cfg(feature = "full-node")]
|
||
|
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
|
||
|
#[cfg(feature = "full-node")]
|
||
|
type FullGrandpaBlockImport<ChainSelection = FullSelectChain> =
|
||
|
grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, ChainSelection>;
|
||
|
|
||
|
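/// The core components assembled by [`new_partial_basics`] and consumed by
/// [`new_partial`].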
#[cfg(feature = "full-node")]
|
||
|
struct Basics {
|
||
|
task_manager: TaskManager,
|
||
|
client: Arc<FullClient>,
|
||
|
backend: Arc<FullBackend>,
|
||
|
keystore_container: KeystoreContainer,
|
||
|
telemetry: Option<Telemetry>,
|
||
|
}
|
||
|
|
||
|
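/// Creates the client, backend, keystore container and task manager, and
/// spawns the telemetry worker when telemetry endpoints are configured.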
#[cfg(feature = "full-node")]
|
||
|
fn new_partial_basics(
|
||
|
config: &mut Configuration,
|
||
|
telemetry_worker_handle: Option<TelemetryWorkerHandle>,
|
||
|
) -> Result<Basics, Error> {
|
||
|
let telemetry = config
|
||
|
.telemetry_endpoints
|
||
|
.clone()
|
||
|
.filter(|x| !x.is_empty())
|
||
|
.map(move |endpoints| -> Result<_, telemetry::Error> {
|
||
|
let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle {
|
||
|
(None, worker_handle)
|
||
|
} else {
|
||
|
let worker = TelemetryWorker::new(16)?;
|
||
|
let worker_handle = worker.handle();
|
||
|
(Some(worker), worker_handle)
|
||
|
};
|
||
|
let telemetry = worker_handle.new_telemetry(endpoints);
|
||
|
Ok((worker, telemetry))
|
||
|
})
|
||
|
.transpose()?;
|
||
|
|
||
|
let heap_pages = config
|
||
|
.default_heap_pages
|
||
|
.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ });
|
||
|
|
||
|
let executor = WasmExecutor::builder()
|
||
|
.with_execution_method(config.wasm_method)
|
||
|
.with_onchain_heap_alloc_strategy(heap_pages)
|
||
|
.with_offchain_heap_alloc_strategy(heap_pages)
|
||
|
.with_max_runtime_instances(config.max_runtime_instances)
|
||
|
.with_runtime_cache_size(config.runtime_cache_size)
|
||
|
.build();
|
||
|
|
||
|
let (client, backend, keystore_container, task_manager) =
|
||
|
sc_service::new_full_parts::<Block, RuntimeApi, _>(
|
||
|
&config,
|
||
|
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
|
||
|
executor,
|
||
|
)?;
|
||
|
let client = Arc::new(client);
|
||
|
|
||
|
let telemetry = telemetry.map(|(worker, telemetry)| {
|
||
|
if let Some(worker) = worker {
|
||
|
task_manager.spawn_handle().spawn(
|
||
|
"telemetry",
|
||
|
Some("telemetry"),
|
||
|
Box::pin(worker.run()),
|
||
|
);
|
||
|
}
|
||
|
telemetry
|
||
|
});
|
||
|
|
||
|
Ok(Basics { task_manager, client, backend, keystore_container, telemetry })
|
||
|
}
|
||
|
|
||
|
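/// Builds the remaining partial components on top of [`Basics`]: the
/// transaction pool, the BABE/GRANDPA block import pipeline and import
/// queue, and the RPC extensions builder.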
#[cfg(feature = "full-node")]
|
||
|
fn new_partial<ChainSelection>(
|
||
|
config: &mut Configuration,
|
||
|
Basics { task_manager, backend, client, keystore_container, telemetry }: Basics,
|
||
|
select_chain: ChainSelection,
|
||
|
) -> Result<
|
||
|
sc_service::PartialComponents<
|
||
|
FullClient,
|
||
|
FullBackend,
|
||
|
ChainSelection,
|
||
|
sc_consensus::DefaultImportQueue<Block>,
|
||
|
sc_transaction_pool::FullPool<Block, FullClient>,
|
||
|
(
|
||
|
impl Fn(
|
||
|
ghost_rpc::DenyUnsafe,
|
||
|
ghost_rpc::SubscriptionTaskExecutor,
|
||
|
) -> Result<ghost_rpc::RpcExtension, SubstrateServiceError>,
|
||
|
(
|
||
|
babe::BabeBlockImport<
|
||
|
Block,
|
||
|
FullClient,
|
||
|
FullGrandpaBlockImport<ChainSelection>,
|
||
|
>,
|
||
|
grandpa::LinkHalf<Block, FullClient, ChainSelection>,
|
||
|
babe::BabeLink<Block>,
|
||
|
),
|
||
|
grandpa::SharedVoterState,
|
||
|
Option<Telemetry>,
|
||
|
),
|
||
|
>,
|
||
|
Error,
|
||
|
>
|
||
|
where
|
||
|
ChainSelection: 'static + SelectChain<Block>,
|
||
|
{
|
||
|
let transaction_pool = sc_transaction_pool::BasicPool::new_full(
|
||
|
config.transaction_pool.clone(),
|
||
|
config.role.is_authority().into(),
|
||
|
config.prometheus_registry(),
|
||
|
task_manager.spawn_essential_handle(),
|
||
|
client.clone(),
|
||
|
);
|
||
|
|
||
|
let (grandpa_block_import, grandpa_link) =
|
||
|
grandpa::block_import(
|
||
|
client.clone(),
|
||
|
GRANDPA_JUSTIFICATION_PERIOD,
|
||
|
&(client.clone() as Arc<_>),
|
||
|
select_chain.clone(),
|
||
|
telemetry.as_ref().map(|x| x.handle()),
|
||
|
)?;
|
||
|
let justification_import = grandpa_block_import.clone();
|
||
|
|
||
|
let babe_config = babe::configuration(&*client)?;
|
||
|
let (block_import, babe_link) =
|
||
|
babe::block_import(babe_config.clone(), grandpa_block_import, client.clone())?;
|
||
|
|
||
|
let slot_duration = babe_link.config().slot_duration();
|
||
|
let (import_queue, babe_worker_handle) =
|
||
|
babe::import_queue(babe::ImportQueueParams {
|
||
|
link: babe_link.clone(),
|
||
|
block_import: block_import.clone(),
|
||
|
justification_import: Some(Box::new(justification_import)),
|
||
|
client: client.clone(),
|
||
|
select_chain: select_chain.clone(),
|
||
|
create_inherent_data_providers: move |_, ()| async move {
|
||
|
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
|
||
|
let slot =
|
||
|
babe_primitives::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
|
||
|
*timestamp,
|
||
|
slot_duration,
|
||
|
);
|
||
|
Ok((slot, timestamp))
|
||
|
},
|
||
|
spawner: &task_manager.spawn_essential_handle(),
|
||
|
registry: config.prometheus_registry(),
|
||
|
telemetry: telemetry.as_ref().map(|x| x.handle()),
|
||
|
offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
|
||
|
})?;
|
||
|
|
||
|
let justification_stream = grandpa_link.justification_stream();
|
||
|
let shared_authority_set = grandpa_link.shared_authority_set().clone();
|
||
|
let shared_voter_state = grandpa::SharedVoterState::empty();
|
||
|
let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
|
||
|
backend.clone(),
|
||
|
Some(shared_authority_set.clone()),
|
||
|
);
|
||
|
|
||
|
let import_setup = (block_import, grandpa_link, babe_link);
|
||
|
let rpc_setup = shared_voter_state.clone();
|
||
|
|
||
|
let rpc_extensions_builder = {
|
||
|
let client = client.clone();
|
||
|
let keystore = keystore_container.keystore();
|
||
|
let transaction_pool = transaction_pool.clone();
|
||
|
let select_chain = select_chain.clone();
|
||
|
let chain_spec = config.chain_spec.cloned_box();
|
||
|
let backend = backend.clone();
|
||
|
|
||
|
move |
|
||
|
deny_unsafe,
|
||
|
subscription_executor: ghost_rpc::SubscriptionTaskExecutor,
|
||
|
| -> Result<ghost_rpc::RpcExtension, sc_service::Error> {
|
||
|
let deps = ghost_rpc::FullDeps {
|
||
|
client: client.clone(),
|
||
|
pool: transaction_pool.clone(),
|
||
|
select_chain: select_chain.clone(),
|
||
|
chain_spec: chain_spec.cloned_box(),
|
||
|
deny_unsafe,
|
||
|
babe: ghost_rpc::BabeDeps {
|
||
|
babe_worker_handle: babe_worker_handle.clone(),
|
||
|
keystore: keystore.clone(),
|
||
|
},
|
||
|
grandpa: ghost_rpc::GrandpaDeps {
|
||
|
shared_voter_state: shared_voter_state.clone(),
|
||
|
shared_authority_set: shared_authority_set.clone(),
|
||
|
justification_stream: justification_stream.clone(),
|
||
|
subscription_executor: subscription_executor.clone(),
|
||
|
finality_provider: finality_proof_provider.clone(),
|
||
|
},
|
||
|
backend: backend.clone(),
|
||
|
};
|
||
|
|
||
|
ghost_rpc::create_full_rpc(deps).map_err(Into::into)
|
||
|
}
|
||
|
};
|
||
|
|
||
|
Ok(sc_service::PartialComponents {
|
||
|
client,
|
||
|
backend,
|
||
|
task_manager,
|
||
|
keystore_container,
|
||
|
select_chain,
|
||
|
import_queue,
|
||
|
transaction_pool,
|
||
|
other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry),
|
||
|
})
|
||
|
}
|
||
|
|
||
|
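/// Parameters passed to [`new_full`].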
#[cfg(feature = "full-node")]
|
||
|
pub struct NewFullParams {
|
||
|
/// Whether to enable the block authoring backoff on production networks
|
||
|
/// where it isn't enabled by default.
|
||
|
pub force_authoring_backoff: bool,
|
||
|
pub telemetry_worker_handle: Option<TelemetryWorkerHandle>,
|
||
|
pub hwbench: Option<sc_sysinfo::HwBench>,
|
||
|
}
|
||
|
|
||
|
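/// Handles to the services of a fully started node.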
#[cfg(feature = "full-node")]
|
||
|
pub struct NewFull {
|
||
|
pub task_manager: TaskManager,
|
||
|
pub client: Arc<FullClient>,
|
||
|
pub network: Arc<dyn sc_network::service::traits::NetworkService>,
|
||
|
pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
|
||
|
pub rpc_handlers: RpcHandlers,
|
||
|
pub backend: Arc<FullBackend>,
|
||
|
}
|
||
|
|
||
|
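/// Creates a full node: networking with warp sync, optional offchain
/// workers, the RPC server, authority discovery and, for authorities,
/// the BABE authoring and GRANDPA voter tasks.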
#[cfg(feature = "full-node")]
|
||
|
pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>>(
|
||
|
mut config: Configuration,
|
||
|
NewFullParams {
|
||
|
force_authoring_backoff,
|
||
|
telemetry_worker_handle,
|
||
|
hwbench,
|
||
|
}: NewFullParams,
|
||
|
) -> Result<NewFull, Error> {
|
||
|
use sc_network_sync::WarpSyncParams;
|
||
|
|
||
|
let role = config.role.clone();
|
||
|
let force_authoring = config.force_authoring;
|
||
|
let backoff_authoring_blocks = if !force_authoring_backoff {
|
||
|
None
|
||
|
} else {
|
||
|
let mut backoff =
|
||
|
sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default();
|
||
|
|
||
|
if config.chain_spec.is_dev() {
|
||
|
backoff.max_interval = 10;
|
||
|
}
|
||
|
|
||
|
Some(backoff)
|
||
|
};
|
||
|
|
||
|
let disable_grandpa = config.disable_grandpa;
|
||
|
let name = config.network.node_name.clone();
|
||
|
|
||
|
let basics = new_partial_basics(&mut config, telemetry_worker_handle)?;
|
||
|
|
||
|
let prometheus_registry = config.prometheus_registry().cloned();
|
||
|
let select_chain = sc_consensus::LongestChain::new(basics.backend.clone());
|
||
|
|
||
|
let sc_service::PartialComponents::<_, _, sc_consensus::LongestChain<FullBackend, Block>, _, _, _,> {
|
||
|
client,
|
||
|
backend,
|
||
|
mut task_manager,
|
||
|
keystore_container,
|
||
|
select_chain,
|
||
|
import_queue,
|
||
|
transaction_pool,
|
||
|
other: (
|
||
|
rpc_extensions_builder,
|
||
|
import_setup,
|
||
|
rpc_setup,
|
||
|
mut telemetry,
|
||
|
),
|
||
|
} = new_partial::<sc_consensus::LongestChain<FullBackend, Block>>(
|
||
|
&mut config,
|
||
|
basics,
|
||
|
select_chain,
|
||
|
)?;
|
||
|
|
||
|
let metrics = Network::register_notification_metrics(
|
||
|
config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
|
||
|
);
|
||
|
let shared_voter_state = rpc_setup;
|
||
|
let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
|
||
|
let auth_disc_public_addresses = config.network.public_addresses.clone();
|
||
|
|
||
|
let mut net_config =
|
||
|
sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network);
|
||
|
|
||
|
let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
|
||
|
let peer_store_handle = net_config.peer_store_handle();
|
||
|
|
||
|
let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
|
||
|
let (grandpa_protocol_config, grandpa_notification_service) =
|
||
|
grandpa::grandpa_peers_set_config::<_, Network>(
|
||
|
grandpa_protocol_name.clone(),
|
||
|
metrics.clone(),
|
||
|
Arc::clone(&peer_store_handle),
|
||
|
);
|
||
|
net_config.add_notification_protocol(grandpa_protocol_config);
|
||
|
|
||
|
let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
|
||
|
backend.clone(),
|
||
|
import_setup.1.shared_authority_set().clone(),
|
||
|
Vec::new(),
|
||
|
));
|
||
|
|
||
|
let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
|
||
|
sc_service::build_network(sc_service::BuildNetworkParams {
|
||
|
config: &config,
|
||
|
net_config,
|
||
|
client: client.clone(),
|
||
|
transaction_pool: transaction_pool.clone(),
|
||
|
spawn_handle: task_manager.spawn_handle(),
|
||
|
import_queue,
|
||
|
block_announce_validator_builder: None,
|
||
|
warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
|
||
|
block_relay: None,
|
||
|
metrics,
|
||
|
})?;
|
||
|
|
||
|
if config.offchain_worker.enabled {
|
||
|
use futures::FutureExt;
|
||
|
|
||
|
task_manager.spawn_handle().spawn(
|
||
|
"offchain-workers-runner",
|
||
|
"offchain-worker",
|
||
|
sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
|
||
|
runtime_api_provider: client.clone(),
|
||
|
keystore: Some(keystore_container.keystore()),
|
||
|
offchain_db: backend.offchain_storage(),
|
||
|
transaction_pool: Some(OffchainTransactionPoolFactory::new(
|
||
|
transaction_pool.clone(),
|
||
|
)),
|
||
|
network_provider: Arc::new(network.clone()),
|
||
|
is_validator: role.is_authority(),
|
||
|
enable_http_requests: true,
|
||
|
custom_extensions: move |_| vec![],
|
||
|
})
|
||
|
.run(client.clone(), task_manager.spawn_handle())
|
||
|
.boxed(),
|
||
|
);
|
||
|
}
|
||
|
|
||
|
let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
|
||
|
config,
|
||
|
backend: backend.clone(),
|
||
|
client: client.clone(),
|
||
|
keystore: keystore_container.keystore(),
|
||
|
network: network.clone(),
|
||
|
sync_service: sync_service.clone(),
|
||
|
rpc_builder: Box::new(rpc_extensions_builder),
|
||
|
transaction_pool: transaction_pool.clone(),
|
||
|
task_manager: &mut task_manager,
|
||
|
system_rpc_tx,
|
||
|
tx_handler_controller,
|
||
|
telemetry: telemetry.as_mut(),
|
||
|
})?;
|
||
|
|
||
|
if let Some(hwbench) = hwbench {
|
||
|
sc_sysinfo::print_hwbench(&hwbench);
|
||
|
match ghost_machine_primitives::GHOST_NODE_REFERENCE_HARDWARE.check_hardware(&hwbench) {
|
||
|
Err(err) if role.is_authority() => {
|
||
|
log::warn!(
|
||
|
"⚠️ The hardware does not meet the minimal requirements {} for role 'Authority'",
|
||
|
err
|
||
|
);
|
||
|
},
|
||
|
_ => {},
|
||
|
}
|
||
|
|
||
|
if let Some(ref mut telemetry) = telemetry {
|
||
|
let telemetry_handle = telemetry.handle();
|
||
|
task_manager.spawn_handle().spawn(
|
||
|
"telemetry_hwbench",
|
||
|
None,
|
||
|
sc_sysinfo::initialize_hwbench_telemetry(
|
||
|
telemetry_handle,
|
||
|
hwbench,
|
||
|
),
|
||
|
);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
let (block_import, link_half, babe_link) = import_setup;
|
||
|
|
||
|
if role.is_authority() {
|
||
|
use futures::StreamExt;
|
||
|
use sc_network::{Event, NetworkEventStream};
|
||
|
|
||
|
let authority_discovery_role =
|
||
|
sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore());
|
||
|
let dht_event_stream =
|
||
|
network.event_stream("authority-discovery").filter_map(|e| async move {
|
||
|
match e {
|
||
|
Event::Dht(e) => Some(e),
|
||
|
_ => None,
|
||
|
}
|
||
|
});
|
||
|
let (worker, _service) = sc_authority_discovery::new_worker_and_service_with_config(
|
||
|
sc_authority_discovery::WorkerConfig {
|
||
|
publish_non_global_ips: auth_disc_publish_non_global_ips,
|
||
|
public_addresses: auth_disc_public_addresses,
|
||
|
strict_record_validation: true,
|
||
|
..Default::default()
|
||
|
},
|
||
|
client.clone(),
|
||
|
Arc::new(network.clone()),
|
||
|
Box::pin(dht_event_stream),
|
||
|
authority_discovery_role,
|
||
|
prometheus_registry.clone()
|
||
|
);
|
||
|
|
||
|
task_manager.spawn_handle().spawn(
|
||
|
"authority-discovery-worker",
|
||
|
Some("authority-discovery"),
|
||
|
Box::pin(worker.run()),
|
||
|
);
|
||
|
}
|
||
|
|
||
|
if role.is_authority() {
|
||
|
let proposer = sc_basic_authorship::ProposerFactory::new(
|
||
|
task_manager.spawn_handle(),
|
||
|
client.clone(),
|
||
|
transaction_pool.clone(),
|
||
|
prometheus_registry.as_ref(),
|
||
|
telemetry.as_ref().map(|x| x.handle()),
|
||
|
);
|
||
|
|
||
|
let slot_duration = babe_link.config().slot_duration();
|
||
|
let babe_config = babe::BabeParams {
|
||
|
keystore: keystore_container.keystore(),
|
||
|
client: client.clone(),
|
||
|
select_chain,
|
||
|
block_import,
|
||
|
env: proposer,
|
||
|
sync_oracle: sync_service.clone(),
|
||
|
justification_sync_link: sync_service.clone(),
|
||
|
create_inherent_data_providers: move |_, ()| {
|
||
|
async move {
|
||
|
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
|
||
|
let slot =
|
||
|
babe_primitives::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
|
||
|
*timestamp,
|
||
|
slot_duration,
|
||
|
);
|
||
|
Ok((slot, timestamp))
|
||
|
}
|
||
|
},
|
||
|
force_authoring,
|
||
|
backoff_authoring_blocks,
|
||
|
babe_link,
|
||
|
block_proposal_slot_portion: babe::SlotProportion::new(2f32 / 3f32),
|
||
|
max_block_proposal_slot_portion: None,
|
||
|
telemetry: telemetry.as_ref().map(|x| x.handle()),
|
||
|
};
|
||
|
|
||
|
let babe = babe::start_babe(babe_config)?;
|
||
|
|
||
|
task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe);
|
||
|
}
|
||
|
|
||
|
let keystore_opt = if role.is_authority() {
|
||
|
Some(keystore_container.keystore())
|
||
|
} else {
|
||
|
None
|
||
|
};
|
||
|
|
||
|
let config = grandpa::Config {
|
||
|
gossip_duration: Duration::from_millis(1000),
|
||
|
justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
|
||
|
name: Some(name),
|
||
|
observer_enabled: false,
|
||
|
keystore: keystore_opt,
|
||
|
local_role: role,
|
||
|
telemetry: telemetry.as_ref().map(|x| x.handle()),
|
||
|
protocol_name: grandpa_protocol_name,
|
||
|
};
|
||
|
|
||
|
let enable_grandpa = !disable_grandpa;
|
||
|
if enable_grandpa {
|
||
|
let voting_rules_builder = grandpa::VotingRulesBuilder::default();
|
||
|
|

        let grandpa_config = grandpa::GrandpaParams {
            config,
            link: link_half,
            network: network.clone(),
            sync: sync_service.clone(),
            voting_rule: voting_rules_builder.build(),
            prometheus_registry: prometheus_registry.clone(),
            shared_voter_state,
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            notification_service: grandpa_notification_service,
            offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
        };

        task_manager.spawn_essential_handle().spawn_blocking(
            "grandpa-voter",
            None,
            grandpa::run_grandpa_voter(grandpa_config)?,
        );
    }

    network_starter.start_network();

    Ok(NewFull {
        task_manager,
        client,
        network,
        sync_service,
        rpc_handlers,
        backend,
    })
}

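/// Shared body of [`new_chain_ops`]: builds the partial components and
/// returns only the parts needed for chain operations.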
#[cfg(feature = "full-node")]
|
||
|
macro_rules! chain_ops {
|
||
|
($config:expr, $telemetry_worker_handle:expr) => {{
|
||
|
let telemetry_worker_handle = $telemetry_worker_handle;
|
||
|
let mut config = $config;
|
||
|
let basics = new_partial_basics(config, telemetry_worker_handle)?;
|
||
|
|
||
|
let chain_selection = sc_consensus::LongestChain::new(basics.backend.clone());
|
||
|
|
||
|
let sc_service::PartialComponents { client, backend, import_queue, task_manager, .. } =
|
||
|
new_partial::<sc_consensus::LongestChain<FullBackend, Block>>(&mut config, basics, chain_selection)?;
|
||
|
Ok((client, backend, import_queue, task_manager))
|
||
|
}};
|
||
|
}
|
||
|
|
||
|
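/// Builds the objects required for chain operations (such as block import,
/// export and revert), using an in-memory keystore.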
#[cfg(feature = "full-node")]
|
||
|
pub fn new_chain_ops(
|
||
|
config: &mut Configuration,
|
||
|
) -> Result<(Arc<FullClient>, Arc<FullBackend>, sc_consensus::BasicQueue<Block>, TaskManager), Error>
|
||
|
{
|
||
|
config.keystore = sc_service::config::KeystoreConfig::InMemory;
|
||
|
chain_ops!(config, None)
|
||
|
}
|
||
|
|
||
|
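/// Builds a full node, dispatching on the network backend (libp2p or
/// litep2p) selected in the configuration.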
#[cfg(feature = "full-node")]
|
||
|
pub fn build_full(
|
||
|
config: Configuration,
|
||
|
params: NewFullParams,
|
||
|
) -> Result<NewFull, Error> {
|
||
|
match config.network.network_backend {
|
||
|
sc_network::config::NetworkBackendType::Libp2p =>
|
||
|
new_full::<sc_network::NetworkWorker<Block, Hash>>(config, params),
|
||
|
sc_network::config::NetworkBackendType::Litep2p =>
|
||
|
new_full::<sc_network::Litep2pNetworkBackend>(config, params),
|
||
|
}
|
||
|
}
|
||
|
|
||
|
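/// Reverts recent, non-finalized BABE and GRANDPA state by up to `blocks`
/// blocks; does nothing if there is nothing revertible above the last
/// finalized block.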
#[cfg(feature = "full-node")]
|
||
|
pub fn revert_backend(
|
||
|
client: Arc<FullClient>,
|
||
|
backend: Arc<FullBackend>,
|
||
|
blocks: BlockNumber,
|
||
|
) -> Result<(), Error> {
|
||
|
let best_number = client.info().best_number;
|
||
|
let finalized = client.info().finalized_number;
|
||
|
let revertible = blocks.min(best_number - finalized);
|
||
|
|
||
|
if revertible == 0 {
|
||
|
return Ok(())
|
||
|
}
|
||
|
|
||
|
babe::revert(client.clone(), backend, blocks)?;
|
||
|
grandpa::revert(client, blocks)?;
|
||
|
|
||
|
Ok(())
|
||
|
}
|