pub use sc_executor::NativeElseWasmExecutor;
use sc_executor::{HeapAllocStrategy, DEFAULT_HEAP_ALLOC_STRATEGY};

use futures::FutureExt;
use runtime::{self, opaque::Block, RuntimeApi};
use sc_client_api::{Backend, BlockBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_consensus_grandpa::SharedVoterState;
use sc_executor::WasmExecutor;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::{sync::Arc, time::Duration};

/// The minimum period of blocks on which justifications will
/// be imported and generated.
const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512;

// Native executor instance.
pub struct ExecutorDispatch;

impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
    type ExtendHostFunctions = ();

    fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
        runtime::api::dispatch(method, data)
    }

    fn native_version() -> sc_executor::NativeVersion {
        runtime::native_version()
    }
}

pub(crate) type FullClient =
    sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
type FullBackend = sc_service::TFullBackend<Block>;
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;

pub type Service = sc_service::PartialComponents<
    FullClient,
    FullBackend,
    FullSelectChain,
    sc_consensus::DefaultImportQueue<Block>,
    sc_transaction_pool::FullPool<Block, FullClient>,
    (
        sc_consensus_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
        sc_consensus_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
        Option<Telemetry>,
    ),
>;
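
/// Assemble the partial node components (client, backend, select chain,
/// import queue, transaction pool and the GRANDPA/telemetry extras) that the
/// full service is built from.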
pub fn new_partial(config: &Configuration) -> Result<Service, ServiceError> {
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
            let worker = TelemetryWorker::new(16)?;
            let telemetry = worker.handle().new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;
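
    // Map the configured heap pages onto an allocation strategy: a fixed
    // number of extra pages when set, otherwise the executor's default.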
    let strategy = config
        .default_heap_pages
        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static { extra_pages: p as _ });

    let wasm_exec = WasmExecutor::builder()
        .with_runtime_cache_size(config.runtime_cache_size)
        .with_max_runtime_instances(config.max_runtime_instances)
        .with_execution_method(config.wasm_method)
        .with_onchain_heap_alloc_strategy(strategy)
        .with_offchain_heap_alloc_strategy(strategy)
        .build();

    let executor = NativeElseWasmExecutor::<ExecutorDispatch>::new_with_wasm_executor(wasm_exec);

    let (client, backend, keystore_container, task_manager) =
        sc_service::new_full_parts::<Block, RuntimeApi, _>(
            config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
            executor,
        )?;
    let client = Arc::new(client);

    let telemetry = telemetry.map(|(worker, telemetry)| {
        task_manager.spawn_handle().spawn("telemetry", None, worker.run());
        telemetry
    });

    let select_chain = sc_consensus::LongestChain::new(backend.clone());

    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
        config.transaction_pool.clone(),
        config.role.is_authority().into(),
        config.prometheus_registry(),
        task_manager.spawn_essential_handle(),
        client.clone(),
    );
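
    // Wrap the client in the GRANDPA block import; the returned link half is
    // used later to start the GRANDPA voter.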
    let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import(
        client.clone(),
        GRANDPA_JUSTIFICATION_PERIOD,
        &client,
        select_chain.clone(),
        telemetry.as_ref().map(|x| x.handle()),
    )?;

    let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
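
    // The Aura import queue checks slot-based authorship of incoming blocks
    // and routes them through the GRANDPA block import configured above.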
    let import_queue =
        sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
            block_import: grandpa_block_import.clone(),
            justification_import: Some(Box::new(grandpa_block_import.clone())),
            client: client.clone(),
            create_inherent_data_providers: move |_, ()| async move {
                let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

                let slot =
                    sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                        *timestamp,
                        slot_duration,
                    );

                Ok((slot, timestamp))
            },
            spawner: &task_manager.spawn_essential_handle(),
            registry: config.prometheus_registry(),
            check_for_equivocation: Default::default(),
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            compatibility_mode: Default::default(),
        })?;

    Ok(sc_service::PartialComponents {
        client,
        backend,
        task_manager,
        import_queue,
        keystore_container,
        select_chain,
        transaction_pool,
        other: (grandpa_block_import, grandpa_link, telemetry),
    })
}

/// Builds a new service for a full client.
pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
    let sc_service::PartialComponents {
        client,
        backend,
        mut task_manager,
        import_queue,
        keystore_container,
        select_chain,
        transaction_pool,
        other: (block_import, grandpa_link, mut telemetry),
    } = new_partial(&config)?;

    let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);

    let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
        &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
        &config.chain_spec,
    );
    let (grandpa_protocol_config, grandpa_notification_service) =
        sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
    net_config.add_notification_protocol(grandpa_protocol_config);
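
    // Serve GRANDPA warp-sync proofs so that new peers can fast-sync to the
    // latest finalized block.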
    let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
        backend.clone(),
        grandpa_link.shared_authority_set().clone(),
        Vec::default(),
    ));

    let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
        sc_service::build_network(sc_service::BuildNetworkParams {
            config: &config,
            net_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue,
            block_announce_validator_builder: None,
            warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
            block_relay: None,
        })?;
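
    // When enabled, run offchain workers alongside normal block processing.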
    if config.offchain_worker.enabled {
        task_manager.spawn_handle().spawn(
            "offchain-workers-runner",
            "offchain-worker",
            sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
                runtime_api_provider: client.clone(),
                is_validator: config.role.is_authority(),
                keystore: Some(keystore_container.keystore()),
                offchain_db: backend.offchain_storage(),
                transaction_pool: Some(OffchainTransactionPoolFactory::new(
                    transaction_pool.clone(),
                )),
                network_provider: network.clone(),
                enable_http_requests: true,
                custom_extensions: |_| vec![],
            })
            .run(client.clone(), task_manager.spawn_handle())
            .boxed(),
        );
    }

    let role = config.role.clone();
    let force_authoring = config.force_authoring;
    let backoff_authoring_blocks: Option<()> = None;
    let name = config.network.node_name.clone();
    let enable_grandpa = !config.disable_grandpa;
    let prometheus_registry = config.prometheus_registry().cloned();
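
    // Build the RPC extensions; the closure is invoked for every new RPC
    // connection, so it clones its own handles to the client and pool.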
    let rpc_extensions_builder = {
        let client = client.clone();
        let pool = transaction_pool.clone();

        Box::new(move |deny_unsafe, _| {
            let deps = crate::rpc::FullDeps {
                client: client.clone(),
                pool: pool.clone(),
                deny_unsafe,
            };
            crate::rpc::create_full(deps).map_err(Into::into)
        })
    };

    let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
        network: network.clone(),
        client: client.clone(),
        keystore: keystore_container.keystore(),
        task_manager: &mut task_manager,
        transaction_pool: transaction_pool.clone(),
        rpc_builder: rpc_extensions_builder,
        backend,
        system_rpc_tx,
        tx_handler_controller,
        sync_service: sync_service.clone(),
        config,
        telemetry: telemetry.as_mut(),
    })?;

    if role.is_authority() {
        let proposer_factory = sc_basic_authorship::ProposerFactory::new(
            task_manager.spawn_handle(),
            client.clone(),
            transaction_pool.clone(),
            prometheus_registry.as_ref(),
            telemetry.as_ref().map(|x| x.handle()),
        );

        let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
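
        // Start the Aura authoring task; it proposes new blocks in the slots
        // assigned to this node's authority keys.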
        let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(
            StartAuraParams {
                slot_duration,
                client,
                select_chain,
                block_import,
                proposer_factory,
                create_inherent_data_providers: move |_, ()| async move {
                    let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

                    let slot =
                        sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                            *timestamp,
                            slot_duration,
                        );

                    Ok((slot, timestamp))
                },
                force_authoring,
                backoff_authoring_blocks,
                keystore: keystore_container.keystore(),
                sync_oracle: sync_service.clone(),
                justification_sync_link: sync_service.clone(),
                block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
                max_block_proposal_slot_portion: None,
                telemetry: telemetry.as_ref().map(|x| x.handle()),
                compatibility_mode: Default::default(),
            },
        )?;

        // The AURA authoring task is considered essential, i.e. if it fails
        // we take down the service with it.
        task_manager
            .spawn_essential_handle()
            .spawn_blocking("aura", Some("block-authoring"), aura);
    }

    if enable_grandpa {
        // if the node isn't actively participating in consensus then it doesn't
        // need a keystore, regardless of which protocol we use below.
        let keystore = if role.is_authority() {
            Some(keystore_container.keystore())
        } else {
            None
        };

        let grandpa_config = sc_consensus_grandpa::Config {
            gossip_duration: Duration::from_millis(333),
            justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
            name: Some(name),
            observer_enabled: false,
            keystore,
            local_role: role,
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            protocol_name: grandpa_protocol_name,
        };

        // start the full GRANDPA voter
        // NOTE: non-authorities could run the GRANDPA observer protocol, but at
        // this point the full voter should provide better guarantees of block
        // and vote data availability than the observer. The observer has not
        // been tested extensively yet and having most nodes in a network run it
        // could lead to finality stalls.
        let grandpa_config = sc_consensus_grandpa::GrandpaParams {
            config: grandpa_config,
            link: grandpa_link,
            network,
            sync: Arc::new(sync_service),
            notification_service: grandpa_notification_service,
            voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
            prometheus_registry,
            shared_voter_state: SharedVoterState::empty(),
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),
        };

        // the GRANDPA voter task is considered infallible, i.e.
        // if it fails we take down the service with it.
        task_manager.spawn_essential_handle().spawn_blocking(
            "grandpa-voter",
            None,
            sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
        );
    }
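
    // With every protocol registered and all tasks spawned, allow the
    // network to start.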
    network_starter.start_network();
    Ok(task_manager)
}