This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Changes from 17 commits
12 changes: 4 additions & 8 deletions bin/node-template/node/src/service.rs
@@ -11,7 +11,6 @@ pub use sc_executor::NativeExecutor;
use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair};
use sc_finality_grandpa::SharedVoterState;
use sc_keystore::LocalKeystore;
- use sc_telemetry::TelemetrySpan;

// Our native executor instance.
native_executor_instance!(
@@ -37,7 +36,6 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
AuraPair
>,
sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
- Option<TelemetrySpan>,
)
>, ServiceError> {
if config.keystore_remote.is_some() {
@@ -46,7 +44,7 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
}
let inherent_data_providers = sp_inherents::InherentDataProviders::new();

- let (client, backend, keystore_container, task_manager, telemetry_span) =
+ let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
let client = Arc::new(client);

@@ -87,7 +85,7 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
select_chain,
transaction_pool,
inherent_data_providers,
- other: (aura_block_import, grandpa_link, telemetry_span),
+ other: (aura_block_import, grandpa_link),
})
}

@@ -109,7 +107,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
select_chain,
transaction_pool,
inherent_data_providers,
- other: (block_import, grandpa_link, telemetry_span),
+ other: (block_import, grandpa_link),
} = new_partial(&config)?;

if let Some(url) = &config.keystore_remote {
@@ -177,7 +175,6 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
network_status_sinks,
system_rpc_tx,
config,
- telemetry_span,
},
)?;

@@ -260,7 +257,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>

/// Builds a new service for a light client.
pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
- let (client, backend, keystore_container, mut task_manager, on_demand, telemetry_span) =
+ let (client, backend, keystore_container, mut task_manager, on_demand) =
sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;

config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
@@ -327,7 +324,6 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
network,
network_status_sinks,
system_rpc_tx,
- telemetry_span,
})?;

network_starter.start_network();
13 changes: 5 additions & 8 deletions bin/node/cli/src/service.rs
@@ -34,7 +34,7 @@ use sp_runtime::traits::Block as BlockT;
use futures::prelude::*;
use sc_client_api::{ExecutorProvider, RemoteBackend};
use node_executor::Executor;
- use sc_telemetry::{TelemetryConnectionNotifier, TelemetrySpan};
+ use sc_telemetry::TelemetryConnectionNotifier;

type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
type FullBackend = sc_service::TFullBackend<Block>;
@@ -58,10 +58,9 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
sc_consensus_babe::BabeLink<Block>,
),
grandpa::SharedVoterState,
- Option<TelemetrySpan>,
)
>, ServiceError> {
- let (client, backend, keystore_container, task_manager, telemetry_span) =
+ let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
let client = Arc::new(client);

@@ -159,7 +158,7 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
import_queue,
transaction_pool,
inherent_data_providers,
- other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry_span),
+ other: (rpc_extensions_builder, import_setup, rpc_setup),
})
}

@@ -189,7 +188,7 @@ pub fn new_full_base(
select_chain,
transaction_pool,
inherent_data_providers,
- other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry_span),
+ other: (rpc_extensions_builder, import_setup, rpc_setup),
} = new_partial(&config)?;

let shared_voter_state = rpc_setup;
@@ -240,7 +239,6 @@ pub fn new_full_base(
remote_blockchain: None,
network_status_sinks: network_status_sinks.clone(),
system_rpc_tx,
- telemetry_span,
},
)?;

@@ -365,7 +363,7 @@ pub fn new_light_base(mut config: Configuration) -> Result<(
Arc<NetworkService<Block, <Block as BlockT>::Hash>>,
Arc<sc_transaction_pool::LightPool<Block, LightClient, sc_network::config::OnDemand<Block>>>
), ServiceError> {
- let (client, backend, keystore_container, mut task_manager, on_demand, telemetry_span) =
+ let (client, backend, keystore_container, mut task_manager, on_demand) =
sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;

config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
@@ -445,7 +443,6 @@ pub fn new_light_base(mut config: Configuration) -> Result<(
config, backend, network_status_sinks, system_rpc_tx,
network: network.clone(),
task_manager: &mut task_manager,
- telemetry_span,
})?;

Ok((
12 changes: 10 additions & 2 deletions client/cli/src/config.rs
@@ -33,7 +33,7 @@ use sc_service::config::{
TaskExecutor, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod,
};
use sc_service::{ChainSpec, TracingReceiver, KeepBlocks, TransactionStorageMode};
- use sc_telemetry::TelemetryHandle;
+ use sc_telemetry::{TelemetryHandle, TelemetrySpan};
use sc_tracing::logging::GlobalLoggerBuilder;
use std::net::SocketAddr;
use std::path::PathBuf;
@@ -488,6 +488,13 @@ pub trait CliConfiguration<DCV: DefaultConfigurationValues = ()>: Sized {
let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8);
let is_validator = role.is_network_authority();
let (keystore_remote, keystore) = self.keystore_config(&config_dir)?;
+ let telemetry_endpoints = telemetry_handle
+ .as_ref()
+ .and_then(|_| self.telemetry_endpoints(&chain_spec).transpose())
+ .transpose()?
+ // Don't initialise telemetry if `telemetry_endpoints` == Some([])
+ .filter(|x| !x.is_empty());
+ let telemetry_span = telemetry_endpoints.as_ref().map(|_| TelemetrySpan::new());

let unsafe_pruning = self
.import_params()
@@ -526,7 +533,8 @@ pub trait CliConfiguration<DCV: DefaultConfigurationValues = ()>: Sized {
rpc_ws_max_connections: self.rpc_ws_max_connections()?,
rpc_cors: self.rpc_cors(is_dev)?,
prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?,
- telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?,
+ telemetry_endpoints,
+ telemetry_span,
telemetry_external_transport: self.telemetry_external_transport()?,
default_heap_pages: self.default_heap_pages()?,
offchain_worker: self.offchain_worker(&role)?,
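Note on the endpoint resolution added above: the chained `transpose` calls flatten an `Option<Result<Option<_>, _>>` so that `?` still propagates errors, while a missing telemetry handle or an empty endpoint list both collapse to `None` (and therefore no `TelemetrySpan` is created). A standalone sketch of that flow, not part of this PR — the types below are illustrative stand-ins for `TelemetryHandle` and `TelemetryEndpoints`:

fn resolve_endpoints(
    handle: Option<()>,                          // stands in for Option<TelemetryHandle>
    lookup: Result<Option<Vec<String>>, String>, // stands in for self.telemetry_endpoints(&chain_spec)
) -> Result<Option<Vec<String>>, String> {
    let endpoints = handle
        .as_ref()
        // Only consult the endpoints when a telemetry handle exists.
        .and_then(|_| lookup.transpose())
        // Turn Option<Result<_, _>> back into Result<Option<_>, _> so `?` applies.
        .transpose()?
        // `Some(vec![])` disables telemetry just like `None`.
        .filter(|endpoints| !endpoints.is_empty());
    Ok(endpoints)
}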
39 changes: 10 additions & 29 deletions client/service/src/builder.rs
@@ -55,7 +55,6 @@ use sc_telemetry::{
telemetry,
ConnectionMessage,
TelemetryConnectionNotifier,
- TelemetrySpan,
SUBSTRATE_INFO,
};
use sp_transaction_pool::MaintainedTransactionPool;
@@ -184,7 +183,6 @@ type TFullParts<TBl, TRtApi, TExecDisp> = (
Arc<TFullBackend<TBl>>,
KeystoreContainer,
TaskManager,
- Option<TelemetrySpan>,
);

type TLightParts<TBl, TRtApi, TExecDisp> = (
@@ -193,7 +191,6 @@ type TLightParts<TBl, TRtApi, TExecDisp> = (
KeystoreContainer,
TaskManager,
Arc<OnDemand<TBl>>,
- Option<TelemetrySpan>,
);

/// Light client backend type with a specific hash type.
@@ -308,14 +305,9 @@ pub fn new_full_parts<TBl, TRtApi, TExecDisp>(
{
let keystore_container = KeystoreContainer::new(&config.keystore)?;

- let telemetry_span = if config.telemetry_endpoints.is_some() {
- Some(TelemetrySpan::new())
- } else {
- None
- };
let task_manager = {
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
- TaskManager::new(config.task_executor.clone(), registry, telemetry_span.clone())?
+ TaskManager::new(config.task_executor.clone(), registry, config.telemetry_span.clone())?
};

let executor = NativeExecutor::<TExecDisp>::new(
@@ -371,7 +363,6 @@ pub fn new_full_parts<TBl, TRtApi, TExecDisp>(
backend,
keystore_container,
task_manager,
- telemetry_span,
))
}

@@ -383,14 +374,9 @@ pub fn new_light_parts<TBl, TRtApi, TExecDisp>(
TExecDisp: NativeExecutionDispatch + 'static,
{
let keystore_container = KeystoreContainer::new(&config.keystore)?;
- let telemetry_span = if config.telemetry_endpoints.is_some() {
- Some(TelemetrySpan::new())
- } else {
- None
- };
let task_manager = {
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
- TaskManager::new(config.task_executor.clone(), registry, telemetry_span.clone())?
+ TaskManager::new(config.task_executor.clone(), registry, config.telemetry_span.clone())?
};

let executor = NativeExecutor::<TExecDisp>::new(
@@ -429,7 +415,7 @@ pub fn new_light_parts<TBl, TRtApi, TExecDisp>(
config.prometheus_config.as_ref().map(|config| config.registry.clone()),
)?);

- Ok((client, backend, keystore_container, task_manager, on_demand, telemetry_span))
+ Ok((client, backend, keystore_container, task_manager, on_demand))
}

/// Create an instance of db-backed client.
@@ -481,8 +467,6 @@ pub fn new_client<E, Block, RA>(
pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> {
/// The service configuration.
pub config: Configuration,
- /// Telemetry span, if any.
- pub telemetry_span: Option<TelemetrySpan>,
/// A shared client returned by `new_full_parts`/`new_light_parts`.
pub client: Arc<TCl>,
/// A shared backend returned by `new_full_parts`/`new_light_parts`.
@@ -575,7 +559,6 @@ pub fn spawn_tasks<TBl, TBackend, TExPool, TRpc, TCl>(
let SpawnTasksParams {
mut config,
task_manager,
- telemetry_span,
client,
on_demand,
backend,
@@ -596,13 +579,11 @@ pub fn spawn_tasks<TBl, TBackend, TExPool, TRpc, TCl>(
config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(),
)?;

- let telemetry_connection_notifier = telemetry_span
- .and_then(|span| init_telemetry(
- &mut config,
- span,
- network.clone(),
- client.clone(),
- ));
+ let telemetry_connection_notifier = init_telemetry(
+ &mut config,
+ network.clone(),
+ client.clone(),
+ );

info!("📦 Highest known block at #{}", chain_info.best_number);

@@ -700,11 +681,11 @@ async fn transaction_notifications<TBl, TExPool>(

fn init_telemetry<TBl: BlockT, TCl: BlockBackend<TBl>>(
config: &mut Configuration,
- telemetry_span: TelemetrySpan,
network: Arc<NetworkService<TBl, <TBl as BlockT>::Hash>>,
client: Arc<TCl>,
) -> Option<TelemetryConnectionNotifier> {
- let endpoints = config.telemetry_endpoints()?.clone();
+ let telemetry_span = config.telemetry_span.clone()?;
+ let endpoints = config.telemetry_endpoints.clone()?;
let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default();
let connection_message = ConnectionMessage {
name: config.network.node_name.to_owned(),
17 changes: 4 additions & 13 deletions client/service/src/config.rs
@@ -101,6 +101,10 @@ pub struct Configuration {
/// This is a handle to a `TelemetryWorker` instance. It is used to initialize the telemetry for
/// a substrate node.
pub telemetry_handle: Option<sc_telemetry::TelemetryHandle>,
+ /// Telemetry span.
+ ///
+ /// This span is entered for every background task spawned using the TaskManager.
+ pub telemetry_span: Option<sc_telemetry::TelemetrySpan>,
/// The default number of 64KB pages to allocate for Wasm execution
pub default_heap_pages: Option<u64>,
/// Should offchain workers be executed.
@@ -207,19 +211,6 @@ impl Configuration {
self.prometheus_config.as_ref().map(|config| &config.registry)
}

- /// Returns the telemetry endpoints if any and if the telemetry handle exists.
- pub(crate) fn telemetry_endpoints(&self) -> Option<&TelemetryEndpoints> {
- if self.telemetry_handle.is_none() {
- return None;
- }
-
- match self.telemetry_endpoints.as_ref() {
- // Don't initialise telemetry if `telemetry_endpoints` == Some([])
- Some(endpoints) if !endpoints.is_empty() => Some(endpoints),
- _ => None,
- }
- }

/// Returns the network protocol id from the chain spec, or the default.
pub fn protocol_id(&self) -> sc_network::config::ProtocolId {
let protocol_id_full = match self.chain_spec.protocol_id() {
17 changes: 13 additions & 4 deletions client/service/src/task_manager/mod.rs
@@ -91,10 +91,7 @@ impl SpawnTaskHandle {
metrics.tasks_ended.with_label_values(&[name, "finished"]).inc_by(0);
}

- let telemetry_span = self.telemetry_span.clone();
let future = async move {
- let _telemetry_entered = telemetry_span.as_ref().map(|x| x.enter());
-
if let Some(metrics) = metrics {
// Add some wrappers around `task`.
let task = {
@@ -127,7 +124,19 @@
}
};

- let join_handle = self.executor.spawn(Box::pin(future.in_current_span()), task_type);
+ let future: BoxFuture<()> = if let Some(telemetry_span) = self.telemetry_span.clone() {
+ // This will preserve the telemetry span and by extension all of its
+ // parents as they are linked together upon creation.
+ // Used by [`sc_telemetry::layer::TelemetryLayer`].
+ Box::pin(future.instrument(telemetry_span.span()))
+ } else {
+ // This will preserve the current span (if any) and by extension all
+ // of its parents as they are linked together upon creation.
+ // Used by [`sc_tracing::logging::prefix_logs_with`].
+ Box::pin(future.in_current_span())
+ };
+
+ let join_handle = self.executor.spawn(future, task_type);
let mut task_notifier = self.task_notifier.clone();
self.executor.spawn(
Box::pin(async move {
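Note on the branch added above: it follows the standard `tracing` pattern for propagating a span into a spawned future — `Instrument::instrument` enters the given span on every poll, while `in_current_span` captures whatever span is current at spawn time. A minimal standalone sketch (not part of this PR) using only the `tracing`, `tracing-subscriber`, and `futures` crates; the names here are illustrative:

use futures::future::BoxFuture;
use tracing::Instrument;

async fn background_task() {
    // Events emitted here are recorded inside the span the future was
    // instrumented with, which is how a subscriber layer (such as the
    // telemetry layer) can pick them up.
    tracing::info!("running inside the instrumented span");
}

fn instrumented(telemetry_span: Option<tracing::Span>) -> BoxFuture<'static, ()> {
    match telemetry_span {
        // Enter the given span (and, transitively, its parents) on every poll.
        Some(span) => Box::pin(background_task().instrument(span)),
        // Otherwise keep whatever span is current at the call site.
        None => Box::pin(background_task().in_current_span()),
    }
}

fn main() {
    tracing_subscriber::fmt::init();
    let span = tracing::info_span!("telemetry");
    futures::executor::block_on(instrumented(Some(span)));
    futures::executor::block_on(instrumented(None));
}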
1 change: 1 addition & 0 deletions client/service/test/src/lib.rs
@@ -268,6 +268,7 @@ fn node_config<G: RuntimeGenesis + 'static, E: ChainSpecExtension + Clone + 'sta
telemetry_endpoints: None,
telemetry_external_transport: None,
telemetry_handle: None,
+ telemetry_span: None,
default_heap_pages: None,
offchain_worker: Default::default(),
force_authoring: false,
5 changes: 5 additions & 0 deletions client/telemetry/src/lib.rs
@@ -88,6 +88,11 @@ impl TelemetrySpan {
pub fn new() -> Self {
Self(tracing::info_span!(TELEMETRY_LOG_SPAN))
}
+
+ /// Return a clone of the underlying `tracing::Span` instance.
+ pub fn span(&self) -> tracing::Span {
+ self.0.clone()
+ }
}

/// Message sent when the connection (re-)establishes.
4 changes: 3 additions & 1 deletion utils/browser/src/lib.rs
@@ -24,7 +24,7 @@ use sc_service::{
GenericChainSpec, RuntimeGenesis,
KeepBlocks, TransactionStorageMode,
};
- use sc_telemetry::TelemetryHandle;
+ use sc_telemetry::{TelemetryHandle, TelemetrySpan};
use sc_tracing::logging::GlobalLoggerBuilder;
use wasm_bindgen::prelude::*;
use futures::{
@@ -72,6 +72,7 @@ where
allow_private_ipv4: true,
enable_mdns: false,
};
+ let telemetry_span = telemetry_handle.as_ref().map(|_| TelemetrySpan::new());

let config = Configuration {
network,
@@ -83,6 +84,7 @@ where
}).into(),
telemetry_external_transport: Some(transport),
telemetry_handle,
+ telemetry_span,
role: Role::Light,
database: {
info!("Opening Indexed DB database '{}'...", name);