diff --git a/Cargo.lock b/Cargo.lock
index ae938b2016643..5b5a47bb050f2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4443,6 +4443,7 @@ dependencies = [
  "sc-consensus-aura",
  "sc-consensus-babe",
  "sc-consensus-slots",
+ "sc-network-types",
  "sc-telemetry",
  "sc-utils",
  "schnellru",
@@ -4590,6 +4591,7 @@ dependencies = [
  "parity-scale-codec",
  "sc-client-api",
  "sc-consensus-babe",
+ "sc-network-types",
  "sp-crypto-hashing 0.1.0",
  "sp-inherents",
  "sp-runtime",
diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml
index 8dca303ffebdb..28fa9e32eae77 100644
--- a/cumulus/client/consensus/aura/Cargo.toml
+++ b/cumulus/client/consensus/aura/Cargo.toml
@@ -27,6 +27,7 @@ sc-consensus = { workspace = true, default-features = true }
 sc-consensus-aura = { workspace = true, default-features = true }
 sc-consensus-babe = { workspace = true, default-features = true }
 sc-consensus-slots = { workspace = true, default-features = true }
+sc-network-types = { workspace = true, default-features = true }
 sc-telemetry = { workspace = true, default-features = true }
 sc-utils = { workspace = true, default-features = true }
 sp-api = { workspace = true, default-features = true }
diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs
index e372162f21332..457be6e632df3 100644
--- a/cumulus/client/consensus/aura/src/collator.rs
+++ b/cumulus/client/consensus/aura/src/collator.rs
@@ -44,6 +44,7 @@ use crate::collators::RelayParentData;
 use futures::prelude::*;
 use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction};
 use sc_consensus_aura::standalone as aura_internal;
+use sc_network_types::PeerId;
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppPublic;
 use sp_consensus::BlockOrigin;
@@ -69,6 +70,8 @@ pub struct Params {
 	pub relay_client: RClient,
 	/// The keystore handle used for accessing parachain key material.
 	pub keystore: KeystorePtr,
+	/// The collator network peer id.
+	pub collator_peer_id: PeerId,
 	/// The identifier of the parachain within the relay-chain.
 	pub para_id: ParaId,
 	/// The block proposer used for building blocks.
@@ -128,6 +131,7 @@ where
 		parent_hash: Block::Hash,
 		timestamp: impl Into>,
 		relay_parent_descendants: Option,
+		collator_peer_id: PeerId,
 	) -> Result<(ParachainInherentData, InherentData), Box> {
 		let paras_inherent_data = ParachainInherentDataProvider::create_at(
 			relay_parent,
@@ -138,6 +142,7 @@ where
 				.map(RelayParentData::into_inherent_descendant_list)
 				.unwrap_or_default(),
 			Vec::new(),
+			collator_peer_id,
 		)
 		.await;
@@ -173,6 +178,7 @@ where
 		validation_data: &PersistedValidationData,
 		parent_hash: Block::Hash,
 		timestamp: impl Into>,
+		collator_peer_id: PeerId,
 	) -> Result<(ParachainInherentData, InherentData), Box> {
 		self.create_inherent_data_with_rp_offset(
 			relay_parent,
@@ -180,6 +186,7 @@ where
 			parent_hash,
 			timestamp,
 			None,
+			collator_peer_id,
 		)
 		.await
 	}
diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs
index a66abf979d683..c10c42e233215 100644
--- a/cumulus/client/consensus/aura/src/collators/basic.rs
+++ b/cumulus/client/consensus/aura/src/collators/basic.rs
@@ -39,6 +39,7 @@ use polkadot_primitives::{CollatorPair, Id as ParaId, ValidationCode};
 use futures::{channel::mpsc::Receiver, prelude::*};
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf};
 use sc_consensus::BlockImport;
+use sc_network_types::PeerId;
 use sp_api::{CallApiAt, ProvideRuntimeApi};
 use sp_application_crypto::AppPublic;
 use sp_blockchain::HeaderBackend;
@@ -68,6 +69,8 @@ pub struct Params {
 	pub keystore: KeystorePtr,
 	/// The collator key used to sign collations before submitting to validators.
 	pub collator_key: CollatorPair,
+	/// The collator network peer id.
+	pub collator_peer_id: PeerId,
 	/// The para's ID.
 	pub para_id: ParaId,
 	/// A handle to the relay-chain client's "Overseer" or task orchestrator.
@@ -130,6 +133,7 @@ where
 			block_import: params.block_import,
 			relay_client: params.relay_client.clone(),
 			keystore: params.keystore.clone(),
+			collator_peer_id: params.collator_peer_id,
 			para_id: params.para_id,
 			proposer: params.proposer,
 			collator_service: params.collator_service,
@@ -234,6 +238,7 @@ where
 					&validation_data,
 					parent_hash,
 					claim.timestamp(),
+					params.collator_peer_id,
 				)
 				.await
 			);
diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs
index 55835ef4dcb8b..2f0250268a011 100644
--- a/cumulus/client/consensus/aura/src/collators/lookahead.rs
+++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -49,6 +49,7 @@ use crate::{collator as collator_util, collators::claim_queue_at, export_pov_to_
 use futures::prelude::*;
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf};
 use sc_consensus::BlockImport;
+use sc_network_types::PeerId;
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppPublic;
 use sp_blockchain::HeaderBackend;
@@ -79,6 +80,8 @@ pub struct Params {
 	pub keystore: KeystorePtr,
 	/// The collator key used to sign collations before submitting to validators.
 	pub collator_key: CollatorPair,
+	/// The collator network peer id.
+	pub collator_peer_id: PeerId,
 	/// The para's ID.
 	pub para_id: ParaId,
 	/// A handle to the relay-chain client's "Overseer" or task orchestrator.
@@ -207,6 +210,7 @@ where
 			block_import: params.block_import,
 			relay_client: params.relay_client.clone(),
 			keystore: params.keystore.clone(),
+			collator_peer_id: params.collator_peer_id,
 			para_id: params.para_id,
 			proposer: params.proposer,
 			collator_service: params.collator_service,
@@ -345,6 +349,7 @@ where
 					&validation_data,
 					parent_hash,
 					slot_claim.timestamp(),
+					params.collator_peer_id,
 				)
 				.await
 			{
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
index e6360a3c8408e..d632a5dd080a7 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
@@ -46,6 +46,7 @@ use polkadot_primitives::{
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider};
 use sc_consensus::BlockImport;
 use sc_consensus_aura::SlotDuration;
+use sc_network_types::PeerId;
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppPublic;
 use sp_blockchain::HeaderBackend;
@@ -84,6 +85,8 @@ pub struct BuilderTaskParams<
 	pub code_hash_provider: CHP,
 	/// The underlying keystore, which should contain Aura consensus keys.
 	pub keystore: KeystorePtr,
+	/// The collator network peer id.
+	pub collator_peer_id: PeerId,
 	/// The para's ID.
 	pub para_id: ParaId,
 	/// The underlying block proposer this should call into.
@@ -146,6 +149,7 @@ where
 			para_client,
 			keystore,
 			block_import,
+			collator_peer_id,
 			para_id,
 			proposer,
 			collator_service,
@@ -170,6 +174,7 @@ where
 			block_import,
 			relay_client: relay_client.clone(),
 			keystore: keystore.clone(),
+			collator_peer_id,
 			para_id,
 			proposer,
 			collator_service,
@@ -350,6 +355,7 @@ where
 					parent_hash,
 					slot_claim.timestamp(),
 					Some(rp_data),
+					collator_peer_id,
 				)
 				.await
 			{
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
index c939fb8d1275a..b7e21b0789883 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
@@ -82,6 +82,7 @@ use polkadot_primitives::{
 };
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider};
 use sc_consensus::BlockImport;
+use sc_network_types::PeerId;
 use sc_utils::mpsc::tracing_unbounded;
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppPublic;
@@ -122,6 +123,8 @@ pub struct Params
 		, additional_relay_state_keys: Vec>,
+		collator_peer_id: PeerId,
 	) -> Option {
+		let collator_peer_id = ApprovedPeerId::try_from(collator_peer_id.to_bytes())
+			.inspect_err(|_e| {
+				tracing::warn!(
+					target: LOG_TARGET,
+					"Could not convert collator_peer_id into ApprovedPeerId. The collator_peer_id \
+					should contain a sequence of at most 64 bytes",
+				);
+			})
+			.ok();
+
 		// Only include next epoch authorities when the descendants include an epoch digest.
 		// Skip the first entry because this is the relay parent itself.
 		let include_next_authorities = relay_parent_descendants.iter().skip(1).any(|header| {
@@ -218,7 +231,7 @@ impl ParachainInherentDataProvider {
 			validation_data: validation_data.clone(),
 			relay_chain_state,
 			relay_parent_descendants,
-			collator_peer_id: None,
+			collator_peer_id,
 		})
 	}
 }
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index 489a7452480c0..c5da2150288a7 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -33,9 +33,10 @@ use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
 use codec::{Decode, DecodeLimit, Encode};
 use core::cmp;
 use cumulus_primitives_core::{
-	relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo,
-	CumulusDigestItem, GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage,
-	ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler,
+	relay_chain::{self, UMPSignal, UMP_SEPARATOR},
+	AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CumulusDigestItem,
+	GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId,
+	PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler,
 	XcmpMessageSource,
 };
 use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData};
@@ -361,8 +362,19 @@ pub mod pallet {
 			UpwardMessages::::put(&up[..num as usize]);
 			*up = up.split_off(num as usize);
 
-			// Send the core selector UMP signal.
-			Self::send_ump_signal();
+			if let Some(core_info) =
+				CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest())
+			{
+				PendingUpwardSignals::::mutate(|signals| {
+					signals.push(
+						UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset)
+							.encode(),
+					);
+				});
+			}
+
+			// Send the pending UMP signals.
+			Self::send_ump_signals();
 
 			// If the total size of the pending messages is less than the threshold,
 			// we decrease the fee factor, since the queue is less congested.
@@ -585,7 +597,7 @@ pub mod pallet {
 				validation_data: vfp,
 				relay_chain_state,
 				relay_parent_descendants,
-				collator_peer_id: _,
+				collator_peer_id,
 			} = data;
 
 			// Check that the associated relay chain block number is as expected.
@@ -693,6 +705,12 @@ pub mod pallet {
 			::on_validation_data(&vfp);
 
+			if let Some(collator_peer_id) = collator_peer_id {
+				PendingUpwardSignals::::mutate(|signals| {
+					signals.push(UMPSignal::ApprovedPeer(collator_peer_id).encode());
+				});
+			}
+
 			total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
 				relevant_messaging_state.dmq_mqc_head,
 				inbound_messages_data.downward_messages,
@@ -905,14 +923,20 @@ pub mod pallet {
 	/// Upward messages that were sent in a block.
 	///
-	/// This will be cleared in `on_initialize` of each new block.
+	/// This will be cleared in `on_initialize` for each new block.
 	#[pallet::storage]
 	pub type UpwardMessages = StorageValue<_, Vec, ValueQuery>;
 
-	/// Upward messages that are still pending and not yet send to the relay chain.
+	/// Upward messages that are still pending and not yet sent to the relay chain.
 	#[pallet::storage]
 	pub type PendingUpwardMessages = StorageValue<_, Vec, ValueQuery>;
 
+	/// Upward signals that are still pending and not yet sent to the relay chain.
+	///
+	/// This will be cleared in `on_finalize` for each block.
+	#[pallet::storage]
+	pub type PendingUpwardSignals = StorageValue<_, Vec, ValueQuery>;
+
 	/// The factor to multiply the base delivery fee by for UMP.
 	#[pallet::storage]
 	pub type UpwardDeliveryFeeFactor =
@@ -1507,23 +1531,15 @@ impl Pallet {
 		CustomValidationHeadData::::put(head_data);
 	}
 
-	/// Send the ump signals
-	fn send_ump_signal() {
-		use cumulus_primitives_core::relay_chain::{UMPSignal, UMP_SEPARATOR};
-
-		UpwardMessages::::mutate(|up| {
-			if let Some(core_info) =
-				CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest())
-			{
+	/// Send the pending ump signals
+	fn send_ump_signals() {
+		let mut ump_signals = PendingUpwardSignals::::take();
+		if !ump_signals.is_empty() {
+			UpwardMessages::::mutate(|up| {
 				up.push(UMP_SEPARATOR);
-
-				// Send the core selector signal.
-				up.push(
-					UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset)
-						.encode(),
-				);
-			}
-		});
+				up.append(&mut ump_signals);
+			});
+		}
 	}
 
 	/// Open HRMP channel for using it in benchmarks or tests.
diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs
index 0b1ad28b4e82a..4da9dbb7f2fe0 100755
--- a/cumulus/pallets/parachain-system/src/tests.rs
+++ b/cumulus/pallets/parachain-system/src/tests.rs
@@ -19,8 +19,12 @@
 use super::*;
 use crate::mock::*;
+use alloc::collections::BTreeMap;
 use core::num::NonZeroU32;
-use cumulus_primitives_core::{AbridgedHrmpChannel, InboundDownwardMessage, InboundHrmpMessage};
+use cumulus_primitives_core::{
+	relay_chain::ApprovedPeerId, AbridgedHrmpChannel, ClaimQueueOffset, CoreInfo, CoreSelector,
+	InboundDownwardMessage, InboundHrmpMessage, CUMULUS_CONSENSUS_ID,
+};
 use cumulus_primitives_parachain_inherent::{
 	v0, INHERENT_IDENTIFIER, PARACHAIN_INHERENT_IDENTIFIER_V0,
 };
@@ -31,6 +35,7 @@ use rand::Rng;
 use relay_chain::HrmpChannelId;
 use sp_core::H256;
 use sp_inherents::InherentDataProvider;
+use sp_runtime::DigestItem;
 use sp_trie::StorageProof;
 
 #[test]
@@ -1655,3 +1660,74 @@ fn ump_fee_factor_increases_and_decreases() {
 		},
 	);
 }
+
+#[test]
+fn ump_signals_are_sent_correctly() {
+	let core_info = CoreInfo {
+		selector: CoreSelector(1),
+		claim_queue_offset: ClaimQueueOffset(1),
+		number_of_cores: codec::Compact(1),
+	};
+
+	// Test cases list with the following format:
+	// `((expect_approved_peer, expect_select_core), expected_upward_messages)`
+	let test_cases = BTreeMap::from([
+		((false, false), vec![b"Test".to_vec()]),
+		(
+			(true, false),
+			vec![
+				b"Test".to_vec(),
+				UMP_SEPARATOR,
+				UMPSignal::ApprovedPeer(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap())
+					.encode(),
+			],
+		),
+		(
+			(false, true),
+			vec![
+				b"Test".to_vec(),
+				UMP_SEPARATOR,
+				UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(),
+			],
+		),
+		(
+			(true, true),
+			vec![
+				b"Test".to_vec(),
+				UMP_SEPARATOR,
+				UMPSignal::ApprovedPeer(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap())
+					.encode(),
+				UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(),
+			],
+		),
+	]);
+
+	for ((expect_approved_peer, expect_select_core), expected_upward_messages) in test_cases {
+		let core_info_digest = CumulusDigestItem::CoreInfo(core_info.clone()).encode();
+
+		BlockTests::new()
+			.with_inherent_data(move |_, _, data| {
+				if expect_approved_peer {
+					data.collator_peer_id =
+						Some(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap());
+				}
+			})
+			.add_with_post_test(
+				1,
+				move || {
+					ParachainSystem::send_upward_message(b"Test".to_vec()).unwrap();
+
+					if expect_select_core {
+						System::deposit_log(DigestItem::PreRuntime(
+							CUMULUS_CONSENSUS_ID,
+							core_info_digest.clone(),
+						));
+					}
+				},
+				move || {
+					assert_eq!(PendingUpwardSignals::::get(), Vec::>::new());
+					assert_eq!(UpwardMessages::::get(), expected_upward_messages);
+				},
+			);
+	}
+}
diff --git a/cumulus/polkadot-omni-node/lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs
index 3e1f69e94c5a7..007be9d5fa765 100644
--- a/cumulus/polkadot-omni-node/lib/src/common/spec.rs
+++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs
@@ -45,7 +45,9 @@ use prometheus_endpoint::Registry;
 use sc_client_api::Backend;
 use sc_consensus::DefaultImportQueue;
 use sc_executor::{HeapAllocStrategy, DEFAULT_HEAP_ALLOC_STRATEGY};
-use sc_network::{config::FullNetworkConfiguration, NetworkBackend, NetworkBlock};
+use sc_network::{
+	config::FullNetworkConfiguration, NetworkBackend, NetworkBlock, NetworkStateInfo, PeerId,
+};
 use sc_service::{Configuration, ImportQueue, PartialComponents, TaskManager};
 use sc_statement_store::Store;
 use sc_sysinfo::HwBench;
@@ -89,6 +91,7 @@ where
 	relay_chain_slot_duration: Duration,
 	para_id: ParaId,
 	collator_key: CollatorPair,
+	collator_peer_id: PeerId,
 	overseer_handle: OverseerHandle,
 	announce_block: Arc>) + Send + Sync>,
 	backend: Arc>,
@@ -380,6 +383,7 @@ pub(crate) trait NodeSpec: BaseNodeSpec {
 				metrics,
 			})
 			.await?;
+		let peer_id = network.local_peer_id();
 
 		let statement_store = statement_handler_proto
 			.map(|statement_handler_proto| {
@@ -536,6 +540,7 @@ pub(crate) trait NodeSpec: BaseNodeSpec {
 				relay_chain_slot_duration,
 				para_id,
 				collator_key.expect("Command line arguments do not allow this. qed"),
+				peer_id,
 				overseer_handle,
 				announce_block,
 				backend.clone(),
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
index 2a7ef5cf537e2..5eae88aac98e5 100644
--- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
+++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
@@ -63,7 +63,7 @@ use sc_consensus::{
 	BlockImportParams, DefaultImportQueue, LongestChain,
 };
 use sc_consensus_manual_seal::consensus::aura::AuraConsensusDataProvider;
-use sc_network::{config::FullNetworkConfiguration, NotificationMetrics};
+use sc_network::{config::FullNetworkConfiguration, NotificationMetrics, PeerId};
 use sc_service::{Configuration, Error, PartialComponents, TaskManager};
 use sc_telemetry::TelemetryHandle;
 use sc_transaction_pool::TransactionPoolHandle;
@@ -564,6 +564,7 @@ where
 		relay_chain_slot_duration: Duration,
 		para_id: ParaId,
 		collator_key: CollatorPair,
+		collator_peer_id: PeerId,
 		_overseer_handle: OverseerHandle,
 		announce_block: Arc>) + Send + Sync>,
 		backend: Arc>,
@@ -598,6 +599,7 @@ where
 			},
 			keystore,
 			collator_key,
+			collator_peer_id,
 			para_id,
 			proposer,
 			collator_service,
@@ -688,6 +690,7 @@ where
 		relay_chain_slot_duration: Duration,
 		para_id: ParaId,
 		collator_key: CollatorPair,
+		collator_peer_id: PeerId,
 		overseer_handle: OverseerHandle,
 		announce_block: Arc>) + Send + Sync>,
 		backend: Arc>,
@@ -701,7 +704,6 @@ where
 			prometheus_registry,
 			telemetry.clone(),
 		);
-
 		let collator_service = CollatorService::new(
 			client.clone(),
 			Arc::new(task_manager.spawn_handle()),
@@ -725,6 +727,7 @@ where
 			},
 			keystore,
 			collator_key,
+			collator_peer_id,
 			para_id,
 			overseer_handle,
 			relay_chain_slot_duration,
diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs
index 4c9c98b301052..ab08b525c53b6 100644
--- a/cumulus/test/service/src/lib.rs
+++ b/cumulus/test/service/src/lib.rs
@@ -431,6 +431,7 @@ where
 			prometheus_registry: None,
 		})?;
 
+	let collator_peer_id = network.local_peer_id();
 	if let Some(collator_key) = collator_key {
 		let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording(
 			task_manager.spawn_handle(),
@@ -473,6 +474,7 @@ where
 			spawner: task_manager.spawn_handle(),
 			export_pov: None,
 			max_pov_percentage: None,
+			collator_peer_id,
 		};
 
 		slot_based::run::(params);
@@ -489,6 +491,7 @@ where
 			},
 			keystore,
 			collator_key,
+			collator_peer_id,
 			para_id,
 			overseer_handle,
 			relay_chain_slot_duration,
diff --git a/prdoc/pr_10145.prdoc b/prdoc/pr_10145.prdoc
new file mode 100644
index 0000000000000..2b8fea5183a47
--- /dev/null
+++ b/prdoc/pr_10145.prdoc
@@ -0,0 +1,19 @@
+title: Send PeerId via UMP
+doc:
+- audience:
+  - Node Dev
+  - Runtime Dev
+  description: |-
+    Resolves https://github.com/paritytech/polkadot-sdk/issues/7749
+
+    This PR propagates the network `PeerId` of the collator to the parachain inherent.
+    Then the parachain runtime emits a UMP signal containing the collator `PeerId`.
+crates:
+- name: cumulus-pallet-parachain-system
+  bump: minor
+- name: cumulus-client-consensus-aura
+  bump: major
+- name: cumulus-client-parachain-inherent
+  bump: major
+- name: polkadot-omni-node-lib
+  bump: patch
diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml
index b622f2dc14cb0..ce39607e8bd64 100644
--- a/templates/parachain/node/Cargo.toml
+++ b/templates/parachain/node/Cargo.toml
@@ -22,7 +22,7 @@ jsonrpsee = { features = ["server"], workspace = true }
 log = { workspace = true, default-features = true }
 serde = { features = ["derive"], workspace = true, default-features = true }
 
-polkadot-sdk = { workspace = true, features = ["node"] }
+polkadot-sdk = { workspace = true, features = ["node", "polkadot-primitives"] }
 
 parachain-template-runtime = { workspace = true }
diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs
index 7d95673eafdc9..a757ff07bab51 100644
--- a/templates/parachain/node/src/service.rs
+++ b/templates/parachain/node/src/service.rs
@@ -32,6 +32,7 @@ use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
 
 // Substrate Imports
 use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
+use polkadot_sdk::sc_network::PeerId;
 use prometheus_endpoint::Registry;
 use sc_client_api::Backend;
 use sc_consensus::ImportQueue;
@@ -182,6 +183,7 @@ fn start_consensus(
 	relay_chain_slot_duration: Duration,
 	para_id: ParaId,
 	collator_key: CollatorPair,
+	collator_peer_id: PeerId,
 	overseer_handle: OverseerHandle,
 	announce_block: Arc>) + Send + Sync>,
 ) -> Result<(), sc_service::Error> {
@@ -192,7 +194,6 @@ fn start_consensus(
 		prometheus_registry,
 		telemetry.clone(),
 	);
-
 	let collator_service = CollatorService::new(
 		client.clone(),
 		Arc::new(task_manager.spawn_handle()),
@@ -211,6 +212,7 @@ fn start_consensus(
 		},
 		keystore,
 		collator_key,
+		collator_peer_id,
 		para_id,
 		overseer_handle,
 		relay_chain_slot_duration,
@@ -298,6 +300,7 @@ pub async fn start_parachain_node(
 			),
 		})
 		.await?;
+	let collator_peer_id = network.local_peer_id();
 
 	if parachain_config.offchain_worker.enabled {
 		use futures::FutureExt;
@@ -434,6 +437,7 @@ pub async fn start_parachain_node(
 		relay_chain_slot_duration,
 		para_id,
 		collator_key.expect("Command line arguments do not allow this. qed"),
+		collator_peer_id,
 		overseer_handle,
 		announce_block,
 	)?;
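With this change, `send_ump_signals` frames the upward-message vector as ordinary messages, then `UMP_SEPARATOR`, then one encoded `UMPSignal` per pending signal (e.g. `ApprovedPeer(..)` and `SelectCore(..)`). The sketch below is illustrative only and not part of this PR: the helper name and the splitting logic are assumptions; only the framing and the `UMPSignal`/`UMP_SEPARATOR` imports come from the changes above.

```rust
use codec::Decode;
use cumulus_primitives_core::relay_chain::{UMPSignal, UMP_SEPARATOR};

/// Split a block's upward messages into regular messages and trailing UMP signals.
/// (Hypothetical helper; the relay-chain side performs an equivalent split.)
fn split_messages_and_signals(upward_messages: &[Vec<u8>]) -> (Vec<Vec<u8>>, Vec<UMPSignal>) {
	// `send_ump_signals` only appends the separator when there are pending signals.
	let mut parts = upward_messages.split(|message| *message == UMP_SEPARATOR);
	// Everything before the separator is an ordinary upward message.
	let messages = parts.next().unwrap_or_default().to_vec();
	// Everything after it should decode as a `UMPSignal`.
	let signals = parts
		.next()
		.unwrap_or_default()
		.iter()
		.filter_map(|raw| UMPSignal::decode(&mut raw.as_slice()).ok())
		.collect();
	(messages, signals)
}
```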