Feat: Add API and mechanism to retrieve additional top-level and child proofs via the relay state proof #10678
base: master
Changes from all commits
4ac6204
25fa5d4
b9fac0d
a9f3bc7
f8c4aea
de2e7c8
1f2a62e
a384b4c
500ad3d
8f87300
cbc8986
```diff
@@ -24,25 +24,26 @@ use cumulus_primitives_core::{
 		self, ApprovedPeerId, Block as RelayBlock, Hash as PHash, Header as RelayHeader,
 		HrmpChannelId,
 	},
-	ParaId, PersistedValidationData,
+	ParaId, PersistedValidationData, RelayProofRequest, RelayStorageKey,
 };
 pub use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_IDENTIFIER};
 use cumulus_relay_chain_interface::RelayChainInterface;
 pub use mock::{MockValidationDataInherentDataProvider, MockXcmConfig};
 use sc_network_types::PeerId;
+use sp_state_machine::StorageProof;
+use sp_storage::ChildInfo;
 
 const LOG_TARGET: &str = "parachain-inherent";
 
-/// Collect the relevant relay chain state in form of a proof for putting it into the validation
-/// data inherent.
-async fn collect_relay_storage_proof(
+/// Builds the list of static relay chain storage keys that are always needed for parachain
+/// validation.
+async fn get_static_relay_storage_keys(
 	relay_chain_interface: &impl RelayChainInterface,
 	para_id: ParaId,
 	relay_parent: PHash,
 	include_authorities: bool,
 	include_next_authorities: bool,
-	additional_relay_state_keys: Vec<Vec<u8>>,
-) -> Option<sp_state_machine::StorageProof> {
+) -> Option<Vec<Vec<u8>>> {
 	use relay_chain::well_known_keys as relay_well_known_keys;
 
 	let ingress_channels = relay_chain_interface
@@ -136,25 +137,95 @@ async fn collect_relay_storage_proof(
 		relevant_keys.push(relay_well_known_keys::NEXT_AUTHORITIES.to_vec());
 	}
 
-	// Add additional relay state keys
+	Some(relevant_keys)
+}
+
+/// Collect the relevant relay chain state in form of a proof for putting it into the validation
+/// data inherent.
+async fn collect_relay_storage_proof(
+	relay_chain_interface: &impl RelayChainInterface,
+	para_id: ParaId,
+	relay_parent: PHash,
+	include_authorities: bool,
+	include_next_authorities: bool,
+	additional_relay_state_keys: Vec<Vec<u8>>,
+	relay_proof_request: RelayProofRequest,
```
Comment on lines +151 to +152

Contributor: To me it would make sense to merge these two parameters.

Author: Valid, and I did consider this during the implementation, but I kept them separate because of their different origins and to minimize the impact on existing flows.

Contributor: In the end, both are about adding new storage keys. Downstream users need to adjust anyway due to the new type and argument here; keeping both is just redundant.
```diff
+) -> Option<StorageProof> {
+	// Get static keys that are always needed
+	let mut all_top_keys = get_static_relay_storage_keys(
+		relay_chain_interface,
+		para_id,
+		relay_parent,
+		include_authorities,
+		include_next_authorities,
+	)
+	.await?;
+
+	// Add additional_relay_state_keys
 	let unique_keys: Vec<Vec<u8>> = additional_relay_state_keys
 		.into_iter()
-		.filter(|key| !relevant_keys.contains(key))
+		.filter(|key| !all_top_keys.contains(key))
 		.collect();
-	relevant_keys.extend(unique_keys);
+	all_top_keys.extend(unique_keys);
 
-	relay_chain_interface
-		.prove_read(relay_parent, &relevant_keys)
-		.await
-		.map_err(|e| {
-			tracing::error!(
-				target: LOG_TARGET,
-				relay_parent = ?relay_parent,
-				error = ?e,
-				"Cannot obtain read proof from relay chain.",
-			);
-		})
-		.ok()
+	// Group requested keys by storage type
+	let RelayProofRequest { keys } = relay_proof_request;
+	let mut child_keys: std::collections::BTreeMap<Vec<u8>, Vec<Vec<u8>>> =
+		std::collections::BTreeMap::new();
+
+	for key in keys {
+		match key {
+			RelayStorageKey::Top(k) => {
+				if !all_top_keys.contains(&k) {
+					all_top_keys.push(k);
+				}
+			},
+			RelayStorageKey::Child { storage_key, key } => {
+				child_keys.entry(storage_key).or_default().push(key);
+			},
+		}
+	}
+
+	// Collect all storage proofs
+	let mut all_proofs = Vec::new();
+
+	// Collect top-level storage proof.
+	match relay_chain_interface.prove_read(relay_parent, &all_top_keys).await {
+		Ok(top_proof) => {
+			all_proofs.push(top_proof);
+		},
+		Err(e) => {
+			tracing::error!(
+				target: LOG_TARGET,
+				relay_parent = ?relay_parent,
+				error = ?e,
+				"Cannot obtain relay chain storage proof.",
+			);
+			return None;
+		},
+	}
+
+	// Collect child trie proofs
+	for (storage_key, data_keys) in child_keys {
+		let child_info = ChildInfo::new_default(&storage_key);
+		match relay_chain_interface.prove_child_read(relay_parent, &child_info, &data_keys).await {
+			Ok(child_proof) => {
+				all_proofs.push(child_proof);
+			},
+			Err(e) => {
+				tracing::error!(
+					target: LOG_TARGET,
+					relay_parent = ?relay_parent,
+					child_trie_id = ?child_info.storage_key(),
+					error = ?e,
+					"Cannot obtain child trie proof from relay chain.",
+				);
+			},
+		}
+	}
+
+	// Merge all proofs
+	Some(StorageProof::merge(all_proofs))
 }
 
 pub struct ParachainInherentDataProvider;
@@ -170,6 +241,7 @@ impl ParachainInherentDataProvider {
 		para_id: ParaId,
 		relay_parent_descendants: Vec<RelayHeader>,
 		additional_relay_state_keys: Vec<Vec<u8>>,
+		relay_proof_request: RelayProofRequest,
 		collator_peer_id: PeerId,
 	) -> Option<ParachainInherentData> {
 		let collator_peer_id = ApprovedPeerId::try_from(collator_peer_id.to_bytes())
@@ -195,6 +267,7 @@ impl ParachainInherentDataProvider {
 			!relay_parent_descendants.is_empty(),
 			include_next_authorities,
 			additional_relay_state_keys,
+			relay_proof_request,
 		)
 		.await?;
 
```
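To show how the new request type is meant to be used, here is a small, purely illustrative sketch of building a `RelayProofRequest` that mixes top-level and child-trie keys. The struct and enum shapes follow the diff above; the concrete keys are made up, and constructing the struct literally assumes its `keys` field is public.

```rust
use cumulus_primitives_core::{RelayProofRequest, RelayStorageKey};

/// Illustrative only: request one extra top-level relay chain key plus two entries
/// from a (default) child trie. Real callers would derive the keys from the relay
/// chain runtime's storage layout.
fn example_proof_request() -> RelayProofRequest {
	let child_trie_key: Vec<u8> = b"example_child_trie".to_vec();

	RelayProofRequest {
		keys: vec![
			// Proven against the relay chain's top-level trie.
			RelayStorageKey::Top(b"example_top_level_key".to_vec()),
			// Proven against the child trie identified by `child_trie_key`; the proof
			// collector groups these per child trie and calls `prove_child_read`.
			RelayStorageKey::Child {
				storage_key: child_trie_key.clone(),
				key: b"entry_a".to_vec(),
			},
			RelayStorageKey::Child { storage_key: child_trie_key, key: b"entry_b".to_vec() },
		],
	}
}
```

Per the diff, all `Top` keys are proven together with the static keys in a single `prove_read`, keys under the same child trie are proven in one `prove_child_read` call, and the resulting proofs are merged into a single `StorageProof`.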
Contributor: Why should this not be properly implemented for the slot-based collator?

Author: Hey, thanks for the review. The reasoning was that while the API is generic, its development is driven by a concrete use case (#10679). I didn't include a slot-based implementation because I saw no immediate application for it, and the goal was to keep the PR functional and as lean as possible. It is something that could be added in a future iteration if needed.

Contributor: I understand the use case. But all parachains that use elastic scaling, like asset-hub, use the slot-based collator. An implementation for it is absolutely necessary. Both the lookahead and the slot-based collator are currently in use by parachains.