Merged
Changes from 10 commits (38 commits in total):
ee90a9b
Add ultimate test for checking all the chunk index build/verify proof…
bkontur Oct 28, 2025
d5506de
Refactor proof verification to separate `ensure_chunk_proof` for easi…
bkontur Oct 28, 2025
1453f12
Actual fix
bkontur Oct 28, 2025
4c3d67f
fmt
bkontur Oct 28, 2025
cd2d2d3
Fix for 0 total_chunks
bkontur Oct 29, 2025
3085804
Unify usage of num_chunks/random_chunk + more docs
bkontur Oct 29, 2025
98ef883
Fix the correct docs
bkontur Oct 29, 2025
d0bc14c
clippy
bkontur Oct 29, 2025
92923d7
Update from github-actions[bot] running command 'fmt'
github-actions[bot] Oct 29, 2025
f0eed0b
Update from github-actions[bot] running command 'prdoc --audience run…
github-actions[bot] Oct 29, 2025
bc743b3
Update substrate/primitives/transaction-storage-proof/src/lib.rs
bkontur Oct 29, 2025
b306af7
Update prdoc/pr_10153.prdoc
bkontur Oct 29, 2025
98fcd56
Update substrate/primitives/transaction-storage-proof/src/lib.rs
bkontur Oct 29, 2025
29ffae2
Update substrate/frame/transaction-storage/src/lib.rs
bkontur Oct 29, 2025
f670f90
Update substrate/primitives/transaction-storage-proof/src/lib.rs
bkontur Oct 29, 2025
513fa70
Rename to `verify_chunk_proof` + doc
bkontur Oct 29, 2025
f96557b
Merge branch 'master' into bko-pallet-tx-storage-nits
bkontur Oct 29, 2025
04cb7d7
Return `Ok(None)` when empty transactions/chunks
bkontur Oct 29, 2025
36aea9d
Update from github-actions[bot] running command 'fmt'
github-actions[bot] Oct 29, 2025
23eee5b
Introduced `ChunkIndex`
bkontur Oct 29, 2025
7d1b78a
Removed `ChunkCount` storage item
bkontur Oct 29, 2025
15ad008
Update from github-actions[bot] running command 'fmt'
github-actions[bot] Oct 29, 2025
4556d1f
Merge branch 'master' into bko-pallet-tx-storage-nits
bkontur Oct 29, 2025
ba6a7a1
Update prdoc/pr_10153.prdoc
bkontur Oct 29, 2025
d988c36
Update substrate/frame/transaction-storage/src/lib.rs
bkontur Oct 29, 2025
03def22
Nits for `on_initialize` weight metering
bkontur Oct 30, 2025
c6057b0
Merge remote-tracking branch 'origin/master' into bko-pallet-tx-stora…
bkontur Oct 30, 2025
59e7642
Merge branch 'master' into bko-pallet-tx-storage-nits
bkontur Oct 31, 2025
fae8ba9
Merge branch 'master' into bko-pallet-tx-storage-nits
bkontur Oct 31, 2025
f0c12a8
Update substrate/frame/transaction-storage/src/lib.rs
bkontur Nov 1, 2025
596b27b
Update substrate/frame/transaction-storage/src/lib.rs
bkontur Nov 1, 2025
cd1fb01
Update substrate/frame/transaction-storage/src/lib.rs
bkontur Nov 1, 2025
390ee76
Merge branch 'master' into bko-pallet-tx-storage-nits
bkontur Nov 1, 2025
fca0b92
Chage rustdoc
bkontur Nov 4, 2025
97cf2fa
Return error instead
bkontur Nov 4, 2025
b7d6d08
Merge remote-tracking branch 'origin/master' into bko-pallet-tx-stora…
bkontur Nov 4, 2025
084a681
Update substrate/frame/transaction-storage/src/lib.rs
bkontur Nov 4, 2025
4e35eab
Merge branch 'master' into bko-pallet-tx-storage-nits
bkontur Nov 4, 2025
16 changes: 16 additions & 0 deletions prdoc/pr_10153.prdoc
@@ -0,0 +1,16 @@
title: '[pallet-transaction-storage] Improved `check_proof` check + tests + docs'
doc:
- audience: Runtime Dev
description: |-
**This PR:**

* Fixes `check_proof` and its `binary_search_by_key` chunk lookup
* Adds the `ensure_chunk_proof_works` test, which covers all possible chunk index build/verify proof roundtrips (to catch all corner cases)
* Improves docs around `pallet-transaction-storage`
crates:
- name: pallet-transaction-storage
bump: patch
- name: sp-transaction-storage-proof
bump: patch
- name: sp-io
bump: patch
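
For context on the `binary_search_by_key` fix described above: `block_chunks` is stored as a cumulative count per transaction, so the lookup has to key on `count - 1` (the last global chunk index a transaction owns) rather than on the raw count. A minimal standalone Rust sketch, not code from this diff (`find_tx` and the sample counts are made up for illustration):

// Three transactions with 2, 3 and 1 chunks have cumulative counts [2, 5, 6]:
// global chunk indices 0..=1 belong to tx 0, 2..=4 to tx 1, and 5 to tx 2.
fn find_tx(cumulative: &[u32], global_index: u32) -> usize {
    cumulative
        // Key on the last chunk index each transaction owns (count - 1),
        // mirroring the `saturating_sub(1)` in the fixed pallet code.
        .binary_search_by_key(&global_index, |c| c.saturating_sub(1))
        .unwrap_or_else(|i| i)
}

fn main() {
    let cumulative = [2u32, 5, 6];
    assert_eq!(find_tx(&cumulative, 1), 0); // last chunk of tx 0
    assert_eq!(find_tx(&cumulative, 2), 1); // first chunk of tx 1
    assert_eq!(find_tx(&cumulative, 5), 2); // only chunk of tx 2
    // Searching on the raw cumulative counts maps global index 2 to tx 0,
    // which is the off-by-one this PR fixes.
    assert_eq!(cumulative.binary_search_by_key(&2, |c| *c), Ok(0));
}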
111 changes: 75 additions & 36 deletions substrate/frame/transaction-storage/src/lib.rs
@@ -43,7 +43,7 @@ use frame_support::{
};
use sp_runtime::traits::{BlakeTwo256, Dispatchable, Hash, One, Saturating, Zero};
use sp_transaction_storage_proof::{
encode_index, random_chunk, InherentError, TransactionStorageProof, CHUNK_SIZE,
encode_index, num_chunks, random_chunk, InherentError, TransactionStorageProof, CHUNK_SIZE,
INHERENT_IDENTIFIER,
};

@@ -80,14 +80,13 @@ pub struct TransactionInfo {
/// Size of indexed data in bytes.
size: u32,
/// Total number of chunks added in the block with this transaction. This
/// is used find transaction info by block chunk index using binary search.
/// is used to find transaction info by block chunk index using binary search.
///
/// Cumulative value of all previous transactions in the block; the last transaction holds the
/// total chunk value.
block_chunks: u32,
}

fn num_chunks(bytes: u32) -> u32 {
(bytes as u64).div_ceil(CHUNK_SIZE as u64) as u32
}

#[frame_support::pallet]
pub mod pallet {
use super::*;
@@ -133,7 +132,7 @@ pub mod pallet {
NotConfigured,
/// Renewed extrinsic is not found.
RenewedNotFound,
/// Attempting to store empty transaction
/// Attempting to store an empty transaction
EmptyTransaction,
/// Proof was not expected in this block.
UnexpectedProof,
@@ -225,7 +224,7 @@ pub mod pallet {
let mut index = 0;
BlockTransactions::<T>::mutate(|transactions| {
if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize {
return Err(Error::<T>::TooManyTransactions)
return Err(Error::<T>::TooManyTransactions);
}
let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunk_count;
index = transactions.len() as u32;
@@ -269,7 +268,7 @@ pub mod pallet {
let mut index = 0;
BlockTransactions::<T>::mutate(|transactions| {
if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize {
return Err(Error::<T>::TooManyTransactions)
return Err(Error::<T>::TooManyTransactions);
}
let chunks = num_chunks(info.size);
let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunks;
@@ -301,39 +300,24 @@ pub mod pallet {
) -> DispatchResultWithPostInfo {
ensure_none(origin)?;
ensure!(!ProofChecked::<T>::get(), Error::<T>::DoubleCheck);

// Get the target block metadata.
let number = frame_system::Pallet::<T>::block_number();
let period = StoragePeriod::<T>::get();
let target_number = number.saturating_sub(period);
ensure!(!target_number.is_zero(), Error::<T>::UnexpectedProof);
let total_chunks = ChunkCount::<T>::get(target_number);
ensure!(total_chunks != 0, Error::<T>::UnexpectedProof);
let transactions =
Transactions::<T>::get(target_number).ok_or(Error::<T>::MissingStateData)?;

// Verify the proof with a "random" chunk (randomness is based on the parent hash).
let parent_hash = frame_system::Pallet::<T>::parent_hash();
let selected_chunk_index = random_chunk(parent_hash.as_ref(), total_chunks);
let (info, chunk_index) = match Transactions::<T>::get(target_number) {
Some(infos) => {
let index = match infos
.binary_search_by_key(&selected_chunk_index, |info| info.block_chunks)
{
Ok(index) => index,
Err(index) => index,
};
let info = infos.get(index).ok_or(Error::<T>::MissingStateData)?.clone();
let chunks = num_chunks(info.size);
let prev_chunks = info.block_chunks - chunks;
(info, selected_chunk_index - prev_chunks)
},
None => return Err(Error::<T>::MissingStateData.into()),
};
ensure!(
sp_io::trie::blake2_256_verify_proof(
info.chunk_root,
&proof.proof,
&encode_index(chunk_index),
&proof.chunk,
sp_runtime::StateVersion::V1,
),
Error::<T>::InvalidProof
);
Self::ensure_chunk_proof(
proof,
parent_hash.as_ref(),
transactions.to_vec(),
total_chunks,
)?;
ProofChecked::<T>::put(true);
Self::deposit_event(Event::ProofChecked);
Ok(().into())
@@ -466,5 +450,60 @@ pub mod pallet {
T::FeeDestination::on_unbalanced(credit);
Ok(())
}

pub(crate) fn ensure_chunk_proof(
proof: TransactionStorageProof,
random_hash: &[u8],
infos: Vec<TransactionInfo>,
total_chunks: u32,
) -> Result<(), Error<T>> {
ensure!(total_chunks != 0, Error::<T>::UnexpectedProof);
ensure!(!infos.is_empty(), Error::<T>::UnexpectedProof);

// Get the random chunk index (from all transactions in the block = 0..total_chunks).
let selected_block_chunk_index = random_chunk(random_hash, total_chunks as _);

// Let's find the corresponding transaction and its "local" chunk index for "global"
// `selected_block_chunk_index`.
let (tx_info, tx_chunk_index) = {
// Binary search for the transaction that owns this `selected_block_chunk_index`
// chunk.
let tx_index = infos
.binary_search_by_key(&selected_block_chunk_index, |info| {
// Each `info.block_chunks` is cumulative count,
// so last chunk index = count - 1.
info.block_chunks.saturating_sub(1)
})
.unwrap_or_else(|tx_index| tx_index);

// Get the transaction and its local chunk index.
let tx_info = infos.get(tx_index).ok_or(Error::<T>::MissingStateData)?;
// We shouldn't reach this point; we rely on the fact that `fn store` does not allow
// empty transactions. Without this check, it would fail anyway below with
// `InvalidProof`.
ensure!(!tx_info.block_chunks.is_zero(), Error::<T>::EmptyTransaction);

// Convert a global chunk index into a transaction-local one.
let tx_chunks = num_chunks(tx_info.size);
let prev_chunks = tx_info.block_chunks - tx_chunks;
let tx_chunk_index = selected_block_chunk_index - prev_chunks;

(tx_info, tx_chunk_index)
};

// Verify the tx chunk proof.
ensure!(
sp_io::trie::blake2_256_verify_proof(
tx_info.chunk_root,
&proof.proof,
&encode_index(tx_chunk_index),
&proof.chunk,
sp_runtime::StateVersion::V1,
),
Error::<T>::InvalidProof
);

Ok(())
}
}
}
58 changes: 57 additions & 1 deletion substrate/frame/transaction-storage/src/tests.rs
@@ -22,7 +22,7 @@ use crate::mock::*;
use frame_support::{assert_noop, assert_ok};
use frame_system::RawOrigin;
use sp_runtime::{DispatchError, TokenError::FundsUnavailable};
use sp_transaction_storage_proof::registration::build_proof;
use sp_transaction_storage_proof::{registration::build_proof, CHUNK_SIZE};

const MAX_DATA_SIZE: u32 = DEFAULT_MAX_TRANSACTION_SIZE;

@@ -114,6 +114,62 @@ fn checks_proof() {
});
}

#[test]
fn ensure_chunk_proof_works() {
new_test_ext().execute_with(|| {
// Prepare a bunch of transactions with variable chunk sizes.
let transactions = vec![
vec![0u8; CHUNK_SIZE - 1],
vec![1u8; CHUNK_SIZE],
vec![2u8; CHUNK_SIZE + 1],
vec![3u8; 2 * CHUNK_SIZE - 1],
vec![3u8; 2 * CHUNK_SIZE],
vec![3u8; 2 * CHUNK_SIZE + 1],
vec![4u8; 7 * CHUNK_SIZE - 1],
vec![4u8; 7 * CHUNK_SIZE],
vec![4u8; 7 * CHUNK_SIZE + 1],
];
let expected_total_chunks =
transactions.iter().map(|t| t.len().div_ceil(CHUNK_SIZE) as u32).sum::<u32>();

// Store a couple of transactions in one block.
run_to_block(1, || None);
let caller = 1;
for transaction in transactions.clone() {
assert_ok!(TransactionStorage::<Test>::store(
RawOrigin::Signed(caller).into(),
transaction
));
}
run_to_block(2, || None);

// Read all the block transactions metadata.
let total_chunks = ChunkCount::<Test>::get(1);
let tx_infos = Transactions::<Test>::get(1).unwrap();
assert_eq!(expected_total_chunks, total_chunks);
assert_eq!(9, tx_infos.len());

// Verify proofs for all possible chunk indexes.
for chunk_index in 0..total_chunks {
// chunk index randomness
let mut random_hash = [0u8; 32];
random_hash[..8].copy_from_slice(&(chunk_index as u64).to_be_bytes());
let selected_chunk_index = random_chunk(random_hash.as_ref(), total_chunks);
assert_eq!(selected_chunk_index, chunk_index);

// build/check chunk proof roundtrip
let proof =
build_proof(random_hash.as_ref(), transactions.clone()).expect("valid proof");
assert_ok!(TransactionStorage::<Test>::ensure_chunk_proof(
proof,
random_hash.as_ref(),
tx_infos.to_vec(),
total_chunks
));
}
});
}

#[test]
fn renews_data() {
new_test_ext().execute_with(|| {
5 changes: 3 additions & 2 deletions substrate/primitives/io/src/lib.rs
@@ -1486,7 +1486,7 @@ pub trait Hashing {
/// Interface that provides transaction indexing API.
#[runtime_interface]
pub trait TransactionIndex {
/// Add transaction index. Returns indexed content hash.
/// Indexes the specified transaction for the given `extrinsic` and `context_hash`.
fn index(
&mut self,
extrinsic: u32,
@@ -1496,7 +1496,8 @@ pub trait TransactionIndex {
self.storage_index_transaction(extrinsic, &context_hash, size);
}

/// Conduct a 512-bit Keccak hash.
/// Renews the transaction index entry for the given `extrinsic` using the provided
/// `context_hash`.
fn renew(&mut self, extrinsic: u32, context_hash: PassPointerAndReadCopy<[u8; 32], 32>) {
self.storage_renew_transaction_index(extrinsic, &context_hash);
}
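
A hedged usage sketch of the indexing interface above (illustrative only, not part of this diff; the wrapper functions and their names are assumptions). Runtime code passes the extrinsic index, the size of the indexed data, and its content hash to the host:

// Sketch: register an indexed transaction body with the host.
fn index_transaction_body(extrinsic_index: u32, size: u32, content_hash: [u8; 32]) {
    sp_io::transaction_index::index(extrinsic_index, size, content_hash);
}

// Sketch: renew a previously indexed body by referencing its content hash.
fn renew_transaction_body(extrinsic_index: u32, content_hash: [u8; 32]) {
    sp_io::transaction_index::renew(extrinsic_index, content_hash);
}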
43 changes: 30 additions & 13 deletions substrate/primitives/transaction-storage-proof/src/lib.rs
@@ -17,6 +17,8 @@

//! Storage proof primitives. Contains types and basic code to extract storage
//! proofs for indexed transactions.
//!
//! Note: We use `u32` for both `total_chunks` and the chunk index.

#![cfg_attr(not(feature = "std"), no_std)]

@@ -104,7 +106,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider {
mut error: &[u8],
) -> Option<Result<(), Error>> {
if *identifier != INHERENT_IDENTIFIER {
return None
return None;
}

let error = InherentError::decode(&mut error).ok()?;
@@ -114,14 +116,25 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider {
}

/// A utility function to extract a chunk index from the source of randomness.
///
/// Note: The caller must ensure that `total_chunks` is not 0.
pub fn random_chunk(random_hash: &[u8], total_chunks: u32) -> u32 {
let mut buf = [0u8; 8];
buf.copy_from_slice(&random_hash[0..8]);
let random_u64 = u64::from_be_bytes(buf);
(random_u64 % total_chunks as u64) as u32
}

/// A utility function to encode transaction index as trie key.
/// A utility function to calculate the number of chunks.
///
/// * `bytes` - number of bytes
pub fn num_chunks(bytes: u32) -> u32 {
(bytes as u64).div_ceil(CHUNK_SIZE as u64) as u32
}

/// A utility function to encode the transaction index as a trie key.
///
/// * `input` - chunk index.
pub fn encode_index(input: u32) -> Vec<u8> {
codec::Encode::encode(&codec::Compact(input))
}
@@ -163,13 +176,12 @@ pub mod registration {
.saturating_sub(DEFAULT_STORAGE_PERIOD.into());
if number.is_zero() {
// Too early to collect proofs.
return Ok(InherentDataProvider::new(None))
return Ok(InherentDataProvider::new(None));
}

let proof = match client.block_indexed_body(number)? {
Some(transactions) if !transactions.is_empty() =>
Some(build_proof(parent.as_ref(), transactions)?),
Some(_) | None => {
Some(transactions) => Some(build_proof(parent.as_ref(), transactions)?),
None => {
// Nothing was indexed in that block.
None
},
Expand All @@ -182,19 +194,20 @@ pub mod registration {
random_hash: &[u8],
transactions: Vec<Vec<u8>>,
) -> Result<TransactionStorageProof, Error> {
let mut db = sp_trie::MemoryDB::<Hasher>::default();
// Get total chunks, we will need it to generate a random chunk index.
let total_chunks: u32 = transactions.iter().map(|t| num_chunks(t.len() as u32)).sum();
if total_chunks.is_zero() {
const MSG: &str = "No chunks to build proof for.";
return Err(Error::Application(Box::from(MSG)));
}
Comment on lines 203 to 205

Member:

Why is this now an error? This will make the block production fail. Before it was just not providing any proof, which is the sensible thing to do here.

Contributor Author:

> Why is this now an error? This will make the block production fail. Before it was just not providing any proof, which is the sensible thing to do here.

oh, good point, thank you, you're right, my bad, please check here: 04cb7d7

I also tried to remove that `.unwrap()` and change the return type to `Result<Option<..>, ..>`, but I am not sure about the ending (it looks ok-ish and should not happen):

debug_assert!(false, "No chunk matched the target_chunk_index; logic error");
Ok(None)

@bkchr What would you suggest here?

I also talked to @arkpar and added an early return when the chunk is found, since we don't need to process the subsequent transactions.
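
For reference, a minimal sketch of the `Result<Option<..>>` shape floated above, not the merged code. It assumes `num_chunks`, `registration::build_proof` and `TransactionStorageProof` as exposed in this diff, and `sp_inherents::Error` as the error type; `build_proof_opt` is a hypothetical wrapper name:

use sp_transaction_storage_proof::{
    num_chunks, registration::build_proof, TransactionStorageProof,
};

// Sketch: an empty body yields no proof instead of an error, so block
// production is never failed by a block with nothing indexed.
fn build_proof_opt(
    random_hash: &[u8],
    transactions: Vec<Vec<u8>>,
) -> Result<Option<TransactionStorageProof>, sp_inherents::Error> {
    let total_chunks: u32 = transactions.iter().map(|t| num_chunks(t.len() as u32)).sum();
    if total_chunks == 0 {
        return Ok(None);
    }
    build_proof(random_hash, transactions).map(Some)
}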

let target_chunk_index = random_chunk(random_hash.as_ref(), total_chunks);

let mut db = sp_trie::MemoryDB::<Hasher>::default();
let mut target_chunk = None;
let mut target_root = Default::default();
let mut target_chunk_key = Default::default();
let mut chunk_proof = Default::default();

let total_chunks: u64 =
transactions.iter().map(|t| t.len().div_ceil(CHUNK_SIZE) as u64).sum();
let mut buf = [0u8; 8];
buf.copy_from_slice(&random_hash[0..8]);
let random_u64 = u64::from_be_bytes(buf);
let target_chunk_index = random_u64 % total_chunks;
// Generate tries for each transaction.
let mut chunk_index = 0;
for transaction in transactions {
@@ -244,5 +257,9 @@ pub mod registration {
&[(encode_index(0), Some(proof.chunk))],
)
.unwrap();

// Fail for empty transactions/chunks.
assert!(build_proof(&random, vec![]).is_err());
assert!(build_proof(&random, vec![vec![]]).is_err());
}
}