From 1b02b9c2d86eb0768b7ff48940cd7d1a30477638 Mon Sep 17 00:00:00 2001 From: Rez Date: Fri, 28 Nov 2025 02:57:23 +1100 Subject: [PATCH 001/267] sequence manager, service, builder progr simplify Update cache.rs Update cache.rs simplify trait simplify Update sequence.rs simplifies Update lib.rs Update service.rs Simplify simplify --- Cargo.lock | 1 + crates/optimism/flashblocks/Cargo.toml | 3 +- crates/optimism/flashblocks/src/cache.rs | 87 ++++++------ crates/optimism/flashblocks/src/consensus.rs | 34 ++--- crates/optimism/flashblocks/src/lib.rs | 43 +++--- crates/optimism/flashblocks/src/op_impl.rs | 94 +++++++++++++ crates/optimism/flashblocks/src/sequence.rs | 141 ++++++++++--------- crates/optimism/flashblocks/src/service.rs | 58 ++++---- crates/optimism/flashblocks/src/traits.rs | 90 ++++++++++++ crates/optimism/flashblocks/src/worker.rs | 33 +++-- crates/optimism/rpc/src/eth/mod.rs | 9 +- 11 files changed, 399 insertions(+), 194 deletions(-) create mode 100644 crates/optimism/flashblocks/src/op_impl.rs create mode 100644 crates/optimism/flashblocks/src/traits.rs diff --git a/Cargo.lock b/Cargo.lock index 74867a1b269..01b03534c88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9515,6 +9515,7 @@ dependencies = [ "reth-storage-api", "reth-tasks", "ringbuffer", + "serde", "serde_json", "test-case", "tokio", diff --git a/crates/optimism/flashblocks/Cargo.toml b/crates/optimism/flashblocks/Cargo.toml index e0754aab95e..ccaa44dec6e 100644 --- a/crates/optimism/flashblocks/Cargo.toml +++ b/crates/optimism/flashblocks/Cargo.toml @@ -34,11 +34,13 @@ alloy-rpc-types-engine = { workspace = true, features = ["serde"] } alloy-consensus.workspace = true # op-alloy +op-alloy-consensus.workspace = true op-alloy-rpc-types-engine = { workspace = true, features = ["k256"] } # io tokio.workspace = true tokio-tungstenite = { workspace = true, features = ["rustls-tls-native-roots"] } +serde.workspace = true serde_json.workspace = true url.workspace = true futures-util.workspace = true 
@@ -57,4 +59,3 @@ derive_more.workspace = true [dev-dependencies] test-case.workspace = true alloy-consensus.workspace = true -op-alloy-consensus.workspace = true diff --git a/crates/optimism/flashblocks/src/cache.rs b/crates/optimism/flashblocks/src/cache.rs index 9aeed3435e3..abdba3bde9e 100644 --- a/crates/optimism/flashblocks/src/cache.rs +++ b/crates/optimism/flashblocks/src/cache.rs @@ -5,12 +5,13 @@ use crate::{ sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, + traits::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}, worker::BuildArgs, - FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, + FlashBlockCompleteSequence, PendingFlashBlock, }; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; -use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; +use reth_primitives_traits::{NodePrimitives, Recovered}; use reth_revm::cached::CachedReads; use ringbuffer::{AllocRingBuffer, RingBuffer}; use tokio::sync::broadcast; @@ -29,21 +30,21 @@ pub(crate) const FLASHBLOCK_BLOCK_TIME: u64 = 200; /// - Finding the best sequence to build based on local chain tip /// - Broadcasting completed sequences to subscribers #[derive(Debug)] -pub(crate) struct SequenceManager { +pub(crate) struct SequenceManager { /// Current pending sequence being built up from incoming flashblocks - pending: FlashBlockPendingSequence, + pending: FlashBlockPendingSequence

, /// Cached recovered transactions for the pending sequence - pending_transactions: Vec>>, + pending_transactions: Vec>>, /// Ring buffer of recently completed sequences bundled with their decoded transactions (FIFO, /// size 3) - completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence, Vec>>)>, + completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence

, Vec>>)>, /// Broadcast channel for completed sequences - block_broadcaster: broadcast::Sender, + block_broadcaster: broadcast::Sender>, /// Whether to compute state roots when building blocks compute_state_root: bool, } -impl SequenceManager { +impl SequenceManager

{ /// Creates a new sequence manager. pub(crate) fn new(compute_state_root: bool) -> Self { let (block_broadcaster, _) = broadcast::channel(128); @@ -59,12 +60,12 @@ impl SequenceManager { /// Returns the sender half of the flashblock sequence broadcast channel. pub(crate) const fn block_sequence_broadcaster( &self, - ) -> &broadcast::Sender { + ) -> &broadcast::Sender> { &self.block_broadcaster } /// Gets a subscriber to the flashblock sequences produced. - pub(crate) fn subscribe_block_sequence(&self) -> crate::FlashBlockCompleteSequenceRx { + pub(crate) fn subscribe_block_sequence(&self) -> broadcast::Receiver> { self.block_broadcaster.subscribe() } @@ -76,12 +77,12 @@ impl SequenceManager { /// with computed `state_root`. /// /// Transactions are recovered once and cached for reuse during block building. - pub(crate) fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { + pub(crate) fn insert_flashblock(&mut self, flashblock: P) -> eyre::Result<()> { // If this starts a new block, finalize and cache the previous sequence BEFORE inserting - if flashblock.index == 0 && self.pending.count() > 0 { + if flashblock.index() == 0 && self.pending.count() > 0 { let completed = self.pending.finalize()?; let block_number = completed.block_number(); - let parent_hash = completed.payload_base().parent_hash; + let parent_hash = completed.payload_base().parent_hash(); trace!( target: "flashblocks", @@ -114,7 +115,7 @@ impl SequenceManager { } /// Returns the current pending sequence for inspection. - pub(crate) const fn pending(&self) -> &FlashBlockPendingSequence { + pub(crate) const fn pending(&self) -> &FlashBlockPendingSequence

{ &self.pending } @@ -129,21 +130,21 @@ impl SequenceManager { &mut self, local_tip_hash: B256, local_tip_timestamp: u64, - ) -> Option>>>> { + ) -> Option>>, P::Base>> { // Try to find a buildable sequence: (base, last_fb, transactions, cached_state, // source_name) let (base, last_flashblock, transactions, cached_state, source_name) = // Priority 1: Try current pending sequence - if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash == local_tip_hash) { - let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash, r)); - let last_fb = self.pending.last_flashblock()?; + if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash() == local_tip_hash) { + let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash(), r)); + let last_fb = self.pending.last_flashblock()?.clone(); let transactions = self.pending_transactions.clone(); (base, last_fb, transactions, cached_state, "pending") } // Priority 2: Try cached sequence with exact parent match - else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash == local_tip_hash) { + else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash() == local_tip_hash) { let base = cached.payload_base().clone(); - let last_fb = cached.last(); + let last_fb = cached.last().clone(); let transactions = txs.clone(); let cached_state = None; (base, last_fb, transactions, cached_state, "cached") @@ -179,20 +180,20 @@ impl SequenceManager { // compute the state root, causing FlashblockConsensusClient to lack precomputed state for // engine_newPayload. This is safe: we still have op-node as backstop to maintain // chain progression. 
- let block_time_ms = (base.timestamp - local_tip_timestamp) * 1000; + let block_time_ms = (base.timestamp() - local_tip_timestamp) * 1000; let expected_final_flashblock = block_time_ms / FLASHBLOCK_BLOCK_TIME; let compute_state_root = self.compute_state_root && - last_flashblock.diff.state_root.is_zero() && - last_flashblock.index >= expected_final_flashblock.saturating_sub(1); + last_flashblock.diff().state_root().is_zero() && + last_flashblock.index() >= expected_final_flashblock.saturating_sub(1); trace!( target: "flashblocks", - block_number = base.block_number, + block_number = base.block_number(), source = source_name, - flashblock_index = last_flashblock.index, + flashblock_index = last_flashblock.index(), expected_final_flashblock, compute_state_root_enabled = self.compute_state_root, - state_root_is_zero = last_flashblock.diff.state_root.is_zero(), + state_root_is_zero = last_flashblock.diff().state_root().is_zero(), will_compute_state_root = compute_state_root, "Building from flashblock sequence" ); @@ -201,8 +202,8 @@ impl SequenceManager { base, transactions, cached_state, - last_flashblock_index: last_flashblock.index, - last_flashblock_hash: last_flashblock.diff.block_hash, + last_flashblock_index: last_flashblock.index(), + last_flashblock_hash: last_flashblock.diff().block_hash(), compute_state_root, }) } @@ -227,7 +228,7 @@ impl SequenceManager { }); // Update pending sequence with execution results - if self.pending.payload_base().is_some_and(|base| base.parent_hash == parent_hash) { + if self.pending.payload_base().is_some_and(|base| base.parent_hash() == parent_hash) { self.pending.set_execution_outcome(execution_outcome); self.pending.set_cached_reads(cached_reads); trace!( @@ -241,7 +242,7 @@ impl SequenceManager { else if let Some((cached, _)) = self .completed_cache .iter_mut() - .find(|(c, _)| c.payload_base().parent_hash == parent_hash) + .find(|(c, _)| c.payload_base().parent_hash() == parent_hash) { // Only re-broadcast if we computed 
new information (state_root was missing). // If sequencer already provided state_root, we already broadcast in insert_flashblock, @@ -267,18 +268,18 @@ impl SequenceManager { mod tests { use super::*; use crate::test_utils::TestFlashBlockFactory; + use crate::FlashBlock; use alloy_primitives::B256; - use op_alloy_consensus::OpTxEnvelope; #[test] fn test_sequence_manager_new() { - let manager: SequenceManager = SequenceManager::new(true); + let manager: SequenceManager = SequenceManager::new(true); assert_eq!(manager.pending().count(), 0); } #[test] fn test_insert_flashblock_creates_pending_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -290,7 +291,7 @@ mod tests { #[test] fn test_insert_flashblock_caches_completed_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let factory = TestFlashBlockFactory::new(); // Build first sequence @@ -314,7 +315,7 @@ mod tests { #[test] fn test_next_buildable_args_returns_none_when_empty() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let local_tip_hash = B256::random(); let local_tip_timestamp = 1000; @@ -324,7 +325,7 @@ mod tests { #[test] fn test_next_buildable_args_matches_pending_parent() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -340,7 +341,7 @@ mod tests { #[test] fn test_next_buildable_args_returns_none_when_parent_mismatch() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let factory = 
TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -354,7 +355,7 @@ mod tests { #[test] fn test_next_buildable_args_prefers_pending_over_cached() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let factory = TestFlashBlockFactory::new(); // Create and finalize first sequence @@ -373,7 +374,7 @@ mod tests { #[test] fn test_next_buildable_args_finds_cached_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let factory = TestFlashBlockFactory::new(); // Build and cache first sequence @@ -396,7 +397,7 @@ mod tests { #[test] fn test_compute_state_root_logic_near_expected_final() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let block_time = 2u64; let factory = TestFlashBlockFactory::new().with_block_time(block_time); @@ -420,7 +421,7 @@ mod tests { #[test] fn test_no_compute_state_root_when_provided_by_sequencer() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let block_time = 2u64; let factory = TestFlashBlockFactory::new().with_block_time(block_time); @@ -437,7 +438,7 @@ mod tests { #[test] fn test_no_compute_state_root_when_disabled() { - let mut manager: SequenceManager = SequenceManager::new(false); + let mut manager: SequenceManager = SequenceManager::new(false); let block_time = 2u64; let factory = TestFlashBlockFactory::new().with_block_time(block_time); @@ -461,7 +462,7 @@ mod tests { #[test] fn test_cache_ring_buffer_evicts_oldest() { - let mut manager: SequenceManager = SequenceManager::new(true); + let mut manager: SequenceManager = SequenceManager::new(true); let factory = TestFlashBlockFactory::new(); // Fill cache with 4 sequences (cache size is 3, so oldest should be evicted) diff --git 
a/crates/optimism/flashblocks/src/consensus.rs b/crates/optimism/flashblocks/src/consensus.rs index 0b502c07387..dce248e0bad 100644 --- a/crates/optimism/flashblocks/src/consensus.rs +++ b/crates/optimism/flashblocks/src/consensus.rs @@ -1,7 +1,9 @@ -use crate::{FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx}; +use crate::{ + traits::FlashblockPayloadBase, FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, +}; use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadStatusEnum; -use op_alloy_rpc_types_engine::OpExecutionData; +use op_alloy_rpc_types_engine::{OpExecutionData, OpFlashblockPayload}; use reth_engine_primitives::ConsensusEngineHandle; use reth_optimism_payload_builder::OpPayloadTypes; use reth_payload_primitives::{EngineApiMessageVersion, ExecutionPayload, PayloadTypes}; @@ -22,18 +24,18 @@ where /// Handle to execution client. engine_handle: ConsensusEngineHandle

, /// Receiver for completed flashblock sequences from `FlashBlockService`. - sequence_receiver: FlashBlockCompleteSequenceRx, + sequence_receiver: FlashBlockCompleteSequenceRx, } impl

FlashBlockConsensusClient

where P: PayloadTypes, - P::ExecutionData: for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, + P::ExecutionData: for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, { /// Create a new `FlashBlockConsensusClient` with the given Op engine and sequence receiver. pub const fn new( engine_handle: ConsensusEngineHandle

, - sequence_receiver: FlashBlockCompleteSequenceRx, + sequence_receiver: FlashBlockCompleteSequenceRx, ) -> eyre::Result { Ok(Self { engine_handle, sequence_receiver }) } @@ -44,12 +46,12 @@ where /// in which case this returns the `parent_hash` instead to drive the chain forward. /// /// Returns the block hash to use for FCU (either the new block or parent). - async fn submit_new_payload(&self, sequence: &FlashBlockCompleteSequence) -> B256 { + async fn submit_new_payload(&self, sequence: &FlashBlockCompleteSequence) -> B256 { let payload = match P::ExecutionData::try_from(sequence) { Ok(payload) => payload, Err(err) => { trace!(target: "flashblocks", %err, "Failed payload conversion, using parent hash"); - return sequence.payload_base().parent_hash; + return sequence.payload_base().parent_hash(); } }; @@ -93,11 +95,11 @@ where async fn submit_forkchoice_update( &self, head_block_hash: B256, - sequence: &FlashBlockCompleteSequence, + sequence: &FlashBlockCompleteSequence, ) { let block_number = sequence.block_number(); - let safe_hash = sequence.payload_base().parent_hash; - let finalized_hash = sequence.payload_base().parent_hash; + let safe_hash = sequence.payload_base().parent_hash(); + let finalized_hash = sequence.payload_base().parent_hash(); let fcu_state = alloy_rpc_types_engine::ForkchoiceState { head_block_hash, safe_block_hash: safe_hash, @@ -157,10 +159,10 @@ where } } -impl TryFrom<&FlashBlockCompleteSequence> for OpExecutionData { +impl TryFrom<&FlashBlockCompleteSequence> for OpExecutionData { type Error = &'static str; - fn try_from(sequence: &FlashBlockCompleteSequence) -> Result { + fn try_from(sequence: &FlashBlockCompleteSequence) -> Result { let mut data = Self::from_flashblocks_unchecked(sequence); // If execution outcome is available, use the computed state_root and block_hash. 
@@ -320,7 +322,7 @@ mod tests { assert!(conversion_result.is_err()); // In the actual run loop, submit_new_payload would return parent_hash - assert_eq!(sequence.payload_base().parent_hash, parent_hash); + assert_eq!(sequence.payload_base().parent_hash(), parent_hash); } #[test] @@ -357,7 +359,7 @@ mod tests { let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); // Verify the expected forkchoice state - assert_eq!(sequence.payload_base().parent_hash, parent_hash); + assert_eq!(sequence.payload_base().parent_hash(), parent_hash); } #[test] @@ -389,7 +391,7 @@ mod tests { let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); // The head_block_hash for FCU would be parent_hash (fallback) - assert_eq!(sequence.payload_base().parent_hash, parent_hash); + assert_eq!(sequence.payload_base().parent_hash(), parent_hash); } } @@ -426,7 +428,7 @@ mod tests { assert!(conversion.is_err()); // But FCU should still happen with parent_hash - assert!(sequence.payload_base().parent_hash != B256::ZERO); + assert!(sequence.payload_base().parent_hash() != B256::ZERO); } #[test] diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index 6c5d9c1e86e..14e81c4dd27 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -14,6 +14,11 @@ use std::sync::Arc; // Included to enable serde feature for OpReceipt type used transitively use reth_optimism_primitives as _; +pub mod traits; +pub use traits::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}; + +mod op_impl; + mod consensus; pub use consensus::FlashBlockConsensusClient; @@ -21,7 +26,7 @@ mod payload; pub use payload::{FlashBlock, PendingFlashBlock}; mod sequence; -pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence}; +pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome}; mod service; pub use service::{FlashBlockBuildInfo, 
FlashBlockService}; @@ -36,47 +41,41 @@ mod test_utils; mod ws; pub use ws::{WsConnect, WsFlashBlockStream}; -/// Receiver of the most recent [`PendingFlashBlock`] built out of [`FlashBlock`]s. -/// -/// [`FlashBlock`]: crate::FlashBlock +/// Receiver of the most recent [`PendingFlashBlock`] built out of flashblocks. pub type PendingBlockRx = tokio::sync::watch::Receiver>>; -/// Receiver of the sequences of [`FlashBlock`]s built. -/// -/// [`FlashBlock`]: crate::FlashBlock -pub type FlashBlockCompleteSequenceRx = - tokio::sync::broadcast::Receiver; +/// Receiver of the sequences of flashblocks built (using OP payload type). +pub type FlashBlockCompleteSequenceRx

= + tokio::sync::broadcast::Receiver>; -/// Receiver of received [`FlashBlock`]s from the (websocket) subscription. -/// -/// [`FlashBlock`]: crate::FlashBlock +/// Receiver of received flashblocks from the (websocket) subscription. pub type FlashBlockRx = tokio::sync::broadcast::Receiver>; -/// Receiver that signals whether a [`FlashBlock`] is currently being built. +/// Receiver that signals whether a flashblock is currently being built. pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; /// Container for all flashblocks-related listeners. /// /// Groups together the channels for flashblock-related updates. #[derive(Debug)] -pub struct FlashblocksListeners { - /// Receiver of the most recent executed [`PendingFlashBlock`] built out of [`FlashBlock`]s. +pub struct FlashblocksListeners { + /// Receiver of the most recent executed pending block built from flashblocks. pub pending_block_rx: PendingBlockRx, - /// Subscription channel of the complete sequences of [`FlashBlock`]s built. - pub flashblocks_sequence: tokio::sync::broadcast::Sender, - /// Receiver that signals whether a [`FlashBlock`] is currently being built. + /// Subscription channel of the complete sequences of flashblocks built. + pub flashblocks_sequence: tokio::sync::broadcast::Sender>, + /// Receiver that signals whether a flashblock is currently being built. pub in_progress_rx: InProgressFlashBlockRx, /// Subscription channel for received flashblocks from the (websocket) connection. - pub received_flashblocks: tokio::sync::broadcast::Sender>, + pub received_flashblocks: tokio::sync::broadcast::Sender>, } -impl FlashblocksListeners { +impl FlashblocksListeners { /// Creates a new [`FlashblocksListeners`] with the given channels. 
pub const fn new( pending_block_rx: PendingBlockRx, - flashblocks_sequence: tokio::sync::broadcast::Sender, + flashblocks_sequence: tokio::sync::broadcast::Sender>, in_progress_rx: InProgressFlashBlockRx, - received_flashblocks: tokio::sync::broadcast::Sender>, + received_flashblocks: tokio::sync::broadcast::Sender>, ) -> Self { Self { pending_block_rx, flashblocks_sequence, in_progress_rx, received_flashblocks } } diff --git a/crates/optimism/flashblocks/src/op_impl.rs b/crates/optimism/flashblocks/src/op_impl.rs new file mode 100644 index 00000000000..a37aedceb15 --- /dev/null +++ b/crates/optimism/flashblocks/src/op_impl.rs @@ -0,0 +1,94 @@ +//! Optimism implementation of flashblock traits. + +use crate::traits::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}; +use alloy_consensus::crypto::RecoveryError; +use alloy_eips::{eip2718::WithEncoded, eip4895::Withdrawals}; +use alloy_primitives::{Bloom, Bytes, B256}; +use alloy_rpc_types_engine::PayloadId; +use op_alloy_consensus::OpTxEnvelope; +use op_alloy_rpc_types_engine::{ + OpFlashblockPayload, OpFlashblockPayloadBase, OpFlashblockPayloadDelta, +}; +use reth_primitives_traits::Recovered; + +impl FlashblockPayloadBase for OpFlashblockPayloadBase { + fn parent_hash(&self) -> B256 { + self.parent_hash + } + + fn block_number(&self) -> u64 { + self.block_number + } + + fn timestamp(&self) -> u64 { + self.timestamp + } +} + +impl FlashblockDiff for OpFlashblockPayloadDelta { + fn block_hash(&self) -> B256 { + self.block_hash + } + + fn state_root(&self) -> B256 { + self.state_root + } + + fn gas_used(&self) -> u64 { + self.gas_used + } + + fn logs_bloom(&self) -> &Bloom { + &self.logs_bloom + } + + fn receipts_root(&self) -> B256 { + self.receipts_root + } + + fn transactions_raw(&self) -> &[Bytes] { + &self.transactions + } + + fn withdrawals(&self) -> Option<&Withdrawals> { + // OpFlashblockPayloadDelta stores Vec, not Withdrawals newtype + // This method isn't currently used in the flashblocks 
infrastructure + None + } + + fn withdrawals_root(&self) -> Option { + Some(self.withdrawals_root) + } +} + +impl FlashblockPayload for OpFlashblockPayload { + type Base = OpFlashblockPayloadBase; + type Diff = OpFlashblockPayloadDelta; + type SignedTx = OpTxEnvelope; + + fn index(&self) -> u64 { + self.index + } + + fn payload_id(&self) -> PayloadId { + self.payload_id + } + + fn base(&self) -> Option { + self.base.clone() + } + + fn diff(&self) -> &Self::Diff { + &self.diff + } + + fn block_number(&self) -> u64 { + OpFlashblockPayload::block_number(self) + } + + fn recover_transactions( + &self, + ) -> impl Iterator>, RecoveryError>> { + OpFlashblockPayload::recover_transactions::(self) + } +} diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index abf9e6d514c..a9e1cec5e52 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -1,9 +1,8 @@ -use crate::{FlashBlock, FlashBlockCompleteSequenceRx}; +use crate::traits::FlashblockPayload; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_engine::PayloadId; use core::mem; use eyre::{bail, OptionExt}; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_revm::cached::CachedReads; use std::{collections::BTreeMap, ops::Deref}; use tokio::sync::broadcast; @@ -21,23 +20,23 @@ pub struct SequenceExecutionOutcome { pub state_root: B256, } -/// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. +/// An ordered B-tree keeping the track of a sequence of flashblocks by their indices. #[derive(Debug)] -pub struct FlashBlockPendingSequence { +pub struct FlashBlockPendingSequence { /// tracks the individual flashblocks in order - inner: BTreeMap, + inner: BTreeMap, /// Broadcasts flashblocks to subscribers. - block_broadcaster: broadcast::Sender, + block_broadcaster: broadcast::Sender>, /// Optional execution outcome from building the current sequence. 
execution_outcome: Option, /// Cached state reads for the current block. - /// Current `PendingFlashBlock` is built out of a sequence of `FlashBlocks`, and executed again + /// Current `PendingFlashBlock` is built out of a sequence of flashblocks, and executed again /// when fb received on top of the same block. Avoid redundant I/O across multiple /// executions within the same block. cached_reads: Option, } -impl FlashBlockPendingSequence { +impl FlashBlockPendingSequence

{ /// Create a new pending sequence. pub fn new() -> Self { // Note: if the channel is full, send will not block but rather overwrite the oldest @@ -54,35 +53,35 @@ impl FlashBlockPendingSequence { /// Returns the sender half of the [`FlashBlockCompleteSequence`] channel. pub const fn block_sequence_broadcaster( &self, - ) -> &broadcast::Sender { + ) -> &broadcast::Sender> { &self.block_broadcaster } /// Gets a subscriber to the flashblock sequences produced. - pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { + pub fn subscribe_block_sequence(&self) -> broadcast::Receiver> { self.block_broadcaster.subscribe() } /// Inserts a new block into the sequence. /// - /// A [`FlashBlock`] with index 0 resets the set. - pub fn insert(&mut self, flashblock: FlashBlock) { - if flashblock.index == 0 { + /// A flashblock with index 0 resets the set. + pub fn insert(&mut self, flashblock: P) { + if flashblock.index() == 0 { trace!(target: "flashblocks", number=%flashblock.block_number(), "Tracking new flashblock sequence"); - self.inner.insert(flashblock.index, flashblock); + self.inner.insert(flashblock.index(), flashblock); return; } // only insert if we previously received the same block and payload, assume we received // index 0 let same_block = self.block_number() == Some(flashblock.block_number()); - let same_payload = self.payload_id() == Some(flashblock.payload_id); + let same_payload = self.payload_id() == Some(flashblock.payload_id()); if same_block && same_payload { - trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); - self.inner.insert(flashblock.index, flashblock); + trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index(), block_count = self.inner.len() ,"Received followup flashblock"); + self.inner.insert(flashblock.index(), flashblock); } else { - trace!(target: "flashblocks", 
number=%flashblock.block_number(), index = %flashblock.index, current=?self.block_number() ,"Ignoring untracked flashblock following"); + trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index(), current=?self.block_number() ,"Ignoring untracked flashblock following"); } } @@ -110,8 +109,8 @@ impl FlashBlockPendingSequence { } /// Returns the payload base of the first tracked flashblock. - pub fn payload_base(&self) -> Option { - self.inner.values().next()?.base.clone() + pub fn payload_base(&self) -> Option { + self.inner.values().next()?.base() } /// Returns the number of tracked flashblocks. @@ -120,23 +119,24 @@ impl FlashBlockPendingSequence { } /// Returns the reference to the last flashblock. - pub fn last_flashblock(&self) -> Option<&FlashBlock> { + pub fn last_flashblock(&self) -> Option<&P> { self.inner.last_key_value().map(|(_, b)| b) } /// Returns the current/latest flashblock index in the sequence pub fn index(&self) -> Option { - Some(self.inner.values().last()?.index) + Some(self.inner.values().last()?.index()) } + /// Returns the payload id of the first tracked flashblock in the current sequence. pub fn payload_id(&self) -> Option { - Some(self.inner.values().next()?.payload_id) + Some(self.inner.values().next()?.payload_id()) } /// Finalizes the current pending sequence and returns it as a complete sequence. /// /// Clears the internal state and returns an error if the sequence is empty or validation fails. - pub fn finalize(&mut self) -> eyre::Result { + pub fn finalize(&mut self) -> eyre::Result> { if self.inner.is_empty() { bail!("Cannot finalize empty flashblock sequence"); } @@ -149,12 +149,12 @@ impl FlashBlockPendingSequence { } /// Returns an iterator over all flashblocks in the sequence. - pub fn flashblocks(&self) -> impl Iterator { + pub fn flashblocks(&self) -> impl Iterator { self.inner.values() } } -impl Default for FlashBlockPendingSequence { +impl Default for FlashBlockPendingSequence

{ fn default() -> Self { Self::new() } @@ -166,31 +166,31 @@ impl Default for FlashBlockPendingSequence { /// If this entire sequence of flashblocks was executed on top of latest block, this also includes /// the execution outcome with block hash and state root. #[derive(Debug, Clone)] -pub struct FlashBlockCompleteSequence { - inner: Vec, +pub struct FlashBlockCompleteSequence { + inner: Vec

, /// Optional execution outcome from building the flashblock sequence execution_outcome: Option, } -impl FlashBlockCompleteSequence { +impl FlashBlockCompleteSequence

{ /// Create a complete sequence from a vector of flashblocks. /// Ensure that: /// * vector is not empty /// * first flashblock have the base payload /// * sequence of flashblocks is sound (successive index from 0, same payload id, ...) pub fn new( - blocks: Vec, + blocks: Vec

, execution_outcome: Option, ) -> eyre::Result { let first_block = blocks.first().ok_or_eyre("No flashblocks in sequence")?; // Ensure that first flashblock have base - first_block.base.as_ref().ok_or_eyre("Flashblock at index 0 has no base")?; + first_block.base().ok_or_eyre("Flashblock at index 0 has no base")?; // Ensure that index are successive from 0, have same block number and payload id if !blocks.iter().enumerate().all(|(idx, block)| { - idx == block.index as usize && - block.payload_id == first_block.payload_id && + idx == block.index() as usize && + block.payload_id() == first_block.payload_id() && block.block_number() == first_block.block_number() }) { bail!("Flashblock inconsistencies detected in sequence"); @@ -205,17 +205,17 @@ impl FlashBlockCompleteSequence { } /// Returns the payload base of the first flashblock. - pub fn payload_base(&self) -> &OpFlashblockPayloadBase { - self.inner.first().unwrap().base.as_ref().unwrap() + pub fn payload_base(&self) -> P::Base { + self.inner.first().unwrap().base().unwrap() } /// Returns the number of flashblocks in the sequence. - pub const fn count(&self) -> usize { + pub fn count(&self) -> usize { self.inner.len() } /// Returns the last flashblock in the sequence. - pub fn last(&self) -> &FlashBlock { + pub fn last(&self) -> &P { self.inner.last().unwrap() } @@ -234,21 +234,30 @@ impl FlashBlockCompleteSequence { /// Returns all transactions from all flashblocks in the sequence pub fn all_transactions(&self) -> Vec { - self.inner.iter().flat_map(|fb| fb.diff.transactions.iter().cloned()).collect() + use crate::traits::FlashblockDiff; + self.inner + .iter() + .flat_map(|fb| fb.diff().transactions_raw().iter().cloned()) + .collect() + } + + /// Returns an iterator over all flashblocks in the sequence. + pub fn flashblocks(&self) -> impl Iterator { + self.inner.iter() } } -impl Deref for FlashBlockCompleteSequence { - type Target = Vec; +impl Deref for FlashBlockCompleteSequence

{ + type Target = Vec

; fn deref(&self) -> &Self::Target { &self.inner } } -impl TryFrom for FlashBlockCompleteSequence { +impl TryFrom> for FlashBlockCompleteSequence

{ type Error = eyre::Error; - fn try_from(sequence: FlashBlockPendingSequence) -> Result { + fn try_from(sequence: FlashBlockPendingSequence

) -> Result { Self::new(sequence.inner.into_values().collect(), sequence.execution_outcome) } } @@ -256,14 +265,15 @@ impl TryFrom for FlashBlockCompleteSequence { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::TestFlashBlockFactory; + use crate::{test_utils::TestFlashBlockFactory, FlashBlock}; mod pending_sequence_insert { use super::*; #[test] fn test_insert_index_zero_creates_new_sequence() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); let payload_id = fb0.payload_id; @@ -277,7 +287,8 @@ mod tests { #[test] fn test_insert_followup_same_block_and_payload() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -295,7 +306,7 @@ mod tests { #[test] fn test_insert_ignores_different_block_number() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -311,7 +322,7 @@ mod tests { #[test] fn test_insert_ignores_different_payload_id() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -329,7 +340,7 @@ mod tests { #[test] fn test_insert_maintains_btree_order() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -341,7 +352,7 @@ mod tests { let fb1 = 
factory.flashblock_after(&fb0).build(); sequence.insert(fb1); - let indices: Vec = sequence.flashblocks().map(|fb| fb.index).collect(); + let indices: Vec = sequence.flashblocks().map(|fb| fb.index()).collect(); assert_eq!(indices, vec![0, 1, 2]); } } @@ -351,7 +362,7 @@ mod tests { #[test] fn test_finalize_empty_sequence_fails() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let result = sequence.finalize(); assert!(result.is_err()); @@ -363,7 +374,7 @@ mod tests { #[test] fn test_finalize_clears_pending_state() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -380,7 +391,7 @@ mod tests { #[test] fn test_finalize_preserves_execution_outcome() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -397,7 +408,7 @@ mod tests { #[test] fn test_finalize_clears_cached_reads() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -415,7 +426,7 @@ mod tests { #[test] fn test_finalize_multiple_times_after_refill() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); // First sequence @@ -440,7 +451,7 @@ mod tests { #[test] fn test_new_empty_sequence_fails() { - let result = FlashBlockCompleteSequence::new(vec![], None); + let result = FlashBlockCompleteSequence::::new(vec![], None); assert!(result.is_err()); 
assert_eq!(result.unwrap_err().to_string(), "No flashblocks in sequence"); } @@ -532,7 +543,7 @@ mod tests { let complete = result.unwrap(); assert_eq!(complete.count(), 3); - assert_eq!(complete.last().index, 2); + assert_eq!(complete.last().index(), 2); } #[test] @@ -605,28 +616,28 @@ mod tests { #[test] fn test_try_from_pending_to_complete_valid() { - let mut pending = FlashBlockPendingSequence::new(); + let mut pending: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); pending.insert(fb0); - let complete: Result = pending.try_into(); + let complete: Result, _> = pending.try_into(); assert!(complete.is_ok()); assert_eq!(complete.unwrap().count(), 1); } #[test] fn test_try_from_pending_to_complete_empty_fails() { - let pending = FlashBlockPendingSequence::new(); + let pending: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); - let complete: Result = pending.try_into(); + let complete: Result, _> = pending.try_into(); assert!(complete.is_err()); } #[test] fn test_try_from_preserves_execution_outcome() { - let mut pending = FlashBlockPendingSequence::new(); + let mut pending: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -636,7 +647,7 @@ mod tests { SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; pending.set_execution_outcome(Some(outcome)); - let complete: FlashBlockCompleteSequence = pending.try_into().unwrap(); + let complete: FlashBlockCompleteSequence = pending.try_into().unwrap(); assert_eq!(complete.execution_outcome(), Some(outcome)); } } @@ -646,7 +657,7 @@ mod tests { #[test] fn test_last_flashblock_returns_highest_index() { - let mut sequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); 
let fb0 = factory.flashblock_at(0).build(); @@ -656,12 +667,12 @@ mod tests { sequence.insert(fb1); let last = sequence.last_flashblock().unwrap(); - assert_eq!(last.index, 1); + assert_eq!(last.index(), 1); } #[test] fn test_subscribe_block_sequence_channel() { - let sequence = FlashBlockPendingSequence::new(); + let sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); let mut rx = sequence.subscribe_block_sequence(); // Spawn a task that sends a complete sequence diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs index 4eed74683f7..904d6c757dd 100644 --- a/crates/optimism/flashblocks/src/service.rs +++ b/crates/optimism/flashblocks/src/service.rs @@ -1,11 +1,12 @@ use crate::{ - cache::SequenceManager, worker::FlashBlockBuilder, FlashBlock, FlashBlockCompleteSequence, - FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, PendingFlashBlock, + cache::SequenceManager, + traits::{FlashblockPayload, FlashblockPayloadBase}, + worker::FlashBlockBuilder, + FlashBlockCompleteSequence, InProgressFlashBlockRx, PendingFlashBlock, }; use alloy_primitives::B256; use futures_util::{FutureExt, Stream, StreamExt}; use metrics::{Gauge, Histogram}; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_evm::ConfigureEvm; use reth_metrics::Metrics; use reth_primitives_traits::{AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy}; @@ -13,43 +14,44 @@ use reth_revm::cached::CachedReads; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskExecutor; use std::{sync::Arc, time::Instant}; -use tokio::sync::{oneshot, watch}; +use tokio::sync::{broadcast, oneshot, watch}; use tracing::*; /// The `FlashBlockService` maintains an in-memory [`PendingFlashBlock`] built out of a sequence of -/// [`FlashBlock`]s. +/// flashblocks. 
#[derive(Debug)] -pub struct FlashBlockService< +pub struct FlashBlockService +where N: NodePrimitives, - S, - EvmConfig: ConfigureEvm + Unpin>, - Provider, -> { + P: FlashblockPayload, + EvmConfig: ConfigureEvm + Unpin>, +{ /// Incoming flashblock stream. incoming_flashblock_rx: S, /// Signals when a block build is in progress. in_progress_tx: watch::Sender>, /// Broadcast channel to forward received flashblocks from the subscription. - received_flashblocks_tx: tokio::sync::broadcast::Sender>, + received_flashblocks_tx: broadcast::Sender>, /// Executes flashblock sequences to build pending blocks. - builder: FlashBlockBuilder, + builder: FlashBlockBuilder, /// Task executor for spawning block build jobs. spawner: TaskExecutor, /// Currently running block build job with start time and result receiver. job: Option>, /// Manages flashblock sequences with caching and intelligent build selection. - sequences: SequenceManager, + sequences: SequenceManager

, /// `FlashBlock` service's metrics metrics: FlashBlockServiceMetrics, } -impl FlashBlockService +impl FlashBlockService where N: NodePrimitives, - S: Stream> + Unpin + 'static, - EvmConfig: ConfigureEvm + Unpin> + P: FlashblockPayload, + S: Stream> + Unpin + 'static, + EvmConfig: ConfigureEvm + Unpin> + Clone + 'static, Provider: StateProviderFactory @@ -62,7 +64,7 @@ where + Clone + 'static, { - /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. + /// Constructs a new `FlashBlockService` that receives flashblocks from `rx` stream. pub fn new( incoming_flashblock_rx: S, evm_config: EvmConfig, @@ -71,7 +73,7 @@ where compute_state_root: bool, ) -> Self { let (in_progress_tx, _) = watch::channel(None); - let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); + let (received_flashblocks_tx, _) = broadcast::channel(128); Self { incoming_flashblock_rx, in_progress_tx, @@ -85,21 +87,17 @@ where } /// Returns the sender half to the received flashblocks. - pub const fn flashblocks_broadcaster( - &self, - ) -> &tokio::sync::broadcast::Sender> { + pub const fn flashblocks_broadcaster(&self) -> &broadcast::Sender> { &self.received_flashblocks_tx } /// Returns the sender half to the flashblock sequence. - pub const fn block_sequence_broadcaster( - &self, - ) -> &tokio::sync::broadcast::Sender { + pub const fn block_sequence_broadcaster(&self) -> &broadcast::Sender> { self.sequences.block_sequence_broadcaster() } /// Returns a subscriber to the flashblock sequence. - pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { + pub fn subscribe_block_sequence(&self) -> broadcast::Receiver> { self.sequences.subscribe_block_sequence() } @@ -181,10 +179,10 @@ where /// Processes a single flashblock: notifies subscribers, records metrics, and inserts into /// sequence. 
- fn process_flashblock(&mut self, flashblock: FlashBlock) { + fn process_flashblock(&mut self, flashblock: P) { self.notify_received_flashblock(&flashblock); - if flashblock.index == 0 { + if flashblock.index() == 0 { self.metrics.last_flashblock_length.record(self.sequences.pending().count() as f64); } @@ -194,7 +192,7 @@ where } /// Notifies all subscribers about the received flashblock. - fn notify_received_flashblock(&self, flashblock: &FlashBlock) { + fn notify_received_flashblock(&self, flashblock: &P) { if self.received_flashblocks_tx.receiver_count() > 0 { let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); } @@ -217,9 +215,9 @@ where // Spawn build job let fb_info = FlashBlockBuildInfo { - parent_hash: args.base.parent_hash, + parent_hash: args.base.parent_hash(), index: args.last_flashblock_index, - block_number: args.base.block_number, + block_number: args.base.block_number(), }; self.metrics.current_block_height.set(fb_info.block_number as f64); self.metrics.current_index.set(fb_info.index as f64); diff --git a/crates/optimism/flashblocks/src/traits.rs b/crates/optimism/flashblocks/src/traits.rs new file mode 100644 index 00000000000..49dde64cb56 --- /dev/null +++ b/crates/optimism/flashblocks/src/traits.rs @@ -0,0 +1,90 @@ +//! Generic traits for flashblock payloads. +//! +//! These traits enable chain-specific flashblock implementations while sharing +//! the core flashblock infrastructure. + +use alloy_consensus::crypto::RecoveryError; +use alloy_eips::eip4895::Withdrawals; +use alloy_primitives::{Bloom, Bytes, B256}; +use alloy_rpc_types_engine::PayloadId; + +/// Base payload information for constructing block environment. +/// +/// Contains all fields needed to configure EVM execution context for the next block. +/// This is present only on the first flashblock (index 0) of a sequence. +pub trait FlashblockPayloadBase: Clone + Send + Sync + std::fmt::Debug + 'static { + /// Parent block hash. 
+ fn parent_hash(&self) -> B256; + /// Block number being built. + fn block_number(&self) -> u64; + /// Block timestamp. + fn timestamp(&self) -> u64; +} + +/// State diff from flashblock execution. +/// +/// Contains the cumulative state changes from executing transactions in this flashblock. +pub trait FlashblockDiff: Clone + Send + Sync + std::fmt::Debug + 'static { + /// Block hash after applying this flashblock. + fn block_hash(&self) -> B256; + /// State root after applying this flashblock. + fn state_root(&self) -> B256; + /// Cumulative gas used. + fn gas_used(&self) -> u64; + /// Bloom filter for logs. + fn logs_bloom(&self) -> &Bloom; + /// Receipts root. + fn receipts_root(&self) -> B256; + /// Raw encoded transactions in this flashblock. + fn transactions_raw(&self) -> &[Bytes]; + + /// Withdrawals included in this flashblock. + fn withdrawals(&self) -> Option<&Withdrawals> { + None + } + + /// Withdrawals root. + fn withdrawals_root(&self) -> Option { + None + } +} + +/// A flashblock payload representing one slice of a block. +/// +/// Flashblocks are incremental updates to block state, allowing for faster +/// pre-confirmations. A complete block is built from a sequence of flashblocks. +pub trait FlashblockPayload: + Clone + Send + Sync + std::fmt::Debug + for<'de> serde::Deserialize<'de> + 'static +{ + /// The base payload type containing block environment configuration. + type Base: FlashblockPayloadBase; + /// The diff type containing state changes. + type Diff: FlashblockDiff; + /// The signed transaction type for this chain. + type SignedTx: reth_primitives_traits::SignedTransaction; + + /// Sequential index of this flashblock within the current block's sequence. + fn index(&self) -> u64; + + /// Unique identifier for the payload being built. + fn payload_id(&self) -> PayloadId; + + /// Base payload (only present on index 0). + fn base(&self) -> Option; + + /// State diff for this flashblock. 
+ fn diff(&self) -> &Self::Diff; + + /// Block number this flashblock belongs to. + fn block_number(&self) -> u64 { + self.base().map(|b| b.block_number()).unwrap_or(0) + } + + /// Recovers transactions from the raw transaction bytes in this flashblock. + /// + /// Each item is a result containing either the recovered transaction with its encoding, + /// or an error if decoding/recovery failed. + fn recover_transactions( + &self, + ) -> impl Iterator>, RecoveryError>>; +} diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 7d9ab860a58..06288d716f6 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -1,8 +1,9 @@ -use crate::PendingFlashBlock; +use crate::{traits::FlashblockPayloadBase, PendingFlashBlock}; use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; use alloy_primitives::B256; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_chain_state::{ComputedTrieData, ExecutedBlock}; +use reth_chain_state::ExecutedBlock; use reth_errors::RethError; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, @@ -16,6 +17,7 @@ use reth_revm::{cached::CachedReads, database::StateProviderDatabase, db::State} use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{noop::NoopProvider, BlockReaderIdExt, StateProviderFactory}; use std::{ + marker::PhantomData, sync::Arc, time::{Duration, Instant}, }; @@ -23,14 +25,15 @@ use tracing::trace; /// The `FlashBlockBuilder` builds [`PendingBlock`] out of a sequence of transactions. 
#[derive(Debug)] -pub(crate) struct FlashBlockBuilder { +pub(crate) struct FlashBlockBuilder { evm_config: EvmConfig, provider: Provider, + _base: PhantomData, } -impl FlashBlockBuilder { +impl FlashBlockBuilder { pub(crate) const fn new(evm_config: EvmConfig, provider: Provider) -> Self { - Self { evm_config, provider } + Self { evm_config, provider, _base: PhantomData } } pub(crate) const fn provider(&self) -> &Provider { @@ -38,8 +41,9 @@ impl FlashBlockBuilder { } } -pub(crate) struct BuildArgs { - pub(crate) base: OpFlashblockPayloadBase, +/// Arguments for building a block from flashblocks. +pub(crate) struct BuildArgs { + pub(crate) base: Base, pub(crate) transactions: I, pub(crate) cached_state: Option<(B256, CachedReads)>, pub(crate) last_flashblock_index: u64, @@ -47,10 +51,11 @@ pub(crate) struct BuildArgs { pub(crate) compute_state_root: bool, } -impl FlashBlockBuilder +impl FlashBlockBuilder where N: NodePrimitives, - EvmConfig: ConfigureEvm + Unpin>, + Base: FlashblockPayloadBase, + EvmConfig: ConfigureEvm + Unpin>, Provider: StateProviderFactory + BlockReaderIdExt< Header = HeaderTy, @@ -60,12 +65,12 @@ where > + Unpin, { /// Returns the [`PendingFlashBlock`] made purely out of transactions and - /// [`OpFlashblockPayloadBase`] in `args`. + /// the flashblock payload base in `args`. /// /// Returns `None` if the flashblock doesn't attach to the latest header. 
pub(crate) fn execute>>>( &self, - mut args: BuildArgs, + mut args: BuildArgs, ) -> eyre::Result, CachedReads)>> { trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); @@ -75,8 +80,8 @@ where .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; let latest_hash = latest.hash(); - if args.base.parent_hash != latest_hash { - trace!(target: "flashblocks", flashblock_parent = ?args.base.parent_hash, local_latest=?latest.num_hash(),"Skipping non consecutive flashblock"); + if args.base.parent_hash() != latest_hash { + trace!(target: "flashblocks", flashblock_parent = ?args.base.parent_hash(), local_latest=?latest.num_hash(),"Skipping non consecutive flashblock"); // doesn't attach to the latest block return Ok(None) } @@ -141,8 +146,8 @@ where } } -impl Clone for FlashBlockBuilder { +impl Clone for FlashBlockBuilder { fn clone(&self) -> Self { - Self { evm_config: self.evm_config.clone(), provider: self.provider.clone() } + Self { evm_config: self.evm_config.clone(), provider: self.provider.clone(), _base: PhantomData } } } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index d5f42a5473d..f916f48c9b4 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -16,12 +16,14 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use reqwest::Url; use reth_chainspec::{EthereumHardforks, Hardforks}; use reth_evm::ConfigureEvm; +use op_alloy_consensus::OpTxEnvelope; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; +use reth_primitives_traits::NodePrimitives; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ FlashBlockBuildInfo, 
FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, @@ -121,7 +123,7 @@ impl OpEthApi { } /// Returns a new subscription to flashblock sequences. - pub fn subscribe_flashblock_sequence(&self) -> Option { + pub fn subscribe_flashblock_sequence(&self) -> Option> { self.inner.flashblocks.as_ref().map(|f| f.flashblocks_sequence.subscribe()) } @@ -477,9 +479,10 @@ where >, Types: NodeTypes< ChainSpec: Hardforks + EthereumHardforks, + Primitives: NodePrimitives, Payload: reth_node_api::PayloadTypes< ExecutionData: for<'a> TryFrom< - &'a FlashBlockCompleteSequence, + &'a FlashBlockCompleteSequence, Error: std::fmt::Display, >, >, From 5b21aa18ce0d6a3693fc2606b9452c03374c598a Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 3 Dec 2025 04:07:55 +1100 Subject: [PATCH 002/267] websocket decoding and RPC fixes mods generic Update decoding.rs rpc fixes temp remove consensus --- crates/optimism/flashblocks/src/lib.rs | 2 +- .../optimism/flashblocks/src/ws/decoding.rs | 28 +++-- crates/optimism/flashblocks/src/ws/mod.rs | 2 +- crates/optimism/flashblocks/src/ws/stream.rs | 50 +++++--- crates/optimism/rpc/src/eth/block.rs | 7 +- crates/optimism/rpc/src/eth/call.rs | 10 +- crates/optimism/rpc/src/eth/mod.rs | 109 +++++++++++------- crates/optimism/rpc/src/eth/pending_block.rs | 4 +- crates/optimism/rpc/src/eth/receipt.rs | 4 +- crates/optimism/rpc/src/eth/transaction.rs | 10 +- 10 files changed, 143 insertions(+), 83 deletions(-) diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index 14e81c4dd27..f38236d5587 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -39,7 +39,7 @@ mod cache; mod test_utils; mod ws; -pub use ws::{WsConnect, WsFlashBlockStream}; +pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; /// Receiver of the most recent [`PendingFlashBlock`] built out of flashblocks. 
pub type PendingBlockRx = tokio::sync::watch::Receiver>>; diff --git a/crates/optimism/flashblocks/src/ws/decoding.rs b/crates/optimism/flashblocks/src/ws/decoding.rs index 64d96dc5e3e..615045d82c2 100644 --- a/crates/optimism/flashblocks/src/ws/decoding.rs +++ b/crates/optimism/flashblocks/src/ws/decoding.rs @@ -1,24 +1,28 @@ -use crate::FlashBlock; use alloy_primitives::bytes::Bytes; use std::io; -/// A trait for decoding flashblocks from bytes. -pub trait FlashBlockDecoder: Send + 'static { - /// Decodes `bytes` into a [`FlashBlock`]. - fn decode(&self, bytes: Bytes) -> eyre::Result; +/// A trait for decoding flashblocks from bytes into payload type `F`. +pub trait FlashBlockDecoder: Send + 'static { + /// Decodes `bytes` into a flashblock payload of type `F`. + fn decode(&self, bytes: Bytes) -> eyre::Result; } -/// Default implementation of the decoder. -impl FlashBlockDecoder for () { - fn decode(&self, bytes: Bytes) -> eyre::Result { +impl FlashBlockDecoder for () +where + F: serde::de::DeserializeOwned, +{ + fn decode(&self, bytes: Bytes) -> eyre::Result { decode_flashblock(bytes) } } -pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { - let bytes = crate::ws::decoding::try_parse_message(bytes)?; +fn decode_flashblock(bytes: Bytes) -> eyre::Result +where + F: serde::de::DeserializeOwned, +{ + let bytes = try_decompress(bytes)?; - let payload: FlashBlock = + let payload: F = serde_json::from_slice(&bytes).map_err(|e| eyre::eyre!("failed to parse message: {e}"))?; Ok(payload) @@ -30,7 +34,7 @@ pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { /// then it assumes that it is JSON-encoded and returns it as-is. /// /// Otherwise, the `bytes` are passed through a brotli decompressor and returned. 
-fn try_parse_message(bytes: Bytes) -> eyre::Result { +fn try_decompress(bytes: Bytes) -> eyre::Result { if bytes.trim_ascii_start().starts_with(b"{") { return Ok(bytes); } diff --git a/crates/optimism/flashblocks/src/ws/mod.rs b/crates/optimism/flashblocks/src/ws/mod.rs index 8c8a5910892..651d83c916b 100644 --- a/crates/optimism/flashblocks/src/ws/mod.rs +++ b/crates/optimism/flashblocks/src/ws/mod.rs @@ -1,6 +1,6 @@ pub use stream::{WsConnect, WsFlashBlockStream}; mod decoding; -pub(crate) use decoding::FlashBlockDecoder; +pub use decoding::FlashBlockDecoder; mod stream; diff --git a/crates/optimism/flashblocks/src/ws/stream.rs b/crates/optimism/flashblocks/src/ws/stream.rs index e46fd6d747f..8a5ee7b547d 100644 --- a/crates/optimism/flashblocks/src/ws/stream.rs +++ b/crates/optimism/flashblocks/src/ws/stream.rs @@ -1,4 +1,4 @@ -use crate::{ws::FlashBlockDecoder, FlashBlock}; +use crate::ws::FlashBlockDecoder; use futures_util::{ stream::{SplitSink, SplitStream}, FutureExt, Sink, Stream, StreamExt, @@ -18,23 +18,27 @@ use tokio_tungstenite::{ use tracing::debug; use url::Url; -/// An asynchronous stream of [`FlashBlock`] from a websocket connection. +/// An asynchronous stream of flashblock payloads from a websocket connection. /// -/// The stream attempts to connect to a websocket URL and then decode each received item. +/// The stream attempts to connect to a websocket URL and then decode each received item +/// into the payload type `F`. /// /// If the connection fails, the error is returned and connection retried. The number of retries is /// unbounded. -pub struct WsFlashBlockStream { +pub struct WsFlashBlockStream { ws_url: Url, state: State, connector: Connector, - decoder: Box, + decoder: Box>, connect: ConnectFuture, stream: Option, sink: Option, } -impl WsFlashBlockStream { +impl WsFlashBlockStream +where + F: serde::de::DeserializeOwned, +{ /// Creates a new websocket stream over `ws_url`. 
pub fn new(ws_url: Url) -> Self { Self { @@ -48,13 +52,16 @@ impl WsFlashBlockStream { } } - /// Sets the [`FlashBlock`] decoder for the websocket stream. - pub fn with_decoder(self, decoder: Box) -> Self { + /// Sets a custom decoder for the websocket stream. + pub fn with_decoder(self, decoder: Box>) -> Self { Self { decoder, ..self } } } -impl WsFlashBlockStream { +impl WsFlashBlockStream +where + F: serde::de::DeserializeOwned, +{ /// Creates a new websocket stream over `ws_url`. pub fn with_connector(ws_url: Url, connector: C) -> Self { Self { @@ -69,13 +76,14 @@ impl WsFlashBlockStream { } } -impl Stream for WsFlashBlockStream +impl Stream for WsFlashBlockStream where Str: Stream> + Unpin, S: Sink + Send + Unpin, C: WsConnect + Clone + Send + 'static + Unpin, + F: 'static, { - type Item = eyre::Result; + type Item = eyre::Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -136,7 +144,7 @@ where } } -impl WsFlashBlockStream +impl WsFlashBlockStream where C: WsConnect + Clone + Send + 'static, { @@ -169,7 +177,7 @@ where } } -impl Debug for WsFlashBlockStream { +impl Debug for WsFlashBlockStream { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("FlashBlockStream") .field("ws_url", &self.ws_url) @@ -240,6 +248,7 @@ impl WsConnect for WsConnector { #[cfg(test)] mod tests { use super::*; + use crate::FlashBlock; use alloy_primitives::bytes::Bytes; use brotli::enc::BrotliEncoderParams; use std::{future, iter}; @@ -463,7 +472,8 @@ mod tests { let flashblocks = [flashblock()]; let connector = FakeConnector::from(flashblocks.iter().map(to_message)); let ws_url = "http://localhost".parse().unwrap(); - let stream = WsFlashBlockStream::with_connector(ws_url, connector); + let stream: WsFlashBlockStream<_, _, _, FlashBlock> = + WsFlashBlockStream::with_connector(ws_url, connector); let actual_messages: Vec<_> = stream.take(1).map(Result::unwrap).collect().await; let expected_messages = 
flashblocks.to_vec(); @@ -478,7 +488,8 @@ mod tests { let flashblock = flashblock(); let connector = FakeConnector::from([Ok(message), to_json_binary_message(&flashblock)]); let ws_url = "http://localhost".parse().unwrap(); - let mut stream = WsFlashBlockStream::with_connector(ws_url, connector); + let mut stream: WsFlashBlockStream<_, _, _, FlashBlock> = + WsFlashBlockStream::with_connector(ws_url, connector); let expected_message = flashblock; let actual_message = @@ -491,7 +502,8 @@ mod tests { async fn test_stream_passes_errors_through() { let connector = FakeConnector::from([Err(Error::AttackAttempt)]); let ws_url = "http://localhost".parse().unwrap(); - let stream = WsFlashBlockStream::with_connector(ws_url, connector); + let stream: WsFlashBlockStream<_, _, _, FlashBlock> = + WsFlashBlockStream::with_connector(ws_url, connector); let actual_messages: Vec<_> = stream.take(1).map(Result::unwrap_err).map(|e| format!("{e}")).collect().await; @@ -506,7 +518,8 @@ mod tests { let error_msg = "test".to_owned(); let connector = FailingConnector(error_msg.clone()); let ws_url = "http://localhost".parse().unwrap(); - let stream = WsFlashBlockStream::with_connector(ws_url, connector); + let stream: WsFlashBlockStream<_, _, _, FlashBlock> = + WsFlashBlockStream::with_connector(ws_url, connector); let actual_errors: Vec<_> = stream.take(tries).map(Result::unwrap_err).map(|e| format!("{e}")).collect().await; @@ -531,7 +544,8 @@ mod tests { let messages = [Ok(msg), to_json_binary_message(&flashblock)]; let connector = FakeConnectorWithSink::from(messages); let ws_url = "http://localhost".parse().unwrap(); - let mut stream = WsFlashBlockStream::with_connector(ws_url, connector); + let mut stream: WsFlashBlockStream<_, _, _, FlashBlock> = + WsFlashBlockStream::with_connector(ws_url, connector); let _ = stream.next().await; diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 0efd9aea988..9dacc2bbdb8 100644 --- 
a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,23 +1,26 @@ //! Loads and formats OP block RPC response. use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; +use reth_optimism_flashblocks::FlashblockPayload; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock}, FromEvmError, RpcConvert, }; -impl EthBlocks for OpEthApi +impl EthBlocks for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { } -impl LoadBlock for OpEthApi +impl LoadBlock for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index db96bda83f3..169885eccab 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,30 +1,34 @@ use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; +use reth_optimism_flashblocks::FlashblockPayload; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, FromEvmError, RpcConvert, }; -impl EthCall for OpEthApi +impl EthCall for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { } -impl EstimateCall for OpEthApi +impl EstimateCall for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { } -impl Call for OpEthApi +impl Call for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index f916f48c9b4..38116d4011c 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -16,19 +16,18 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; -use op_alloy_rpc_types_engine::{OpFlashblockPayload, 
OpFlashblockPayloadBase}; +use op_alloy_rpc_types_engine::{OpFlashblockPayload}; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use reqwest::Url; use reth_chainspec::{EthereumHardforks, Hardforks}; use reth_evm::ConfigureEvm; -use op_alloy_consensus::OpTxEnvelope; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_primitives_traits::NodePrimitives; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, - FlashBlockConsensusClient, FlashBlockRx, FlashBlockService, FlashblocksListeners, - PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, + FlashBlockService, FlashblocksListeners, FlashblockPayload, PendingBlockRx, PendingFlashBlock, + WsFlashBlockStream, }; use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ @@ -54,6 +53,16 @@ use std::{ use tokio::{sync::watch, time}; use tracing::info; +/// Extension trait for OP-specific RPC types that includes flashblock configuration. +pub trait OpRpcTypes: RpcTypes { + /// The flashblock payload type for this chain. + type Flashblock: FlashblockPayload; +} + +impl OpRpcTypes for Optimism { + type Flashblock = OpFlashblockPayload; +} + /// Maximum duration to wait for a fresh flashblock when one is being built. const MAX_FLASHBLOCK_WAIT_DURATION: Duration = Duration::from_millis(50); @@ -70,24 +79,24 @@ pub type EthApiNodeBackend = EthApiInner; /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -pub struct OpEthApi { +pub struct OpEthApi { /// Gateway to node's core components. - inner: Arc>, + inner: Arc>, } -impl Clone for OpEthApi { +impl Clone for OpEthApi { fn clone(&self) -> Self { Self { inner: self.inner.clone() } } } -impl OpEthApi { +impl OpEthApi { /// Creates a new `OpEthApi`. 
pub fn new( eth_api: EthApiNodeBackend, sequencer_client: Option, min_suggested_priority_fee: U256, - flashblocks: Option>, + flashblocks: Option>, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, @@ -118,12 +127,14 @@ impl OpEthApi { } /// Returns a new subscription to received flashblocks. - pub fn subscribe_received_flashblocks(&self) -> Option { + pub fn subscribe_received_flashblocks( + &self, + ) -> Option>> { self.inner.flashblocks.as_ref().map(|f| f.received_flashblocks.subscribe()) } /// Returns a new subscription to flashblock sequences. - pub fn subscribe_flashblock_sequence(&self) -> Option> { + pub fn subscribe_flashblock_sequence(&self) -> Option> { self.inner.flashblocks.as_ref().map(|f| f.flashblocks_sequence.subscribe()) } @@ -187,10 +198,11 @@ impl OpEthApi { } } -impl EthApiTypes for OpEthApi +impl EthApiTypes for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, { type Error = OpEthApiError; type NetworkTypes = Rpc::Network; @@ -201,10 +213,11 @@ where } } -impl RpcNodeCore for OpEthApi +impl RpcNodeCore for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, { type Primitives = N::Primitives; type Provider = N::Provider; @@ -233,10 +246,11 @@ where } } -impl RpcNodeCoreExt for OpEthApi +impl RpcNodeCoreExt for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, { #[inline] fn cache(&self) -> &EthStateCache { @@ -244,10 +258,11 @@ where } } -impl EthApiSpec for OpEthApi +impl EthApiSpec for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, { #[inline] fn starting_block(&self) -> U256 { @@ -255,10 +270,11 @@ where } } -impl SpawnBlocking for OpEthApi +impl SpawnBlocking for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -276,11 +292,12 @@ where } } -impl LoadFee for OpEthApi +impl LoadFee for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, 
+ F: FlashblockPayload, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -302,18 +319,20 @@ where } } -impl LoadState for OpEthApi +impl LoadState for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, Self: LoadPendingBlock, { } -impl EthState for OpEthApi +impl EthState for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, Self: LoadPendingBlock, { #[inline] @@ -322,30 +341,33 @@ where } } -impl EthFees for OpEthApi +impl EthFees for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { } -impl Trace for OpEthApi +impl Trace for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } /// Container type `OpEthApi` -pub struct OpEthApiInner { +pub struct OpEthApiInner +{ /// Gateway to node's core components. eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP @@ -358,16 +380,16 @@ pub struct OpEthApiInner { /// Flashblocks listeners. /// /// If set, provides receivers for pending blocks, flashblock sequences, and build status. - flashblocks: Option>, + flashblocks: Option>, } -impl fmt::Debug for OpEthApiInner { +impl fmt::Debug for OpEthApiInner { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApiInner").finish() } } -impl OpEthApiInner { +impl OpEthApiInner { /// Returns a reference to the [`EthApiNodeBackend`]. 
const fn eth_api(&self) -> &EthApiNodeBackend { &self.eth_api @@ -474,26 +496,28 @@ where N: FullNodeComponents< Evm: ConfigureEvm< NextBlockEnvCtx: BuildPendingEnv> - + From + + From<::Base> + Unpin, >, Types: NodeTypes< ChainSpec: Hardforks + EthereumHardforks, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + SignedTx = ::SignedTx, + >, Payload: reth_node_api::PayloadTypes< ExecutionData: for<'a> TryFrom< - &'a FlashBlockCompleteSequence, + &'a FlashBlockCompleteSequence, Error: std::fmt::Display, >, >, >, >, - NetworkT: RpcTypes, + NetworkT: OpRpcTypes, OpRpcConvert: RpcConvert, - OpEthApi>: + OpEthApi, NetworkT::Flashblock>: FullEthApiServer, { - type EthApi = OpEthApi>; + type EthApi = OpEthApi, NetworkT::Flashblock>; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { let Self { @@ -522,7 +546,8 @@ where info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); let (tx, pending_rx) = watch::channel(None); - let stream = WsFlashBlockStream::new(ws_url); + let stream: WsFlashBlockStream<_, _, _, NetworkT::Flashblock> = + WsFlashBlockStream::new(ws_url); let service = FlashBlockService::new( stream, ctx.components.evm_config().clone(), @@ -537,14 +562,16 @@ where let in_progress_rx = service.subscribe_in_progress(); ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - if flashblock_consensus { - info!(target: "reth::cli", "Launching FlashBlockConsensusClient"); - let flashblock_client = FlashBlockConsensusClient::new( - ctx.engine_handle.clone(), - flashblocks_sequence.subscribe(), - )?; - ctx.components.task_executor().spawn(Box::pin(flashblock_client.run())); - } + + if flashblock_consensus { todo!("Modularize FlashBlockConsensusClient?") } + // if flashblock_consensus { + // info!(target: "reth::cli", "Launching FlashBlockConsensusClient"); + // let flashblock_client = FlashBlockConsensusClient::new( + // ctx.engine_handle.clone(), + // flashblocks_sequence.subscribe(), + // )?; + // 
ctx.components.task_executor().spawn(Box::pin(flashblock_client.run())); + // } Some(FlashblocksListeners::new( pending_rx, diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index bf351d7de11..8faf04c835c 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -4,6 +4,7 @@ use crate::{OpEthApi, OpEthApiError}; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use reth_chain_state::BlockState; +use reth_optimism_flashblocks::FlashblockPayload; use reth_rpc_eth_api::{ helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, FromEvmError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, @@ -14,11 +15,12 @@ use reth_rpc_eth_types::{ }; use reth_storage_api::{BlockReaderIdExt, StateProviderBox, StateProviderFactory}; -impl LoadPendingBlock for OpEthApi +impl LoadPendingBlock for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { #[inline] fn pending_block(&self) -> &tokio::sync::Mutex>> { diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index c04a4d2c72d..05e868a9e82 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -10,6 +10,7 @@ use op_revm::estimate_tx_compressed_size; use reth_chainspec::ChainSpecProvider; use reth_node_api::NodePrimitives; use reth_optimism_evm::RethL1BlockInfo; +use reth_optimism_flashblocks::FlashblockPayload; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::OpReceipt; use reth_primitives_traits::SealedBlock; @@ -22,10 +23,11 @@ use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; use reth_storage_api::BlockReader; use std::fmt::Debug; -impl LoadReceipt for OpEthApi +impl LoadReceipt for OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, { } diff --git a/crates/optimism/rpc/src/eth/transaction.rs 
b/crates/optimism/rpc/src/eth/transaction.rs index 5dee6e14c5b..6565a7f45f9 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -25,13 +25,15 @@ use std::{ future::Future, time::Duration, }; +use reth_optimism_flashblocks::FlashblockPayload; use tokio_stream::wrappers::WatchStream; -impl EthTransactions for OpEthApi +impl EthTransactions for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { fn signers(&self) -> &SignersForRpc { self.inner.eth_api.signers() @@ -175,11 +177,12 @@ where } } -impl LoadTransaction for OpEthApi +impl LoadTransaction for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, Rpc: RpcConvert, + F: FlashblockPayload, { async fn transaction_by_hash( &self, @@ -230,10 +233,11 @@ where } } -impl OpEthApi +impl OpEthApi where N: RpcNodeCore, Rpc: RpcConvert, + F: FlashblockPayload, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { From 02580e845b4c5671f10ada6f3899dcc1c51035f7 Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 3 Dec 2025 05:35:20 +1100 Subject: [PATCH 003/267] custom node fix custom node tidy Update lib.rs Update worker.rs Update worker.rs --- crates/optimism/flashblocks/src/worker.rs | 2 - crates/optimism/rpc/src/lib.rs | 2 +- examples/custom-node/src/engine.rs | 10 +- examples/custom-node/src/evm/config.rs | 8 +- examples/custom-node/src/flashblock.rs | 121 ++++++++++++++++++++++ examples/custom-node/src/lib.rs | 1 + examples/custom-node/src/rpc.rs | 7 +- 7 files changed, 139 insertions(+), 12 deletions(-) create mode 100644 examples/custom-node/src/flashblock.rs diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 06288d716f6..f2a99d36d0a 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -1,9 +1,7 @@ use crate::{traits::FlashblockPayloadBase, PendingFlashBlock}; use 
alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; use alloy_primitives::B256; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_chain_state::{ComputedTrieData, ExecutedBlock}; -use reth_chain_state::ExecutedBlock; use reth_errors::RethError; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 10f8ad5dccd..086ad114be8 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -21,6 +21,6 @@ pub mod witness; pub use engine::OpEngineApiClient; pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES}; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; -pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; +pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder, OpRpcTypes}; pub use metrics::SequencerMetrics; pub use sequencer::SequencerClient; diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index fceace2d2eb..e7502ef1c28 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -1,6 +1,7 @@ use crate::{ chainspec::CustomChainSpec, evm::CustomEvmConfig, + flashblock::CustomFlashblockPayload, primitives::{CustomHeader, CustomNodePrimitives, CustomTransaction}, CustomNode, }; @@ -67,14 +68,15 @@ impl ExecutionPayload for CustomExecutionData { } } -impl TryFrom<&reth_optimism_flashblocks::FlashBlockCompleteSequence> for CustomExecutionData { +impl TryFrom<&reth_optimism_flashblocks::FlashBlockCompleteSequence> + for CustomExecutionData +{ type Error = &'static str; fn try_from( - sequence: &reth_optimism_flashblocks::FlashBlockCompleteSequence, + _sequence: &reth_optimism_flashblocks::FlashBlockCompleteSequence, ) -> Result { - let inner = OpExecutionData::try_from(sequence)?; - Ok(Self { inner, extension: sequence.last().diff.gas_used }) + todo!("convert flashblock sequence to CustomExecutionData") } } diff --git 
a/examples/custom-node/src/evm/config.rs b/examples/custom-node/src/evm/config.rs index f2bd3326893..7666d25a296 100644 --- a/examples/custom-node/src/evm/config.rs +++ b/examples/custom-node/src/evm/config.rs @@ -2,6 +2,7 @@ use crate::{ chainspec::CustomChainSpec, engine::{CustomExecutionData, CustomPayloadBuilderAttributes}, evm::{alloy::CustomEvmFactory, executor::CustomBlockExecutionCtx, CustomBlockAssembler}, + flashblock::CustomFlashblockPayloadBase, primitives::{Block, CustomHeader, CustomNodePrimitives, CustomTransaction}, }; use alloy_consensus::BlockHeader; @@ -9,7 +10,6 @@ use alloy_eips::{eip2718::WithEncoded, Decodable2718}; use alloy_evm::EvmEnv; use alloy_op_evm::OpBlockExecutionCtx; use alloy_rpc_types_engine::PayloadError; -use op_alloy_rpc_types_engine::flashblock::OpFlashblockPayloadBase; use op_revm::OpSpecId; use reth_engine_primitives::ExecutableTxIterator; use reth_ethereum::{ @@ -143,9 +143,9 @@ pub struct CustomNextBlockEnvAttributes { extension: u64, } -impl From for CustomNextBlockEnvAttributes { - fn from(value: OpFlashblockPayloadBase) -> Self { - Self { inner: value.into(), extension: 0 } +impl From for CustomNextBlockEnvAttributes { + fn from(_value: CustomFlashblockPayloadBase) -> Self { + todo!("map CustomFlashblockPayloadBase fields to CustomNextBlockEnvAttributes") } } diff --git a/examples/custom-node/src/flashblock.rs b/examples/custom-node/src/flashblock.rs new file mode 100644 index 00000000000..c3c7e54de33 --- /dev/null +++ b/examples/custom-node/src/flashblock.rs @@ -0,0 +1,121 @@ +use crate::primitives::CustomTransaction; +use alloy_consensus::{crypto::RecoveryError, transaction::Recovered}; +use alloy_eips::{eip2718::WithEncoded, eip4895::Withdrawals}; +use alloy_primitives::{Bloom, Bytes, B256}; +use alloy_rpc_types_engine::PayloadId; +use reth_optimism_flashblocks::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}; +use serde::{Deserialize, Deserializer}; + +#[derive(Debug, Clone, Default)] +pub struct 
CustomFlashblockPayloadBase { + pub parent_hash: B256, + pub block_number: u64, + pub timestamp: u64, +} + +impl FlashblockPayloadBase for CustomFlashblockPayloadBase { + fn parent_hash(&self) -> B256 { + self.parent_hash + } + + fn block_number(&self) -> u64 { + self.block_number + } + + fn timestamp(&self) -> u64 { + self.timestamp + } +} + +#[derive(Debug, Clone, Default)] +pub struct CustomFlashblockPayloadDiff { + pub block_hash: B256, + pub state_root: B256, + pub gas_used: u64, + pub logs_bloom: Bloom, + pub receipts_root: B256, + pub transactions: Vec, +} + +impl FlashblockDiff for CustomFlashblockPayloadDiff { + fn block_hash(&self) -> B256 { + self.block_hash + } + + fn state_root(&self) -> B256 { + self.state_root + } + + fn gas_used(&self) -> u64 { + self.gas_used + } + + fn logs_bloom(&self) -> &Bloom { + &self.logs_bloom + } + + fn receipts_root(&self) -> B256 { + self.receipts_root + } + + fn transactions_raw(&self) -> &[Bytes] { + &self.transactions + } + + fn withdrawals(&self) -> Option<&Withdrawals> { + None + } + + fn withdrawals_root(&self) -> Option { + None + } +} + +#[derive(Debug, Clone)] +pub struct CustomFlashblockPayload { + pub index: u64, + pub payload_id: PayloadId, + pub base: Option, + pub diff: CustomFlashblockPayloadDiff, +} + +impl<'de> Deserialize<'de> for CustomFlashblockPayload { + fn deserialize(_deserializer: D) -> Result + where + D: Deserializer<'de>, + { + todo!("implement deserialization") + } +} + +impl FlashblockPayload for CustomFlashblockPayload { + type Base = CustomFlashblockPayloadBase; + type Diff = CustomFlashblockPayloadDiff; + type SignedTx = CustomTransaction; + + fn index(&self) -> u64 { + self.index + } + + fn payload_id(&self) -> PayloadId { + self.payload_id + } + + fn base(&self) -> Option { + self.base.clone() + } + + fn diff(&self) -> &Self::Diff { + &self.diff + } + + fn block_number(&self) -> u64 { + self.base.as_ref().map(|b| b.block_number()).unwrap_or(0) + } + + fn recover_transactions( + &self, + 
) -> impl Iterator>, RecoveryError>> { + std::iter::from_fn(|| todo!("implement transaction recovery")) + } +} diff --git a/examples/custom-node/src/lib.rs b/examples/custom-node/src/lib.rs index 4210ac9b767..791d25c7dc3 100644 --- a/examples/custom-node/src/lib.rs +++ b/examples/custom-node/src/lib.rs @@ -34,6 +34,7 @@ pub mod chainspec; pub mod engine; pub mod engine_api; pub mod evm; +pub mod flashblock; pub mod pool; pub mod primitives; pub mod rpc; diff --git a/examples/custom-node/src/rpc.rs b/examples/custom-node/src/rpc.rs index b6dc7742d93..f51f46c4533 100644 --- a/examples/custom-node/src/rpc.rs +++ b/examples/custom-node/src/rpc.rs @@ -1,5 +1,6 @@ use crate::{ evm::CustomTxEnv, + flashblock::CustomFlashblockPayload, primitives::{CustomHeader, CustomTransaction}, }; use alloy_consensus::error::ValueError; @@ -7,7 +8,7 @@ use alloy_evm::EvmEnv; use alloy_network::TxSigner; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::{OpTransactionReceipt, OpTransactionRequest}; -use reth_op::rpc::RpcTypes; +use reth_op::rpc::{OpRpcTypes, RpcTypes}; use reth_rpc_api::eth::{ EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx, TryIntoTxEnv, }; @@ -17,6 +18,10 @@ use revm::context::BlockEnv; #[non_exhaustive] pub struct CustomRpcTypes; +impl OpRpcTypes for CustomRpcTypes { + type Flashblock = CustomFlashblockPayload; +} + impl RpcTypes for CustomRpcTypes { type Header = alloy_rpc_types_eth::Header; type Receipt = OpTransactionReceipt; From 64b95481b3900c3d8986f7fee5a4f17f85ef871b Mon Sep 17 00:00:00 2001 From: Rez Date: Fri, 5 Dec 2025 01:43:51 +1100 Subject: [PATCH 004/267] make pr Update op_impl.rs --- crates/optimism/flashblocks/src/cache.rs | 23 ++++++++--- crates/optimism/flashblocks/src/consensus.rs | 6 ++- crates/optimism/flashblocks/src/lib.rs | 4 +- crates/optimism/flashblocks/src/op_impl.rs | 8 ++-- crates/optimism/flashblocks/src/sequence.rs | 40 +++++++++++-------- crates/optimism/flashblocks/src/service.rs | 9 +++-- 
crates/optimism/flashblocks/src/traits.rs | 7 +++- crates/optimism/flashblocks/src/worker.rs | 10 ++++- .../optimism/flashblocks/tests/it/stream.rs | 3 +- crates/optimism/rpc/src/eth/mod.rs | 22 ++++++---- crates/optimism/rpc/src/eth/transaction.rs | 2 +- crates/trie/trie/src/node_iter.rs | 3 ++ 12 files changed, 91 insertions(+), 46 deletions(-) diff --git a/crates/optimism/flashblocks/src/cache.rs b/crates/optimism/flashblocks/src/cache.rs index abdba3bde9e..4acd44fca0b 100644 --- a/crates/optimism/flashblocks/src/cache.rs +++ b/crates/optimism/flashblocks/src/cache.rs @@ -17,6 +17,16 @@ use ringbuffer::{AllocRingBuffer, RingBuffer}; use tokio::sync::broadcast; use tracing::*; +type CachedSequenceEntry

= ( + FlashBlockCompleteSequence

, + Vec::SignedTx>>>, +); + +type SequenceBuildArgs

= BuildArgs< + Vec::SignedTx>>>, +

::Base, +>; + /// Maximum number of cached sequences in the ring buffer. const CACHE_SIZE: usize = 3; /// 200 ms flashblock time. @@ -37,7 +47,7 @@ pub(crate) struct SequenceManager { pending_transactions: Vec>>, /// Ring buffer of recently completed sequences bundled with their decoded transactions (FIFO, /// size 3) - completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence

, Vec>>)>, + completed_cache: AllocRingBuffer>, /// Broadcast channel for completed sequences block_broadcaster: broadcast::Sender>, /// Whether to compute state roots when building blocks @@ -65,7 +75,9 @@ impl SequenceManager

{ } /// Gets a subscriber to the flashblock sequences produced. - pub(crate) fn subscribe_block_sequence(&self) -> broadcast::Receiver> { + pub(crate) fn subscribe_block_sequence( + &self, + ) -> broadcast::Receiver> { self.block_broadcaster.subscribe() } @@ -130,7 +142,7 @@ impl SequenceManager

{ &mut self, local_tip_hash: B256, local_tip_timestamp: u64, - ) -> Option>>, P::Base>> { + ) -> Option> { // Try to find a buildable sequence: (base, last_fb, transactions, cached_state, // source_name) let (base, last_flashblock, transactions, cached_state, source_name) = @@ -143,7 +155,7 @@ impl SequenceManager

{ } // Priority 2: Try cached sequence with exact parent match else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash() == local_tip_hash) { - let base = cached.payload_base().clone(); + let base = cached.payload_base(); let last_fb = cached.last().clone(); let transactions = txs.clone(); let cached_state = None; @@ -267,8 +279,7 @@ impl SequenceManager

{ #[cfg(test)] mod tests { use super::*; - use crate::test_utils::TestFlashBlockFactory; - use crate::FlashBlock; + use crate::{test_utils::TestFlashBlockFactory, FlashBlock}; use alloy_primitives::B256; #[test] diff --git a/crates/optimism/flashblocks/src/consensus.rs b/crates/optimism/flashblocks/src/consensus.rs index dce248e0bad..42a03d9945c 100644 --- a/crates/optimism/flashblocks/src/consensus.rs +++ b/crates/optimism/flashblocks/src/consensus.rs @@ -1,5 +1,6 @@ use crate::{ - traits::FlashblockPayloadBase, FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, + traits::FlashblockPayloadBase, FlashBlock, FlashBlockCompleteSequence, + FlashBlockCompleteSequenceRx, }; use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadStatusEnum; @@ -30,7 +31,8 @@ where impl

FlashBlockConsensusClient

where P: PayloadTypes, - P::ExecutionData: for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, + P::ExecutionData: + for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, { /// Create a new `FlashBlockConsensusClient` with the given Op engine and sequence receiver. pub const fn new( diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index f38236d5587..cfe18d2ea88 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -26,7 +26,9 @@ mod payload; pub use payload::{FlashBlock, PendingFlashBlock}; mod sequence; -pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome}; +pub use sequence::{ + FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome, +}; mod service; pub use service::{FlashBlockBuildInfo, FlashBlockService}; diff --git a/crates/optimism/flashblocks/src/op_impl.rs b/crates/optimism/flashblocks/src/op_impl.rs index a37aedceb15..3bd840959dd 100644 --- a/crates/optimism/flashblocks/src/op_impl.rs +++ b/crates/optimism/flashblocks/src/op_impl.rs @@ -51,8 +51,8 @@ impl FlashblockDiff for OpFlashblockPayloadDelta { } fn withdrawals(&self) -> Option<&Withdrawals> { - // OpFlashblockPayloadDelta stores Vec, not Withdrawals newtype - // This method isn't currently used in the flashblocks infrastructure + // TODO: Might not be needed as withdrawals aren't processed in a block except if at start + // or end None } @@ -83,12 +83,12 @@ impl FlashblockPayload for OpFlashblockPayload { } fn block_number(&self) -> u64 { - OpFlashblockPayload::block_number(self) + Self::block_number(self) } fn recover_transactions( &self, ) -> impl Iterator>, RecoveryError>> { - OpFlashblockPayload::recover_transactions::(self) + Self::recover_transactions::(self) } } diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index 
a9e1cec5e52..0f1afe997e7 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -210,7 +210,7 @@ impl FlashBlockCompleteSequence

{ } /// Returns the number of flashblocks in the sequence. - pub fn count(&self) -> usize { + pub const fn count(&self) -> usize { self.inner.len() } @@ -235,10 +235,7 @@ impl FlashBlockCompleteSequence

{ /// Returns all transactions from all flashblocks in the sequence pub fn all_transactions(&self) -> Vec { use crate::traits::FlashblockDiff; - self.inner - .iter() - .flat_map(|fb| fb.diff().transactions_raw().iter().cloned()) - .collect() + self.inner.iter().flat_map(|fb| fb.diff().transactions_raw().iter().cloned()).collect() } /// Returns an iterator over all flashblocks in the sequence. @@ -306,7 +303,8 @@ mod tests { #[test] fn test_insert_ignores_different_block_number() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -322,7 +320,8 @@ mod tests { #[test] fn test_insert_ignores_different_payload_id() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -340,7 +339,8 @@ mod tests { #[test] fn test_insert_maintains_btree_order() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -362,7 +362,8 @@ mod tests { #[test] fn test_finalize_empty_sequence_fails() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let result = sequence.finalize(); assert!(result.is_err()); @@ -374,7 +375,8 @@ mod tests { #[test] fn test_finalize_clears_pending_state() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = 
TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -391,7 +393,8 @@ mod tests { #[test] fn test_finalize_preserves_execution_outcome() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -408,7 +411,8 @@ mod tests { #[test] fn test_finalize_clears_cached_reads() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -426,7 +430,8 @@ mod tests { #[test] fn test_finalize_multiple_times_after_refill() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); // First sequence @@ -616,7 +621,8 @@ mod tests { #[test] fn test_try_from_pending_to_complete_valid() { - let mut pending: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut pending: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -637,7 +643,8 @@ mod tests { #[test] fn test_try_from_preserves_execution_outcome() { - let mut pending: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut pending: FlashBlockPendingSequence = + FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); @@ -657,7 +664,8 @@ mod tests { #[test] fn test_last_flashblock_returns_highest_index() { - let mut sequence: FlashBlockPendingSequence = FlashBlockPendingSequence::new(); + let mut sequence: FlashBlockPendingSequence = + 
FlashBlockPendingSequence::new(); let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs index 904d6c757dd..6d149495606 100644 --- a/crates/optimism/flashblocks/src/service.rs +++ b/crates/optimism/flashblocks/src/service.rs @@ -51,9 +51,8 @@ where N: NodePrimitives, P: FlashblockPayload, S: Stream> + Unpin + 'static, - EvmConfig: ConfigureEvm + Unpin> - + Clone - + 'static, + EvmConfig: + ConfigureEvm + Unpin> + Clone + 'static, Provider: StateProviderFactory + BlockReaderIdExt< Header = HeaderTy, @@ -92,7 +91,9 @@ where } /// Returns the sender half to the flashblock sequence. - pub const fn block_sequence_broadcaster(&self) -> &broadcast::Sender> { + pub const fn block_sequence_broadcaster( + &self, + ) -> &broadcast::Sender> { self.sequences.block_sequence_broadcaster() } diff --git a/crates/optimism/flashblocks/src/traits.rs b/crates/optimism/flashblocks/src/traits.rs index 49dde64cb56..291fa0e11b0 100644 --- a/crates/optimism/flashblocks/src/traits.rs +++ b/crates/optimism/flashblocks/src/traits.rs @@ -86,5 +86,10 @@ pub trait FlashblockPayload: /// or an error if decoding/recovery failed. 
fn recover_transactions( &self, - ) -> impl Iterator>, RecoveryError>>; + ) -> impl Iterator< + Item = Result< + alloy_eips::eip2718::WithEncoded>, + RecoveryError, + >, + >; } diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index f2a99d36d0a..5b3ea6b5d16 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -144,8 +144,14 @@ where } } -impl Clone for FlashBlockBuilder { +impl Clone + for FlashBlockBuilder +{ fn clone(&self) -> Self { - Self { evm_config: self.evm_config.clone(), provider: self.provider.clone(), _base: PhantomData } + Self { + evm_config: self.evm_config.clone(), + provider: self.provider.clone(), + _base: PhantomData, + } } } diff --git a/crates/optimism/flashblocks/tests/it/stream.rs b/crates/optimism/flashblocks/tests/it/stream.rs index 99e78fee23a..1de1395bc02 100644 --- a/crates/optimism/flashblocks/tests/it/stream.rs +++ b/crates/optimism/flashblocks/tests/it/stream.rs @@ -1,11 +1,12 @@ use futures_util::stream::StreamExt; +use op_alloy_rpc_types_engine::OpFlashblockPayload; use reth_optimism_flashblocks::WsFlashBlockStream; #[tokio::test] async fn test_streaming_flashblocks_from_remote_source_is_successful() { let items = 3; let ws_url = "wss://sepolia.flashblocks.base.org/ws".parse().unwrap(); - let stream = WsFlashBlockStream::new(ws_url); + let stream: WsFlashBlockStream<_, _, _, OpFlashblockPayload> = WsFlashBlockStream::new(ws_url); let blocks: Vec<_> = stream.take(items).collect().await; diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 38116d4011c..f382f2bd3ee 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -16,19 +16,19 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; -use op_alloy_rpc_types_engine::{OpFlashblockPayload}; +use op_alloy_rpc_types_engine::OpFlashblockPayload; 
pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use reqwest::Url; use reth_chainspec::{EthereumHardforks, Hardforks}; use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; -use reth_primitives_traits::NodePrimitives; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, - FlashBlockService, FlashblocksListeners, FlashblockPayload, PendingBlockRx, PendingFlashBlock, + FlashBlockService, FlashblockPayload, FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; +use reth_primitives_traits::NodePrimitives; use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ helpers::{ @@ -366,8 +366,11 @@ impl fmt::Debug for OpEth } /// Container type `OpEthApi` -pub struct OpEthApiInner -{ +pub struct OpEthApiInner< + N: RpcNodeCore, + Rpc: RpcConvert, + F: FlashblockPayload = OpFlashblockPayload, +> { /// Gateway to node's core components. 
eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP @@ -383,7 +386,9 @@ pub struct OpEthApiInner>, } -impl fmt::Debug for OpEthApiInner { +impl fmt::Debug + for OpEthApiInner +{ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApiInner").finish() } @@ -562,8 +567,9 @@ where let in_progress_rx = service.subscribe_in_progress(); ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - - if flashblock_consensus { todo!("Modularize FlashBlockConsensusClient?") } + if flashblock_consensus { + todo!("Modularize FlashBlockConsensusClient?") + } // if flashblock_consensus { // info!(target: "reth::cli", "Launching FlashBlockConsensusClient"); // let flashblock_client = FlashBlockConsensusClient::new( diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 6565a7f45f9..018bf964175 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -6,6 +6,7 @@ use alloy_rpc_types_eth::TransactionInfo; use futures::StreamExt; use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; use reth_chain_state::CanonStateSubscriptions; +use reth_optimism_flashblocks::FlashblockPayload; use reth_optimism_primitives::DepositReceipt; use reth_primitives_traits::{ BlockBody, Recovered, SignedTransaction, SignerRecoverable, WithEncoded, @@ -25,7 +26,6 @@ use std::{ future::Future, time::Duration, }; -use reth_optimism_flashblocks::FlashblockPayload; use tokio_stream::wrappers::WatchStream; impl EthTransactions for OpEthApi diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index b57fc2da707..7c2ff216835 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -84,16 +84,19 @@ where K: AsRef, { /// Creates a new [`TrieNodeIter`] for the state trie. 
+ #[allow(clippy::missing_const_for_fn)] pub fn state_trie(walker: TrieWalker, hashed_cursor: H) -> Self { Self::new(walker, hashed_cursor, TrieType::State) } /// Creates a new [`TrieNodeIter`] for the storage trie. + #[allow(clippy::missing_const_for_fn)] pub fn storage_trie(walker: TrieWalker, hashed_cursor: H) -> Self { Self::new(walker, hashed_cursor, TrieType::Storage) } /// Creates a new [`TrieNodeIter`]. + #[allow(clippy::missing_const_for_fn)] fn new(walker: TrieWalker, hashed_cursor: H, trie_type: TrieType) -> Self { Self { walker, From e1f36a73e7f715820546ebde93735f4a92478418 Mon Sep 17 00:00:00 2001 From: Rez Date: Mon, 29 Dec 2025 22:55:50 +1100 Subject: [PATCH 005/267] remove implicit clones and pr comment Update cache.rs Update stream.rs --- crates/optimism/flashblocks/src/cache.rs | 4 ++-- crates/optimism/flashblocks/src/lib.rs | 3 --- crates/optimism/flashblocks/src/op_impl.rs | 4 ++-- crates/optimism/flashblocks/src/sequence.rs | 4 ++-- crates/optimism/flashblocks/src/traits.rs | 2 +- crates/optimism/flashblocks/src/ws/stream.rs | 2 ++ examples/custom-node/src/flashblock.rs | 4 ++-- 7 files changed, 11 insertions(+), 12 deletions(-) diff --git a/crates/optimism/flashblocks/src/cache.rs b/crates/optimism/flashblocks/src/cache.rs index 4acd44fca0b..99b2a317f02 100644 --- a/crates/optimism/flashblocks/src/cache.rs +++ b/crates/optimism/flashblocks/src/cache.rs @@ -147,7 +147,7 @@ impl SequenceManager

{ // source_name) let (base, last_flashblock, transactions, cached_state, source_name) = // Priority 1: Try current pending sequence - if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash() == local_tip_hash) { + if let Some(base) = self.pending.payload_base().cloned().filter(|b| b.parent_hash() == local_tip_hash) { let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash(), r)); let last_fb = self.pending.last_flashblock()?.clone(); let transactions = self.pending_transactions.clone(); @@ -155,7 +155,7 @@ impl SequenceManager

{ } // Priority 2: Try cached sequence with exact parent match else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash() == local_tip_hash) { - let base = cached.payload_base(); + let base = cached.payload_base().clone(); let last_fb = cached.last().clone(); let transactions = txs.clone(); let cached_state = None; diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index cfe18d2ea88..817481e5e5d 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -50,9 +50,6 @@ pub type PendingBlockRx = tokio::sync::watch::Receiver = tokio::sync::broadcast::Receiver>; -/// Receiver of received flashblocks from the (websocket) subscription. -pub type FlashBlockRx = tokio::sync::broadcast::Receiver>; - /// Receiver that signals whether a flashblock is currently being built. pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; diff --git a/crates/optimism/flashblocks/src/op_impl.rs b/crates/optimism/flashblocks/src/op_impl.rs index 3bd840959dd..a6a42b82063 100644 --- a/crates/optimism/flashblocks/src/op_impl.rs +++ b/crates/optimism/flashblocks/src/op_impl.rs @@ -74,8 +74,8 @@ impl FlashblockPayload for OpFlashblockPayload { self.payload_id } - fn base(&self) -> Option { - self.base.clone() + fn base(&self) -> Option<&Self::Base> { + self.base.as_ref() } fn diff(&self) -> &Self::Diff { diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index 0f1afe997e7..ca047a8e148 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -109,7 +109,7 @@ impl FlashBlockPendingSequence

{ } /// Returns the payload base of the first tracked flashblock. - pub fn payload_base(&self) -> Option { + pub fn payload_base(&self) -> Option<&P::Base> { self.inner.values().next()?.base() } @@ -205,7 +205,7 @@ impl FlashBlockCompleteSequence

{ } /// Returns the payload base of the first flashblock. - pub fn payload_base(&self) -> P::Base { + pub fn payload_base(&self) -> &P::Base { self.inner.first().unwrap().base().unwrap() } diff --git a/crates/optimism/flashblocks/src/traits.rs b/crates/optimism/flashblocks/src/traits.rs index 291fa0e11b0..52aca48d816 100644 --- a/crates/optimism/flashblocks/src/traits.rs +++ b/crates/optimism/flashblocks/src/traits.rs @@ -70,7 +70,7 @@ pub trait FlashblockPayload: fn payload_id(&self) -> PayloadId; /// Base payload (only present on index 0). - fn base(&self) -> Option; + fn base(&self) -> Option<&Self::Base>; /// State diff for this flashblock. fn diff(&self) -> &Self::Diff; diff --git a/crates/optimism/flashblocks/src/ws/stream.rs b/crates/optimism/flashblocks/src/ws/stream.rs index 8a5ee7b547d..3487a32fdc0 100644 --- a/crates/optimism/flashblocks/src/ws/stream.rs +++ b/crates/optimism/flashblocks/src/ws/stream.rs @@ -51,7 +51,9 @@ where sink: None, } } +} +impl WsFlashBlockStream { /// Sets a custom decoder for the websocket stream. 
pub fn with_decoder(self, decoder: Box>) -> Self { Self { decoder, ..self } diff --git a/examples/custom-node/src/flashblock.rs b/examples/custom-node/src/flashblock.rs index c3c7e54de33..dc1e42936c6 100644 --- a/examples/custom-node/src/flashblock.rs +++ b/examples/custom-node/src/flashblock.rs @@ -101,8 +101,8 @@ impl FlashblockPayload for CustomFlashblockPayload { self.payload_id } - fn base(&self) -> Option { - self.base.clone() + fn base(&self) -> Option<&Self::Base> { + self.base.as_ref() } fn diff(&self) -> &Self::Diff { From 0cdd5af496ed13cc868c97c320a3b842001fd6ab Mon Sep 17 00:00:00 2001 From: Rez Date: Mon, 29 Dec 2025 23:37:55 +1100 Subject: [PATCH 006/267] modularize consensus client --- crates/optimism/flashblocks/src/consensus.rs | 25 ++++++++++---------- crates/optimism/rpc/src/eth/mod.rs | 19 +++++++-------- 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/crates/optimism/flashblocks/src/consensus.rs b/crates/optimism/flashblocks/src/consensus.rs index 42a03d9945c..9308de06d08 100644 --- a/crates/optimism/flashblocks/src/consensus.rs +++ b/crates/optimism/flashblocks/src/consensus.rs @@ -1,10 +1,10 @@ use crate::{ - traits::FlashblockPayloadBase, FlashBlock, FlashBlockCompleteSequence, - FlashBlockCompleteSequenceRx, + traits::{FlashblockPayload, FlashblockPayloadBase}, + FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, }; use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadStatusEnum; -use op_alloy_rpc_types_engine::{OpExecutionData, OpFlashblockPayload}; +use op_alloy_rpc_types_engine::OpExecutionData; use reth_engine_primitives::ConsensusEngineHandle; use reth_optimism_payload_builder::OpPayloadTypes; use reth_payload_primitives::{EngineApiMessageVersion, ExecutionPayload, PayloadTypes}; @@ -18,26 +18,27 @@ use tracing::*; /// /// [`FlashBlockService`]: crate::FlashBlockService #[derive(Debug)] -pub struct FlashBlockConsensusClient

+pub struct FlashBlockConsensusClient

where P: PayloadTypes, + F: FlashblockPayload, { /// Handle to execution client. engine_handle: ConsensusEngineHandle

, /// Receiver for completed flashblock sequences from `FlashBlockService`. - sequence_receiver: FlashBlockCompleteSequenceRx, + sequence_receiver: FlashBlockCompleteSequenceRx, } -impl

FlashBlockConsensusClient

+impl FlashBlockConsensusClient where P: PayloadTypes, - P::ExecutionData: - for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, + F: FlashblockPayload, + P::ExecutionData: for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, { - /// Create a new `FlashBlockConsensusClient` with the given Op engine and sequence receiver. + /// Create a new `FlashBlockConsensusClient` with the given engine handle and sequence receiver. pub const fn new( engine_handle: ConsensusEngineHandle

, - sequence_receiver: FlashBlockCompleteSequenceRx, + sequence_receiver: FlashBlockCompleteSequenceRx, ) -> eyre::Result { Ok(Self { engine_handle, sequence_receiver }) } @@ -48,7 +49,7 @@ where /// in which case this returns the `parent_hash` instead to drive the chain forward. /// /// Returns the block hash to use for FCU (either the new block or parent). - async fn submit_new_payload(&self, sequence: &FlashBlockCompleteSequence) -> B256 { + async fn submit_new_payload(&self, sequence: &FlashBlockCompleteSequence) -> B256 { let payload = match P::ExecutionData::try_from(sequence) { Ok(payload) => payload, Err(err) => { @@ -97,7 +98,7 @@ where async fn submit_forkchoice_update( &self, head_block_hash: B256, - sequence: &FlashBlockCompleteSequence, + sequence: &FlashBlockCompleteSequence, ) { let block_number = sequence.block_number(); let safe_hash = sequence.payload_base().parent_hash(); diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index f382f2bd3ee..14da741334c 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -25,8 +25,8 @@ use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, - FlashBlockService, FlashblockPayload, FlashblocksListeners, PendingBlockRx, PendingFlashBlock, - WsFlashBlockStream, + FlashBlockConsensusClient, FlashBlockService, FlashblockPayload, FlashblocksListeners, + PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_primitives_traits::NodePrimitives; use reth_rpc::eth::core::EthApiInner; @@ -568,16 +568,13 @@ where ctx.components.task_executor().spawn(Box::pin(service.run(tx))); if flashblock_consensus { - todo!("Modularize FlashBlockConsensusClient?") + info!(target: "reth::cli", "Launching FlashBlockConsensusClient"); + let flashblock_client = 
FlashBlockConsensusClient::new( + ctx.engine_handle.clone(), + flashblocks_sequence.subscribe(), + )?; + ctx.components.task_executor().spawn(Box::pin(flashblock_client.run())); } - // if flashblock_consensus { - // info!(target: "reth::cli", "Launching FlashBlockConsensusClient"); - // let flashblock_client = FlashBlockConsensusClient::new( - // ctx.engine_handle.clone(), - // flashblocks_sequence.subscribe(), - // )?; - // ctx.components.task_executor().spawn(Box::pin(flashblock_client.run())); - // } Some(FlashblocksListeners::new( pending_rx, From ccacce044d1b0b0d3c2bf2ac42533c3f7adc2c31 Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 31 Dec 2025 14:44:54 +1100 Subject: [PATCH 007/267] fix custom node Update engine.rs Update engine.rs added base fee simplify like op Update engine.rs --- examples/custom-node/src/engine.rs | 83 ++++++++++++++++++-- examples/custom-node/src/evm/config.rs | 14 +++- examples/custom-node/src/flashblock.rs | 101 ++++++++----------------- 3 files changed, 119 insertions(+), 79 deletions(-) diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index e7502ef1c28..fcd53a4ed3d 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -6,7 +6,9 @@ use crate::{ CustomNode, }; use alloy_eips::eip2718::WithEncoded; -use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; +use alloy_primitives::B256; +use alloy_rpc_types_engine::{ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}; +use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload, OpExecutionPayloadV4}; use reth_engine_primitives::EngineApiValidator; use reth_ethereum::{ node::api::{ @@ -24,6 +26,7 @@ use reth_op::node::{ engine::OpEngineValidator, payload::OpAttributes, OpBuiltPayload, OpEngineTypes, OpPayloadAttributes, OpPayloadBuilderAttributes, }; +use reth_optimism_flashblocks::FlashBlockCompleteSequence; use revm_primitives::U256; use serde::{Deserialize, Serialize}; use 
std::sync::Arc; @@ -68,15 +71,83 @@ impl ExecutionPayload for CustomExecutionData { } } -impl TryFrom<&reth_optimism_flashblocks::FlashBlockCompleteSequence> - for CustomExecutionData -{ +impl CustomExecutionData { + pub fn from_flashblocks_unchecked(flashblocks: &[CustomFlashblockPayload]) -> Self { + // Extract base from first flashblock + // SAFETY: Caller guarantees at least one flashblock exists with base payload + let first = flashblocks.first().expect("flashblocks must not be empty"); + let base = first.base.as_ref().expect("first flashblock must have base payload"); + + // Get the final state from the last flashblock + // SAFETY: Caller guarantees at least one flashblock exists + let diff = &flashblocks.last().expect("flashblocks must not be empty").diff; + + // Collect all transactions and withdrawals from all flashblocks + let (transactions, withdrawals) = + flashblocks.iter().fold((Vec::new(), Vec::new()), |(mut txs, mut withdrawals), p| { + txs.extend(p.diff.transactions.iter().cloned()); + withdrawals.extend(p.diff.withdrawals.iter().cloned()); + (txs, withdrawals) + }); + + let v3 = ExecutionPayloadV3 { + blob_gas_used: diff.blob_gas_used.unwrap_or(0), + excess_blob_gas: 0, + payload_inner: ExecutionPayloadV2 { + withdrawals, + payload_inner: ExecutionPayloadV1 { + parent_hash: base.inner.parent_hash, + fee_recipient: base.inner.fee_recipient, + state_root: diff.state_root, + receipts_root: diff.receipts_root, + logs_bloom: diff.logs_bloom, + prev_randao: base.inner.prev_randao, + block_number: base.inner.block_number, + gas_limit: base.inner.gas_limit, + gas_used: diff.gas_used, + timestamp: base.inner.timestamp, + extra_data: base.inner.extra_data.clone(), + base_fee_per_gas: base.inner.base_fee_per_gas, + block_hash: diff.block_hash, + transactions, + }, + }, + }; + + // Before Isthmus hardfork, withdrawals_root was not included. + // A zero withdrawals_root indicates a pre-Isthmus flashblock. 
+ if diff.withdrawals_root == B256::ZERO { + let inner = OpExecutionData::v3(v3, Vec::new(), base.inner.parent_beacon_block_root); + return Self { inner, extension: base.extension }; + } + + let v4 = OpExecutionPayloadV4 { withdrawals_root: diff.withdrawals_root, payload_inner: v3 }; + let inner = + OpExecutionData::v4(v4, Vec::new(), base.inner.parent_beacon_block_root, Default::default()); + + Self { inner, extension: base.extension } + } +} + +impl TryFrom<&FlashBlockCompleteSequence> for CustomExecutionData { type Error = &'static str; fn try_from( - _sequence: &reth_optimism_flashblocks::FlashBlockCompleteSequence, + sequence: &FlashBlockCompleteSequence, ) -> Result { - todo!("convert flashblock sequence to CustomExecutionData") + let mut data = Self::from_flashblocks_unchecked(sequence); + + if let Some(execution_outcome) = sequence.execution_outcome() { + let payload = data.inner.payload.as_v1_mut(); + payload.state_root = execution_outcome.state_root; + payload.block_hash = execution_outcome.block_hash; + } + + if data.inner.payload.as_v1_mut().state_root == B256::ZERO { + return Err("No state_root available for payload"); + } + + Ok(data) } } diff --git a/examples/custom-node/src/evm/config.rs b/examples/custom-node/src/evm/config.rs index 7666d25a296..37c3c2a42f4 100644 --- a/examples/custom-node/src/evm/config.rs +++ b/examples/custom-node/src/evm/config.rs @@ -144,8 +144,18 @@ pub struct CustomNextBlockEnvAttributes { } impl From for CustomNextBlockEnvAttributes { - fn from(_value: CustomFlashblockPayloadBase) -> Self { - todo!("map CustomFlashblockPayloadBase fields to CustomNextBlockEnvAttributes") + fn from(value: CustomFlashblockPayloadBase) -> Self { + Self { + inner: OpNextBlockEnvAttributes { + timestamp: value.inner.timestamp, + suggested_fee_recipient: value.inner.fee_recipient, + prev_randao: value.inner.prev_randao, + gas_limit: value.inner.gas_limit, + parent_beacon_block_root: Some(value.inner.parent_beacon_block_root), + extra_data: 
value.inner.extra_data, + }, + extension: value.extension, + } } } diff --git a/examples/custom-node/src/flashblock.rs b/examples/custom-node/src/flashblock.rs index dc1e42936c6..b4c132236ca 100644 --- a/examples/custom-node/src/flashblock.rs +++ b/examples/custom-node/src/flashblock.rs @@ -1,96 +1,51 @@ use crate::primitives::CustomTransaction; -use alloy_consensus::{crypto::RecoveryError, transaction::Recovered}; -use alloy_eips::{eip2718::WithEncoded, eip4895::Withdrawals}; -use alloy_primitives::{Bloom, Bytes, B256}; +use alloy_consensus::{ + crypto::RecoveryError, + transaction::{Recovered, SignerRecoverable}, +}; +use alloy_eips::{eip2718::WithEncoded, Decodable2718}; +use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadId; -use reth_optimism_flashblocks::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}; -use serde::{Deserialize, Deserializer}; +use op_alloy_rpc_types_engine::{ + OpFlashblockPayloadBase, OpFlashblockPayloadDelta, OpFlashblockPayloadMetadata, +}; +use reth_optimism_flashblocks::{FlashblockPayload, FlashblockPayloadBase}; +use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct CustomFlashblockPayloadBase { - pub parent_hash: B256, - pub block_number: u64, - pub timestamp: u64, + #[serde(flatten)] + pub inner: OpFlashblockPayloadBase, + pub extension: u64, } impl FlashblockPayloadBase for CustomFlashblockPayloadBase { fn parent_hash(&self) -> B256 { - self.parent_hash + self.inner.parent_hash } fn block_number(&self) -> u64 { - self.block_number + self.inner.block_number } fn timestamp(&self) -> u64 { - self.timestamp + self.inner.timestamp } } -#[derive(Debug, Clone, Default)] -pub struct CustomFlashblockPayloadDiff { - pub block_hash: B256, - pub state_root: B256, - pub gas_used: u64, - pub logs_bloom: Bloom, - pub receipts_root: B256, - pub transactions: Vec, -} - -impl FlashblockDiff for CustomFlashblockPayloadDiff { - fn 
block_hash(&self) -> B256 { - self.block_hash - } - - fn state_root(&self) -> B256 { - self.state_root - } - - fn gas_used(&self) -> u64 { - self.gas_used - } - - fn logs_bloom(&self) -> &Bloom { - &self.logs_bloom - } - - fn receipts_root(&self) -> B256 { - self.receipts_root - } - - fn transactions_raw(&self) -> &[Bytes] { - &self.transactions - } - - fn withdrawals(&self) -> Option<&Withdrawals> { - None - } - - fn withdrawals_root(&self) -> Option { - None - } -} - -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct CustomFlashblockPayload { - pub index: u64, pub payload_id: PayloadId, + pub index: u64, + #[serde(skip_serializing_if = "Option::is_none")] pub base: Option, - pub diff: CustomFlashblockPayloadDiff, -} - -impl<'de> Deserialize<'de> for CustomFlashblockPayload { - fn deserialize(_deserializer: D) -> Result - where - D: Deserializer<'de>, - { - todo!("implement deserialization") - } + pub diff: OpFlashblockPayloadDelta, + pub metadata: OpFlashblockPayloadMetadata, } impl FlashblockPayload for CustomFlashblockPayload { type Base = CustomFlashblockPayloadBase; - type Diff = CustomFlashblockPayloadDiff; + type Diff = OpFlashblockPayloadDelta; type SignedTx = CustomTransaction; fn index(&self) -> u64 { @@ -110,12 +65,16 @@ impl FlashblockPayload for CustomFlashblockPayload { } fn block_number(&self) -> u64 { - self.base.as_ref().map(|b| b.block_number()).unwrap_or(0) + self.metadata.block_number } fn recover_transactions( &self, ) -> impl Iterator>, RecoveryError>> { - std::iter::from_fn(|| todo!("implement transaction recovery")) + self.diff.transactions.clone().into_iter().map(|raw| { + let tx = CustomTransaction::decode_2718(&mut raw.as_ref()) + .map_err(RecoveryError::from_source)?; + tx.try_into_recovered().map(|tx| tx.into_encoded_with(raw.clone())) + }) } } From 78c1f11dac5088b417d0d21e8847cea4d739aea5 Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 31 Dec 2025 17:21:47 +1100 Subject: [PATCH 008/267] make pr linting 
--- examples/custom-node/src/engine.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index d212c8f3b7c..56fec06bd94 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -129,9 +129,14 @@ impl CustomExecutionData { return Self { inner, extension: base.extension }; } - let v4 = OpExecutionPayloadV4 { withdrawals_root: diff.withdrawals_root, payload_inner: v3 }; - let inner = - OpExecutionData::v4(v4, Vec::new(), base.inner.parent_beacon_block_root, Default::default()); + let v4 = + OpExecutionPayloadV4 { withdrawals_root: diff.withdrawals_root, payload_inner: v3 }; + let inner = OpExecutionData::v4( + v4, + Vec::new(), + base.inner.parent_beacon_block_root, + Default::default(), + ); Self { inner, extension: base.extension } } From f4532093faa3680ae6548e3ebe4befc57c85066f Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 31 Dec 2025 17:38:01 +1100 Subject: [PATCH 009/267] remove unusused trait methods remove default impl for block number --- crates/optimism/flashblocks/src/op_impl.rs | 12 +----------- crates/optimism/flashblocks/src/traits.rs | 15 +-------------- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/crates/optimism/flashblocks/src/op_impl.rs b/crates/optimism/flashblocks/src/op_impl.rs index a6a42b82063..c7ce4dd327f 100644 --- a/crates/optimism/flashblocks/src/op_impl.rs +++ b/crates/optimism/flashblocks/src/op_impl.rs @@ -2,7 +2,7 @@ use crate::traits::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}; use alloy_consensus::crypto::RecoveryError; -use alloy_eips::{eip2718::WithEncoded, eip4895::Withdrawals}; +use alloy_eips::eip2718::WithEncoded; use alloy_primitives::{Bloom, Bytes, B256}; use alloy_rpc_types_engine::PayloadId; use op_alloy_consensus::OpTxEnvelope; @@ -49,16 +49,6 @@ impl FlashblockDiff for OpFlashblockPayloadDelta { fn transactions_raw(&self) -> &[Bytes] { &self.transactions } - 
- fn withdrawals(&self) -> Option<&Withdrawals> { - // TODO: Might not be needed as withdrawals aren't processed in a block except if at start - // or end - None - } - - fn withdrawals_root(&self) -> Option { - Some(self.withdrawals_root) - } } impl FlashblockPayload for OpFlashblockPayload { diff --git a/crates/optimism/flashblocks/src/traits.rs b/crates/optimism/flashblocks/src/traits.rs index 52aca48d816..bc199674954 100644 --- a/crates/optimism/flashblocks/src/traits.rs +++ b/crates/optimism/flashblocks/src/traits.rs @@ -4,7 +4,6 @@ //! the core flashblock infrastructure. use alloy_consensus::crypto::RecoveryError; -use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{Bloom, Bytes, B256}; use alloy_rpc_types_engine::PayloadId; @@ -37,16 +36,6 @@ pub trait FlashblockDiff: Clone + Send + Sync + std::fmt::Debug + 'static { fn receipts_root(&self) -> B256; /// Raw encoded transactions in this flashblock. fn transactions_raw(&self) -> &[Bytes]; - - /// Withdrawals included in this flashblock. - fn withdrawals(&self) -> Option<&Withdrawals> { - None - } - - /// Withdrawals root. - fn withdrawals_root(&self) -> Option { - None - } } /// A flashblock payload representing one slice of a block. @@ -76,9 +65,7 @@ pub trait FlashblockPayload: fn diff(&self) -> &Self::Diff; /// Block number this flashblock belongs to. - fn block_number(&self) -> u64 { - self.base().map(|b| b.block_number()).unwrap_or(0) - } + fn block_number(&self) -> u64; /// Recovers transactions from the raw transaction bytes in this flashblock. 
/// From d6de2f9a870e2b80704d8f1a6803eeaa082271f3 Mon Sep 17 00:00:00 2001 From: Rez Date: Fri, 2 Jan 2026 16:20:44 +1100 Subject: [PATCH 010/267] remove useless clippy ignore --- crates/trie/trie/src/node_iter.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index 36bc303b85c..7d53bd4b6d4 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -84,19 +84,16 @@ where K: AsRef, { /// Creates a new [`TrieNodeIter`] for the state trie. - #[allow(clippy::missing_const_for_fn)] pub fn state_trie(walker: TrieWalker, hashed_cursor: H) -> Self { Self::new(walker, hashed_cursor, TrieType::State) } /// Creates a new [`TrieNodeIter`] for the storage trie. - #[allow(clippy::missing_const_for_fn)] pub fn storage_trie(walker: TrieWalker, hashed_cursor: H) -> Self { Self::new(walker, hashed_cursor, TrieType::Storage) } /// Creates a new [`TrieNodeIter`]. - #[allow(clippy::missing_const_for_fn)] fn new(walker: TrieWalker, hashed_cursor: H, trie_type: TrieType) -> Self { Self { walker, From 2301225e730ce5cf107a4c3c40a7beb800ed6a68 Mon Sep 17 00:00:00 2001 From: Rez Date: Fri, 2 Jan 2026 16:40:26 +1100 Subject: [PATCH 011/267] relax trait bounds Update traits.rs Update traits.rs --- crates/optimism/flashblocks/src/traits.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/optimism/flashblocks/src/traits.rs b/crates/optimism/flashblocks/src/traits.rs index bc199674954..2d80d305420 100644 --- a/crates/optimism/flashblocks/src/traits.rs +++ b/crates/optimism/flashblocks/src/traits.rs @@ -11,7 +11,7 @@ use alloy_rpc_types_engine::PayloadId; /// /// Contains all fields needed to configure EVM execution context for the next block. /// This is present only on the first flashblock (index 0) of a sequence. 
-pub trait FlashblockPayloadBase: Clone + Send + Sync + std::fmt::Debug + 'static { +pub trait FlashblockPayloadBase: Clone + Send + Sync + 'static { /// Parent block hash. fn parent_hash(&self) -> B256; /// Block number being built. @@ -23,7 +23,7 @@ pub trait FlashblockPayloadBase: Clone + Send + Sync + std::fmt::Debug + 'static /// State diff from flashblock execution. /// /// Contains the cumulative state changes from executing transactions in this flashblock. -pub trait FlashblockDiff: Clone + Send + Sync + std::fmt::Debug + 'static { +pub trait FlashblockDiff: Clone + Send + Sync + 'static { /// Block hash after applying this flashblock. fn block_hash(&self) -> B256; /// State root after applying this flashblock. @@ -43,7 +43,7 @@ pub trait FlashblockDiff: Clone + Send + Sync + std::fmt::Debug + 'static { /// Flashblocks are incremental updates to block state, allowing for faster /// pre-confirmations. A complete block is built from a sequence of flashblocks. pub trait FlashblockPayload: - Clone + Send + Sync + std::fmt::Debug + for<'de> serde::Deserialize<'de> + 'static + Clone + Send + Sync + 'static + for<'de> serde::Deserialize<'de> { /// The base payload type containing block environment configuration. type Base: FlashblockPayloadBase; From 6ceac1fb13d54136661fac12259b962433fb8bb6 Mon Sep 17 00:00:00 2001 From: Rez Date: Mon, 5 Jan 2026 22:47:51 -0800 Subject: [PATCH 012/267] merge conflict issues --- crates/optimism/rpc/src/eth/mod.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 673dae0e1f6..bfb0bf1396f 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -1,5 +1,6 @@ //! OP-Reth `eth_` endpoint implementation. 
+use reth_optimism_flashblocks::FlashblockPayloadBase; pub mod ext; pub mod receipt; pub mod transaction; @@ -28,8 +29,8 @@ use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, - FlashBlockConsensusClient, FlashBlockService, FlashblockPayload, FlashblocksListeners, - PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, + FlashBlockConsensusClient, FlashBlockService, FlashblockDiff, FlashblockPayload, + FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_primitives_traits::NodePrimitives; use reth_rpc::eth::core::EthApiInner; @@ -163,8 +164,8 @@ impl OpEthApi }; // Update state from base flashblock for block level meta data. - if let Some(base) = &fb.base { - *state = Some((base.block_number, base.timestamp)); + if let Some(base) = &fb.base() { + *state = Some((base.block_number(), base.timestamp())); } let Some((block_number, timestamp)) = *state else { @@ -174,11 +175,11 @@ impl OpEthApi }; let receipts = - fb.metadata.receipts.iter().map(|(tx, receipt)| (*tx, receipt)); + fb.metadata().receipts.iter().map(|(tx, receipt)| (*tx, receipt)); let all_logs = matching_block_logs_with_tx_hashes( &filter, - BlockNumHash::new(block_number, fb.diff.block_hash), + BlockNumHash::new(block_number, fb.diff().block_hash()), timestamp, receipts, false, From 131a6666cba261c7c879c757c501cb968a72bec8 Mon Sep 17 00:00:00 2001 From: Rez Date: Mon, 5 Jan 2026 23:07:22 -0800 Subject: [PATCH 013/267] extend FlashblockPayload to support Metadata --- crates/optimism/flashblocks/src/lib.rs | 2 +- crates/optimism/flashblocks/src/op_impl.rs | 18 ++++++++++++++++-- crates/optimism/flashblocks/src/traits.rs | 16 +++++++++++++++- crates/optimism/rpc/src/eth/mod.rs | 7 +++---- examples/custom-node/src/flashblock.rs | 5 +++++ 5 files changed, 40 insertions(+), 8 
deletions(-) diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index 817481e5e5d..a3277cdbbe1 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use reth_optimism_primitives as _; pub mod traits; -pub use traits::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}; +pub use traits::{FlashblockDiff, FlashblockMetadata, FlashblockPayload, FlashblockPayloadBase}; mod op_impl; diff --git a/crates/optimism/flashblocks/src/op_impl.rs b/crates/optimism/flashblocks/src/op_impl.rs index c7ce4dd327f..ea425f7f263 100644 --- a/crates/optimism/flashblocks/src/op_impl.rs +++ b/crates/optimism/flashblocks/src/op_impl.rs @@ -1,13 +1,14 @@ //! Optimism implementation of flashblock traits. -use crate::traits::{FlashblockDiff, FlashblockPayload, FlashblockPayloadBase}; +use crate::traits::{FlashblockDiff, FlashblockMetadata, FlashblockPayload, FlashblockPayloadBase}; use alloy_consensus::crypto::RecoveryError; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::{Bloom, Bytes, B256}; use alloy_rpc_types_engine::PayloadId; -use op_alloy_consensus::OpTxEnvelope; +use op_alloy_consensus::{OpReceipt, OpTxEnvelope}; use op_alloy_rpc_types_engine::{ OpFlashblockPayload, OpFlashblockPayloadBase, OpFlashblockPayloadDelta, + OpFlashblockPayloadMetadata, }; use reth_primitives_traits::Recovered; @@ -51,10 +52,19 @@ impl FlashblockDiff for OpFlashblockPayloadDelta { } } +impl FlashblockMetadata for OpFlashblockPayloadMetadata { + type Receipt = OpReceipt; + + fn receipts(&self) -> impl Iterator { + self.receipts.iter().map(|(k, v)| (*k, v)) + } +} + impl FlashblockPayload for OpFlashblockPayload { type Base = OpFlashblockPayloadBase; type Diff = OpFlashblockPayloadDelta; type SignedTx = OpTxEnvelope; + type Metadata = OpFlashblockPayloadMetadata; fn index(&self) -> u64 { self.index @@ -72,6 +82,10 @@ impl FlashblockPayload for OpFlashblockPayload { 
&self.diff } + fn metadata(&self) -> &Self::Metadata { + &self.metadata + } + fn block_number(&self) -> u64 { Self::block_number(self) } diff --git a/crates/optimism/flashblocks/src/traits.rs b/crates/optimism/flashblocks/src/traits.rs index 2d80d305420..663ec41d599 100644 --- a/crates/optimism/flashblocks/src/traits.rs +++ b/crates/optimism/flashblocks/src/traits.rs @@ -3,7 +3,7 @@ //! These traits enable chain-specific flashblock implementations while sharing //! the core flashblock infrastructure. -use alloy_consensus::crypto::RecoveryError; +use alloy_consensus::{crypto::RecoveryError, TxReceipt}; use alloy_primitives::{Bloom, Bytes, B256}; use alloy_rpc_types_engine::PayloadId; @@ -38,6 +38,15 @@ pub trait FlashblockDiff: Clone + Send + Sync + 'static { fn transactions_raw(&self) -> &[Bytes]; } +/// Metadata associated with a flashblock payload. +pub trait FlashblockMetadata: Clone + Send + Sync + 'static { + /// The receipt type for this chain. + type Receipt: TxReceipt; + + /// Returns an iterator over receipts. + fn receipts(&self) -> impl Iterator; +} + /// A flashblock payload representing one slice of a block. /// /// Flashblocks are incremental updates to block state, allowing for faster @@ -51,6 +60,8 @@ pub trait FlashblockPayload: type Diff: FlashblockDiff; /// The signed transaction type for this chain. type SignedTx: reth_primitives_traits::SignedTransaction; + /// The metadata type containing chain-specific information like receipts. + type Metadata: FlashblockMetadata; /// Sequential index of this flashblock within the current block's sequence. fn index(&self) -> u64; @@ -64,6 +75,9 @@ pub trait FlashblockPayload: /// State diff for this flashblock. fn diff(&self) -> &Self::Diff; + /// Metadata for this flashblock (receipts, balance changes, etc). + fn metadata(&self) -> &Self::Metadata; + /// Block number this flashblock belongs to. 
fn block_number(&self) -> u64; diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index bfb0bf1396f..ca746b9fca4 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -29,8 +29,8 @@ use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, - FlashBlockConsensusClient, FlashBlockService, FlashblockDiff, FlashblockPayload, - FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, + FlashBlockConsensusClient, FlashBlockService, FlashblockDiff, FlashblockMetadata, + FlashblockPayload, FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_primitives_traits::NodePrimitives; use reth_rpc::eth::core::EthApiInner; @@ -174,8 +174,7 @@ impl OpEthApi return futures::future::ready(Some(Vec::new())) }; - let receipts = - fb.metadata().receipts.iter().map(|(tx, receipt)| (*tx, receipt)); + let receipts = fb.metadata().receipts(); let all_logs = matching_block_logs_with_tx_hashes( &filter, diff --git a/examples/custom-node/src/flashblock.rs b/examples/custom-node/src/flashblock.rs index b4c132236ca..d91c9f74c6d 100644 --- a/examples/custom-node/src/flashblock.rs +++ b/examples/custom-node/src/flashblock.rs @@ -47,6 +47,7 @@ impl FlashblockPayload for CustomFlashblockPayload { type Base = CustomFlashblockPayloadBase; type Diff = OpFlashblockPayloadDelta; type SignedTx = CustomTransaction; + type Metadata = OpFlashblockPayloadMetadata; fn index(&self) -> u64 { self.index @@ -64,6 +65,10 @@ impl FlashblockPayload for CustomFlashblockPayload { &self.diff } + fn metadata(&self) -> &Self::Metadata { + &self.metadata + } + fn block_number(&self) -> u64 { self.metadata.block_number } From 5cf1d2a0b0ea9b77b0005eb9eb101dc192f63df4 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: 
Wed, 14 Jan 2026 20:12:15 +0100 Subject: [PATCH 014/267] fix(trie): Update branch masks when revealing blinded nodes (#20937) --- Cargo.lock | 1 + crates/trie/sparse-parallel/Cargo.toml | 1 + crates/trie/sparse-parallel/src/trie.rs | 185 ++++++++++++++++++++++-- 3 files changed, 177 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 006f28d853f..72043a05c34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11307,6 +11307,7 @@ dependencies = [ "reth-metrics", "reth-primitives-traits", "reth-provider", + "reth-tracing", "reth-trie", "reth-trie-common", "reth-trie-db", diff --git a/crates/trie/sparse-parallel/Cargo.toml b/crates/trie/sparse-parallel/Cargo.toml index 9c62aabaddf..19893831aac 100644 --- a/crates/trie/sparse-parallel/Cargo.toml +++ b/crates/trie/sparse-parallel/Cargo.toml @@ -39,6 +39,7 @@ reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie-db.workspace = true reth-trie-sparse = { workspace = true, features = ["test-utils"] } reth-trie.workspace = true +reth-tracing.workspace = true # misc arbitrary.workspace = true diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 220b698f108..ea0c93b76b0 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -339,7 +339,12 @@ impl SparseTrieInterface for ParallelSparseTrie { if let Some(reveal_path) = reveal_path { let subtrie = self.subtrie_for_path_mut(&reveal_path); - if subtrie.nodes.get(&reveal_path).expect("node must exist").is_hash() { + let reveal_masks = if subtrie + .nodes + .get(&reveal_path) + .expect("node must exist") + .is_hash() + { debug!( target: "trie::parallel_sparse", child_path = ?reveal_path, @@ -360,12 +365,19 @@ impl SparseTrieInterface for ParallelSparseTrie { ); let masks = BranchNodeMasks::from_optional(hash_mask, tree_mask); subtrie.reveal_node(reveal_path, &decoded, masks)?; + masks } else { return 
Err(SparseTrieErrorKind::NodeNotFoundInProvider { path: reveal_path, } .into()) } + } else { + None + }; + + if let Some(masks) = reveal_masks { + self.branch_node_masks.insert(reveal_path, masks); } } @@ -436,7 +448,11 @@ impl SparseTrieInterface for ParallelSparseTrie { // If we didn't update the target leaf, we need to call update_leaf on the subtrie // to ensure that the leaf is updated correctly. - subtrie.update_leaf(full_path, value, provider, retain_updates)?; + if let Some((revealed_path, revealed_masks)) = + subtrie.update_leaf(full_path, value, provider, retain_updates)? + { + self.branch_node_masks.insert(revealed_path, revealed_masks); + } } Ok(()) @@ -1232,7 +1248,7 @@ impl ParallelSparseTrie { ) -> SparseTrieResult { let remaining_child_subtrie = self.subtrie_for_path_mut(remaining_child_path); - let remaining_child_node = match remaining_child_subtrie + let (remaining_child_node, remaining_child_masks) = match remaining_child_subtrie .nodes .get(remaining_child_path) .unwrap() @@ -1258,7 +1274,10 @@ impl ParallelSparseTrie { ); let masks = BranchNodeMasks::from_optional(hash_mask, tree_mask); remaining_child_subtrie.reveal_node(*remaining_child_path, &decoded, masks)?; - remaining_child_subtrie.nodes.get(remaining_child_path).unwrap().clone() + ( + remaining_child_subtrie.nodes.get(remaining_child_path).unwrap().clone(), + masks, + ) } else { return Err(SparseTrieErrorKind::NodeNotFoundInProvider { path: *remaining_child_path, @@ -1266,9 +1285,15 @@ impl ParallelSparseTrie { .into()) } } - node => node.clone(), + // The node is already revealed so we don't need to return its masks here, as they don't + // need to be inserted. + node => (node.clone(), None), }; + if let Some(masks) = remaining_child_masks { + self.branch_node_masks.insert(*remaining_child_path, masks); + } + // If `recurse_into_extension` is true, and the remaining child is an extension node, then // its child will be ensured to be revealed as well. 
This is required for generation of // trie updates; without revealing the grandchild branch it's not always possible to know @@ -1636,9 +1661,9 @@ impl SparseSubtrie { /// /// # Returns /// - /// Returns the `Ok` if the update is successful. + /// Returns the path and masks of any blinded node revealed as a result of updating the leaf. /// - /// Note: If an update requires revealing a blinded node, an error is returned if the blinded + /// If an update requires revealing a blinded node, an error is returned if the blinded /// provider returns an error. pub fn update_leaf( &mut self, @@ -1646,16 +1671,17 @@ impl SparseSubtrie { value: Vec, provider: impl TrieNodeProvider, retain_updates: bool, - ) -> SparseTrieResult<()> { + ) -> SparseTrieResult> { debug_assert!(full_path.starts_with(&self.path)); let existing = self.inner.values.insert(full_path, value); if existing.is_some() { // trie structure unchanged, return immediately - return Ok(()) + return Ok(None) } // Here we are starting at the root of the subtrie, and traversing from there. let mut current = Some(self.path); + let mut revealed = None; while let Some(current_path) = current { match self.update_next_node(current_path, &full_path, retain_updates)? { LeafUpdateStep::Continue { next_node } => { @@ -1685,6 +1711,12 @@ impl SparseSubtrie { ); let masks = BranchNodeMasks::from_optional(hash_mask, tree_mask); self.reveal_node(reveal_path, &decoded, masks)?; + + debug_assert_eq!( + revealed, None, + "Only a single blinded node should be revealed during update_leaf" + ); + revealed = masks.map(|masks| (reveal_path, masks)); } else { return Err(SparseTrieErrorKind::NodeNotFoundInProvider { path: reveal_path, @@ -1701,7 +1733,7 @@ impl SparseSubtrie { } } - Ok(()) + Ok(revealed) } /// Processes the current node, returning what to do next in the leaf update process. 
@@ -6803,4 +6835,137 @@ mod tests { if path == path_to_blind && hash == blinded_hash ); } + + #[test] + fn test_mainnet_block_24185431_storage_0x6ba784ee() { + reth_tracing::init_test_tracing(); + + // Reveal branch at 0x3 with full state + let mut branch_0x3_hashes = vec![ + B256::from(hex!("fc11ba8de4b220b8f19a09f0676c69b8e18bae1350788392640069e59b41733d")), + B256::from(hex!("8afe085cc6685680bd8ba4bac6e65937a4babf737dc5e7413d21cdda958e8f74")), + B256::from(hex!("c7b6f7c0fc601a27aece6ec178fd9be17cdee77c4884ecfbe1ee459731eb57da")), + B256::from(hex!("71c1aec60db78a2deb4e10399b979a2ed5be42b4ee0c0a17c614f9ddc9f9072e")), + B256::from(hex!("e9261302e7c0b77930eaf1851b585210906cd01e015ab6be0f7f3c0cc947c32a")), + B256::from(hex!("38ce8f369c56bd77fabdf679b27265b1f8d0a54b09ef612c8ee8ddfc6b3fab95")), + B256::from(hex!("7b507a8936a28c5776b647d1c4bda0bbbb3d0d227f16c5f5ebba58d02e31918d")), + B256::from(hex!("0f456b9457a824a81e0eb555aa861461acb38674dcf36959b3b26deb24ed0af9")), + B256::from(hex!("2145420289652722ad199ba932622e3003c779d694fa5a2acfb2f77b0782b38a")), + B256::from(hex!("2c1a04dce1a9e2f1cfbf8806edce50a356dfa58e7e7c542c848541502613b796")), + B256::from(hex!("dad7ca55186ac8f40d4450dc874166df8267b44abc07e684d9507260f5712df3")), + B256::from(hex!("3a8c2a1d7d2423e92965ec29014634e7f0307ded60b1a63d28c86c3222b24236")), + B256::from(hex!("4e9929e6728b3a7bf0db6a0750ab376045566b556c9c605e606ecb8ec25200d7")), + B256::from(hex!("1797c36f98922f52292c161590057a1b5582d5503e3370bcfbf6fd939f3ec98b")), + B256::from(hex!("9e514589a9c9210b783c19fa3f0b384bbfaefe98f10ea189a2bfc58c6bf000a1")), + B256::from(hex!("85bdaabbcfa583cbd049650e41d3d19356bd833b3ed585cf225a3548557c7fa3")), + ]; + let branch_0x3_node = create_branch_node_with_children( + &[0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf], + branch_0x3_hashes.iter().map(RlpNode::word_rlp), + ); + + // Reveal branch at 0x31 + let branch_0x31_hashes = vec![B256::from(hex!( + 
"3ca994ba59ce70b83fee1f01731c8dac4fdd0f70ade79bf9b0695c4c53531aab" + ))]; + let branch_0x31_node = create_branch_node_with_children( + &[0xc], + branch_0x31_hashes.into_iter().map(|h| RlpNode::word_rlp(&h)), + ); + + // Reveal leaf at 0x31b0b645a6c4a0a1bb3d2f0c1d31c39f4aba2e3b015928a8eef7161e28388b81 + let leaf_path = hex!("31b0b645a6c4a0a1bb3d2f0c1d31c39f4aba2e3b015928a8eef7161e28388b81"); + let leaf_nibbles = Nibbles::unpack(leaf_path.as_slice()); + let leaf_value = hex!("0009ae8ce8245bff").to_vec(); + + // Reveal branch at 0x31c + let branch_0x31c_hashes = vec![ + B256::from(hex!("1a68fdb36b77e9332b49a977faf800c22d0199e6cecf44032bb083c78943e540")), + B256::from(hex!("cd4622c6df6fd7172c7fed1b284ef241e0f501b4c77b675ef10c612bd0948a7a")), + B256::from(hex!("abf3603d2f991787e21f1709ee4c7375d85dfc506995c0435839fccf3fe2add4")), + ]; + let branch_0x31c_node = create_branch_node_with_children( + &[0x3, 0x7, 0xc], + branch_0x31c_hashes.into_iter().map(|h| RlpNode::word_rlp(&h)), + ); + let mut branch_0x31c_node_encoded = Vec::new(); + branch_0x31c_node.encode(&mut branch_0x31c_node_encoded); + + // Create a mock provider and preload 0x31c onto it, it will be revealed during remove_leaf. 
+ let mut provider = MockTrieNodeProvider::new(); + provider.add_revealed_node( + Nibbles::from_nibbles([0x3, 0x1, 0xc]), + RevealedNode { + node: branch_0x31c_node_encoded.into(), + tree_mask: Some(0.into()), + hash_mask: Some(4096.into()), + }, + ); + + // Reveal the trie structure using ProofTrieNode + let proof_nodes = vec![ + ProofTrieNode { + path: Nibbles::from_nibbles([0x3]), + node: branch_0x3_node, + masks: Some(BranchNodeMasks { + tree_mask: TrieMask::new(26099), + hash_mask: TrieMask::new(65535), + }), + }, + ProofTrieNode { + path: Nibbles::from_nibbles([0x3, 0x1]), + node: branch_0x31_node, + masks: Some(BranchNodeMasks { + tree_mask: TrieMask::new(4096), + hash_mask: TrieMask::new(4096), + }), + }, + ]; + + // Create a sparse trie and reveal nodes + let mut trie = ParallelSparseTrie::default() + .with_root( + TrieNode::Extension(ExtensionNode { + key: Nibbles::from_nibbles([0x3]), + child: RlpNode::word_rlp(&B256::ZERO), + }), + None, + true, + ) + .expect("root revealed"); + + trie.reveal_nodes(proof_nodes).unwrap(); + + // Update the leaf in order to reveal it in the trie + trie.update_leaf(leaf_nibbles, leaf_value, &provider).unwrap(); + + // Now delete the leaf + trie.remove_leaf(&leaf_nibbles, &provider).unwrap(); + + // Compute the root to trigger updates + let _ = trie.root(); + + // Assert the resulting branch node updates + let updates = trie.updates_ref(); + + // Check that the branch at 0x3 was updated with the expected structure + let branch_0x3_update = updates + .updated_nodes + .get(&Nibbles::from_nibbles([0x3])) + .expect("Branch at 0x3 should be in updates"); + + // We no longer expect to track the hash for child 1 + branch_0x3_hashes.remove(1); + + // Expected structure from prompt.md + let expected_branch = BranchNodeCompact::new( + 0b1111111111111111, + 0b0110010111110011, + 0b1111111111111101, + branch_0x3_hashes, + None, + ); + + assert_eq!(branch_0x3_update, &expected_branch); + } } From 15f16a5a2ee4f3867bdc94a34f506eda0b8490e3 
Mon Sep 17 00:00:00 2001 From: ethfanWilliam Date: Wed, 14 Jan 2026 23:22:22 +0400 Subject: [PATCH 015/267] fix: propagate keccak-cache-global feature to reth-optimism-cli (#21051) Co-authored-by: Matthias Seitz --- crates/optimism/bin/Cargo.toml | 1 + crates/optimism/cli/Cargo.toml | 6 ++++++ crates/optimism/reth/Cargo.toml | 1 + 3 files changed, 8 insertions(+) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index ecaa339ab8e..4049ee1ecad 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -43,6 +43,7 @@ tracy = ["reth-optimism-cli/tracy"] asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"] keccak-cache-global = [ + "reth-optimism-cli/keccak-cache-global", "reth-optimism-node/keccak-cache-global", ] dev = [ diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index a928c8af2b9..db171bab782 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -85,6 +85,12 @@ asm-keccak = [ "reth-optimism-node/asm-keccak", ] +keccak-cache-global = [ + "alloy-primitives/keccak-cache-global", + "reth-node-core/keccak-cache-global", + "reth-optimism-node/keccak-cache-global", +] + # Jemalloc feature for vergen to generate correct env vars jemalloc = [ "reth-node-core/jemalloc", diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index f18f0e10db8..cd39a0bcf81 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -77,6 +77,7 @@ arbitrary = [ keccak-cache-global = [ "reth-optimism-node?/keccak-cache-global", "reth-node-core?/keccak-cache-global", + "reth-optimism-cli?/keccak-cache-global", ] test-utils = [ "reth-chainspec/test-utils", From 8cb506c4d3bf90aebfcb51221c5fbe96194e1f07 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 14 Jan 2026 19:26:23 +0000 Subject: [PATCH 016/267] perf: don't clone entire keys set (#21042) --- .../src/tree/payload_processor/multiproof.rs | 43 
++++++++++++++++++- crates/trie/common/src/added_removed_keys.rs | 6 ++- 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index b502e4decf0..db44b1f98d6 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -20,6 +20,7 @@ use reth_trie_parallel::{ AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, }, }; +use revm_primitives::map::{hash_map, B256Map}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; use tracing::{debug, error, instrument, trace}; @@ -609,7 +610,19 @@ impl MultiProofTask { self.multi_added_removed_keys.touch_accounts(targets.keys().copied()); // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks - let multi_added_removed_keys = Arc::new(self.multi_added_removed_keys.clone()); + let multi_added_removed_keys = Arc::new(MultiAddedRemovedKeys { + account: self.multi_added_removed_keys.account.clone(), + storages: targets + .keys() + .filter_map(|account| { + self.multi_added_removed_keys + .storages + .get(account) + .cloned() + .map(|keys| (*account, keys)) + }) + .collect(), + }); self.metrics.prefetch_proof_targets_accounts_histogram.record(targets.len() as f64); self.metrics @@ -705,7 +718,33 @@ impl MultiProofTask { } // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks - let multi_added_removed_keys = Arc::new(self.multi_added_removed_keys.clone()); + let multi_added_removed_keys = Arc::new(MultiAddedRemovedKeys { + account: self.multi_added_removed_keys.account.clone(), + storages: { + let mut storages = B256Map::with_capacity_and_hasher( + not_fetched_state_update.storages.len(), + Default::default(), + ); + + for account in not_fetched_state_update + .storages + .keys() + .chain(not_fetched_state_update.accounts.keys()) + { 
+ if let hash_map::Entry::Vacant(entry) = storages.entry(*account) { + entry.insert( + self.multi_added_removed_keys + .storages + .get(account) + .cloned() + .unwrap_or_default(), + ); + } + } + + storages + }, + }); let chunking_len = not_fetched_state_update.chunking_length(); let mut spawned_proof_targets = MultiProofTargets::default(); diff --git a/crates/trie/common/src/added_removed_keys.rs b/crates/trie/common/src/added_removed_keys.rs index 8e61423718a..34a4561dad6 100644 --- a/crates/trie/common/src/added_removed_keys.rs +++ b/crates/trie/common/src/added_removed_keys.rs @@ -7,8 +7,10 @@ use alloy_trie::proof::AddedRemovedKeys; /// Tracks added and removed keys across account and storage tries. #[derive(Debug, Clone)] pub struct MultiAddedRemovedKeys { - account: AddedRemovedKeys, - storages: B256Map, + /// Added and removed accounts. + pub account: AddedRemovedKeys, + /// Added and removed storage keys for each account. + pub storages: B256Map, } /// Returns [`AddedRemovedKeys`] with default parameters. This is necessary while we are not yet From 1bc07fad8e2b04978d32289c850e0eb17afa8a06 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 14 Jan 2026 19:31:11 +0000 Subject: [PATCH 017/267] perf: use binary search in `ForwardInMemoryCursor` (#21049) --- crates/trie/trie/src/forward_cursor.rs | 93 +++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 9 deletions(-) diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs index 5abb5e2431a..e6a98a61861 100644 --- a/crates/trie/trie/src/forward_cursor.rs +++ b/crates/trie/trie/src/forward_cursor.rs @@ -53,9 +53,13 @@ impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { } } +/// Threshold for remaining entries above which binary search is used instead of linear scan. +/// For small slices, linear scan has better cache locality and lower overhead. 
+const BINARY_SEARCH_THRESHOLD: usize = 64; + impl ForwardInMemoryCursor<'_, K, V> where - K: PartialOrd + Clone, + K: Ord + Clone, V: Clone, { /// Returns the first entry from the current cursor position that's greater or equal to the @@ -73,19 +77,22 @@ where /// Advances the cursor forward while `predicate` returns `true` or until the collection is /// exhausted. /// + /// Uses binary search for large remaining slices (>= 64 entries), linear scan for small ones. + /// /// Returns the first entry for which `predicate` returns `false` or `None`. The cursor will /// point to the returned entry. fn advance_while(&mut self, predicate: impl Fn(&K) -> bool) -> Option<(K, V)> { - let mut entry; - loop { - entry = self.current(); - if entry.is_some_and(|(k, _)| predicate(k)) { + let remaining = self.entries.len().saturating_sub(self.idx); + if remaining >= BINARY_SEARCH_THRESHOLD { + let slice = &self.entries[self.idx..]; + let pos = slice.partition_point(|(k, _)| predicate(k)); + self.idx += pos; + } else { + while self.current().is_some_and(|(k, _)| predicate(k)) { self.next(); - } else { - break; } } - entry.cloned() + self.current().cloned() } } @@ -94,7 +101,7 @@ mod tests { use super::*; #[test] - fn test_cursor() { + fn test_cursor_small() { let mut cursor = ForwardInMemoryCursor::new(&[(1, ()), (2, ()), (3, ()), (4, ()), (5, ())]); assert_eq!(cursor.current(), Some(&(1, ()))); @@ -113,4 +120,72 @@ mod tests { assert_eq!(cursor.seek(&6), None); assert_eq!(cursor.current(), None); } + + #[test] + fn test_cursor_large_binary_search() { + // Create a large enough collection to trigger binary search + let entries: Vec<(i32, ())> = (0..200).map(|i| (i * 2, ())).collect(); + let mut cursor = ForwardInMemoryCursor::new(&entries); + + // Seek to beginning + assert_eq!(cursor.seek(&0), Some((0, ()))); + assert_eq!(cursor.idx, 0); + + // Seek to middle (should use binary search) + assert_eq!(cursor.seek(&100), Some((100, ()))); + assert_eq!(cursor.idx, 50); + + // Seek to 
non-existent key (should find next greater) + assert_eq!(cursor.seek(&101), Some((102, ()))); + assert_eq!(cursor.idx, 51); + + // Seek to end + assert_eq!(cursor.seek(&398), Some((398, ()))); + assert_eq!(cursor.idx, 199); + + // Seek past end + assert_eq!(cursor.seek(&1000), None); + } + + #[test] + fn test_first_after_large() { + let entries: Vec<(i32, ())> = (0..200).map(|i| (i * 2, ())).collect(); + let mut cursor = ForwardInMemoryCursor::new(&entries); + + // first_after should find strictly greater + assert_eq!(cursor.first_after(&0), Some((2, ()))); + assert_eq!(cursor.idx, 1); + + // Reset and test from beginning + cursor.reset(); + assert_eq!(cursor.first_after(&99), Some((100, ()))); + + // first_after on exact match + cursor.reset(); + assert_eq!(cursor.first_after(&100), Some((102, ()))); + } + + #[test] + fn test_cursor_consistency() { + // Verify binary search and linear scan produce same results + let entries: Vec<(i32, ())> = (0..200).map(|i| (i * 3, ())).collect(); + + for search_key in [0, 1, 3, 50, 150, 299, 300, 597, 598, 599, 1000] { + // Test with fresh cursor (binary search path) + let mut cursor1 = ForwardInMemoryCursor::new(&entries); + let result1 = cursor1.seek(&search_key); + + // Manually advance to trigger linear path by getting close first + let mut cursor2 = ForwardInMemoryCursor::new(&entries); + if search_key > 100 { + cursor2.seek(&(search_key - 50)); + } + let result2 = cursor2.seek(&search_key); + + assert_eq!( + result1, result2, + "Mismatch for key {search_key}: binary={result1:?}, linear={result2:?}" + ); + } + } } From 1fbd5a95f8e229e08b4202e07574b1a92775bc21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20Szczygie=C5=82?= Date: Wed, 14 Jan 2026 22:29:00 +0100 Subject: [PATCH 018/267] feat: Support for sending logs through OTLP (#21039) Co-authored-by: Matthias Seitz --- .config/zepter.yaml | 2 +- Cargo.lock | 13 +++ Cargo.toml | 1 + bin/reth/Cargo.toml | 6 +- crates/ethereum/cli/Cargo.toml | 1 + 
crates/ethereum/cli/src/interface.rs | 17 ++- crates/node/core/Cargo.toml | 3 +- crates/node/core/src/args/mod.rs | 2 +- crates/node/core/src/args/trace.rs | 88 ++++++++++++++- crates/optimism/cli/Cargo.toml | 3 +- crates/optimism/cli/src/app.rs | 14 ++- crates/tracing-otlp/Cargo.toml | 8 ++ crates/tracing-otlp/src/lib.rs | 106 ++++++++++++++++-- crates/tracing/Cargo.toml | 1 + crates/tracing/src/layers.rs | 18 +++ docs/vocs/docs/pages/cli/op-reth.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/config.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db.mdx | 22 +++- .../pages/cli/op-reth/db/account-storage.mdx | 22 +++- .../docs/pages/cli/op-reth/db/checksum.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db/clear.mdx | 22 +++- .../docs/pages/cli/op-reth/db/clear/mdbx.mdx | 22 +++- .../cli/op-reth/db/clear/static-file.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db/diff.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db/drop.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db/get.mdx | 22 +++- .../docs/pages/cli/op-reth/db/get/mdbx.mdx | 22 +++- .../pages/cli/op-reth/db/get/static-file.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db/list.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db/path.mdx | 22 +++- .../docs/pages/cli/op-reth/db/repair-trie.mdx | 22 +++- .../docs/pages/cli/op-reth/db/settings.mdx | 22 +++- .../pages/cli/op-reth/db/settings/get.mdx | 22 +++- .../pages/cli/op-reth/db/settings/set.mdx | 22 +++- .../db/settings/set/account_changesets.mdx | 22 +++- .../cli/op-reth/db/settings/set/receipts.mdx | 22 +++- .../db/settings/set/transaction_senders.mdx | 22 +++- .../cli/op-reth/db/static-file-header.mdx | 22 +++- .../op-reth/db/static-file-header/block.mdx | 22 +++- .../op-reth/db/static-file-header/path.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/db/stats.mdx | 22 +++- .../docs/pages/cli/op-reth/db/version.mdx | 22 +++- .../docs/pages/cli/op-reth/dump-genesis.mdx | 22 +++- .../vocs/docs/pages/cli/op-reth/import-op.mdx | 22 +++- 
.../pages/cli/op-reth/import-receipts-op.mdx | 22 +++- .../docs/pages/cli/op-reth/init-state.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/init.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/node.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/p2p.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx | 22 +++- .../docs/pages/cli/op-reth/p2p/bootnode.mdx | 22 +++- .../docs/pages/cli/op-reth/p2p/header.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx | 22 +++- .../docs/pages/cli/op-reth/p2p/rlpx/ping.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/prune.mdx | 22 +++- .../docs/pages/cli/op-reth/re-execute.mdx | 22 +++- docs/vocs/docs/pages/cli/op-reth/stage.mdx | 22 +++- .../docs/pages/cli/op-reth/stage/drop.mdx | 22 +++- .../docs/pages/cli/op-reth/stage/dump.mdx | 22 +++- .../op-reth/stage/dump/account-hashing.mdx | 22 +++- .../cli/op-reth/stage/dump/execution.mdx | 22 +++- .../pages/cli/op-reth/stage/dump/merkle.mdx | 22 +++- .../op-reth/stage/dump/storage-hashing.mdx | 22 +++- .../vocs/docs/pages/cli/op-reth/stage/run.mdx | 22 +++- .../docs/pages/cli/op-reth/stage/unwind.mdx | 22 +++- .../cli/op-reth/stage/unwind/num-blocks.mdx | 22 +++- .../cli/op-reth/stage/unwind/to-block.mdx | 22 +++- docs/vocs/docs/pages/cli/reth.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/config.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db.mdx | 22 +++- .../pages/cli/reth/db/account-storage.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/clear.mdx | 22 +++- .../docs/pages/cli/reth/db/clear/mdbx.mdx | 22 +++- .../pages/cli/reth/db/clear/static-file.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/get.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 22 +++- .../pages/cli/reth/db/get/static-file.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/list.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/path.mdx | 22 +++- 
.../docs/pages/cli/reth/db/repair-trie.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/settings.mdx | 22 +++- .../docs/pages/cli/reth/db/settings/get.mdx | 22 +++- .../docs/pages/cli/reth/db/settings/set.mdx | 22 +++- .../db/settings/set/account_changesets.mdx | 22 +++- .../cli/reth/db/settings/set/receipts.mdx | 22 +++- .../db/settings/set/transaction_senders.mdx | 22 +++- .../pages/cli/reth/db/static-file-header.mdx | 22 +++- .../cli/reth/db/static-file-header/block.mdx | 22 +++- .../cli/reth/db/static-file-header/path.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/db/version.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/download.mdx | 22 +++- .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/export-era.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/import-era.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/import.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/init-state.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/init.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/node.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/p2p.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 22 +++- .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 22 +++- .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/prune.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/stage.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 22 +++- .../cli/reth/stage/dump/account-hashing.mdx | 22 +++- .../pages/cli/reth/stage/dump/execution.mdx | 22 +++- .../docs/pages/cli/reth/stage/dump/merkle.mdx | 22 +++- .../cli/reth/stage/dump/storage-hashing.mdx | 22 +++- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 22 +++- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 22 +++- 
.../cli/reth/stage/unwind/num-blocks.mdx | 22 +++- .../pages/cli/reth/stage/unwind/to-block.mdx | 22 +++- 121 files changed, 2382 insertions(+), 233 deletions(-) diff --git a/.config/zepter.yaml b/.config/zepter.yaml index a79b3bc47a6..40cfef6acbd 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -12,7 +12,7 @@ workflows: # Check that `A` activates the features of `B`. "propagate-feature", # These are the features to check: - "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,tracy,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp,js-tracer,portable,keccak-cache-global", + "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,tracy,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp,otlp-logs,js-tracer,portable,keccak-cache-global", # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. "--left-side-feature-missing=ignore", # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. 
diff --git a/Cargo.lock b/Cargo.lock index 72043a05c34..3c9707af7e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6548,6 +6548,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "opentelemetry-appender-tracing" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef6a1ac5ca3accf562b8c306fa8483c85f4390f768185ab775f242f7fe8fdcc2" +dependencies = [ + "opentelemetry", + "tracing", + "tracing-core", + "tracing-subscriber 0.3.22", +] + [[package]] name = "opentelemetry-http" version = "0.31.0" @@ -11072,6 +11084,7 @@ dependencies = [ "clap", "eyre", "opentelemetry", + "opentelemetry-appender-tracing", "opentelemetry-otlp", "opentelemetry-semantic-conventions", "opentelemetry_sdk", diff --git a/Cargo.toml b/Cargo.toml index b75906c15e0..3db3f9a50cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -665,6 +665,7 @@ opentelemetry_sdk = "0.31" opentelemetry = "0.31" opentelemetry-otlp = "0.31" opentelemetry-semantic-conventions = "0.31" +opentelemetry-appender-tracing = "0.31" tracing-opentelemetry = "0.32" # misc-testing diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index a50a785bf2d..c7e26d852e5 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -81,12 +81,16 @@ backon.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "otlp", "reth-revm/portable", "js-tracer", "keccak-cache-global", "asm-keccak"] +default = ["jemalloc", "otlp", "otlp-logs", "reth-revm/portable", "js-tracer", "keccak-cache-global", "asm-keccak"] otlp = [ "reth-ethereum-cli/otlp", "reth-node-core/otlp", ] +otlp-logs = [ + "reth-ethereum-cli/otlp-logs", + "reth-node-core/otlp-logs", +] js-tracer = [ "reth-node-builder/js-tracer", "reth-node-ethereum/js-tracer", diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 524a094647c..20b1d87cc7c 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -38,6 +38,7 @@ tempfile.workspace = true default = [] otlp = 
["reth-tracing/otlp", "reth-node-core/otlp"] +otlp-logs = ["reth-tracing/otlp-logs", "reth-node-core/otlp-logs"] dev = ["reth-cli-commands/arbitrary"] diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 354748f1a77..1f45bf7f53a 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -19,7 +19,7 @@ use reth_db::DatabaseEnv; use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ - args::{LogArgs, OtlpInitStatus, TraceArgs}, + args::{LogArgs, OtlpInitStatus, OtlpLogsStatus, TraceArgs}, version::version_metadata, }; use reth_node_metrics::recorder::install_prometheus_recorder; @@ -223,16 +223,19 @@ impl< /// If file logging is enabled, this function returns a guard that must be kept alive to ensure /// that all logs are flushed to disk. /// - /// If an OTLP endpoint is specified, it will export metrics to the configured collector. + /// If an OTLP endpoint is specified, it will export traces and logs to the configured + /// collector. 
pub fn init_tracing( &mut self, runner: &CliRunner, mut layers: Layers, ) -> eyre::Result> { let otlp_status = runner.block_on(self.traces.init_otlp_tracing(&mut layers))?; + let otlp_logs_status = runner.block_on(self.traces.init_otlp_logs(&mut layers))?; let guard = self.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + match otlp_status { OtlpInitStatus::Started(endpoint) => { info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.traces.protocol); @@ -243,6 +246,16 @@ impl< OtlpInitStatus::Disabled => {} } + match otlp_logs_status { + OtlpLogsStatus::Started(endpoint) => { + info!(target: "reth::cli", "Started OTLP {:?} logs export to {endpoint}", self.traces.protocol); + } + OtlpLogsStatus::NoFeature => { + warn!(target: "reth::cli", "Provided OTLP logs arguments do not have effect, compile with the `otlp-logs` feature") + } + OtlpLogsStatus::Disabled => {} + } + Ok(guard) } } diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index c4ea8711a6d..3ed981297eb 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -81,7 +81,8 @@ tokio.workspace = true jemalloc = ["reth-cli-util/jemalloc"] asm-keccak = ["alloy-primitives/asm-keccak"] keccak-cache-global = ["alloy-primitives/keccak-cache-global"] -otlp = ["reth-tracing/otlp"] +otlp = ["reth-tracing/otlp", "reth-tracing-otlp/otlp"] +otlp-logs = ["reth-tracing/otlp-logs", "reth-tracing-otlp/otlp-logs"] tracy = ["reth-tracing/tracy"] min-error-logs = ["tracing/release_max_level_error"] diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index d5c2aa4c666..9351128570d 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -26,7 +26,7 @@ pub use log::{ColorMode, LogArgs, Verbosity}; /// `TraceArgs` for tracing and spans support mod trace; -pub use trace::{OtlpInitStatus, TraceArgs}; +pub use 
trace::{OtlpInitStatus, OtlpLogsStatus, TraceArgs}; /// `MetricArgs` to configure metrics. mod metric; diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index b94b83f4338..58371940359 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -1,4 +1,4 @@ -//! Opentelemetry tracing configuration through CLI args. +//! Opentelemetry tracing and logging configuration through CLI args. use clap::Parser; use eyre::WrapErr; @@ -6,7 +6,7 @@ use reth_tracing::{tracing_subscriber::EnvFilter, Layers}; use reth_tracing_otlp::OtlpProtocol; use url::Url; -/// CLI arguments for configuring `Opentelemetry` trace and span export. +/// CLI arguments for configuring `Opentelemetry` trace and logs export. #[derive(Debug, Clone, Parser)] pub struct TraceArgs { /// Enable `Opentelemetry` tracing export to an OTLP endpoint. @@ -30,9 +30,29 @@ pub struct TraceArgs { )] pub otlp: Option, - /// OTLP transport protocol to use for exporting traces. + /// Enable `Opentelemetry` logs export to an OTLP endpoint. /// - /// - `http`: expects endpoint path to end with `/v1/traces` + /// If no value provided, defaults based on protocol: + /// - HTTP: `http://localhost:4318/v1/logs` + /// - gRPC: `http://localhost:4317` + /// + /// Example: --logs-otlp=http://collector:4318/v1/logs + #[arg( + long = "logs-otlp", + env = "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT", + global = true, + value_name = "URL", + num_args = 0..=1, + default_missing_value = "http://localhost:4318/v1/logs", + require_equals = true, + value_parser = parse_otlp_endpoint, + help_heading = "Logging" + )] + pub logs_otlp: Option, + + /// OTLP transport protocol to use for exporting traces and logs. + /// + /// - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` /// - `grpc`: expects endpoint without a path /// /// Defaults to HTTP if not specified. 
@@ -62,6 +82,22 @@ pub struct TraceArgs { )] pub otlp_filter: EnvFilter, + /// Set a filter directive for the OTLP logs exporter. This controls the verbosity + /// of logs sent to the OTLP endpoint. It follows the same syntax as the + /// `RUST_LOG` environment variable. + /// + /// Example: --logs-otlp.filter=info,reth=debug + /// + /// Defaults to INFO if not specified. + #[arg( + long = "logs-otlp.filter", + global = true, + value_name = "FILTER", + default_value = "info", + help_heading = "Logging" + )] + pub logs_otlp_filter: EnvFilter, + /// Service name to use for OTLP tracing export. /// /// This name will be used to identify the service in distributed tracing systems @@ -101,8 +137,10 @@ impl Default for TraceArgs { fn default() -> Self { Self { otlp: None, + logs_otlp: None, protocol: OtlpProtocol::Http, otlp_filter: EnvFilter::from_default_env(), + logs_otlp_filter: EnvFilter::try_new("info").expect("valid filter"), sample_ratio: None, service_name: "reth".to_string(), } @@ -150,6 +188,37 @@ impl TraceArgs { Ok(OtlpInitStatus::Disabled) } } + + /// Initialize OTLP logs export with the given layers. + /// + /// This method handles OTLP logs initialization based on the configured options, + /// including validation and protocol selection. + /// + /// Returns the initialization status to allow callers to log appropriate messages. 
+ pub async fn init_otlp_logs(&mut self, _layers: &mut Layers) -> eyre::Result { + if let Some(endpoint) = self.logs_otlp.as_mut() { + self.protocol.validate_logs_endpoint(endpoint)?; + + #[cfg(feature = "otlp-logs")] + { + let config = reth_tracing_otlp::OtlpLogsConfig::new( + self.service_name.clone(), + endpoint.clone(), + self.protocol, + )?; + + _layers.with_log_layer(config.clone(), self.logs_otlp_filter.clone())?; + + Ok(OtlpLogsStatus::Started(config.endpoint().clone())) + } + #[cfg(not(feature = "otlp-logs"))] + { + Ok(OtlpLogsStatus::NoFeature) + } + } else { + Ok(OtlpLogsStatus::Disabled) + } + } } /// Status of OTLP tracing initialization. @@ -163,6 +232,17 @@ pub enum OtlpInitStatus { NoFeature, } +/// Status of OTLP logs initialization. +#[derive(Debug)] +pub enum OtlpLogsStatus { + /// OTLP logs export was successfully started with the given endpoint. + Started(Url), + /// OTLP logs export is disabled (no endpoint configured). + Disabled, + /// OTLP logs arguments provided but feature is not compiled. + NoFeature, +} + // Parses an OTLP endpoint url. 
fn parse_otlp_endpoint(arg: &str) -> eyre::Result { Url::parse(arg).wrap_err("Invalid URL for OTLP trace output") diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index db171bab782..a109c2fc8b0 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -76,8 +76,9 @@ reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-con [features] default = [] -# Opentelemtry feature to activate metrics export +# Opentelemetry feature to activate tracing and logs export otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] +otlp-logs = ["reth-tracing/otlp-logs", "reth-node-core/otlp-logs"] asm-keccak = [ "alloy-primitives/asm-keccak", diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 8785338e5ec..4dbdd810576 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -3,7 +3,7 @@ use eyre::{eyre, Result}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::launcher::Launcher; use reth_cli_runner::CliRunner; -use reth_node_core::args::OtlpInitStatus; +use reth_node_core::args::{OtlpInitStatus, OtlpLogsStatus}; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; @@ -124,9 +124,11 @@ where let mut layers = self.layers.take().unwrap_or_default(); let otlp_status = runner.block_on(self.cli.traces.init_otlp_tracing(&mut layers))?; + let otlp_logs_status = runner.block_on(self.cli.traces.init_otlp_logs(&mut layers))?; self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); + match otlp_status { OtlpInitStatus::Started(endpoint) => { info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.cli.traces.protocol); @@ -136,6 +138,16 @@ where } OtlpInitStatus::Disabled => {} } + + match otlp_logs_status { + 
OtlpLogsStatus::Started(endpoint) => { + info!(target: "reth::cli", "Started OTLP {:?} logs export to {endpoint}", self.cli.traces.protocol); + } + OtlpLogsStatus::NoFeature => { + warn!(target: "reth::cli", "Provided OTLP logs arguments do not have effect, compile with the `otlp-logs` feature") + } + OtlpLogsStatus::Disabled => {} + } } Ok(()) } diff --git a/crates/tracing-otlp/Cargo.toml b/crates/tracing-otlp/Cargo.toml index 5b01095d4ff..3adc8e9ebe8 100644 --- a/crates/tracing-otlp/Cargo.toml +++ b/crates/tracing-otlp/Cargo.toml @@ -14,6 +14,7 @@ opentelemetry_sdk = { workspace = true, optional = true } opentelemetry = { workspace = true, optional = true } opentelemetry-otlp = { workspace = true, optional = true, features = ["grpc-tonic"] } opentelemetry-semantic-conventions = { workspace = true, optional = true } +opentelemetry-appender-tracing = { workspace = true, optional = true } tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true @@ -36,3 +37,10 @@ otlp = [ "opentelemetry-semantic-conventions", "tracing-opentelemetry", ] + +otlp-logs = [ + "otlp", + "opentelemetry-appender-tracing", + "opentelemetry-otlp/logs", + "opentelemetry_sdk/logs", +] diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index c7af074ad11..55836b89784 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -1,10 +1,12 @@ #![cfg(feature = "otlp")] -//! Provides a tracing layer for `OpenTelemetry` that exports spans to an OTLP endpoint. +//! Provides tracing layers for `OpenTelemetry` that export spans, logs, and metrics to an OTLP +//! endpoint. //! -//! This module simplifies the integration of `OpenTelemetry` tracing with OTLP export in Rust -//! applications. It allows for easily capturing and exporting distributed traces to compatible -//! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system. +//! 
This module simplifies the integration of `OpenTelemetry` with OTLP export in Rust +//! applications. It allows for easily capturing and exporting distributed traces, logs, +//! and metrics to compatible backends like Jaeger, Zipkin, or any other +//! OpenTelemetry-compatible system. use clap::ValueEnum; use eyre::ensure; @@ -24,6 +26,7 @@ use url::Url; // Otlp http endpoint is expected to end with this path. // See also . const HTTP_TRACE_ENDPOINT: &str = "/v1/traces"; +const HTTP_LOGS_ENDPOINT: &str = "/v1/logs"; /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. /// @@ -62,6 +65,42 @@ where Ok(tracing_opentelemetry::layer().with_tracer(tracer)) } +/// Creates a tracing layer that exports logs to an OTLP endpoint. +/// +/// This layer bridges logs emitted via the `tracing` crate to `OpenTelemetry` logs. +#[cfg(feature = "otlp-logs")] +pub fn log_layer( + otlp_config: OtlpLogsConfig, +) -> eyre::Result< + opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge< + opentelemetry_sdk::logs::SdkLoggerProvider, + opentelemetry_sdk::logs::SdkLogger, + >, +> { + use opentelemetry_otlp::LogExporter; + use opentelemetry_sdk::logs::SdkLoggerProvider; + + let resource = build_resource(otlp_config.service_name.clone()); + + let log_builder = LogExporter::builder(); + + let log_exporter = match otlp_config.protocol { + OtlpProtocol::Http => { + log_builder.with_http().with_endpoint(otlp_config.endpoint.as_str()).build()? + } + OtlpProtocol::Grpc => { + log_builder.with_tonic().with_endpoint(otlp_config.endpoint.as_str()).build()? + } + }; + + let logger_provider = SdkLoggerProvider::builder() + .with_resource(resource) + .with_batch_exporter(log_exporter) + .build(); + + Ok(opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge::new(&logger_provider)) +} + /// Configuration for OTLP trace export. 
#[derive(Debug, Clone)] pub struct OtlpConfig { @@ -115,6 +154,43 @@ impl OtlpConfig { } } +/// Configuration for OTLP logs export. +#[derive(Debug, Clone)] +pub struct OtlpLogsConfig { + /// Service name for log identification + service_name: String, + /// Otlp endpoint URL + endpoint: Url, + /// Transport protocol, HTTP or gRPC + protocol: OtlpProtocol, +} + +impl OtlpLogsConfig { + /// Creates a new OTLP logs configuration. + pub fn new( + service_name: impl Into, + endpoint: Url, + protocol: OtlpProtocol, + ) -> eyre::Result { + Ok(Self { service_name: service_name.into(), endpoint, protocol }) + } + + /// Returns the service name. + pub fn service_name(&self) -> &str { + &self.service_name + } + + /// Returns the OTLP endpoint URL. + pub const fn endpoint(&self) -> &Url { + &self.endpoint + } + + /// Returns the transport protocol. + pub const fn protocol(&self) -> OtlpProtocol { + self.protocol + } +} + // Builds OTLP resource with service information. fn build_resource(service_name: impl Into) -> Resource { Resource::builder() @@ -145,23 +221,35 @@ pub enum OtlpProtocol { } impl OtlpProtocol { - /// Validate and correct the URL to match protocol requirements. + /// Validate and correct the URL to match protocol requirements for traces. /// /// For HTTP: Ensures the path ends with `/v1/traces`, appending it if necessary. /// For gRPC: Ensures the path does NOT include `/v1/traces`. pub fn validate_endpoint(&self, url: &mut Url) -> eyre::Result<()> { + self.validate_endpoint_with_path(url, HTTP_TRACE_ENDPOINT) + } + + /// Validate and correct the URL to match protocol requirements for logs. + /// + /// For HTTP: Ensures the path ends with `/v1/logs`, appending it if necessary. + /// For gRPC: Ensures the path does NOT include `/v1/logs`. 
+ pub fn validate_logs_endpoint(&self, url: &mut Url) -> eyre::Result<()> { + self.validate_endpoint_with_path(url, HTTP_LOGS_ENDPOINT) + } + + fn validate_endpoint_with_path(&self, url: &mut Url, http_path: &str) -> eyre::Result<()> { match self { Self::Http => { - if !url.path().ends_with(HTTP_TRACE_ENDPOINT) { + if !url.path().ends_with(http_path) { let path = url.path().trim_end_matches('/'); - url.set_path(&format!("{}{}", path, HTTP_TRACE_ENDPOINT)); + url.set_path(&format!("{}{}", path, http_path)); } } Self::Grpc => { ensure!( - !url.path().ends_with(HTTP_TRACE_ENDPOINT), + !url.path().ends_with(http_path), "OTLP gRPC endpoint should not include {} path, got: {}", - HTTP_TRACE_ENDPOINT, + http_path, url ); } diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index 595e76765cc..5943ab3b7c9 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -33,4 +33,5 @@ rolling-file.workspace = true [features] default = ["otlp"] otlp = ["reth-tracing-otlp"] +otlp-logs = ["reth-tracing-otlp/otlp-logs"] tracy = ["tracing-tracy", "tracy-client"] diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 2b0d72eb5c0..51fccd1bdf8 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,4 +1,6 @@ use crate::{formatter::LogFormat, LayerInfo}; +#[cfg(feature = "otlp-logs")] +use reth_tracing_otlp::{log_layer, OtlpLogsConfig}; #[cfg(feature = "otlp")] use reth_tracing_otlp::{span_layer, OtlpConfig}; use rolling_file::{RollingConditionBasic, RollingFileAppender}; @@ -168,6 +170,22 @@ impl Layers { Ok(()) } + + /// Add OTLP logs layer to the layer collection + #[cfg(feature = "otlp-logs")] + pub fn with_log_layer( + &mut self, + otlp_config: OtlpLogsConfig, + filter: EnvFilter, + ) -> eyre::Result<()> { + let log_layer = log_layer(otlp_config) + .map_err(|e| eyre::eyre!("Failed to build OTLP log exporter {}", e))? 
+ .with_filter(filter); + + self.add_layer(log_layer); + + Ok(()) + } } /// Holds configuration information for file logging. diff --git a/docs/vocs/docs/pages/cli/op-reth.mdx b/docs/vocs/docs/pages/cli/op-reth.mdx index d0bbd53e2b6..572335c21b1 100644 --- a/docs/vocs/docs/pages/cli/op-reth.mdx +++ b/docs/vocs/docs/pages/cli/op-reth.mdx @@ -99,6 +99,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -123,9 +141,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/config.mdx b/docs/vocs/docs/pages/cli/op-reth/config.mdx index a82594b7fd3..62389fe994e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/config.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/config.mdx @@ -87,6 +87,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -111,9 +129,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx index 625f528b08f..c2d7b89b031 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -214,6 +214,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... 
Set the minimum log level. @@ -238,9 +256,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/account-storage.mdx b/docs/vocs/docs/pages/cli/op-reth/db/account-storage.mdx index 4beaf216245..f71f08c74f1 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/account-storage.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/account-storage.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx index c45597647f1..45870209413 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx @@ -104,6 +104,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -128,9 +146,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear.mdx index 27005443770..1139958cbcb 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear/mdbx.mdx index 2468f94ca2f..b9f6537b3dd 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear/mdbx.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx index 29afe723fe4..5ee09d1b050 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx @@ -100,6 +100,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -124,9 +142,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/diff.mdx b/docs/vocs/docs/pages/cli/op-reth/db/diff.mdx index 82be159fb5e..1df107888f5 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/diff.mdx @@ -147,6 +147,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -171,9 +189,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/db/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/db/drop.mdx index 51d5e6d274e..ec387677f79 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/drop.mdx @@ -94,6 +94,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -118,9 +136,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get.mdx index 3bd6d445fca..15b7ea8a287 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/get.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get/mdbx.mdx index 3e0f225bc88..e4f002625c4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/get/mdbx.mdx @@ -110,6 +110,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -134,9 +152,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx index 0966f9b6b0c..cd979cffde2 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx @@ -109,6 +109,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -133,9 +151,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/list.mdx b/docs/vocs/docs/pages/cli/op-reth/db/list.mdx index 3c45dac5469..1b926b1acab 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/list.mdx @@ -137,6 +137,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -161,9 +179,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/db/path.mdx b/docs/vocs/docs/pages/cli/op-reth/db/path.mdx index 3fd2245d9a5..b500e225e1e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/path.mdx @@ -91,6 +91,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -115,9 +133,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/op-reth/db/repair-trie.mdx index 371c2e0e3a7..67d5b8cee0b 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/repair-trie.mdx @@ -99,6 +99,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -123,9 +141,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings.mdx index 5550c3c7841..95ff5e0df67 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/get.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/get.mdx index 5f214b383a6..c4e54307302 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/get.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/get.mdx @@ -91,6 +91,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -115,9 +133,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx index 9d86f268a43..76cf564715a 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx @@ -97,6 +97,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -121,9 +139,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_changesets.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_changesets.mdx index b808875dce5..40520075834 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_changesets.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_changesets.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/receipts.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/receipts.mdx index a0baca8c418..2cbd8647ba2 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/receipts.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/receipts.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_senders.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_senders.mdx index 354f32553ee..f95ddf7b811 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_senders.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_senders.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header.mdx index 8b5aae01560..5483848ff01 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx index f9b74fd9500..a95bbcfbca4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx @@ -105,6 +105,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -129,9 +147,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/path.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/path.mdx index abe179a08fe..7f237185cec 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/path.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/path.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/stats.mdx b/docs/vocs/docs/pages/cli/op-reth/db/stats.mdx index 0b8efa30adf..1a2ed7d4a93 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/stats.mdx @@ -107,6 +107,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -131,9 +149,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/db/version.mdx b/docs/vocs/docs/pages/cli/op-reth/db/version.mdx index 56250ad6e17..a33bf8ef3bd 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/version.mdx @@ -91,6 +91,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -115,9 +133,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/op-reth/dump-genesis.mdx index c91b9ae389e..2486fb79aea 100644 --- a/docs/vocs/docs/pages/cli/op-reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/dump-genesis.mdx @@ -90,6 +90,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -114,9 +132,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx index b8099d89629..42398a75159 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx @@ -207,6 +207,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -231,9 +249,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx index 1de3e032e13..75b260f7e04 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx @@ -207,6 +207,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -231,9 +249,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx index dbea8471a10..3f1b0bff2e2 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx @@ -237,6 +237,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -261,9 +279,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/init.mdx b/docs/vocs/docs/pages/cli/op-reth/init.mdx index 0b5a2bbe149..3f7c5ab5479 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init.mdx @@ -198,6 +198,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -222,9 +240,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 0c7f40e75f4..74964bf641f 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -1139,6 +1139,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -1163,9 +1181,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p.mdx index 07246ad198e..3b4efdbd6f0 100644 --- a/docs/vocs/docs/pages/cli/op-reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p.mdx @@ -88,6 +88,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -112,9 +130,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx index 6a228812ba5..7fb5e5fa61c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx @@ -338,6 +338,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -362,9 +380,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/bootnode.mdx index a36d568ab8b..387eef511b5 100644 --- a/docs/vocs/docs/pages/cli/op-reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/bootnode.mdx @@ -99,6 +99,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -123,9 +141,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/header.mdx index cadce11b792..9ede2d8eb71 100644 --- a/docs/vocs/docs/pages/cli/op-reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/header.mdx @@ -338,6 +338,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -362,9 +380,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx index 31aabc276b6..5b33e8b850e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx @@ -85,6 +85,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -109,9 +127,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx/ping.mdx index 4f03545a907..e91e437e943 100644 --- a/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx/ping.mdx @@ -85,6 +85,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -109,9 +127,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/prune.mdx index 02ff5e4d941..2df4b66eb9b 100644 --- a/docs/vocs/docs/pages/cli/op-reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/prune.mdx @@ -198,6 +198,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -222,9 +240,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx index 5ee62b4ef97..e0759212e37 100644 --- a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -214,6 +214,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -238,9 +256,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage.mdx b/docs/vocs/docs/pages/cli/op-reth/stage.mdx index 1322ab4579c..f1669000555 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage.mdx @@ -88,6 +88,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -112,9 +130,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx index d6088616b09..d1f90a84676 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -213,6 +213,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -237,9 +255,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx index 99e0cfe7599..3107256bbe5 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx @@ -205,6 +205,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -229,9 +247,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/account-hashing.mdx index dd58e31fbb6..05bedbe5b09 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/account-hashing.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/execution.mdx index 47740d0e064..95cf1f8e64d 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/execution.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/merkle.mdx index 3b02f7199a1..523e60bbe92 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/merkle.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/storage-hashing.mdx index 07feb42a250..2e10a26adee 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/storage-hashing.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx index 31549275a9d..7f813ba789c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -460,6 +460,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -484,9 +502,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx index 5f166ef73a2..426d481f6ed 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx @@ -206,6 +206,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -230,9 +248,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/num-blocks.mdx index d204bdc5e77..b8434741573 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/num-blocks.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/to-block.mdx index 9577e0dd76a..363c6adcf37 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/to-block.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 0287f2e47f4..79586fd5fb6 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -101,6 +101,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -125,9 +143,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 07d09a16dac..6fc23ce552c 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -87,6 +87,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -111,9 +129,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 22024cf3f52..d9e12bd7471 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -214,6 +214,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -238,9 +256,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/account-storage.mdx b/docs/vocs/docs/pages/cli/reth/db/account-storage.mdx index 380291feb30..c9efbfd79cb 100644 --- a/docs/vocs/docs/pages/cli/reth/db/account-storage.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/account-storage.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index 21ce752c42c..067737df48b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -104,6 +104,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -128,9 +146,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 3483b71f46f..ce27ae40a64 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 9fbfb1e8256..3645c1a39b2 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index f1ce7b217bb..e9a7792cf09 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -100,6 +100,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -124,9 +142,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 745211efa4a..1716ecd44a0 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -147,6 +147,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -171,9 +189,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 75292252953..cd828746565 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -94,6 +94,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -118,9 +136,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index abb95ab6b42..af77c976201 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 1983cfe7b29..3e306686773 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -110,6 +110,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -134,9 +152,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 45061bdd843..2a794310144 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -109,6 +109,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -133,9 +151,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 0c1f4bc857f..faf4bdbbf90 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -137,6 +137,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -161,9 +179,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index b0b2c3c7545..296e9b443a3 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -91,6 +91,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -115,9 +133,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 528a24b090f..9c9cf2e051b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -99,6 +99,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -123,9 +141,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/settings.mdx b/docs/vocs/docs/pages/cli/reth/db/settings.mdx index 19d707370ea..f9bb6ad559f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/get.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/get.mdx index 9df06547252..0013c6121a3 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/get.mdx @@ -91,6 +91,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -115,9 +133,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx index 5bc4b1566d7..8c6f1c5273e 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx @@ -97,6 +97,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -121,9 +139,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/account_changesets.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/account_changesets.mdx index d64a77fc670..ee3eec22654 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/set/account_changesets.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/account_changesets.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/receipts.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/receipts.mdx index 62d729b82af..6c2d73e74af 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/set/receipts.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/receipts.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_senders.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_senders.mdx index 8d7b217aa78..e5645aff4cd 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_senders.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_senders.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header.mdx index 0343097545d..2894ca1f623 100644 --- a/docs/vocs/docs/pages/cli/reth/db/static-file-header.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header.mdx @@ -96,6 +96,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -120,9 +138,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx index 51d779984f4..dfa69b1281d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx @@ -105,6 +105,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -129,9 +147,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header/path.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header/path.mdx index 404f7110828..9b566c88b2d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/static-file-header/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header/path.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 36ebe7938fb..61afe8d775d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -107,6 +107,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... 
Set the minimum log level. @@ -131,9 +149,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index 5e983356be3..6f6e6b282c7 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -91,6 +91,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -115,9 +133,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 46bd7c28da1..016fc2b07f7 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -208,6 +208,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -232,9 +250,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 905f2fbf4a9..954e96a4484 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -90,6 +90,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -114,9 +132,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 33f13c8db11..4eab7b84a07 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -214,6 +214,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... 
Set the minimum log level. @@ -238,9 +256,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index b168f7c8412..97386ec8579 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -209,6 +209,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -233,9 +251,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index f5e05da3e59..10eed084909 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -210,6 +210,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -234,9 +252,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 2f5cf858e20..eaab28160ee 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -230,6 +230,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -254,9 +272,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 5440c4526d2..586f0d4a44e 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -198,6 +198,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... 
Set the minimum log level. @@ -222,9 +240,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index eaac479607a..d6be9ba55e4 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -1112,6 +1112,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -1136,9 +1154,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 9ceba951c14..11d9743c97c 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -88,6 +88,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -112,9 +130,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index dd834eb71b2..e0308bb2552 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -338,6 +338,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -362,9 +380,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 79335dfd92e..47e73dd7d08 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -99,6 +99,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. 
+ + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -123,9 +141,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 110bb4d53ac..101724c438a 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -338,6 +338,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -362,9 +380,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index ee5d70b5faf..6e728e71549 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -85,6 +85,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -109,9 +127,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 3bf3599145c..cef500ecced 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -85,6 +85,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -109,9 +127,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 61030d7b47b..07cde6cd02c 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -198,6 +198,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... 
Set the minimum log level. @@ -222,9 +240,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 198415e32d4..6cbacf37e4f 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -214,6 +214,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -238,9 +256,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index 67ca5866f45..527398c0860 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -88,6 +88,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -112,9 +130,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index c0fe88527f7..4b460cadd85 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -213,6 +213,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -237,9 +255,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 2e995a92f2a..b162958fd60 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -205,6 +205,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... 
Set the minimum log level. @@ -229,9 +247,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 80348194ce2..73fc6125c8b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index a48c7e65dba..d5daaaf6603 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 203751e12fe..3b5ae17e2ee 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 1431798792e..85fba8cbf73 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -103,6 +103,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -127,9 +145,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 47d2ba37e38..131c2b04c2c 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -460,6 +460,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -484,9 +502,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. 
- - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index ef119e783b3..ecb0f3f82d6 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -206,6 +206,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -230,9 +248,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index adad84db516..56d30980424 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index 133c0b01245..2eb72130b78 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -95,6 +95,24 @@ Logging: [default: always] + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + Display: -v, --verbosity... Set the minimum log level. @@ -119,9 +137,9 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. + OTLP transport protocol to use for exporting traces and logs. - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path Defaults to HTTP if not specified. 
From 0a4bac77d03606d5856d1f44bf803bf2bf3d8dcf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 14 Jan 2026 23:19:09 +0100 Subject: [PATCH 019/267] feat(primitives): add From> for SealedBlock (#21078) --- crates/primitives-traits/src/block/sealed.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/primitives-traits/src/block/sealed.rs b/crates/primitives-traits/src/block/sealed.rs index 0ef109117c5..440491e9319 100644 --- a/crates/primitives-traits/src/block/sealed.rs +++ b/crates/primitives-traits/src/block/sealed.rs @@ -282,6 +282,13 @@ where } } +impl From> for SealedBlock { + fn from(sealed: Sealed) -> Self { + let (block, hash) = sealed.into_parts(); + Self::new_unchecked(block, hash) + } +} + impl Default for SealedBlock where B: Block + Default, From a75a0a5db7abf64aecf24a3464f3bb43bf9617a7 Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Wed, 14 Jan 2026 22:30:42 +0000 Subject: [PATCH 020/267] feat(cli): support file:// URLs in reth download (#21026) Co-authored-by: Sergei Shulepov --- crates/cli/commands/src/download.rs | 85 ++++++++++++++++++---- docs/vocs/docs/pages/cli/reth/download.mdx | 4 +- 2 files changed, 75 insertions(+), 14 deletions(-) diff --git a/crates/cli/commands/src/download.rs b/crates/cli/commands/src/download.rs index 05d5d730dd9..3dd7fd33933 100644 --- a/crates/cli/commands/src/download.rs +++ b/crates/cli/commands/src/download.rs @@ -86,6 +86,9 @@ impl DownloadDefaults { "\nIf no URL is provided, the latest mainnet archive snapshot\nwill be proposed for download from ", ); help.push_str(self.default_base_url.as_ref()); + help.push_str( + ".\n\nLocal file:// URLs are also supported for extracting snapshots from disk.", + ); help } @@ -293,19 +296,14 @@ impl CompressionFormat { } } -/// Downloads and extracts a snapshot, blocking until finished. 
-fn blocking_download_and_extract(url: &str, target_dir: &Path) -> Result<()> { - let client = reqwest::blocking::Client::builder().build()?; - let response = client.get(url).send()?.error_for_status()?; - - let total_size = response.content_length().ok_or_else(|| { - eyre::eyre!( - "Server did not provide Content-Length header. This is required for snapshot downloads" - ) - })?; - - let progress_reader = ProgressReader::new(response, total_size); - let format = CompressionFormat::from_url(url)?; +/// Extracts a compressed tar archive to the target directory with progress tracking. +fn extract_archive( + reader: R, + total_size: u64, + format: CompressionFormat, + target_dir: &Path, +) -> Result<()> { + let progress_reader = ProgressReader::new(reader, total_size); match format { CompressionFormat::Lz4 => { @@ -322,6 +320,45 @@ fn blocking_download_and_extract(url: &str, target_dir: &Path) -> Result<()> { Ok(()) } +/// Extracts a snapshot from a local file. +fn extract_from_file(path: &Path, format: CompressionFormat, target_dir: &Path) -> Result<()> { + let file = std::fs::File::open(path)?; + let total_size = file.metadata()?.len(); + extract_archive(file, total_size, format, target_dir) +} + +/// Fetches the snapshot from a remote URL, uncompressing it in a streaming fashion. +fn download_and_extract(url: &str, format: CompressionFormat, target_dir: &Path) -> Result<()> { + let client = reqwest::blocking::Client::builder().build()?; + let response = client.get(url).send()?.error_for_status()?; + + let total_size = response.content_length().ok_or_else(|| { + eyre::eyre!( + "Server did not provide Content-Length header. This is required for snapshot downloads" + ) + })?; + + extract_archive(response, total_size, format, target_dir) +} + +/// Downloads and extracts a snapshot, blocking until finished. +/// +/// Supports both `file://` URLs for local files and HTTP(S) URLs for remote downloads. 
+fn blocking_download_and_extract(url: &str, target_dir: &Path) -> Result<()> { + let format = CompressionFormat::from_url(url)?; + + if let Ok(parsed_url) = Url::parse(url) && + parsed_url.scheme() == "file" + { + let file_path = parsed_url + .to_file_path() + .map_err(|_| eyre::eyre!("Invalid file:// URL path: {}", url))?; + extract_from_file(&file_path, format, target_dir) + } else { + download_and_extract(url, format, target_dir) + } +} + async fn stream_and_extract(url: &str, target_dir: &Path) -> Result<()> { let target_dir = target_dir.to_path_buf(); let url = url.to_string(); @@ -380,6 +417,7 @@ mod tests { assert!(help.contains("Available snapshot sources:")); assert!(help.contains("merkle.io")); assert!(help.contains("publicnode.com")); + assert!(help.contains("file://")); } #[test] @@ -404,4 +442,25 @@ mod tests { assert_eq!(defaults.available_snapshots.len(), 4); // 2 defaults + 2 added assert_eq!(defaults.long_help, Some("Custom help for snapshots".to_string())); } + + #[test] + fn test_compression_format_detection() { + assert!(matches!( + CompressionFormat::from_url("https://example.com/snapshot.tar.lz4"), + Ok(CompressionFormat::Lz4) + )); + assert!(matches!( + CompressionFormat::from_url("https://example.com/snapshot.tar.zst"), + Ok(CompressionFormat::Zstd) + )); + assert!(matches!( + CompressionFormat::from_url("file:///path/to/snapshot.tar.lz4"), + Ok(CompressionFormat::Lz4) + )); + assert!(matches!( + CompressionFormat::from_url("file:///path/to/snapshot.tar.zst"), + Ok(CompressionFormat::Zstd) + )); + assert!(CompressionFormat::from_url("https://example.com/snapshot.tar.gz").is_err()); + } } diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 016fc2b07f7..2f7fd058425 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -137,7 +137,9 @@ Static Files: - https://publicnode.com/snapshots (full nodes & testnets) If no URL is provided, the 
latest mainnet archive snapshot - will be proposed for download from https://downloads.merkle.io + will be proposed for download from https://downloads.merkle.io. + + Local file:// URLs are also supported for extracting snapshots from disk. Logging: --log.stdout.format From b9ff5941eb2e0b98a2ea72119b09e7abc04e8acb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 14 Jan 2026 23:49:55 +0100 Subject: [PATCH 021/267] feat(primitives): add SealedBlock::decode_sealed for efficient RLP decoding (#21030) --- Cargo.lock | 120 ++++++++--------- Cargo.toml | 56 ++++---- crates/primitives-traits/src/block/sealed.rs | 130 +++++++++++++++++-- 3 files changed, 208 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c9707af7e3..824221eb03d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7ea09cffa9ad82f6404e6ab415ea0c41a7674c0f2e2e689cb8683f772b5940d" +checksum = "5c3a590d13de3944675987394715f37537b50b856e3b23a0e66e97d963edbf38" dependencies = [ "alloy-eips", "alloy-primitives", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aafa1f0ddb5cbb6cba6b10e8fa6e31f8c5d5c22e262b30a5d2fa9d336c3b637" +checksum = "0f28f769d5ea999f0d8a105e434f483456a15b4e1fcb08edbbbe1650a497ff6d" dependencies = [ "alloy-consensus", "alloy-eips", @@ -164,9 +164,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398c81368b864fdea950071a00b298c22b21506fed1ed8abc7f2902727f987f1" +checksum = "990fa65cd132a99d3c3795a82b9f93ec82b81c7de3bab0bf26ca5c73286f7186" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -260,9 +260,9 @@ 
dependencies = [ [[package]] name = "alloy-eips" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "691fed81bbafefae0f5a6cedd837ebb3fade46e7d91c5b67a463af12ecf5b11a" +checksum = "09535cbc646b0e0c6fcc12b7597eaed12cf86dff4c4fba9507a61e71b94f30eb" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -308,9 +308,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf91e325928dfffe90c769c2c758cc6e9ba35331c6e984310fe8276548df4a9e" +checksum = "1005520ccf89fa3d755e46c1d992a9e795466c2e7921be2145ef1f749c5727de" dependencies = [ "alloy-eips", "alloy-primitives", @@ -349,9 +349,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8618cd8431d82d21ed98c300b6072f73fe925dff73b548aa2d4573b5a8d3ca91" +checksum = "72b626409c98ba43aaaa558361bca21440c88fd30df7542c7484b9c7a1489cdb" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -364,9 +364,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390641d0e7e51d5d39b905be654ef391a89d62b9e6d3a74fd931b4df26daae20" +checksum = "89924fdcfeee0e0fa42b1f10af42f92802b5d16be614a70897382565663bf7cf" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -390,9 +390,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9badd9de9f310f0c17602c642c043eee40033c0651f45809189e411f6b166e0f" +checksum = "0f0dbe56ff50065713ff8635d8712a0895db3ad7f209db9793ad8fcb6b1734aa" dependencies = [ "alloy-consensus", "alloy-eips", @@ -466,9 +466,9 @@ dependencies = [ [[package]] name = "alloy-provider" 
-version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7dcf6452993e31ea728b9fc316ebe4e4e3a820c094f2aad55646041ee812a0" +checksum = "8b56f7a77513308a21a2ba0e9d57785a9d9d2d609e77f4e71a78a1192b83ff2d" dependencies = [ "alloy-chains", "alloy-consensus", @@ -511,9 +511,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "040dabce173e246b9522cf189db8e383c811b89cf6bd07a6ab952ec3b822a1e6" +checksum = "94813abbd7baa30c700ea02e7f92319dbcb03bff77aeea92a3a9af7ba19c5c70" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -555,9 +555,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce4a28b1302733f565a2900a0d7cb3db94ffd1dd58ad7ebf5b0ec302e868ed1e" +checksum = "ff01723afc25ec4c5b04de399155bef7b6a96dfde2475492b1b7b4e7a4f46445" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -581,9 +581,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1408505e2a41c71f7b3f83ee52e5ecd0f2a6f2db98046d0a4defb9f85a007a9e" +checksum = "f91bf006bb06b7d812591b6ac33395cb92f46c6a65cda11ee30b348338214f0f" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -594,9 +594,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee46cb2875073395f936482392d63f8128f1676a788762468857bd81390f8a4" +checksum = "b934c3bcdc6617563b45deb36a40881c8230b94d0546ea739dff7edb3aa2f6fd" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -606,9 +606,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.4.1" +version = 
"1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456a35438dc5631320a747466a0366bf21b03494fc2e33ac903c128504a68edf" +checksum = "7e82145856df8abb1fefabef58cdec0f7d9abf337d4abd50c1ed7e581634acdd" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -618,9 +618,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6792425a4a8e74be38e8785f90f497f8f325188f40f13c168a220310fd421d12" +checksum = "212ca1c1dab27f531d3858f8b1a2d6bfb2da664be0c1083971078eb7b71abe4b" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -629,9 +629,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5e181ada2cd52aaad734a03a541e2ccc5a6198eb5b011843c41b0d6c0d245f5" +checksum = "6d92a9b4b268fac505ef7fb1dac9bb129d4fd7de7753f22a5b6e9f666f7f7de6" dependencies = [ "alloy-eips", "alloy-primitives", @@ -649,9 +649,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f72b891c28aa7376f7e4468c40d2bdcc1013ab47ceae57a2696e78b0cd1e8341" +checksum = "bab1ebed118b701c497e6541d2d11dfa6f3c6ae31a3c52999daa802fcdcc16b7" dependencies = [ "alloy-primitives", "derive_more", @@ -661,9 +661,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7bcd9ead89076095806364327a1b18c2215998b6fff5a45f82c658bfbabf2df" +checksum = "232f00fcbcd3ee3b9399b96223a8fc884d17742a70a44f9d7cef275f93e6e872" dependencies = [ "alloy-consensus", "alloy-eips", @@ -682,9 +682,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.4.1" +version = "1.4.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b505d6223c88023fb1217ac24eab950e4368f6634405bea3977d34cae6935b" +checksum = "5715d0bf7efbd360873518bd9f6595762136b5327a9b759a8c42ccd9b5e44945" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -704,9 +704,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6c1a9891c2fe0582fe19dda5064e7ad8f21762ed51731717cce676193b3baa" +checksum = "c7b61941d2add2ee64646612d3eda92cbbde8e6c933489760b6222c8898c79be" dependencies = [ "alloy-consensus", "alloy-eips", @@ -719,9 +719,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca8db59fa69da9da5bb6b75823c2b07c27b0f626a0f3af72bac32a7c361a418" +checksum = "9763cc931a28682bd4b9a68af90057b0fbe80e2538a82251afd69d7ae00bbebf" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -733,9 +733,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14194567368b8c8b7aeef470831bbe90cc8b12ef5f48b18acdda9cf20070ff1" +checksum = "359a8caaa98cb49eed62d03f5bc511dd6dd5dee292238e8627a6e5690156df0f" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -745,9 +745,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a755a3cc0297683c2879bbfe2ff22778f35068f07444f0b52b5b87570142b6" +checksum = "5ed8531cae8d21ee1c6571d0995f8c9f0652a6ef6452fde369283edea6ab7138" dependencies = [ "alloy-primitives", "arbitrary", @@ -757,9 +757,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.4.1" +version = "1.4.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d73afcd1fb2d851bf4ba67504a951b73231596f819cc814f50d11126db7ac1b" +checksum = "fb10ccd49d0248df51063fce6b716f68a315dd912d55b32178c883fd48b4021d" dependencies = [ "alloy-primitives", "async-trait", @@ -772,9 +772,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "807b043936012acc788c96cba06b8580609d124bb105dc470a1617051cc4aa63" +checksum = "f4d992d44e6c414ece580294abbadb50e74cfd4eaa69787350a4dfd4b20eaa1b" dependencies = [ "alloy-consensus", "alloy-network", @@ -861,9 +861,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b84a605484a03959436e5bea194e6d62f77c3caef750196b4b4f1c8d23254df" +checksum = "3f50a9516736d22dd834cc2240e5bf264f338667cc1d9e514b55ec5a78b987ca" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -884,9 +884,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a400ad5b73590a099111481d4a66a2ca1266ebc85972a844958caf42bfdd37d" +checksum = "0a18b541a6197cf9a084481498a766fdf32fefda0c35ea6096df7d511025e9f1" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -899,9 +899,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74adc2ef0cb8c2cad4de2044afec2d4028061bc016148a251704dc204f259477" +checksum = "8075911680ebc537578cacf9453464fd394822a0f68614884a9c63f9fbaf5e89" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -919,9 +919,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.4.1" +version = "1.4.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2c1672b97fef0057f3ca268507fb4f1bc59497531603f39ccaf47cc1e5b9cb4" +checksum = "921d37a57e2975e5215f7dd0f28873ed5407c7af630d4831a4b5c737de4b0b8b" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -956,9 +956,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17272de4df6b8b59889b264f0306eba47a69f23f57f1c08f1366a4617b48c30" +checksum = "b2289a842d02fe63f8c466db964168bb2c7a9fdfb7b24816dbb17d45520575fb" dependencies = [ "darling 0.21.3", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 3db3f9a50cd..449429b88b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -485,7 +485,7 @@ revm-inspectors = "0.33.2" # eth alloy-chains = { version = "0.2.5", default-features = false } -alloy-dyn-abi = "1.4.1" +alloy-dyn-abi = "1.4.3" alloy-eip2124 = { version = "0.2.0", default-features = false } alloy-eip7928 = { version = "0.1.0", default-features = false } alloy-evm = { version = "0.25.1", default-features = false } @@ -497,33 +497,33 @@ alloy-trie = { version = "0.9.1", default-features = false } alloy-hardforks = "0.4.5" -alloy-consensus = { version = "1.4.1", default-features = false } -alloy-contract = { version = "1.4.1", default-features = false } -alloy-eips = { version = "1.4.1", default-features = false } -alloy-genesis = { version = "1.4.1", default-features = false } -alloy-json-rpc = { version = "1.4.1", default-features = false } -alloy-network = { version = "1.4.1", default-features = false } -alloy-network-primitives = { version = "1.4.1", default-features = false } -alloy-provider = { version = "1.4.1", features = ["reqwest", "debug-api"], default-features = false } -alloy-pubsub = { version = "1.4.1", default-features = false } -alloy-rpc-client = { version = "1.4.1", default-features = false } -alloy-rpc-types = { version = "1.4.1", features = ["eth"], 
default-features = false } -alloy-rpc-types-admin = { version = "1.4.1", default-features = false } -alloy-rpc-types-anvil = { version = "1.4.1", default-features = false } -alloy-rpc-types-beacon = { version = "1.4.1", default-features = false } -alloy-rpc-types-debug = { version = "1.4.1", default-features = false } -alloy-rpc-types-engine = { version = "1.4.1", default-features = false } -alloy-rpc-types-eth = { version = "1.4.1", default-features = false } -alloy-rpc-types-mev = { version = "1.4.1", default-features = false } -alloy-rpc-types-trace = { version = "1.4.1", default-features = false } -alloy-rpc-types-txpool = { version = "1.4.1", default-features = false } -alloy-serde = { version = "1.4.1", default-features = false } -alloy-signer = { version = "1.4.1", default-features = false } -alloy-signer-local = { version = "1.4.1", default-features = false } -alloy-transport = { version = "1.4.1" } -alloy-transport-http = { version = "1.4.1", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.4.1", default-features = false } -alloy-transport-ws = { version = "1.4.1", default-features = false } +alloy-consensus = { version = "1.4.3", default-features = false } +alloy-contract = { version = "1.4.3", default-features = false } +alloy-eips = { version = "1.4.3", default-features = false } +alloy-genesis = { version = "1.4.3", default-features = false } +alloy-json-rpc = { version = "1.4.3", default-features = false } +alloy-network = { version = "1.4.3", default-features = false } +alloy-network-primitives = { version = "1.4.3", default-features = false } +alloy-provider = { version = "1.4.3", features = ["reqwest", "debug-api"], default-features = false } +alloy-pubsub = { version = "1.4.3", default-features = false } +alloy-rpc-client = { version = "1.4.3", default-features = false } +alloy-rpc-types = { version = "1.4.3", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = 
"1.4.3", default-features = false } +alloy-rpc-types-anvil = { version = "1.4.3", default-features = false } +alloy-rpc-types-beacon = { version = "1.4.3", default-features = false } +alloy-rpc-types-debug = { version = "1.4.3", default-features = false } +alloy-rpc-types-engine = { version = "1.4.3", default-features = false } +alloy-rpc-types-eth = { version = "1.4.3", default-features = false } +alloy-rpc-types-mev = { version = "1.4.3", default-features = false } +alloy-rpc-types-trace = { version = "1.4.3", default-features = false } +alloy-rpc-types-txpool = { version = "1.4.3", default-features = false } +alloy-serde = { version = "1.4.3", default-features = false } +alloy-signer = { version = "1.4.3", default-features = false } +alloy-signer-local = { version = "1.4.3", default-features = false } +alloy-transport = { version = "1.4.3" } +alloy-transport-http = { version = "1.4.3", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.4.3", default-features = false } +alloy-transport-ws = { version = "1.4.3", default-features = false } # op alloy-op-evm = { version = "0.25.0", default-features = false } diff --git a/crates/primitives-traits/src/block/sealed.rs b/crates/primitives-traits/src/block/sealed.rs index 440491e9319..02d83b4f5ca 100644 --- a/crates/primitives-traits/src/block/sealed.rs +++ b/crates/primitives-traits/src/block/sealed.rs @@ -1,12 +1,12 @@ //! 
Sealed block types use crate::{ - block::{error::BlockRecoveryError, RecoveredBlock}, - transaction::signed::RecoveryError, + block::{error::BlockRecoveryError, header::BlockHeader, RecoveredBlock}, + transaction::signed::{RecoveryError, SignedTransaction}, Block, BlockBody, GotExpected, InMemorySize, SealedHeader, }; use alloc::vec::Vec; -use alloy_consensus::BlockHeader; +use alloy_consensus::BlockHeader as _; use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use alloy_primitives::{Address, BlockHash, Sealable, Sealed, B256}; use alloy_rlp::{Decodable, Encodable}; @@ -282,13 +282,6 @@ where } } -impl From> for SealedBlock { - fn from(sealed: Sealed) -> Self { - let (block, hash) = sealed.into_parts(); - Self::new_unchecked(block, hash) - } -} - impl Default for SealedBlock where B: Block + Default, @@ -334,6 +327,31 @@ impl From> for Sealed { } } +impl From> for SealedBlock { + fn from(value: Sealed) -> Self { + let (block, hash) = value.into_parts(); + Self::new_unchecked(block, hash) + } +} + +impl SealedBlock> +where + T: Decodable + SignedTransaction, + H: BlockHeader, +{ + /// Decodes the block from RLP, computing the header hash directly from the RLP bytes. + /// + /// This is more efficient than decoding and then sealing, as the header hash is computed + /// from the raw RLP bytes without re-encoding. + /// + /// This leverages [`alloy_consensus::Block::decode_sealed`]. 
+ pub fn decode_sealed(buf: &mut &[u8]) -> alloy_rlp::Result { + let sealed = alloy_consensus::Block::::decode_sealed(buf)?; + let (block, hash) = sealed.into_parts(); + Ok(Self::new_unchecked(block, hash)) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a, B> arbitrary::Arbitrary<'a> for SealedBlock where @@ -562,4 +580,96 @@ mod tests { assert_eq!(sealed_block.header().state_root, decoded.header().state_root); assert_eq!(sealed_block.body().transactions.len(), decoded.body().transactions.len()); } + + #[test] + fn test_decode_sealed_produces_correct_hash() { + // Create a sample block using alloy_consensus::Block + let header = alloy_consensus::Header { + parent_hash: B256::ZERO, + ommers_hash: B256::ZERO, + beneficiary: Address::ZERO, + state_root: B256::ZERO, + transactions_root: B256::ZERO, + receipts_root: B256::ZERO, + logs_bloom: Default::default(), + difficulty: Default::default(), + number: 42, + gas_limit: 30_000_000, + gas_used: 21_000, + timestamp: 1_000_000, + extra_data: Default::default(), + mix_hash: B256::ZERO, + nonce: Default::default(), + base_fee_per_gas: Some(1_000_000_000), + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_hash: None, + }; + + // Create a simple transaction + let tx = alloy_consensus::TxLegacy { + chain_id: Some(1), + nonce: 0, + gas_price: 21_000_000_000, + gas_limit: 21_000, + to: alloy_primitives::TxKind::Call(Address::ZERO), + value: alloy_primitives::U256::from(100), + input: alloy_primitives::Bytes::default(), + }; + + let tx_signed = + alloy_consensus::TxEnvelope::Legacy(alloy_consensus::Signed::new_unchecked( + tx, + alloy_primitives::Signature::test_signature(), + B256::ZERO, + )); + + // Create block body with the transaction + let body = alloy_consensus::BlockBody { + transactions: vec![tx_signed], + ommers: vec![], + withdrawals: Some(Default::default()), + }; + + // Create the block + let block = alloy_consensus::Block::new(header, 
body); + let expected_hash = block.header.hash_slow(); + + // Encode the block + let mut encoded = Vec::new(); + block.encode(&mut encoded); + + // Decode using decode_sealed - this should compute hash from raw RLP + let decoded = + SealedBlock::>::decode_sealed( + &mut encoded.as_slice(), + ) + .expect("Failed to decode sealed block"); + + // Verify the hash matches + assert_eq!(decoded.hash(), expected_hash); + assert_eq!(decoded.header().number, 42); + assert_eq!(decoded.body().transactions.len(), 1); + } + + #[test] + fn test_sealed_block_from_sealed() { + let header = alloy_consensus::Header::default(); + let body = alloy_consensus::BlockBody::::default(); + let block = alloy_consensus::Block::new(header, body); + let hash = block.header.hash_slow(); + + // Create Sealed + let sealed: Sealed> = + Sealed::new_unchecked(block.clone(), hash); + + // Convert to SealedBlock + let sealed_block: SealedBlock> = + SealedBlock::from(sealed); + + assert_eq!(sealed_block.hash(), hash); + assert_eq!(sealed_block.header().number, block.header.number); + } } From 1265a89c21eb35f1c99bb539565a3a508c10459c Mon Sep 17 00:00:00 2001 From: James Prestwich Date: Wed, 14 Jan 2026 18:42:42 -0500 Subject: [PATCH 022/267] refactor: make use of dbi consistent across mdbx interface (#21079) --- crates/cli/commands/src/db/list.rs | 2 +- crates/cli/commands/src/db/stats.rs | 5 +- crates/storage/db/benches/hash_keys.rs | 2 +- .../storage/db/src/implementation/mdbx/mod.rs | 2 +- .../storage/db/src/implementation/mdbx/tx.rs | 15 +++- crates/storage/libmdbx-rs/benches/cursor.rs | 8 +- crates/storage/libmdbx-rs/src/environment.rs | 2 +- crates/storage/libmdbx-rs/src/transaction.rs | 34 ++++---- crates/storage/libmdbx-rs/tests/cursor.rs | 77 ++++++++++--------- .../storage/libmdbx-rs/tests/transaction.rs | 56 +++++++------- 10 files changed, 105 insertions(+), 98 deletions(-) diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 5d6c055c949..452fcf0a789 
100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -100,7 +100,7 @@ impl TableViewer<()> for ListTableViewer<'_, N> { tx.disable_long_read_transaction_safety(); let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; - let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; + let stats = tx.inner.db_stat(table_db.dbi()).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; let total_entries = stats.entries(); let final_entry_idx = total_entries.saturating_sub(1); if self.args.skip > final_entry_idx { diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index e225b2f9914..d84091c2d6e 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -88,7 +88,7 @@ impl Command { let stats = tx .inner - .db_stat(&table_db) + .db_stat(table_db.dbi()) .wrap_err(format!("Could not find table: {db_table}"))?; // Defaults to 16KB right now but we should @@ -129,7 +129,8 @@ impl Command { table.add_row(row); let freelist = tx.inner.env().freelist()?; - let pagesize = tx.inner.db_stat(&mdbx::Database::freelist_db())?.page_size() as usize; + let pagesize = + tx.inner.db_stat(mdbx::Database::freelist_db().dbi())?.page_size() as usize; let freelist_size = freelist * pagesize; let mut row = Row::new(); diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index 69074097d5f..b55965e1e74 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -248,7 +248,7 @@ where println!( "{:?}\n", tx.inner - .db_stat(&table_db) + .db_stat(table_db.dbi()) .map_err(|_| format!("Could not find table: {}", T::NAME)) .map(|stats| { let num_pages = diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index c660bb4a4cd..1f51e9b49d2 100644 --- 
a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -278,7 +278,7 @@ impl DatabaseMetrics for DatabaseEnv { let stats = tx .inner - .db_stat(&table_db) + .db_stat(table_db.dbi()) .wrap_err(format!("Could not find table: {table}"))?; let page_size = stats.page_size() as usize; diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 6f9ca4f230b..25de65c8532 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -67,18 +67,25 @@ impl Tx { self.metrics_handler.as_ref().map_or_else(|| self.inner.id(), |handler| Ok(handler.txn_id)) } - /// Gets a table database handle if it exists, otherwise creates it. - pub fn get_dbi(&self) -> Result { - if let Some(dbi) = self.dbis.get(T::NAME) { + /// Gets a table database handle by name if it exists, otherwise, check the + /// database, opening the DB if it exists. + pub fn get_dbi_raw(&self, name: &str) -> Result { + if let Some(dbi) = self.dbis.get(name) { Ok(*dbi) } else { self.inner - .open_db(Some(T::NAME)) + .open_db(Some(name)) .map(|db| db.dbi()) .map_err(|e| DatabaseError::Open(e.into())) } } + /// Gets a table database handle by name if it exists, otherwise, check the + /// database, opening the DB if it exists. 
+ pub fn get_dbi(&self) -> Result { + self.get_dbi_raw(T::NAME) + } + /// Create db Cursor pub fn new_cursor(&self) -> Result, DatabaseError> { let inner = self diff --git a/crates/storage/libmdbx-rs/benches/cursor.rs b/crates/storage/libmdbx-rs/benches/cursor.rs index f03e8a65ff4..16b3d4efa27 100644 --- a/crates/storage/libmdbx-rs/benches/cursor.rs +++ b/crates/storage/libmdbx-rs/benches/cursor.rs @@ -12,10 +12,10 @@ fn bench_get_seq_iter(c: &mut Criterion) { let (_dir, env) = setup_bench_db(n); let txn = env.begin_ro_txn().unwrap(); let db = txn.open_db(None).unwrap(); - + let dbi = db.dbi(); c.bench_function("bench_get_seq_iter", |b| { b.iter(|| { - let mut cursor = txn.cursor(&db).unwrap(); + let mut cursor = txn.cursor(dbi).unwrap(); let mut i = 0; let mut count = 0u32; @@ -54,11 +54,11 @@ fn bench_get_seq_cursor(c: &mut Criterion) { let (_dir, env) = setup_bench_db(n); let txn = env.begin_ro_txn().unwrap(); let db = txn.open_db(None).unwrap(); - + let dbi = db.dbi(); c.bench_function("bench_get_seq_cursor", |b| { b.iter(|| { let (i, count) = txn - .cursor(&db) + .cursor(dbi) .unwrap() .iter::() .map(Result::unwrap) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index ba835b5325d..fa11bf5629a 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -211,7 +211,7 @@ impl Environment { let mut freelist: usize = 0; let txn = self.begin_ro_txn()?; let db = Database::freelist_db(); - let cursor = txn.cursor(&db)?; + let cursor = txn.cursor(db.dbi())?; for result in cursor.iter_slices() { let (_key, value) = result?; diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index e47e71ac261..913f5156b5d 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -208,11 +208,11 @@ where } /// Gets the option flags for the given database in the transaction. 
- pub fn db_flags(&self, db: &Database) -> Result { + pub fn db_flags(&self, dbi: ffi::MDBX_dbi) -> Result { let mut flags: c_uint = 0; unsafe { self.txn_execute(|txn| { - mdbx_result(ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut())) + mdbx_result(ffi::mdbx_dbi_flags_ex(txn, dbi, &mut flags, ptr::null_mut())) })??; } @@ -222,8 +222,8 @@ where } /// Retrieves database statistics. - pub fn db_stat(&self, db: &Database) -> Result { - self.db_stat_with_dbi(db.dbi()) + pub fn db_stat(&self, dbi: ffi::MDBX_dbi) -> Result { + self.db_stat_with_dbi(dbi) } /// Retrieves database statistics by the given dbi. @@ -238,8 +238,8 @@ where } /// Open a new cursor on the given database. - pub fn cursor(&self, db: &Database) -> Result> { - Cursor::new(self.clone(), db.dbi()) + pub fn cursor(&self, dbi: ffi::MDBX_dbi) -> Result> { + Cursor::new(self.clone(), dbi) } /// Open a new cursor on the given dbi. @@ -400,7 +400,7 @@ impl Transaction { #[allow(clippy::mut_from_ref)] pub fn reserve( &self, - db: &Database, + dbi: ffi::MDBX_dbi, key: impl AsRef<[u8]>, len: usize, flags: WriteFlags, @@ -412,13 +412,7 @@ impl Transaction { ffi::MDBX_val { iov_len: len, iov_base: ptr::null_mut::() }; unsafe { mdbx_result(self.txn_execute(|txn| { - ffi::mdbx_put( - txn, - db.dbi(), - &key_val, - &mut data_val, - flags.bits() | ffi::MDBX_RESERVE, - ) + ffi::mdbx_put(txn, dbi, &key_val, &mut data_val, flags.bits() | ffi::MDBX_RESERVE) })?)?; Ok(slice::from_raw_parts_mut(data_val.iov_base as *mut u8, data_val.iov_len)) } @@ -473,10 +467,10 @@ impl Transaction { /// Drops the database from the environment. /// /// # Safety - /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi - /// BEFORE calling this function. 
- pub unsafe fn drop_db(&self, db: Database) -> Result<()> { - mdbx_result(self.txn_execute(|txn| unsafe { ffi::mdbx_drop(txn, db.dbi(), true) })?)?; + /// Caller must close ALL other [Database] and [Cursor] instances pointing + /// to the same dbi BEFORE calling this function. + pub unsafe fn drop_db(&self, dbi: ffi::MDBX_dbi) -> Result<()> { + mdbx_result(self.txn_execute(|txn| unsafe { ffi::mdbx_drop(txn, dbi, true) })?)?; Ok(()) } @@ -488,8 +482,8 @@ impl Transaction { /// # Safety /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi /// BEFORE calling this function. - pub unsafe fn close_db(&self, db: Database) -> Result<()> { - mdbx_result(unsafe { ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()) })?; + pub unsafe fn close_db(&self, dbi: ffi::MDBX_dbi) -> Result<()> { + mdbx_result(unsafe { ffi::mdbx_dbi_close(self.env().env_ptr(), dbi) })?; Ok(()) } diff --git a/crates/storage/libmdbx-rs/tests/cursor.rs b/crates/storage/libmdbx-rs/tests/cursor.rs index aba11f480c0..afb9cee9580 100644 --- a/crates/storage/libmdbx-rs/tests/cursor.rs +++ b/crates/storage/libmdbx-rs/tests/cursor.rs @@ -9,15 +9,15 @@ fn test_get() { let env = Environment::builder().open(dir.path()).unwrap(); let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); - assert_eq!(None, txn.cursor(&db).unwrap().first::<(), ()>().unwrap()); + assert_eq!(None, txn.cursor(dbi).unwrap().first::<(), ()>().unwrap()); - txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key3", b"val3", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key1", b"val1", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val2", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key3", b"val3", WriteFlags::empty()).unwrap(); - let mut cursor = txn.cursor(&db).unwrap(); + let mut cursor = txn.cursor(dbi).unwrap(); 
assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1"))); assert_eq!(cursor.next().unwrap(), Some((*b"key2", *b"val2"))); @@ -34,15 +34,15 @@ fn test_get_dup() { let env = Environment::builder().open(dir.path()).unwrap(); let txn = env.begin_rw_txn().unwrap(); - let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); - txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key1", b"val2", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key1", b"val3", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val3", WriteFlags::empty()).unwrap(); - - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap().dbi(); + txn.put(dbi, b"key1", b"val1", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key1", b"val2", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key1", b"val3", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val1", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val2", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val3", WriteFlags::empty()).unwrap(); + + let mut cursor = txn.cursor(dbi).unwrap(); assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); assert_eq!(cursor.first_dup().unwrap(), Some(*b"val1")); assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1"))); @@ -78,15 +78,16 @@ fn test_get_dupfixed() { let env = Environment::builder().open(dir.path()).unwrap(); let txn = env.begin_rw_txn().unwrap(); - let db = txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED).unwrap(); - txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key1", b"val2", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key1", b"val3", 
WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val4", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val5", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val6", WriteFlags::empty()).unwrap(); - - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = + txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED).unwrap().dbi(); + txn.put(dbi, b"key1", b"val1", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key1", b"val2", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key1", b"val3", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val4", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val5", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val6", WriteFlags::empty()).unwrap(); + + let mut cursor = txn.cursor(dbi).unwrap(); assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); assert_eq!(cursor.get_multiple().unwrap(), Some(*b"val1val2val3")); assert_eq!(cursor.next_multiple::<(), ()>().unwrap(), None); @@ -114,8 +115,8 @@ fn test_iter() { } let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let mut cursor = txn.cursor(dbi).unwrap(); // Because Result implements FromIterator, we can collect the iterator // of items of type Result<_, E> into a Result> by specifying @@ -155,8 +156,8 @@ fn test_iter_empty_database() { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let mut cursor = txn.cursor(dbi).unwrap(); assert!(cursor.iter::<(), ()>().next().is_none()); assert!(cursor.iter_start::<(), ()>().next().is_none()); @@ -173,8 +174,8 @@ fn test_iter_empty_dup_database() { txn.commit().unwrap(); let txn = env.begin_ro_txn().unwrap(); - let db = 
txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let mut cursor = txn.cursor(dbi).unwrap(); assert!(cursor.iter::<(), ()>().next().is_none()); assert!(cursor.iter_start::<(), ()>().next().is_none()); @@ -223,8 +224,8 @@ fn test_iter_dup() { } let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let mut cursor = txn.cursor(dbi).unwrap(); assert_eq!(items, cursor.iter_dup().flatten().collect::>>().unwrap()); cursor.set::<()>(b"b").unwrap(); @@ -271,9 +272,9 @@ fn test_iter_del_get() { let items = vec![(*b"a", *b"1"), (*b"b", *b"2")]; { let txn = env.begin_rw_txn().unwrap(); - let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); + let dbi = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap().dbi(); assert_eq!( - txn.cursor(&db) + txn.cursor(dbi) .unwrap() .iter_dup_of::<(), ()>(b"a") .collect::>>() @@ -294,8 +295,8 @@ fn test_iter_del_get() { } let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let mut cursor = txn.cursor(dbi).unwrap(); assert_eq!(items, cursor.iter_dup().flatten().collect::>>().unwrap()); assert_eq!( @@ -316,8 +317,8 @@ fn test_put_del() { let env = Environment::builder().open(dir.path()).unwrap(); let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let mut cursor = txn.cursor(dbi).unwrap(); cursor.put(b"key1", b"val1", WriteFlags::empty()).unwrap(); cursor.put(b"key2", b"val2", WriteFlags::empty()).unwrap(); diff --git a/crates/storage/libmdbx-rs/tests/transaction.rs b/crates/storage/libmdbx-rs/tests/transaction.rs index c7e8e3fcd37..7e4b18e4fd3 100644 --- a/crates/storage/libmdbx-rs/tests/transaction.rs +++ 
b/crates/storage/libmdbx-rs/tests/transaction.rs @@ -50,9 +50,9 @@ fn test_put_get_del_multi() { txn.commit().unwrap(); let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); { - let mut cur = txn.cursor(&db).unwrap(); + let mut cur = txn.cursor(dbi).unwrap(); let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1"); let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::>(); assert_eq!(vals, vec![*b"val1", *b"val2", *b"val3"]); @@ -66,9 +66,9 @@ fn test_put_get_del_multi() { txn.commit().unwrap(); let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); { - let mut cur = txn.cursor(&db).unwrap(); + let mut cur = txn.cursor(dbi).unwrap(); let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1"); let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::>(); assert_eq!(vals, vec![*b"val1", *b"val3"]); @@ -103,9 +103,9 @@ fn test_reserve() { let env = Environment::builder().open(dir.path()).unwrap(); let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); { - let mut writer = txn.reserve(&db, b"key1", 4, WriteFlags::empty()).unwrap(); + let mut writer = txn.reserve(dbi, b"key1", 4, WriteFlags::empty()).unwrap(); writer.write_all(b"val1").unwrap(); } txn.commit().unwrap(); @@ -182,9 +182,9 @@ fn test_drop_db() { } { let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(Some("test")).unwrap(); + let dbi = txn.open_db(Some("test")).unwrap().dbi(); unsafe { - txn.drop_db(db).unwrap(); + txn.drop_db(dbi).unwrap(); } assert!(matches!(txn.open_db(Some("test")).unwrap_err(), Error::NotFound)); assert!(!txn.commit().unwrap().0); @@ -291,8 +291,8 @@ fn test_stat() { { let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let stat = txn.db_stat(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let stat = txn.db_stat(dbi).unwrap(); 
assert_eq!(stat.entries(), 3); } @@ -304,8 +304,8 @@ fn test_stat() { { let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let stat = txn.db_stat(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let stat = txn.db_stat(dbi).unwrap(); assert_eq!(stat.entries(), 1); } @@ -318,8 +318,8 @@ fn test_stat() { { let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let stat = txn.db_stat(&db).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let stat = txn.db_stat(dbi).unwrap(); assert_eq!(stat.entries(), 4); } } @@ -331,20 +331,22 @@ fn test_stat_dupsort() { let txn = env.begin_rw_txn().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); - txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key1", b"val2", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key1", b"val3", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key2", b"val3", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key3", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key3", b"val2", WriteFlags::empty()).unwrap(); - txn.put(db.dbi(), b"key3", b"val3", WriteFlags::empty()).unwrap(); + let dbi = db.dbi(); + txn.put(dbi, b"key1", b"val1", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key1", b"val2", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key1", b"val3", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val1", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val2", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key2", b"val3", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key3", b"val1", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key3", b"val2", WriteFlags::empty()).unwrap(); + txn.put(dbi, b"key3", b"val3", WriteFlags::empty()).unwrap(); txn.commit().unwrap(); { let txn = 
env.begin_ro_txn().unwrap(); - let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let stat = txn.db_stat(dbi).unwrap(); assert_eq!(stat.entries(), 9); } @@ -356,7 +358,8 @@ fn test_stat_dupsort() { { let txn = env.begin_ro_txn().unwrap(); - let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let stat = txn.db_stat(dbi).unwrap(); assert_eq!(stat.entries(), 5); } @@ -369,7 +372,8 @@ fn test_stat_dupsort() { { let txn = env.begin_ro_txn().unwrap(); - let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap(); + let dbi = txn.open_db(None).unwrap().dbi(); + let stat = txn.db_stat(dbi).unwrap(); assert_eq!(stat.entries(), 8); } } From 26a99ac5a3811f2425f18938b5265200b23973c1 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 14 Jan 2026 23:46:58 +0000 Subject: [PATCH 023/267] perf: small improvement to extend_sorted_vec (#21032) --- crates/trie/common/src/utils.rs | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs index 2c30b474bc2..a70608ea603 100644 --- a/crates/trie/common/src/utils.rs +++ b/crates/trie/common/src/utils.rs @@ -2,53 +2,42 @@ use alloc::vec::Vec; use core::cmp::Ordering; /// Helper function to extend a sorted vector with another sorted vector. -/// Values from `other` take precedence for duplicate keys. /// -/// This function efficiently merges two sorted vectors by: -/// 1. Iterating through the target vector with mutable references -/// 2. Using a peekable iterator for the other vector -/// 3. For each target item, processing other items that come before or equal to it -/// 4. Collecting items from other that need to be inserted -/// 5. Appending and re-sorting only if new items were added +/// Values from `other` take precedence for duplicate keys. 
pub(crate) fn extend_sorted_vec(target: &mut Vec<(K, V)>, other: &[(K, V)]) where K: Clone + Ord, V: Clone, { + let cmp = |a: &(K, V), b: &(K, V)| a.0.cmp(&b.0); + if other.is_empty() { return; } let mut other_iter = other.iter().peekable(); - let mut to_insert = Vec::new(); - - // Iterate through target and update/collect items from other - for target_item in target.iter_mut() { + let initial_len = target.len(); + for i in 0..initial_len { while let Some(other_item) = other_iter.peek() { - match other_item.0.cmp(&target_item.0) { + let target_item = &mut target[i]; + match cmp(other_item, target_item) { Ordering::Less => { - // Other item comes before current target item, collect it - to_insert.push(other_iter.next().unwrap().clone()); + target.push(other_iter.next().unwrap().clone()); } Ordering::Equal => { - // Same key, update target with other's value target_item.1 = other_iter.next().unwrap().1.clone(); break; } Ordering::Greater => { - // Other item comes after current target item, keep target unchanged break; } } } } - // Append collected new items, as well as any remaining from `other` which are necessarily also - // new, and sort if needed - if !to_insert.is_empty() || other_iter.peek().is_some() { - target.extend(to_insert); - target.extend(other_iter.cloned()); - target.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + target.extend(other_iter.cloned()); + if target.len() > initial_len { + target.sort_by(cmp); } } From 27fbd9a7defbacd501a8959514fc7c0161a98995 Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Wed, 14 Jan 2026 23:56:27 +0000 Subject: [PATCH 024/267] fix(db): change commit return type from Result to Result<()> (#21077) Co-authored-by: Sergei Shulepov --- crates/storage/db-api/src/mock.rs | 6 ++--- crates/storage/db-api/src/transaction.rs | 2 +- .../storage/db/src/implementation/mdbx/tx.rs | 4 ++-- crates/storage/libmdbx-rs/src/codec.rs | 4 +++- crates/storage/libmdbx-rs/src/environment.rs | 5 +++- crates/storage/libmdbx-rs/src/error.rs | 15 
++++++++++++ crates/storage/libmdbx-rs/src/transaction.rs | 23 ++++++++++++++----- crates/storage/libmdbx-rs/tests/cursor.rs | 2 +- .../storage/libmdbx-rs/tests/transaction.rs | 8 +++---- .../src/providers/database/provider.rs | 6 ++--- .../storage/provider/src/test_utils/mock.rs | 2 +- crates/storage/rpc-provider/src/lib.rs | 2 +- .../storage-api/src/database_provider.rs | 2 +- crates/storage/storage-api/src/noop.rs | 2 +- docs/crates/db.md | 2 +- 15 files changed, 58 insertions(+), 27 deletions(-) diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index 60f69ae8f0d..9928a66c0d4 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -92,10 +92,10 @@ impl DbTx for TxMock { /// Commits the transaction. /// - /// **Mock behavior**: Always returns `Ok(true)`, indicating successful commit. + /// **Mock behavior**: Always returns `Ok(())`, indicating successful commit. /// No actual data is persisted since this is a mock implementation. - fn commit(self) -> Result { - Ok(true) + fn commit(self) -> Result<(), DatabaseError> { + Ok(()) } /// Aborts the transaction. diff --git a/crates/storage/db-api/src/transaction.rs b/crates/storage/db-api/src/transaction.rs index 281912267f2..545c0ce39f3 100644 --- a/crates/storage/db-api/src/transaction.rs +++ b/crates/storage/db-api/src/transaction.rs @@ -35,7 +35,7 @@ pub trait DbTx: Debug + Send { ) -> Result, DatabaseError>; /// Commit for read only transaction will consume and free transaction and allows /// freeing of memory pages - fn commit(self) -> Result; + fn commit(self) -> Result<(), DatabaseError>; /// Aborts transaction fn abort(self); /// Iterate over read only values in table. 
diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 25de65c8532..c0e958ef818 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -302,10 +302,10 @@ impl DbTx for Tx { }) } - fn commit(self) -> Result { + fn commit(self) -> Result<(), DatabaseError> { self.execute_with_close_transaction_metric(TransactionOutcome::Commit, |this| { match this.inner.commit().map_err(|e| DatabaseError::Commit(e.into())) { - Ok((v, latency)) => (Ok(v), Some(latency)), + Ok(latency) => (Ok(()), Some(latency)), Err(e) => (Err(e), None), } }) diff --git a/crates/storage/libmdbx-rs/src/codec.rs b/crates/storage/libmdbx-rs/src/codec.rs index c0b2f0f1cf7..91142362c6d 100644 --- a/crates/storage/libmdbx-rs/src/codec.rs +++ b/crates/storage/libmdbx-rs/src/codec.rs @@ -42,7 +42,9 @@ impl TableObject for Cow<'_, [u8]> { #[cfg(not(feature = "return-borrowed"))] { let is_dirty = (!K::IS_READ_ONLY) && - crate::error::mdbx_result(ffi::mdbx_is_dirty(_txn, data_val.iov_base))?; + crate::error::mdbx_result(unsafe { + ffi::mdbx_is_dirty(_txn, data_val.iov_base) + })?; Ok(if is_dirty { Cow::Owned(s.to_vec()) } else { Cow::Borrowed(s) }) } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index fa11bf5629a..524f4340297 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -989,7 +989,10 @@ mod tests { result @ Err(_) => result.unwrap(), } } - tx.commit().unwrap(); + // The transaction may be in an error state after hitting MapFull, + // so commit could fail. We don't care about the result here since + // the purpose of this test is to verify the HSR callback was called. 
+ let _ = tx.commit(); } // Expect the HSR to be called diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index 2f852c3dc77..007d828af2b 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -123,6 +123,12 @@ pub enum Error { /// Read transaction has been timed out. #[error("read transaction has been timed out")] ReadTransactionTimeout, + /// The transaction commit was aborted due to previous errors. + /// + /// This can happen in exceptionally rare cases and it signals the problem coming from inside + /// of mdbx. + #[error("botched transaction")] + BotchedTransaction, /// Permission defined #[error("permission denied to setup database")] Permission, @@ -204,6 +210,7 @@ impl Error { Self::WriteTransactionUnsupportedInReadOnlyMode | Self::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS, Self::ReadTransactionTimeout => -96000, // Custom non-MDBX error code + Self::BotchedTransaction => -96001, Self::Permission => ffi::MDBX_EPERM, Self::Other(err_code) => *err_code, } @@ -216,6 +223,14 @@ impl From for i32 { } } +/// Parses an MDBX error code into a result type. +/// +/// Note that this function returns `Ok(false)` on `MDBX_SUCCESS` and +/// `Ok(true)` on `MDBX_RESULT_TRUE`. The return value requires extra +/// care since its interpretation depends on the callee being called. +/// +/// The most unintuitive case is `mdbx_txn_commit` which returns `Ok(true)` +/// when the commit has been aborted. #[inline] pub(crate) const fn mdbx_result(err_code: c_int) -> Result { match err_code { diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 913f5156b5d..f0f4f120ae1 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -170,8 +170,8 @@ where /// Commits the transaction. /// /// Any pending operations will be saved. 
- pub fn commit(self) -> Result<(bool, CommitLatency)> { - let result = self.txn_execute(|txn| { + pub fn commit(self) -> Result { + match self.txn_execute(|txn| { if K::IS_READ_ONLY { #[cfg(feature = "read-tx-timeouts")] self.env().txn_manager().remove_active_read_transaction(txn); @@ -186,10 +186,21 @@ where .send_message(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender }); rx.recv().unwrap() } - })?; - - self.inner.set_committed(); - result + })? { + // + Ok((false, lat)) => { + self.inner.set_committed(); + Ok(lat) + } + Ok((true, _)) => { + // MDBX_RESULT_TRUE means the transaction was aborted due to prior errors. + // The transaction is still finished/freed by MDBX, so we must mark it as + // committed to prevent the Drop impl from trying to abort it again. + self.inner.set_committed(); + Err(Error::BotchedTransaction) + } + Err(e) => Err(e), + } } /// Opens a handle to an MDBX database. diff --git a/crates/storage/libmdbx-rs/tests/cursor.rs b/crates/storage/libmdbx-rs/tests/cursor.rs index afb9cee9580..0d483baf29f 100644 --- a/crates/storage/libmdbx-rs/tests/cursor.rs +++ b/crates/storage/libmdbx-rs/tests/cursor.rs @@ -111,7 +111,7 @@ fn test_iter() { for (key, data) in &items { txn.put(db.dbi(), key, data, WriteFlags::empty()).unwrap(); } - assert!(!txn.commit().unwrap().0); + txn.commit().unwrap(); } let txn = env.begin_ro_txn().unwrap(); diff --git a/crates/storage/libmdbx-rs/tests/transaction.rs b/crates/storage/libmdbx-rs/tests/transaction.rs index 7e4b18e4fd3..81da1beada8 100644 --- a/crates/storage/libmdbx-rs/tests/transaction.rs +++ b/crates/storage/libmdbx-rs/tests/transaction.rs @@ -148,13 +148,13 @@ fn test_clear_db() { { let txn = env.begin_rw_txn().unwrap(); txn.put(txn.open_db(None).unwrap().dbi(), b"key", b"val", WriteFlags::empty()).unwrap(); - assert!(!txn.commit().unwrap().0); + txn.commit().unwrap(); } { let txn = env.begin_rw_txn().unwrap(); txn.clear_db(txn.open_db(None).unwrap().dbi()).unwrap(); - assert!(!txn.commit().unwrap().0); + 
txn.commit().unwrap(); } let txn = env.begin_ro_txn().unwrap(); @@ -178,7 +178,7 @@ fn test_drop_db() { .unwrap(); // Workaround for MDBX dbi drop issue txn.create_db(Some("canary"), DatabaseFlags::empty()).unwrap(); - assert!(!txn.commit().unwrap().0); + txn.commit().unwrap(); } { let txn = env.begin_rw_txn().unwrap(); @@ -187,7 +187,7 @@ fn test_drop_db() { txn.drop_db(dbi).unwrap(); } assert!(matches!(txn.open_db(Some("test")).unwrap_err(), Error::NotFound)); - assert!(!txn.commit().unwrap().0); + txn.commit().unwrap(); } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 4bde73a37fc..78424785cc4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -125,7 +125,7 @@ impl AsRef::TXMut, impl DatabaseProviderRW { /// Commit database transaction and static file if it exists. - pub fn commit(self) -> ProviderResult { + pub fn commit(self) -> ProviderResult<()> { self.0.commit() } @@ -3422,7 +3422,7 @@ impl DBProvider for DatabaseProvider } /// Commit database transaction, static files, and pending `RocksDB` batches. - fn commit(self) -> ProviderResult { + fn commit(self) -> ProviderResult<()> { // For unwinding it makes more sense to commit the database first, since if // it is interrupted before the static files commit, we can just // truncate the static files according to the @@ -3453,7 +3453,7 @@ impl DBProvider for DatabaseProvider self.tx.commit()?; } - Ok(true) + Ok(()) } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 065b9ee71de..f5c40978a73 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -267,7 +267,7 @@ impl DBProvider self.tx } - fn commit(self) -> ProviderResult { + fn commit(self) -> ProviderResult<()> { Ok(self.tx.commit()?) 
} diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index 76f4802188a..fa340ea2ae6 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -1357,7 +1357,7 @@ where self } - fn commit(self) -> ProviderResult { + fn commit(self) -> ProviderResult<()> { unimplemented!("commit not supported for RPC provider") } diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index b206ca09222..417f8e282b9 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -37,7 +37,7 @@ pub trait DBProvider: Sized { } /// Commit database transaction - fn commit(self) -> ProviderResult; + fn commit(self) -> ProviderResult<()>; /// Returns a reference to prune modes. fn prune_modes_ref(&self) -> &PruneModes; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index b3d2c7a9096..8e912c23a40 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -639,7 +639,7 @@ impl DBProvider for NoopProvider ProviderResult { + fn commit(self) -> ProviderResult<()> { use reth_db_api::transaction::DbTx; Ok(self.tx.commit()?) diff --git a/docs/crates/db.md b/docs/crates/db.md index 38dc31736fb..f6460b6c121 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -159,7 +159,7 @@ pub trait DbTx: Debug + Send + Sync { ) -> Result, DatabaseError>; /// Commit for read only transaction will consume and free transaction and allows /// freeing of memory pages - fn commit(self) -> Result; + fn commit(self) -> Result<(), DatabaseError>; /// Aborts transaction fn abort(self); /// Iterate over read only values in table. 
From 905de9694479c300e8389d11ca52a12a62ab0f06 Mon Sep 17 00:00:00 2001 From: Emma Jamieson-Hoare Date: Thu, 15 Jan 2026 09:41:54 +0000 Subject: [PATCH 025/267] chore: release 1.9.4 (#21048) Co-authored-by: Emma Jamieson-Hoare --- Cargo.lock | 278 +++++++++++++++++++-------------------- Cargo.toml | 2 +- docs/vocs/vocs.config.ts | 2 +- 3 files changed, 141 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 824221eb03d..46a1fa39ee9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3328,7 +3328,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.9.3" +version = "1.9.4" dependencies = [ "clap", "ef-tests", @@ -3336,7 +3336,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3826,7 +3826,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.9.3" +version = "1.9.4" dependencies = [ "eyre", "reth-ethereum", @@ -3965,7 +3965,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "clap", @@ -6495,7 +6495,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.9.3" +version = "1.9.4" dependencies = [ "clap", "reth-cli-util", @@ -7647,7 +7647,7 @@ checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7694,7 +7694,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7717,7 +7717,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7760,7 +7760,7 @@ dependencies = [ [[package]] name = "reth-bench-compare" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", 
"alloy-provider", @@ -7788,7 +7788,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7820,7 +7820,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7840,7 +7840,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-genesis", "clap", @@ -7853,7 +7853,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7938,7 +7938,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.9.3" +version = "1.9.4" dependencies = [ "reth-tasks", "tokio", @@ -7947,7 +7947,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7968,7 +7968,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7992,7 +7992,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.9.3" +version = "1.9.4" dependencies = [ "proc-macro2", "quote", @@ -8002,7 +8002,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "eyre", @@ -8020,7 +8020,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8032,7 +8032,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8046,7 +8046,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8071,7 +8071,7 @@ 
dependencies = [ [[package]] name = "reth-db" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8105,7 +8105,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8135,7 +8135,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8165,7 +8165,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8181,7 +8181,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8207,7 +8207,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8232,7 +8232,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8260,7 +8260,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8298,7 +8298,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8355,7 +8355,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.9.3" +version = "1.9.4" dependencies = [ "aes", "alloy-primitives", @@ -8382,7 +8382,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8406,7 +8406,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8430,7 +8430,7 @@ dependencies = [ 
[[package]] name = "reth-engine-service" -version = "1.9.3" +version = "1.9.4" dependencies = [ "futures", "pin-project", @@ -8459,7 +8459,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eip7928", @@ -8532,7 +8532,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8581,7 +8581,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "bytes", @@ -8599,7 +8599,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8625,7 +8625,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.9.3" +version = "1.9.4" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8635,7 +8635,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8673,7 +8673,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8698,7 +8698,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8738,7 +8738,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.9.3" +version = "1.9.4" dependencies = [ "clap", "eyre", @@ -8760,7 +8760,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8776,7 +8776,7 @@ dependencies = [ 
[[package]] name = "reth-ethereum-engine-primitives" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8794,7 +8794,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8807,7 +8807,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8835,7 +8835,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8862,7 +8862,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "rayon", @@ -8872,7 +8872,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8897,7 +8897,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8921,7 +8921,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8933,7 +8933,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8953,7 +8953,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8998,7 +8998,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "eyre", @@ -9029,7 +9029,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9046,7 +9046,7 @@ dependencies = [ 
[[package]] name = "reth-fs-util" -version = "1.9.3" +version = "1.9.4" dependencies = [ "serde", "serde_json", @@ -9055,7 +9055,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9088,7 +9088,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.9.3" +version = "1.9.4" dependencies = [ "bytes", "futures", @@ -9110,7 +9110,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.9.3" +version = "1.9.4" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -9128,7 +9128,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.9.3" +version = "1.9.4" dependencies = [ "bindgen 0.71.1", "cc", @@ -9136,7 +9136,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.9.3" +version = "1.9.4" dependencies = [ "futures", "metrics", @@ -9147,7 +9147,7 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "ipnet", @@ -9155,7 +9155,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "1.9.3" +version = "1.9.4" dependencies = [ "futures-util", "if-addrs", @@ -9169,7 +9169,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9230,7 +9230,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9254,7 +9254,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9276,7 +9276,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9293,7 +9293,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.9.3" +version = "1.9.4" 
dependencies = [ "alloy-eip2124", "humantime-serde", @@ -9306,7 +9306,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.9.3" +version = "1.9.4" dependencies = [ "anyhow", "bincode 1.3.3", @@ -9324,7 +9324,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -9347,7 +9347,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9418,7 +9418,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9475,7 +9475,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9558,7 +9558,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9581,7 +9581,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.9.3" +version = "1.9.4" dependencies = [ "bytes", "eyre", @@ -9610,7 +9610,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9621,7 +9621,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.9.3" +version = "1.9.4" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9661,7 +9661,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9689,7 +9689,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.9.3" +version = "1.9.4" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -9738,7 +9738,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9769,7 +9769,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9798,7 +9798,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9836,7 +9836,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9846,7 +9846,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9906,7 +9906,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9945,7 +9945,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9972,7 +9972,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10034,7 +10034,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "reth-codecs", @@ -10046,7 +10046,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10083,7 +10083,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10103,7 +10103,7 @@ dependencies = [ [[package]] name = 
"reth-payload-builder-primitives" -version = "1.9.3" +version = "1.9.4" dependencies = [ "pin-project", "reth-payload-primitives", @@ -10114,7 +10114,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10137,7 +10137,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10146,7 +10146,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -10155,7 +10155,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10177,7 +10177,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10214,7 +10214,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10263,7 +10263,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10295,11 +10295,11 @@ dependencies = [ [[package]] name = "reth-prune-db" -version = "1.9.3" +version = "1.9.4" [[package]] name = "reth-prune-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -10318,7 +10318,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10344,7 +10344,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10370,7 +10370,7 @@ dependencies = [ [[package]] name = "reth-revm" 
-version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10384,7 +10384,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10469,7 +10469,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eip7928", "alloy-eips", @@ -10500,7 +10500,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10519,7 +10519,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-network", @@ -10575,7 +10575,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-evm", @@ -10601,7 +10601,7 @@ dependencies = [ [[package]] name = "reth-rpc-e2e-tests" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10621,7 +10621,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10657,7 +10657,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10700,7 +10700,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10748,7 +10748,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10765,7 +10765,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10780,7 +10780,7 @@ dependencies = [ 
[[package]] name = "reth-stages" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10838,7 +10838,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10870,7 +10870,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -10886,7 +10886,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10914,7 +10914,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "assert_matches", @@ -10937,7 +10937,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "clap", @@ -10952,7 +10952,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10975,7 +10975,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10990,7 +10990,7 @@ dependencies = [ [[package]] name = "reth-storage-rpc-provider" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11019,7 +11019,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.9.3" +version = "1.9.4" dependencies = [ "auto_impl", "dyn-clone", @@ -11036,7 +11036,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11052,7 +11052,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.9.3" +version = "1.9.4" dependencies = [ "tokio", "tokio-stream", @@ -11061,7 +11061,7 @@ dependencies = [ 
[[package]] name = "reth-tracing" -version = "1.9.3" +version = "1.9.4" dependencies = [ "clap", "eyre", @@ -11079,7 +11079,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.9.3" +version = "1.9.4" dependencies = [ "clap", "eyre", @@ -11096,7 +11096,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11144,7 +11144,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11178,7 +11178,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11211,7 +11211,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11238,7 +11238,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11268,7 +11268,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11301,7 +11301,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.9.3" +version = "1.9.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11331,7 +11331,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.9.3" +version = "1.9.4" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index 449429b88b9..4f7d9ea8a8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.9.3" +version = "1.9.4" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index a2d7efa9adc..9ac8bbe3b0e 100644 --- a/docs/vocs/vocs.config.ts +++ 
b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.9.3', + text: 'v1.9.4', items: [ { text: 'Releases', From b25f32a977b489f9b84254c7811a2a5a25a81369 Mon Sep 17 00:00:00 2001 From: Emma Jamieson-Hoare Date: Thu, 15 Jan 2026 10:50:35 +0000 Subject: [PATCH 026/267] chore(release): set version v1.10.0 (#21091) Co-authored-by: Emma Jamieson-Hoare --- Cargo.lock | 278 +++++++++++++++++++-------------------- Cargo.toml | 2 +- docs/vocs/vocs.config.ts | 2 +- 3 files changed, 141 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 46a1fa39ee9..9e8d52a102b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3328,7 +3328,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.9.4" +version = "1.10.0" dependencies = [ "clap", "ef-tests", @@ -3336,7 +3336,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3826,7 +3826,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.9.4" +version = "1.10.0" dependencies = [ "eyre", "reth-ethereum", @@ -3965,7 +3965,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "clap", @@ -6495,7 +6495,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.9.4" +version = "1.10.0" dependencies = [ "clap", "reth-cli-util", @@ -7647,7 +7647,7 @@ checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7694,7 +7694,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7717,7 +7717,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.9.4" 
+version = "1.10.0" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7760,7 +7760,7 @@ dependencies = [ [[package]] name = "reth-bench-compare" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "alloy-provider", @@ -7788,7 +7788,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7820,7 +7820,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7840,7 +7840,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-genesis", "clap", @@ -7853,7 +7853,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7938,7 +7938,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.9.4" +version = "1.10.0" dependencies = [ "reth-tasks", "tokio", @@ -7947,7 +7947,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7968,7 +7968,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7992,7 +7992,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.9.4" +version = "1.10.0" dependencies = [ "proc-macro2", "quote", @@ -8002,7 +8002,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "eyre", @@ -8020,7 +8020,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8032,7 +8032,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.9.4" +version = "1.10.0" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -8046,7 +8046,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8071,7 +8071,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8105,7 +8105,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8135,7 +8135,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8165,7 +8165,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8181,7 +8181,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8207,7 +8207,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8232,7 +8232,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8260,7 +8260,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8298,7 +8298,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8355,7 +8355,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.9.4" +version = "1.10.0" dependencies = [ "aes", "alloy-primitives", @@ -8382,7 +8382,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.9.4" +version = "1.10.0" dependencies = [ 
"alloy-consensus", "alloy-primitives", @@ -8406,7 +8406,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8430,7 +8430,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.9.4" +version = "1.10.0" dependencies = [ "futures", "pin-project", @@ -8459,7 +8459,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eip7928", @@ -8532,7 +8532,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8581,7 +8581,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "bytes", @@ -8599,7 +8599,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8625,7 +8625,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.9.4" +version = "1.10.0" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8635,7 +8635,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8673,7 +8673,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8698,7 +8698,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8738,7 +8738,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.9.4" 
+version = "1.10.0" dependencies = [ "clap", "eyre", @@ -8760,7 +8760,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8776,7 +8776,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8794,7 +8794,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8807,7 +8807,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8835,7 +8835,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8862,7 +8862,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "rayon", @@ -8872,7 +8872,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8897,7 +8897,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8921,7 +8921,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8933,7 +8933,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8953,7 +8953,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8998,7 +8998,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" 
-version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "eyre", @@ -9029,7 +9029,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9046,7 +9046,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.9.4" +version = "1.10.0" dependencies = [ "serde", "serde_json", @@ -9055,7 +9055,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9088,7 +9088,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.9.4" +version = "1.10.0" dependencies = [ "bytes", "futures", @@ -9110,7 +9110,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.9.4" +version = "1.10.0" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -9128,7 +9128,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.9.4" +version = "1.10.0" dependencies = [ "bindgen 0.71.1", "cc", @@ -9136,7 +9136,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.9.4" +version = "1.10.0" dependencies = [ "futures", "metrics", @@ -9147,7 +9147,7 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "ipnet", @@ -9155,7 +9155,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "1.9.4" +version = "1.10.0" dependencies = [ "futures-util", "if-addrs", @@ -9169,7 +9169,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9230,7 +9230,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9254,7 +9254,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", 
@@ -9276,7 +9276,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9293,7 +9293,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -9306,7 +9306,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.9.4" +version = "1.10.0" dependencies = [ "anyhow", "bincode 1.3.3", @@ -9324,7 +9324,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -9347,7 +9347,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9418,7 +9418,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9475,7 +9475,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9558,7 +9558,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9581,7 +9581,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.9.4" +version = "1.10.0" dependencies = [ "bytes", "eyre", @@ -9610,7 +9610,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9621,7 +9621,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.9.4" +version = "1.10.0" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9661,7 +9661,7 
@@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9689,7 +9689,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9738,7 +9738,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9769,7 +9769,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9798,7 +9798,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9836,7 +9836,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9846,7 +9846,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9906,7 +9906,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9945,7 +9945,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9972,7 +9972,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10034,7 +10034,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "reth-codecs", @@ -10046,7 +10046,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.9.4" +version = 
"1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10083,7 +10083,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10103,7 +10103,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.9.4" +version = "1.10.0" dependencies = [ "pin-project", "reth-payload-primitives", @@ -10114,7 +10114,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10137,7 +10137,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10146,7 +10146,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -10155,7 +10155,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10177,7 +10177,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10214,7 +10214,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10263,7 +10263,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10295,11 +10295,11 @@ dependencies = [ [[package]] name = "reth-prune-db" -version = "1.9.4" +version = "1.10.0" [[package]] name = "reth-prune-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10318,7 +10318,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.9.4" +version = "1.10.0" 
dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10344,7 +10344,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10370,7 +10370,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10384,7 +10384,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10469,7 +10469,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eip7928", "alloy-eips", @@ -10500,7 +10500,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10519,7 +10519,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-network", @@ -10575,7 +10575,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-evm", @@ -10601,7 +10601,7 @@ dependencies = [ [[package]] name = "reth-rpc-e2e-tests" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10621,7 +10621,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10657,7 +10657,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10700,7 +10700,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10748,7 +10748,7 @@ dependencies = [ [[package]] name = 
"reth-rpc-layer" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10765,7 +10765,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10780,7 +10780,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10838,7 +10838,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10870,7 +10870,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10886,7 +10886,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10914,7 +10914,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -10937,7 +10937,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "clap", @@ -10952,7 +10952,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10975,7 +10975,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10990,7 +10990,7 @@ dependencies = [ [[package]] name = "reth-storage-rpc-provider" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11019,7 +11019,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.9.4" +version = "1.10.0" dependencies = [ "auto_impl", "dyn-clone", @@ -11036,7 +11036,7 @@ 
dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11052,7 +11052,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.9.4" +version = "1.10.0" dependencies = [ "tokio", "tokio-stream", @@ -11061,7 +11061,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.9.4" +version = "1.10.0" dependencies = [ "clap", "eyre", @@ -11079,7 +11079,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.9.4" +version = "1.10.0" dependencies = [ "clap", "eyre", @@ -11096,7 +11096,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11144,7 +11144,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11178,7 +11178,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11211,7 +11211,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11238,7 +11238,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11268,7 +11268,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11301,7 +11301,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.9.4" +version = "1.10.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11331,7 +11331,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.9.4" +version = "1.10.0" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index 
4f7d9ea8a8f..a4fc2e292ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.9.4" +version = "1.10.0" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 9ac8bbe3b0e..f29394c6928 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.9.4', + text: 'v1.10.0', items: [ { text: 'Releases', From 9bcd3712c8fe306ec0db40ba389c5ce4d64e5eca Mon Sep 17 00:00:00 2001 From: YK Date: Thu, 15 Jan 2026 19:16:40 +0800 Subject: [PATCH 027/267] test(storage): add parametrized MDBX/RocksDB history lookup equivalence tests (#20871) --- crates/storage/provider/src/either_writer.rs | 452 +++++++++++++++++- crates/storage/provider/src/providers/mod.rs | 4 +- .../src/providers/rocksdb/provider.rs | 133 +----- .../src/providers/state/historical.rs | 106 ++-- 4 files changed, 515 insertions(+), 180 deletions(-) diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs index a437db2561e..5336b773e6c 100644 --- a/crates/storage/provider/src/either_writer.rs +++ b/crates/storage/provider/src/either_writer.rs @@ -10,7 +10,7 @@ use std::{ #[cfg(all(unix, feature = "rocksdb"))] use crate::providers::rocksdb::RocksDBBatch; use crate::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut}, + providers::{history_info, HistoryInfo, StaticFileProvider, StaticFileProviderRWRefMut}, StaticFileProviderFactory, }; use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber}; @@ -708,7 +708,7 @@ impl EitherReader<'_, CURSOR, N> where CURSOR: DbCursorRO, { - /// Gets a storage history entry. + /// Gets a storage history shard entry for the given [`StorageShardedKey`], if present. 
pub fn get_storage_history( &mut self, key: StorageShardedKey, @@ -720,13 +720,43 @@ where Self::RocksDB(tx) => tx.get::(key), } } + + /// Lookup storage history and return [`HistoryInfo`]. + pub fn storage_history_info( + &mut self, + address: Address, + storage_key: alloy_primitives::B256, + block_number: BlockNumber, + lowest_available_block_number: Option, + ) -> ProviderResult { + match self { + Self::Database(cursor, _) => { + let key = StorageShardedKey::new(address, storage_key, block_number); + history_info::( + cursor, + key, + block_number, + |k| k.address == address && k.sharded_key.key == storage_key, + lowest_available_block_number, + ) + } + Self::StaticFile(_, _) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(tx) => tx.storage_history_info( + address, + storage_key, + block_number, + lowest_available_block_number, + ), + } + } } impl EitherReader<'_, CURSOR, N> where CURSOR: DbCursorRO, { - /// Gets an account history entry. + /// Gets an account history shard entry for the given [`ShardedKey`], if present. pub fn get_account_history( &mut self, key: ShardedKey

, @@ -738,6 +768,32 @@ where Self::RocksDB(tx) => tx.get::(key), } } + + /// Lookup account history and return [`HistoryInfo`]. + pub fn account_history_info( + &mut self, + address: Address, + block_number: BlockNumber, + lowest_available_block_number: Option, + ) -> ProviderResult { + match self { + Self::Database(cursor, _) => { + let key = ShardedKey::new(address, block_number); + history_info::( + cursor, + key, + block_number, + |k| k.key == address, + lowest_available_block_number, + ) + } + Self::StaticFile(_, _) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(tx) => { + tx.account_history_info(address, block_number, lowest_available_block_number) + } + } + } } impl EitherReader<'_, CURSOR, N> @@ -894,8 +950,11 @@ mod rocksdb_tests { use reth_db_api::{ models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey}, tables, + transaction::DbTxMut, }; + use reth_ethereum_primitives::EthPrimitives; use reth_storage_api::{DatabaseProviderFactory, StorageSettings}; + use std::marker::PhantomData; use tempfile::TempDir; fn create_rocksdb_provider() -> (TempDir, RocksDBProvider) { @@ -1125,10 +1184,391 @@ mod rocksdb_tests { assert_eq!(provider.get::(key).unwrap(), None); } - /// Test that `RocksDB` commits happen at `provider.commit()` level, not at writer level. + // ==================== Parametrized Backend Equivalence Tests ==================== + // + // These tests verify that MDBX and RocksDB produce identical results for history lookups. + // Each scenario sets up the same data in both backends and asserts identical HistoryInfo. + + /// Query parameters for a history lookup test case. 
+ struct HistoryQuery { + block_number: BlockNumber, + lowest_available: Option, + expected: HistoryInfo, + } + + // Type aliases for cursor types (needed for EitherWriter/EitherReader type inference) + type AccountsHistoryWriteCursor = + reth_db::mdbx::cursor::Cursor; + type StoragesHistoryWriteCursor = + reth_db::mdbx::cursor::Cursor; + type AccountsHistoryReadCursor = + reth_db::mdbx::cursor::Cursor; + type StoragesHistoryReadCursor = + reth_db::mdbx::cursor::Cursor; + + /// Runs the same account history queries against both MDBX and `RocksDB` backends, + /// asserting they produce identical results. + fn run_account_history_scenario( + scenario_name: &str, + address: Address, + shards: &[(BlockNumber, Vec)], // (shard_highest_block, blocks_in_shard) + queries: &[HistoryQuery], + ) { + // Setup MDBX and RocksDB with identical data using EitherWriter + let factory = create_test_provider_factory(); + let mdbx_provider = factory.database_provider_rw().unwrap(); + let (temp_dir, rocks_provider) = create_rocksdb_provider(); + + // Create writers for both backends + let mut mdbx_writer: EitherWriter<'_, AccountsHistoryWriteCursor, EthPrimitives> = + EitherWriter::Database( + mdbx_provider.tx_ref().cursor_write::().unwrap(), + ); + let mut rocks_writer: EitherWriter<'_, AccountsHistoryWriteCursor, EthPrimitives> = + EitherWriter::RocksDB(rocks_provider.batch()); + + // Write identical data to both backends in a single loop + for (highest_block, blocks) in shards { + let key = ShardedKey::new(address, *highest_block); + let value = IntegerList::new(blocks.clone()).unwrap(); + mdbx_writer.put_account_history(key.clone(), &value).unwrap(); + rocks_writer.put_account_history(key, &value).unwrap(); + } + + // Commit both backends + drop(mdbx_writer); + mdbx_provider.commit().unwrap(); + if let EitherWriter::RocksDB(batch) = rocks_writer { + batch.commit().unwrap(); + } + + // Run queries against both backends using EitherReader + let mdbx_ro = 
factory.database_provider_ro().unwrap(); + let rocks_tx = rocks_provider.tx(); + + for (i, query) in queries.iter().enumerate() { + // MDBX query via EitherReader + let mut mdbx_reader: EitherReader<'_, AccountsHistoryReadCursor, EthPrimitives> = + EitherReader::Database( + mdbx_ro.tx_ref().cursor_read::().unwrap(), + PhantomData, + ); + let mdbx_result = mdbx_reader + .account_history_info(address, query.block_number, query.lowest_available) + .unwrap(); + + // RocksDB query via EitherReader + let mut rocks_reader: EitherReader<'_, AccountsHistoryReadCursor, EthPrimitives> = + EitherReader::RocksDB(&rocks_tx); + let rocks_result = rocks_reader + .account_history_info(address, query.block_number, query.lowest_available) + .unwrap(); + + // Assert both backends produce identical results + assert_eq!( + mdbx_result, + rocks_result, + "Backend mismatch in scenario '{}' query {}: block={}, lowest={:?}\n\ + MDBX: {:?}, RocksDB: {:?}", + scenario_name, + i, + query.block_number, + query.lowest_available, + mdbx_result, + rocks_result + ); + + // Also verify against expected result + assert_eq!( + mdbx_result, + query.expected, + "Unexpected result in scenario '{}' query {}: block={}, lowest={:?}\n\ + Got: {:?}, Expected: {:?}", + scenario_name, + i, + query.block_number, + query.lowest_available, + mdbx_result, + query.expected + ); + } + + rocks_tx.rollback().unwrap(); + drop(temp_dir); + } + + /// Runs the same storage history queries against both MDBX and `RocksDB` backends, + /// asserting they produce identical results. 
+ fn run_storage_history_scenario( + scenario_name: &str, + address: Address, + storage_key: B256, + shards: &[(BlockNumber, Vec)], // (shard_highest_block, blocks_in_shard) + queries: &[HistoryQuery], + ) { + // Setup MDBX and RocksDB with identical data using EitherWriter + let factory = create_test_provider_factory(); + let mdbx_provider = factory.database_provider_rw().unwrap(); + let (temp_dir, rocks_provider) = create_rocksdb_provider(); + + // Create writers for both backends + let mut mdbx_writer: EitherWriter<'_, StoragesHistoryWriteCursor, EthPrimitives> = + EitherWriter::Database( + mdbx_provider.tx_ref().cursor_write::().unwrap(), + ); + let mut rocks_writer: EitherWriter<'_, StoragesHistoryWriteCursor, EthPrimitives> = + EitherWriter::RocksDB(rocks_provider.batch()); + + // Write identical data to both backends in a single loop + for (highest_block, blocks) in shards { + let key = StorageShardedKey::new(address, storage_key, *highest_block); + let value = IntegerList::new(blocks.clone()).unwrap(); + mdbx_writer.put_storage_history(key.clone(), &value).unwrap(); + rocks_writer.put_storage_history(key, &value).unwrap(); + } + + // Commit both backends + drop(mdbx_writer); + mdbx_provider.commit().unwrap(); + if let EitherWriter::RocksDB(batch) = rocks_writer { + batch.commit().unwrap(); + } + + // Run queries against both backends using EitherReader + let mdbx_ro = factory.database_provider_ro().unwrap(); + let rocks_tx = rocks_provider.tx(); + + for (i, query) in queries.iter().enumerate() { + // MDBX query via EitherReader + let mut mdbx_reader: EitherReader<'_, StoragesHistoryReadCursor, EthPrimitives> = + EitherReader::Database( + mdbx_ro.tx_ref().cursor_read::().unwrap(), + PhantomData, + ); + let mdbx_result = mdbx_reader + .storage_history_info( + address, + storage_key, + query.block_number, + query.lowest_available, + ) + .unwrap(); + + // RocksDB query via EitherReader + let mut rocks_reader: EitherReader<'_, StoragesHistoryReadCursor, 
EthPrimitives> = + EitherReader::RocksDB(&rocks_tx); + let rocks_result = rocks_reader + .storage_history_info( + address, + storage_key, + query.block_number, + query.lowest_available, + ) + .unwrap(); + + // Assert both backends produce identical results + assert_eq!( + mdbx_result, + rocks_result, + "Backend mismatch in scenario '{}' query {}: block={}, lowest={:?}\n\ + MDBX: {:?}, RocksDB: {:?}", + scenario_name, + i, + query.block_number, + query.lowest_available, + mdbx_result, + rocks_result + ); + + // Also verify against expected result + assert_eq!( + mdbx_result, + query.expected, + "Unexpected result in scenario '{}' query {}: block={}, lowest={:?}\n\ + Got: {:?}, Expected: {:?}", + scenario_name, + i, + query.block_number, + query.lowest_available, + mdbx_result, + query.expected + ); + } + + rocks_tx.rollback().unwrap(); + drop(temp_dir); + } + + /// Tests account history lookups across both MDBX and `RocksDB` backends. /// - /// This ensures all storage commits (MDBX, static files, `RocksDB`) happen atomically - /// in a single place, making it easier to reason about commit ordering and consistency. + /// Covers the following scenarios from PR2's `RocksDB`-only tests: + /// 1. Single shard - basic lookups within one shard + /// 2. Multiple shards - `prev()` shard detection and transitions + /// 3. No history - query address with no entries + /// 4. 
Pruning boundary - `lowest_available` boundary behavior (block at/after boundary) + #[test] + fn test_account_history_info_both_backends() { + let address = Address::from([0x42; 20]); + + // Scenario 1: Single shard with blocks [100, 200, 300] + run_account_history_scenario( + "single_shard", + address, + &[(u64::MAX, vec![100, 200, 300])], + &[ + // Before first entry -> NotYetWritten + HistoryQuery { + block_number: 50, + lowest_available: None, + expected: HistoryInfo::NotYetWritten, + }, + // Between entries -> InChangeset(next_write) + HistoryQuery { + block_number: 150, + lowest_available: None, + expected: HistoryInfo::InChangeset(200), + }, + // Exact match on entry -> InChangeset(same_block) + HistoryQuery { + block_number: 300, + lowest_available: None, + expected: HistoryInfo::InChangeset(300), + }, + // After last entry in last shard -> InPlainState + HistoryQuery { + block_number: 500, + lowest_available: None, + expected: HistoryInfo::InPlainState, + }, + ], + ); + + // Scenario 2: Multiple shards - tests prev() shard detection + run_account_history_scenario( + "multiple_shards", + address, + &[ + (500, vec![100, 200, 300, 400, 500]), // First shard ends at 500 + (u64::MAX, vec![600, 700, 800]), // Last shard + ], + &[ + // Before first shard, no prev -> NotYetWritten + HistoryQuery { + block_number: 50, + lowest_available: None, + expected: HistoryInfo::NotYetWritten, + }, + // Within first shard + HistoryQuery { + block_number: 150, + lowest_available: None, + expected: HistoryInfo::InChangeset(200), + }, + // Between shards - prev() should find first shard + HistoryQuery { + block_number: 550, + lowest_available: None, + expected: HistoryInfo::InChangeset(600), + }, + // After all entries + HistoryQuery { + block_number: 900, + lowest_available: None, + expected: HistoryInfo::InPlainState, + }, + ], + ); + + // Scenario 3: No history for address + let address_without_history = Address::from([0x43; 20]); + run_account_history_scenario( + 
"no_history", + address_without_history, + &[], // No shards for this address + &[HistoryQuery { + block_number: 150, + lowest_available: None, + expected: HistoryInfo::NotYetWritten, + }], + ); + + // Scenario 4: Query at pruning boundary + // Note: We test block >= lowest_available because HistoricalStateProviderRef + // errors on blocks below the pruning boundary before doing the lookup. + // The RocksDB implementation doesn't have this check at the same level. + // This tests that when pruning IS available, both backends agree. + run_account_history_scenario( + "with_pruning_boundary", + address, + &[(u64::MAX, vec![100, 200, 300])], + &[ + // At pruning boundary -> InChangeset(first entry after block) + HistoryQuery { + block_number: 100, + lowest_available: Some(100), + expected: HistoryInfo::InChangeset(100), + }, + // After pruning boundary, between entries + HistoryQuery { + block_number: 150, + lowest_available: Some(100), + expected: HistoryInfo::InChangeset(200), + }, + ], + ); + } + + /// Tests storage history lookups across both MDBX and `RocksDB` backends. 
+ #[test] + fn test_storage_history_info_both_backends() { + let address = Address::from([0x42; 20]); + let storage_key = B256::from([0x01; 32]); + let other_storage_key = B256::from([0x02; 32]); + + // Single shard with blocks [100, 200, 300] + run_storage_history_scenario( + "storage_single_shard", + address, + storage_key, + &[(u64::MAX, vec![100, 200, 300])], + &[ + // Before first entry -> NotYetWritten + HistoryQuery { + block_number: 50, + lowest_available: None, + expected: HistoryInfo::NotYetWritten, + }, + // Between entries -> InChangeset(next_write) + HistoryQuery { + block_number: 150, + lowest_available: None, + expected: HistoryInfo::InChangeset(200), + }, + // After last entry -> InPlainState + HistoryQuery { + block_number: 500, + lowest_available: None, + expected: HistoryInfo::InPlainState, + }, + ], + ); + + // No history for different storage key + run_storage_history_scenario( + "storage_no_history", + address, + other_storage_key, + &[], // No shards for this storage key + &[HistoryQuery { + block_number: 150, + lowest_available: None, + expected: HistoryInfo::NotYetWritten, + }], + ); + } + + /// Test that `RocksDB` batches created via `EitherWriter` are only made visible when + /// `provider.commit()` is called, not when the writer is dropped. 
#[test] fn test_rocksdb_commits_at_provider_level() { let factory = create_test_provider_factory(); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index e4f61839915..2ff34c7d2a4 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -16,8 +16,8 @@ pub use static_file::{ mod state; pub use state::{ historical::{ - needs_prev_shard_check, HistoricalStateProvider, HistoricalStateProviderRef, HistoryInfo, - LowestAvailableBlocks, + history_info, needs_prev_shard_check, HistoricalStateProvider, HistoricalStateProviderRef, + HistoryInfo, LowestAvailableBlocks, }, latest::{LatestStateProvider, LatestStateProviderRef}, overlay::{OverlayStateProvider, OverlayStateProviderFactory}, diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index d27d4c9df33..670ab0ccba0 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -1272,101 +1272,9 @@ mod tests { assert_eq!(last, Some((20, b"value_20".to_vec()))); } - #[test] - fn test_account_history_info_single_shard() { - let temp_dir = TempDir::new().unwrap(); - let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); - - let address = Address::from([0x42; 20]); - - // Create a single shard with blocks [100, 200, 300] and highest_block = u64::MAX - // This is the "last shard" invariant - let chunk = IntegerList::new([100, 200, 300]).unwrap(); - let shard_key = ShardedKey::new(address, u64::MAX); - provider.put::(shard_key, &chunk).unwrap(); - - let tx = provider.tx(); - - // Query for block 150: should find block 200 in changeset - let result = tx.account_history_info(address, 150, None).unwrap(); - assert_eq!(result, HistoryInfo::InChangeset(200)); - - // Query for block 50: should return NotYetWritten (before first entry, no 
prev shard) - let result = tx.account_history_info(address, 50, None).unwrap(); - assert_eq!(result, HistoryInfo::NotYetWritten); - - // Query for block 300: should return InChangeset(300) - exact match means look at - // changeset at that block for the previous value - let result = tx.account_history_info(address, 300, None).unwrap(); - assert_eq!(result, HistoryInfo::InChangeset(300)); - - // Query for block 500: should return InPlainState (after last entry in last shard) - let result = tx.account_history_info(address, 500, None).unwrap(); - assert_eq!(result, HistoryInfo::InPlainState); - - tx.rollback().unwrap(); - } - - #[test] - fn test_account_history_info_multiple_shards() { - let temp_dir = TempDir::new().unwrap(); - let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); - - let address = Address::from([0x42; 20]); - - // Create two shards: first shard ends at block 500, second is the last shard - let chunk1 = IntegerList::new([100, 200, 300, 400, 500]).unwrap(); - let shard_key1 = ShardedKey::new(address, 500); - provider.put::(shard_key1, &chunk1).unwrap(); - - let chunk2 = IntegerList::new([600, 700, 800]).unwrap(); - let shard_key2 = ShardedKey::new(address, u64::MAX); - provider.put::(shard_key2, &chunk2).unwrap(); - - let tx = provider.tx(); - - // Query for block 50: should return NotYetWritten (before first shard, no prev) - let result = tx.account_history_info(address, 50, None).unwrap(); - assert_eq!(result, HistoryInfo::NotYetWritten); - - // Query for block 150: should find block 200 in first shard's changeset - let result = tx.account_history_info(address, 150, None).unwrap(); - assert_eq!(result, HistoryInfo::InChangeset(200)); - - // Query for block 550: should find block 600 in second shard's changeset - // prev() should detect first shard exists - let result = tx.account_history_info(address, 550, None).unwrap(); - assert_eq!(result, HistoryInfo::InChangeset(600)); - - // Query for block 900: should 
return InPlainState (after last entry in last shard) - let result = tx.account_history_info(address, 900, None).unwrap(); - assert_eq!(result, HistoryInfo::InPlainState); - - tx.rollback().unwrap(); - } - - #[test] - fn test_account_history_info_no_history() { - let temp_dir = TempDir::new().unwrap(); - let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); - - let address1 = Address::from([0x42; 20]); - let address2 = Address::from([0x43; 20]); - - // Only add history for address1 - let chunk = IntegerList::new([100, 200, 300]).unwrap(); - let shard_key = ShardedKey::new(address1, u64::MAX); - provider.put::(shard_key, &chunk).unwrap(); - - let tx = provider.tx(); - - // Query for address2 (no history exists): should return NotYetWritten - let result = tx.account_history_info(address2, 150, None).unwrap(); - assert_eq!(result, HistoryInfo::NotYetWritten); - - tx.rollback().unwrap(); - } - + /// Tests the edge case where block < `lowest_available_block_number`. + /// This case cannot be tested via `HistoricalStateProviderRef` (which errors before lookup), + /// so we keep this RocksDB-specific test to verify the low-level behavior. 
#[test] fn test_account_history_info_pruned_before_first_entry() { let temp_dir = TempDir::new().unwrap(); @@ -1390,39 +1298,4 @@ mod tests { tx.rollback().unwrap(); } - - #[test] - fn test_storage_history_info() { - let temp_dir = TempDir::new().unwrap(); - let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); - - let address = Address::from([0x42; 20]); - let storage_key = B256::from([0x01; 32]); - - // Create a single shard for this storage slot - let chunk = IntegerList::new([100, 200, 300]).unwrap(); - let shard_key = StorageShardedKey::new(address, storage_key, u64::MAX); - provider.put::(shard_key, &chunk).unwrap(); - - let tx = provider.tx(); - - // Query for block 150: should find block 200 in changeset - let result = tx.storage_history_info(address, storage_key, 150, None).unwrap(); - assert_eq!(result, HistoryInfo::InChangeset(200)); - - // Query for block 50: should return NotYetWritten - let result = tx.storage_history_info(address, storage_key, 50, None).unwrap(); - assert_eq!(result, HistoryInfo::NotYetWritten); - - // Query for block 500: should return InPlainState - let result = tx.storage_history_info(address, storage_key, 500, None).unwrap(); - assert_eq!(result, HistoryInfo::InPlainState); - - // Query for different storage key (no history): should return NotYetWritten - let other_key = B256::from([0x02; 32]); - let result = tx.storage_history_info(address, other_key, 150, None).unwrap(); - assert_eq!(result, HistoryInfo::NotYetWritten); - - tx.rollback().unwrap(); - } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index acec7e78fff..f9bc61c7eb3 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -135,7 +135,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> // history key to search IntegerList of block number 
changesets. let history_key = ShardedKey::new(address, self.block_number); - self.history_info::( + self.history_info_lookup::( history_key, |key| key.key == address, self.lowest_available_blocks.account_history_block_number, @@ -154,7 +154,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> // history key to search IntegerList of block number changesets. let history_key = StorageShardedKey::new(address, storage_key, self.block_number); - self.history_info::( + self.history_info_lookup::( history_key, |key| key.address == address && key.sharded_key.key == storage_key, self.lowest_available_blocks.storage_history_block_number, @@ -204,7 +204,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?) } - fn history_info( + fn history_info_lookup( &self, key: K, key_filter: impl Fn(&K) -> bool, @@ -214,45 +214,13 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> T: Table, { let mut cursor = self.tx().cursor_read::()?; - - // Lookup the history chunk in the history index. If the key does not appear in the - // index, the first chunk for the next key will be returned so we filter out chunks that - // have a different key. - if let Some(chunk) = cursor.seek(key)?.filter(|(key, _)| key_filter(key)).map(|x| x.1) { - // Get the rank of the first entry before or equal to our block. - let mut rank = chunk.rank(self.block_number); - - // Adjust the rank, so that we have the rank of the first entry strictly before our - // block (not equal to it). - if rank.checked_sub(1).and_then(|r| chunk.select(r)) == Some(self.block_number) { - rank -= 1; - } - - let found_block = chunk.select(rank); - - // If our block is before the first entry in the index chunk and this first entry - // doesn't equal to our block, it might be before the first write ever. To check, we - // look at the previous entry and check if the key is the same. 
- // This check is worth it, the `cursor.prev()` check is rarely triggered (the if will - // short-circuit) and when it passes we save a full seek into the changeset/plain state - // table. - let is_before_first_write = - needs_prev_shard_check(rank, found_block, self.block_number) && - !cursor.prev()?.is_some_and(|(key, _)| key_filter(&key)); - - Ok(HistoryInfo::from_lookup( - found_block, - is_before_first_write, - lowest_available_block_number, - )) - } else if lowest_available_block_number.is_some() { - // The key may have been written, but due to pruning we may not have changesets and - // history, so we need to make a plain state lookup. - Ok(HistoryInfo::MaybeInPlainState) - } else { - // The key has not been written to at all. - Ok(HistoryInfo::NotYetWritten) - } + history_info::( + &mut cursor, + key, + self.block_number, + key_filter, + lowest_available_block_number, + ) } /// Set the lowest block number at which the account history is available. @@ -570,6 +538,60 @@ pub fn needs_prev_shard_check( rank == 0 && found_block != Some(block_number) } +/// Generic history lookup for sharded history tables. +/// +/// Seeks to the shard containing `block_number`, verifies the key via `key_filter`, +/// and checks previous shard to detect if we're before the first write. +pub fn history_info( + cursor: &mut C, + key: K, + block_number: BlockNumber, + key_filter: impl Fn(&K) -> bool, + lowest_available_block_number: Option, +) -> ProviderResult +where + T: Table, + C: DbCursorRO, +{ + // Lookup the history chunk in the history index. If the key does not appear in the + // index, the first chunk for the next key will be returned so we filter out chunks that + // have a different key. + if let Some(chunk) = cursor.seek(key)?.filter(|(k, _)| key_filter(k)).map(|x| x.1) { + // Get the rank of the first entry before or equal to our block. 
+ let mut rank = chunk.rank(block_number); + + // Adjust the rank, so that we have the rank of the first entry strictly before our + // block (not equal to it). + if rank.checked_sub(1).and_then(|r| chunk.select(r)) == Some(block_number) { + rank -= 1; + } + + let found_block = chunk.select(rank); + + // If our block is before the first entry in the index chunk and this first entry + // doesn't equal to our block, it might be before the first write ever. To check, we + // look at the previous entry and check if the key is the same. + // This check is worth it, the `cursor.prev()` check is rarely triggered (the if will + // short-circuit) and when it passes we save a full seek into the changeset/plain state + // table. + let is_before_first_write = needs_prev_shard_check(rank, found_block, block_number) && + !cursor.prev()?.is_some_and(|(k, _)| key_filter(&k)); + + Ok(HistoryInfo::from_lookup( + found_block, + is_before_first_write, + lowest_available_block_number, + )) + } else if lowest_available_block_number.is_some() { + // The key may have been written, but due to pruning we may not have changesets and + // history, so we need to make a plain state lookup. + Ok(HistoryInfo::MaybeInPlainState) + } else { + // The key has not been written to at all. 
+ Ok(HistoryInfo::NotYetWritten) + } +} + #[cfg(test)] mod tests { use super::needs_prev_shard_check; From d469b7f1d059783ecf6484f2f442f7007b46dda9 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 15 Jan 2026 12:05:30 +0000 Subject: [PATCH 028/267] feat(rpc): add flag to skip invalid transactions in testing_buildBlockV1 (#21094) Co-authored-by: Matthias Seitz --- crates/ethereum/node/src/node.rs | 12 ++++--- crates/node/core/src/args/rpc_server.rs | 10 ++++++ crates/rpc/rpc/src/testing.rs | 43 ++++++++++++++++++++--- docs/vocs/docs/pages/cli/op-reth/node.mdx | 5 +++ docs/vocs/docs/pages/cli/reth/node.mdx | 5 +++ 5 files changed, 67 insertions(+), 8 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index cf409cce9c4..95ff8072532 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -303,6 +303,8 @@ where let eth_config = EthConfigHandler::new(ctx.node.provider().clone(), ctx.node.evm_config().clone()); + let testing_skip_invalid_transactions = ctx.config.rpc.testing_skip_invalid_transactions; + self.inner .launch_add_ons_with(ctx, move |container| { container.modules.merge_if_module_configured( @@ -316,14 +318,16 @@ where // testing_buildBlockV1: only wire when the hidden testing module is explicitly // requested on any transport. Default stays disabled to honor security guidance. 
- let testing_api = TestingApi::new( + let mut testing_api = TestingApi::new( container.registry.eth_api().clone(), container.registry.evm_config().clone(), - ) - .into_rpc(); + ); + if testing_skip_invalid_transactions { + testing_api = testing_api.with_skip_invalid_transactions(); + } container .modules - .merge_if_module_configured(RethRpcModule::Testing, testing_api)?; + .merge_if_module_configured(RethRpcModule::Testing, testing_api.into_rpc())?; Ok(()) }) diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 7c2253b7b05..0b0dcc066a3 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -640,6 +640,13 @@ pub struct RpcServerArgs { value_parser = parse_duration_from_secs_or_ms, )] pub rpc_send_raw_transaction_sync_timeout: Duration, + + /// Skip invalid transactions in `testing_buildBlockV1` instead of failing. + /// + /// When enabled, transactions that fail execution will be skipped, and all subsequent + /// transactions from the same sender will also be skipped. 
+ #[arg(long = "testing.skip-invalid-transactions", default_value_t = false)] + pub testing_skip_invalid_transactions: bool, } impl RpcServerArgs { @@ -852,6 +859,7 @@ impl Default for RpcServerArgs { rpc_state_cache, gas_price_oracle, rpc_send_raw_transaction_sync_timeout, + testing_skip_invalid_transactions: false, } } } @@ -1026,6 +1034,7 @@ mod tests { default_suggested_fee: None, }, rpc_send_raw_transaction_sync_timeout: std::time::Duration::from_secs(30), + testing_skip_invalid_transactions: true, }; let parsed_args = CommandParser::::parse_from([ @@ -1114,6 +1123,7 @@ mod tests { "60", "--rpc.send-raw-transaction-sync-timeout", "30s", + "--testing.skip-invalid-transactions", ]) .args; diff --git a/crates/rpc/rpc/src/testing.rs b/crates/rpc/rpc/src/testing.rs index 833f0749e26..c1c8a65d1ce 100644 --- a/crates/rpc/rpc/src/testing.rs +++ b/crates/rpc/rpc/src/testing.rs @@ -4,7 +4,7 @@ use alloy_consensus::{Header, Transaction}; use alloy_evm::Evm; -use alloy_primitives::U256; +use alloy_primitives::{map::HashSet, Address, U256}; use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV5; use async_trait::async_trait; use jsonrpsee::core::RpcResult; @@ -19,19 +19,31 @@ use reth_rpc_eth_api::{helpers::Call, FromEthApiError}; use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_storage_api::{BlockReader, HeaderProvider}; use revm::context::Block; +use revm_primitives::map::DefaultHashBuilder; use std::sync::Arc; +use tracing::debug; /// Testing API handler. #[derive(Debug, Clone)] pub struct TestingApi { eth_api: Eth, evm_config: Evm, + /// If true, skip invalid transactions instead of failing. + skip_invalid_transactions: bool, } impl TestingApi { /// Create a new testing API handler. pub const fn new(eth_api: Eth, evm_config: Evm) -> Self { - Self { eth_api, evm_config } + Self { eth_api, evm_config, skip_invalid_transactions: false } + } + + /// Enable skipping invalid transactions instead of failing. 
+ /// When a transaction fails, all subsequent transactions from the same sender are also + /// skipped. + pub const fn with_skip_invalid_transactions(mut self) -> Self { + self.skip_invalid_transactions = true; + self } } @@ -46,6 +58,7 @@ where request: TestingBuildBlockRequestV1, ) -> Result { let evm_config = self.evm_config.clone(); + let skip_invalid_transactions = self.skip_invalid_transactions; self.eth_api .spawn_with_state_at_block(request.parent_block_hash, move |eth_api, state| { let state = state.database.0; @@ -79,11 +92,33 @@ where let mut total_fees = U256::ZERO; let base_fee = builder.evm_mut().block().basefee(); + let mut invalid_senders: HashSet = HashSet::default(); + for tx in request.transactions { let tx: Recovered> = recover_raw_transaction(&tx)?; + let sender = tx.signer(); + + if skip_invalid_transactions && invalid_senders.contains(&sender) { + continue; + } + let tip = tx.effective_tip_per_gas(base_fee).unwrap_or_default(); - let gas_used = - builder.execute_transaction(tx).map_err(Eth::Error::from_eth_err)?; + let gas_used = match builder.execute_transaction(tx) { + Ok(gas_used) => gas_used, + Err(err) => { + if skip_invalid_transactions { + debug!( + target: "rpc::testing", + ?sender, + error = ?err, + "Skipping invalid transaction" + ); + invalid_senders.insert(sender); + continue; + } + return Err(Eth::Error::from_eth_err(err)); + } + }; total_fees += U256::from(tip) * U256::from(gas_used); } diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 74964bf641f..f245315040d 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -530,6 +530,11 @@ Gas Price Oracle: [default: 30s] + --testing.skip-invalid-transactions + Skip invalid transactions in `testing_buildBlockV1` instead of failing. + + When enabled, transactions that fail execution will be skipped, and all subsequent transactions from the same sender will also be skipped. 
+ TxPool: --txpool.pending-max-count Max number of transaction in the pending sub-pool diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index d6be9ba55e4..c052076fc89 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -530,6 +530,11 @@ Gas Price Oracle: [default: 30s] + --testing.skip-invalid-transactions + Skip invalid transactions in `testing_buildBlockV1` instead of failing. + + When enabled, transactions that fail execution will be skipped, and all subsequent transactions from the same sender will also be skipped. + TxPool: --txpool.pending-max-count Max number of transaction in the pending sub-pool From d225fc1d7f37b94dad32c2e52723f5d2798e0bbe Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 15 Jan 2026 14:48:05 +0000 Subject: [PATCH 029/267] feat: add get/set db settings for `rocksdb` (#21095) --- crates/cli/commands/src/db/settings.rs | 39 ++++ .../provider/src/providers/rocksdb/metrics.rs | 6 +- docs/vocs/docs/pages/cli/SUMMARY.mdx | 6 + .../pages/cli/op-reth/db/settings/set.mdx | 11 +- .../db/settings/set/account_history.mdx | 170 ++++++++++++++++++ .../db/settings/set/storages_history.mdx | 170 ++++++++++++++++++ .../settings/set/transaction_hash_numbers.mdx | 170 ++++++++++++++++++ .../docs/pages/cli/reth/db/settings/set.mdx | 11 +- .../reth/db/settings/set/account_history.mdx | 170 ++++++++++++++++++ .../reth/db/settings/set/storages_history.mdx | 170 ++++++++++++++++++ .../settings/set/transaction_hash_numbers.mdx | 170 ++++++++++++++++++ docs/vocs/sidebar-cli-op-reth.ts | 12 ++ docs/vocs/sidebar-cli-reth.ts | 12 ++ 13 files changed, 1108 insertions(+), 9 deletions(-) create mode 100644 docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_history.mdx create mode 100644 docs/vocs/docs/pages/cli/op-reth/db/settings/set/storages_history.mdx create mode 100644 
docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_hash_numbers.mdx create mode 100644 docs/vocs/docs/pages/cli/reth/db/settings/set/account_history.mdx create mode 100644 docs/vocs/docs/pages/cli/reth/db/settings/set/storages_history.mdx create mode 100644 docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_hash_numbers.mdx diff --git a/crates/cli/commands/src/db/settings.rs b/crates/cli/commands/src/db/settings.rs index 8f13b579087..9bfe01b52a1 100644 --- a/crates/cli/commands/src/db/settings.rs +++ b/crates/cli/commands/src/db/settings.rs @@ -54,6 +54,21 @@ pub enum SetCommand { #[clap(action(ArgAction::Set))] value: bool, }, + /// Store storage history in rocksdb instead of MDBX + StoragesHistory { + #[clap(action(ArgAction::Set))] + value: bool, + }, + /// Store transaction hash to number mapping in rocksdb instead of MDBX + TransactionHashNumbers { + #[clap(action(ArgAction::Set))] + value: bool, + }, + /// Store account history in rocksdb instead of MDBX + AccountHistory { + #[clap(action(ArgAction::Set))] + value: bool, + }, } impl Command { @@ -128,6 +143,30 @@ impl Command { settings.account_changesets_in_static_files = value; println!("Set account_changesets_in_static_files = {}", value); } + SetCommand::StoragesHistory { value } => { + if settings.storages_history_in_rocksdb == value { + println!("storages_history_in_rocksdb is already set to {}", value); + return Ok(()); + } + settings.storages_history_in_rocksdb = value; + println!("Set storages_history_in_rocksdb = {}", value); + } + SetCommand::TransactionHashNumbers { value } => { + if settings.transaction_hash_numbers_in_rocksdb == value { + println!("transaction_hash_numbers_in_rocksdb is already set to {}", value); + return Ok(()); + } + settings.transaction_hash_numbers_in_rocksdb = value; + println!("Set transaction_hash_numbers_in_rocksdb = {}", value); + } + SetCommand::AccountHistory { value } => { + if settings.account_history_in_rocksdb == value { + 
println!("account_history_in_rocksdb is already set to {}", value); + return Ok(()); + } + settings.account_history_in_rocksdb = value; + println!("Set account_history_in_rocksdb = {}", value); + } } // Write updated settings diff --git a/crates/storage/provider/src/providers/rocksdb/metrics.rs b/crates/storage/provider/src/providers/rocksdb/metrics.rs index 890d9faac2f..913016a1f34 100644 --- a/crates/storage/provider/src/providers/rocksdb/metrics.rs +++ b/crates/storage/provider/src/providers/rocksdb/metrics.rs @@ -6,7 +6,11 @@ use reth_db::Tables; use reth_metrics::Metrics; use strum::{EnumIter, IntoEnumIterator}; -const ROCKSDB_TABLES: &[&str] = &[Tables::TransactionHashNumbers.name()]; +const ROCKSDB_TABLES: &[&str] = &[ + Tables::TransactionHashNumbers.name(), + Tables::StoragesHistory.name(), + Tables::AccountsHistory.name(), +]; /// Metrics for the `RocksDB` provider. #[derive(Debug)] diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 882b1f292f0..4381ed78427 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -30,6 +30,9 @@ - [`reth db settings set receipts`](./reth/db/settings/set/receipts.mdx) - [`reth db settings set transaction_senders`](./reth/db/settings/set/transaction_senders.mdx) - [`reth db settings set account_changesets`](./reth/db/settings/set/account_changesets.mdx) + - [`reth db settings set storages_history`](./reth/db/settings/set/storages_history.mdx) + - [`reth db settings set transaction_hash_numbers`](./reth/db/settings/set/transaction_hash_numbers.mdx) + - [`reth db settings set account_history`](./reth/db/settings/set/account_history.mdx) - [`reth db account-storage`](./reth/db/account-storage.mdx) - [`reth download`](./reth/download.mdx) - [`reth stage`](./reth/stage.mdx) @@ -83,6 +86,9 @@ - [`op-reth db settings set receipts`](./op-reth/db/settings/set/receipts.mdx) - [`op-reth db settings set 
transaction_senders`](./op-reth/db/settings/set/transaction_senders.mdx) - [`op-reth db settings set account_changesets`](./op-reth/db/settings/set/account_changesets.mdx) + - [`op-reth db settings set storages_history`](./op-reth/db/settings/set/storages_history.mdx) + - [`op-reth db settings set transaction_hash_numbers`](./op-reth/db/settings/set/transaction_hash_numbers.mdx) + - [`op-reth db settings set account_history`](./op-reth/db/settings/set/account_history.mdx) - [`op-reth db account-storage`](./op-reth/db/account-storage.mdx) - [`op-reth stage`](./op-reth/stage.mdx) - [`op-reth stage run`](./op-reth/stage/run.mdx) diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx index 76cf564715a..c804080a0d6 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx @@ -9,10 +9,13 @@ $ op-reth db settings set --help Usage: op-reth db settings set [OPTIONS] Commands: - receipts Store receipts in static files instead of the database - transaction_senders Store transaction senders in static files instead of the database - account_changesets Store account changesets in static files instead of the database - help Print this message or the help of the given subcommand(s) + receipts Store receipts in static files instead of the database + transaction_senders Store transaction senders in static files instead of the database + account_changesets Store account changesets in static files instead of the database + storages_history Store storage history in rocksdb instead of MDBX + transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX + account_history Store account history in rocksdb instead of MDBX + help Print this message or the help of the given subcommand(s) Options: -h, --help diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_history.mdx 
b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_history.mdx new file mode 100644 index 00000000000..641475ab142 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/account_history.mdx @@ -0,0 +1,170 @@ +# op-reth db settings set account_history + +Store account history in rocksdb instead of MDBX + +```bash +$ op-reth db settings set account_history --help +``` +```txt +Usage: op-reth db settings set account_history [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. 
This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storages_history.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storages_history.mdx new file mode 100644 index 00000000000..bef26be1a49 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storages_history.mdx @@ -0,0 +1,170 @@ +# op-reth db settings set storages_history + +Store storage history in rocksdb instead of MDBX + +```bash +$ op-reth db settings set storages_history --help +``` +```txt +Usage: op-reth db settings set storages_history [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. 
+ + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_hash_numbers.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_hash_numbers.mdx new file mode 100644 index 00000000000..b7f7dda97f1 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_hash_numbers.mdx @@ -0,0 +1,170 @@ +# op-reth db settings set transaction_hash_numbers + +Store transaction hash to number mapping in rocksdb instead of MDBX + +```bash +$ op-reth db settings set transaction_hash_numbers --help +``` +```txt +Usage: op-reth db settings set transaction_hash_numbers [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx index 8c6f1c5273e..53a1b8aea06 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx @@ -9,10 +9,13 @@ $ reth db settings set --help Usage: reth db settings set [OPTIONS] Commands: - receipts Store receipts in static files instead of the database - transaction_senders Store transaction senders in static files instead of the database - account_changesets Store account changesets in static files instead of the database - help Print this message or the help of the given subcommand(s) + receipts Store receipts in static files instead of the database + transaction_senders Store transaction senders in static files instead of the database + account_changesets Store account changesets in static files instead of the database + storages_history Store storage history in rocksdb instead of MDBX + transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX + account_history Store account history in rocksdb instead of MDBX + help Print this message or the help of the given subcommand(s) Options: -h, --help diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/account_history.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/account_history.mdx new file mode 100644 index 00000000000..b109c98ec7d --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/account_history.mdx @@ -0,0 +1,170 @@ +# reth db settings set account_history + +Store account history in rocksdb instead of MDBX + +```bash +$ reth db settings set account_history --help +``` +```txt +Usage: reth db settings set account_history [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. 
+ Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/storages_history.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/storages_history.mdx new file mode 100644 index 00000000000..484de7c91bb --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/storages_history.mdx @@ -0,0 +1,170 @@ +# reth db settings set storages_history + +Store storage history in rocksdb instead of MDBX + +```bash +$ reth db settings set storages_history --help +``` +```txt +Usage: reth db settings set storages_history [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. 
+ + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_hash_numbers.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_hash_numbers.mdx new file mode 100644 index 00000000000..1724b1493b0 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_hash_numbers.mdx @@ -0,0 +1,170 @@ +# reth db settings set transaction_hash_numbers + +Store transaction hash to number mapping in rocksdb instead of MDBX + +```bash +$ reth db settings set transaction_hash_numbers --help +``` +```txt +Usage: reth db settings set transaction_hash_numbers [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/sidebar-cli-op-reth.ts b/docs/vocs/sidebar-cli-op-reth.ts index ac5c356f5fc..ad2c6be69c7 100644 --- a/docs/vocs/sidebar-cli-op-reth.ts +++ b/docs/vocs/sidebar-cli-op-reth.ts @@ -136,6 +136,18 @@ export const opRethCliSidebar: SidebarItem = { { text: "op-reth db settings set account_changesets", link: "/cli/op-reth/db/settings/set/account_changesets" + }, + { + text: "op-reth db settings set storages_history", + link: "/cli/op-reth/db/settings/set/storages_history" + }, + { + text: "op-reth db settings set transaction_hash_numbers", + link: "/cli/op-reth/db/settings/set/transaction_hash_numbers" + }, + { + text: "op-reth db settings set account_history", + link: "/cli/op-reth/db/settings/set/account_history" } ] } diff --git a/docs/vocs/sidebar-cli-reth.ts b/docs/vocs/sidebar-cli-reth.ts index f789bb7cc80..1b0f88b4035 100644 --- a/docs/vocs/sidebar-cli-reth.ts +++ b/docs/vocs/sidebar-cli-reth.ts @@ -140,6 +140,18 @@ export const rethCliSidebar: SidebarItem = { { text: "reth db settings set account_changesets", link: "/cli/reth/db/settings/set/account_changesets" + }, + { + text: "reth db settings set storages_history", + link: "/cli/reth/db/settings/set/storages_history" + }, + { + text: "reth db settings set transaction_hash_numbers", + link: "/cli/reth/db/settings/set/transaction_hash_numbers" + }, + { + text: "reth db settings set account_history", + link: "/cli/reth/db/settings/set/account_history" } ] } From f012b3391e8c5121b5b055c7eee14da0a6ff3187 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 15 Jan 2026 14:58:06 +0000 Subject: [PATCH 030/267] feat: parallelize `save_blocks` (#20993) Co-authored-by: Sergei Shulepov Co-authored-by: Sergei Shulepov Co-authored-by: Brian Picciano --- crates/engine/tree/src/persistence.rs | 4 +- .../cli/src/commands/import_receipts.rs | 8 +- crates/stages/stages/src/stages/execution.rs | 4 
+- crates/storage/db-common/src/init.rs | 10 +- crates/storage/errors/src/provider.rs | 3 + crates/storage/libmdbx-rs/src/txn_manager.rs | 7 + crates/storage/provider/src/either_writer.rs | 35 ++ crates/storage/provider/src/lib.rs | 7 +- .../src/providers/blockchain_provider.rs | 9 +- .../src/providers/database/metrics.rs | 93 +++- .../provider/src/providers/database/mod.rs | 2 +- .../src/providers/database/provider.rs | 525 ++++++++++++------ crates/storage/provider/src/providers/mod.rs | 2 +- .../src/providers/static_file/manager.rs | 217 +++++++- .../provider/src/providers/static_file/mod.rs | 3 +- .../src/providers/static_file/writer.rs | 11 +- crates/storage/provider/src/writer/mod.rs | 20 +- .../storage/storage-api/src/state_writer.rs | 26 +- testing/ef-tests/src/cases/blockchain_test.rs | 8 +- 19 files changed, 763 insertions(+), 231 deletions(-) diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 5dbaefcd298..314d0eba9de 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -7,7 +7,7 @@ use reth_ethereum_primitives::EthPrimitives; use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockHashReader, ChainStateBlockWriter, - DBProvider, DatabaseProviderFactory, ProviderFactory, + DBProvider, DatabaseProviderFactory, ProviderFactory, SaveBlocksMode, }; use reth_prune::{PrunerError, PrunerOutput, PrunerWithFactory}; use reth_stages_api::{MetricEvent, MetricEventsSender}; @@ -151,7 +151,7 @@ where if last_block.is_some() { let provider_rw = self.provider.database_provider_rw()?; - provider_rw.save_blocks(blocks)?; + provider_rw.save_blocks(blocks, SaveBlocksMode::Full)?; provider_rw.commit()?; } diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index db25afe9099..8ab71f66102 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ 
b/crates/optimism/cli/src/commands/import_receipts.rs @@ -18,7 +18,7 @@ use reth_optimism_primitives::{bedrock::is_dup_tx, OpPrimitives, OpReceipt}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::ProviderNodeTypes, DBProvider, DatabaseProviderFactory, OriginalValuesKnown, - ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StateWriter, + ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StateWriteConfig, StateWriter, StaticFileProviderFactory, StatsReader, }; use reth_stages::{StageCheckpoint, StageId}; @@ -228,7 +228,11 @@ where ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default()); // finally, write the receipts - provider.write_state(&execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state( + &execution_outcome, + OriginalValuesKnown::Yes, + StateWriteConfig::default(), + )?; } // Only commit if we have imported as many receipts as the number of transactions. diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index f78b8258220..593180926dd 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -12,7 +12,7 @@ use reth_primitives_traits::{format_gas_throughput, BlockBody, NodePrimitives}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockReader, DBProvider, EitherWriter, ExecutionOutcome, HeaderProvider, - LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriteConfig, StateWriter, StaticFileProviderFactory, StatsReader, StorageSettingsCache, TransactionVariant, }; use reth_revm::database::StateProviderDatabase; @@ -463,7 +463,7 @@ where } // write output - provider.write_state(&state, OriginalValuesKnown::Yes)?; + provider.write_state(&state, OriginalValuesKnown::Yes, StateWriteConfig::default())?; let db_write_duration = 
time.elapsed(); debug!( diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index d2b7b7f1141..c82025970b7 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -16,8 +16,8 @@ use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown, - ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter, - StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter, + ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriteConfig, + StateWriter, StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -334,7 +334,11 @@ where Vec::new(), ); - provider.write_state(&execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state( + &execution_outcome, + OriginalValuesKnown::Yes, + StateWriteConfig::default(), + )?; trace!(target: "reth::cli", "Inserted state"); diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 8e150046451..c6d5a2e2609 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -225,6 +225,9 @@ pub enum StaticFileWriterError { /// Cannot call `sync_all` or `finalize` when prune is queued. #[error("cannot call sync_all or finalize when prune is queued, use commit() instead")] FinalizeWithPruneQueued, + /// Thread panicked during execution. + #[error("thread panicked: {_0}")] + ThreadPanic(&'static str), /// Other error with message. 
#[error("{0}")] Other(String), diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 0b1202095e2..601d82b8055 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -58,6 +58,9 @@ impl TxnManager { match rx.recv() { Ok(msg) => match msg { TxnManagerMessage::Begin { parent, flags, sender } => { + let _span = + tracing::debug_span!(target: "libmdbx::txn", "begin", flags) + .entered(); let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); let res = mdbx_result(unsafe { ffi::mdbx_txn_begin_ex( @@ -72,9 +75,13 @@ impl TxnManager { sender.send(res).unwrap(); } TxnManagerMessage::Abort { tx, sender } => { + let _span = + tracing::debug_span!(target: "libmdbx::txn", "abort").entered(); sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap(); } TxnManagerMessage::Commit { tx, sender } => { + let _span = + tracing::debug_span!(target: "libmdbx::txn", "commit").entered(); sender .send({ let mut latency = CommitLatency::new(); diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs index 5336b773e6c..5cc79d85227 100644 --- a/crates/storage/provider/src/either_writer.rs +++ b/crates/storage/provider/src/either_writer.rs @@ -429,6 +429,41 @@ where } } + /// Puts multiple transaction hash number mappings in a batch. + /// + /// Accepts a vector of `(TxHash, TxNumber)` tuples and writes them all using the same cursor. + /// This is more efficient than calling `put_transaction_hash_number` repeatedly. + /// + /// When `append_only` is true, uses `cursor.append()` which requires entries to be + /// pre-sorted and the table to be empty or have only lower keys. + /// When false, uses `cursor.upsert()` which handles arbitrary insertion order. 
+ pub fn put_transaction_hash_numbers_batch( + &mut self, + entries: Vec<(TxHash, TxNumber)>, + append_only: bool, + ) -> ProviderResult<()> { + match self { + Self::Database(cursor) => { + for (hash, tx_num) in entries { + if append_only { + cursor.append(hash, &tx_num)?; + } else { + cursor.upsert(hash, &tx_num)?; + } + } + Ok(()) + } + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => { + for (hash, tx_num) in entries { + batch.put::(hash, &tx_num)?; + } + Ok(()) + } + } + } + /// Deletes a transaction hash number mapping. pub fn delete_transaction_hash_number(&mut self, hash: TxHash) -> ProviderResult<()> { match self { diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 317216dc940..bfab44cb2ac 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,7 +21,8 @@ pub mod providers; pub use providers::{ DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory, - StaticFileAccess, StaticFileProviderBuilder, StaticFileWriter, + SaveBlocksMode, StaticFileAccess, StaticFileProviderBuilder, StaticFileWriteCtx, + StaticFileWriter, }; pub mod changeset_walker; @@ -44,8 +45,8 @@ pub use revm_database::states::OriginalValuesKnown; // reexport traits to avoid breaking changes pub use reth_static_file_types as static_file; pub use reth_storage_api::{ - HistoryWriter, MetadataProvider, MetadataWriter, StatsReader, StorageSettings, - StorageSettingsCache, + HistoryWriter, MetadataProvider, MetadataWriter, StateWriteConfig, StatsReader, + StorageSettings, StorageSettingsCache, }; /// Re-export provider error. 
pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 3565d99d8d9..e12095ff446 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -789,7 +789,7 @@ mod tests { create_test_provider_factory, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - BlockWriter, CanonChainTracker, ProviderFactory, + BlockWriter, CanonChainTracker, ProviderFactory, SaveBlocksMode, }; use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; @@ -808,8 +808,8 @@ mod tests { use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DBProvider, DatabaseProviderFactory, - HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, StateProviderFactory, StateWriter, - TransactionVariant, TransactionsProvider, + HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, StateProviderFactory, + StateWriteConfig, StateWriter, TransactionVariant, TransactionsProvider, }; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_changeset_range, random_eoa_accounts, @@ -907,6 +907,7 @@ mod tests { ..Default::default() }, OriginalValuesKnown::No, + StateWriteConfig::default(), )?; } @@ -997,7 +998,7 @@ mod tests { // Push to disk let provider_rw = hook_provider.database_provider_rw().unwrap(); - provider_rw.save_blocks(vec![lowest_memory_block]).unwrap(); + provider_rw.save_blocks(vec![lowest_memory_block], SaveBlocksMode::Full).unwrap(); provider_rw.commit().unwrap(); // Remove from memory diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index de7dc0b5429..45186da71db 
100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -40,16 +40,8 @@ pub(crate) enum Action { InsertHeaderNumbers, InsertBlockBodyIndices, InsertTransactionBlocks, - GetNextTxNum, InsertTransactionSenders, InsertTransactionHashNumbers, - SaveBlocksInsertBlock, - SaveBlocksWriteState, - SaveBlocksWriteHashedState, - SaveBlocksWriteTrieChangesets, - SaveBlocksWriteTrieUpdates, - SaveBlocksUpdateHistoryIndices, - SaveBlocksUpdatePipelineStages, } /// Database provider metrics @@ -66,19 +58,24 @@ pub(crate) struct DatabaseProviderMetrics { insert_history_indices: Histogram, /// Duration of update pipeline stages update_pipeline_stages: Histogram, - /// Duration of insert canonical headers /// Duration of insert header numbers insert_header_numbers: Histogram, /// Duration of insert block body indices insert_block_body_indices: Histogram, /// Duration of insert transaction blocks insert_tx_blocks: Histogram, - /// Duration of get next tx num - get_next_tx_num: Histogram, /// Duration of insert transaction senders insert_transaction_senders: Histogram, /// Duration of insert transaction hash numbers insert_transaction_hash_numbers: Histogram, + /// Duration of `save_blocks` + save_blocks_total: Histogram, + /// Duration of MDBX work in `save_blocks` + save_blocks_mdbx: Histogram, + /// Duration of static file work in `save_blocks` + save_blocks_sf: Histogram, + /// Duration of `RocksDB` work in `save_blocks` + save_blocks_rocksdb: Histogram, /// Duration of `insert_block` in `save_blocks` save_blocks_insert_block: Histogram, /// Duration of `write_state` in `save_blocks` @@ -93,6 +90,39 @@ pub(crate) struct DatabaseProviderMetrics { save_blocks_update_history_indices: Histogram, /// Duration of `update_pipeline_stages` in `save_blocks` save_blocks_update_pipeline_stages: Histogram, + /// Number of blocks per `save_blocks` call + save_blocks_block_count: Histogram, + /// Duration of MDBX 
commit in `save_blocks` + save_blocks_commit_mdbx: Histogram, + /// Duration of static file commit in `save_blocks` + save_blocks_commit_sf: Histogram, + /// Duration of `RocksDB` commit in `save_blocks` + save_blocks_commit_rocksdb: Histogram, +} + +/// Timings collected during a `save_blocks` call. +#[derive(Debug, Default)] +pub(crate) struct SaveBlocksTimings { + pub total: Duration, + pub mdbx: Duration, + pub sf: Duration, + pub rocksdb: Duration, + pub insert_block: Duration, + pub write_state: Duration, + pub write_hashed_state: Duration, + pub write_trie_changesets: Duration, + pub write_trie_updates: Duration, + pub update_history_indices: Duration, + pub update_pipeline_stages: Duration, + pub block_count: u64, +} + +/// Timings collected during a `commit` call. +#[derive(Debug, Default)] +pub(crate) struct CommitTimings { + pub mdbx: Duration, + pub sf: Duration, + pub rocksdb: Duration, } impl DatabaseProviderMetrics { @@ -107,28 +137,33 @@ impl DatabaseProviderMetrics { Action::InsertHeaderNumbers => self.insert_header_numbers.record(duration), Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), - Action::GetNextTxNum => self.get_next_tx_num.record(duration), Action::InsertTransactionSenders => self.insert_transaction_senders.record(duration), Action::InsertTransactionHashNumbers => { self.insert_transaction_hash_numbers.record(duration) } - Action::SaveBlocksInsertBlock => self.save_blocks_insert_block.record(duration), - Action::SaveBlocksWriteState => self.save_blocks_write_state.record(duration), - Action::SaveBlocksWriteHashedState => { - self.save_blocks_write_hashed_state.record(duration) - } - Action::SaveBlocksWriteTrieChangesets => { - self.save_blocks_write_trie_changesets.record(duration) - } - Action::SaveBlocksWriteTrieUpdates => { - self.save_blocks_write_trie_updates.record(duration) - } - Action::SaveBlocksUpdateHistoryIndices => 
{ - self.save_blocks_update_history_indices.record(duration) - } - Action::SaveBlocksUpdatePipelineStages => { - self.save_blocks_update_pipeline_stages.record(duration) - } } } + + /// Records all `save_blocks` timings. + pub(crate) fn record_save_blocks(&self, timings: &SaveBlocksTimings) { + self.save_blocks_total.record(timings.total); + self.save_blocks_mdbx.record(timings.mdbx); + self.save_blocks_sf.record(timings.sf); + self.save_blocks_rocksdb.record(timings.rocksdb); + self.save_blocks_insert_block.record(timings.insert_block); + self.save_blocks_write_state.record(timings.write_state); + self.save_blocks_write_hashed_state.record(timings.write_hashed_state); + self.save_blocks_write_trie_changesets.record(timings.write_trie_changesets); + self.save_blocks_write_trie_updates.record(timings.write_trie_updates); + self.save_blocks_update_history_indices.record(timings.update_history_indices); + self.save_blocks_update_pipeline_stages.record(timings.update_pipeline_stages); + self.save_blocks_block_count.record(timings.block_count as f64); + } + + /// Records all commit timings. 
+ pub(crate) fn record_commit(&self, timings: &CommitTimings) { + self.save_blocks_commit_mdbx.record(timings.mdbx); + self.save_blocks_commit_sf.record(timings.sf); + self.save_blocks_commit_rocksdb.record(timings.rocksdb); + } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 38208f14c35..99c81755b47 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -43,7 +43,7 @@ use std::{ use tracing::trace; mod provider; -pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; +pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, SaveBlocksMode}; use super::ProviderNodeTypes; use reth_trie::KeccakKeyHasher; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 78424785cc4..692bc7737cd 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -5,7 +5,7 @@ use crate::{ providers::{ database::{chain::ChainStorage, metrics}, rocksdb::RocksDBProvider, - static_file::StaticFileWriter, + static_file::{StaticFileWriteCtx, StaticFileWriter}, NodeTypesForProvider, StaticFileProvider, }, to_range, @@ -35,7 +35,7 @@ use alloy_primitives::{ use itertools::Itertools; use parking_lot::RwLock; use rayon::slice::ParallelSliceMut; -use reth_chain_state::ExecutedBlock; +use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, @@ -61,10 +61,10 @@ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ BlockBodyIndicesProvider, BlockBodyReader, MetadataProvider, MetadataWriter, - NodePrimitivesProvider, StateProvider, 
StorageChangeSetReader, StorageSettingsCache, - TryIntoHistoricalStateProvider, + NodePrimitivesProvider, StateProvider, StateWriteConfig, StorageChangeSetReader, + StorageSettingsCache, TryIntoHistoricalStateProvider, }; -use reth_storage_errors::provider::ProviderResult; +use reth_storage_errors::provider::{ProviderResult, StaticFileWriterError}; use reth_trie::{ trie_cursor::{ InMemoryTrieCursor, InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory, @@ -85,9 +85,10 @@ use std::{ fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeFrom, RangeInclusive}, sync::Arc, - time::{Duration, Instant}, + thread, + time::Instant, }; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace}; /// A [`DatabaseProvider`] that holds a read-only database transaction. pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -150,6 +151,25 @@ impl From> } } +/// Mode for [`DatabaseProvider::save_blocks`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SaveBlocksMode { + /// Full mode: write block structure + receipts + state + trie. + /// Used by engine/production code. + Full, + /// Blocks only: write block structure (headers, txs, senders, indices). + /// Receipts/state/trie are skipped - they may come later via separate calls. + /// Used by `insert_block`. + BlocksOnly, +} + +impl SaveBlocksMode { + /// Returns `true` if this is [`SaveBlocksMode::Full`]. + pub const fn with_state(self) -> bool { + matches!(self, Self::Full) + } +} + /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. 
Example: [`HeaderProvider`] [`BlockHashReader`] pub struct DatabaseProvider { @@ -356,98 +376,257 @@ impl DatabaseProvider ProviderResult { + let tip = self.last_block_number()?.max(last_block); + Ok(StaticFileWriteCtx { + write_senders: EitherWriterDestination::senders(self).is_static_file() && + self.prune_modes.sender_recovery.is_none_or(|m| !m.is_full()), + write_receipts: save_mode.with_state() && + EitherWriter::receipts_destination(self).is_static_file(), + write_account_changesets: save_mode.with_state() && + EitherWriterDestination::account_changesets(self).is_static_file(), + tip, + receipts_prune_mode: self.prune_modes.receipts, + // Receipts are prunable if no receipts exist in SF yet and within pruning distance + receipts_prunable: self + .static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .is_none() && + PruneMode::Distance(self.minimum_pruning_distance) + .should_prune(first_block, tip), + }) + } + /// Writes executed blocks and state to storage. - pub fn save_blocks(&self, blocks: Vec>) -> ProviderResult<()> { + /// + /// This method parallelizes static file (SF) writes with MDBX writes. + /// The SF thread writes headers, transactions, senders (if SF), and receipts (if SF, Full mode + /// only). The main thread writes MDBX data (indices, state, trie - Full mode only). + /// + /// Use [`SaveBlocksMode::Full`] for production (includes receipts, state, trie). + /// Use [`SaveBlocksMode::BlocksOnly`] for block structure only (used by `insert_block`). 
+ #[instrument(level = "debug", target = "providers::db", skip_all, fields(block_count = blocks.len()))] + pub fn save_blocks( + &self, + blocks: Vec>, + save_mode: SaveBlocksMode, + ) -> ProviderResult<()> { if blocks.is_empty() { debug!(target: "providers::db", "Attempted to write empty block range"); return Ok(()) } - // NOTE: checked non-empty above - let first_block = blocks.first().unwrap().recovered_block(); - - let last_block = blocks.last().unwrap().recovered_block(); - let first_number = first_block.number(); - let last_block_number = last_block.number(); - - debug!(target: "providers::db", block_count = %blocks.len(), "Writing blocks and execution data to storage"); - - // Accumulate durations for each step - let mut total_insert_block = Duration::ZERO; - let mut total_write_state = Duration::ZERO; - let mut total_write_hashed_state = Duration::ZERO; - let mut total_write_trie_changesets = Duration::ZERO; - let mut total_write_trie_updates = Duration::ZERO; - - // TODO: Do performant / batched writes for each type of object - // instead of a loop over all blocks, - // meaning: - // * blocks - // * state - // * hashed state - // * trie updates (cannot naively extend, need helper) - // * indices (already done basically) - // Insert the blocks - for block in blocks { - let trie_data = block.trie_data(); - let ExecutedBlock { recovered_block, execution_output, .. } = block; - let block_number = recovered_block.number(); + let total_start = Instant::now(); + let block_count = blocks.len() as u64; + let first_number = blocks.first().unwrap().recovered_block().number(); + let last_block_number = blocks.last().unwrap().recovered_block().number(); - let start = Instant::now(); - self.insert_block(&recovered_block)?; - total_insert_block += start.elapsed(); + debug!(target: "providers::db", block_count, "Writing blocks and execution data to storage"); - // Write state and changesets to the database. - // Must be written after blocks because of the receipt lookup. 
- let start = Instant::now(); - self.write_state(&execution_output, OriginalValuesKnown::No)?; - total_write_state += start.elapsed(); + // Compute tx_nums upfront (both threads need these) + let first_tx_num = self + .tx + .cursor_read::()? + .last()? + .map(|(n, _)| n + 1) + .unwrap_or_default(); - // insert hashes and intermediate merkle nodes - let start = Instant::now(); - self.write_hashed_state(&trie_data.hashed_state)?; - total_write_hashed_state += start.elapsed(); + let tx_nums: Vec = { + let mut nums = Vec::with_capacity(blocks.len()); + let mut current = first_tx_num; + for block in &blocks { + nums.push(current); + current += block.recovered_block().body().transaction_count() as u64; + } + nums + }; + + let mut timings = metrics::SaveBlocksTimings { block_count, ..Default::default() }; + + // avoid capturing &self.tx in scope below. + let sf_provider = &self.static_file_provider; + let sf_ctx = self.static_file_write_ctx(save_mode, first_number, last_block_number)?; + + thread::scope(|s| { + // SF writes + let sf_handle = s.spawn(|| { + let start = Instant::now(); + sf_provider.write_blocks_data(&blocks, &tx_nums, sf_ctx)?; + Ok::<_, ProviderError>(start.elapsed()) + }); + // MDBX writes + let mdbx_start = Instant::now(); + + // Collect all transaction hashes across all blocks, sort them, and write in batch + if !self.cached_storage_settings().transaction_hash_numbers_in_rocksdb && + self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) + { + let start = Instant::now(); + let mut all_tx_hashes = Vec::new(); + for (i, block) in blocks.iter().enumerate() { + let recovered_block = block.recovered_block(); + let mut tx_num = tx_nums[i]; + for transaction in recovered_block.body().transactions_iter() { + all_tx_hashes.push((*transaction.tx_hash(), tx_num)); + tx_num += 1; + } + } + + // Sort by hash for optimal MDBX insertion performance + all_tx_hashes.sort_unstable_by_key(|(hash, _)| *hash); + + // Write all transaction hash numbers in a single 
batch + self.with_rocksdb_batch(|batch| { + let mut tx_hash_writer = + EitherWriter::new_transaction_hash_numbers(self, batch)?; + tx_hash_writer.put_transaction_hash_numbers_batch(all_tx_hashes, false)?; + let raw_batch = tx_hash_writer.into_raw_rocksdb_batch(); + Ok(((), raw_batch)) + })?; + self.metrics.record_duration( + metrics::Action::InsertTransactionHashNumbers, + start.elapsed(), + ); + } + + for (i, block) in blocks.iter().enumerate() { + let recovered_block = block.recovered_block(); + + let start = Instant::now(); + self.insert_block_mdbx_only(recovered_block, tx_nums[i])?; + timings.insert_block += start.elapsed(); + + if save_mode.with_state() { + let execution_output = block.execution_outcome(); + let block_number = recovered_block.number(); + + // Write state and changesets to the database. + // Must be written after blocks because of the receipt lookup. + // Skip receipts/account changesets if they're being written to static files. + let start = Instant::now(); + self.write_state( + execution_output, + OriginalValuesKnown::No, + StateWriteConfig { + write_receipts: !sf_ctx.write_receipts, + write_account_changesets: !sf_ctx.write_account_changesets, + }, + )?; + timings.write_state += start.elapsed(); + + let trie_data = block.trie_data(); + + // insert hashes and intermediate merkle nodes + let start = Instant::now(); + self.write_hashed_state(&trie_data.hashed_state)?; + timings.write_hashed_state += start.elapsed(); + + let start = Instant::now(); + self.write_trie_changesets(block_number, &trie_data.trie_updates, None)?; + timings.write_trie_changesets += start.elapsed(); + + let start = Instant::now(); + self.write_trie_updates_sorted(&trie_data.trie_updates)?; + timings.write_trie_updates += start.elapsed(); + } + } + + // Full mode: update history indices + if save_mode.with_state() { + let start = Instant::now(); + self.update_history_indices(first_number..=last_block_number)?; + timings.update_history_indices = start.elapsed(); + } + + // 
Update pipeline progress let start = Instant::now(); - self.write_trie_changesets(block_number, &trie_data.trie_updates, None)?; - total_write_trie_changesets += start.elapsed(); + self.update_pipeline_stages(last_block_number, false)?; + timings.update_pipeline_stages = start.elapsed(); + + timings.mdbx = mdbx_start.elapsed(); + + // Wait for SF thread + timings.sf = sf_handle + .join() + .map_err(|_| StaticFileWriterError::ThreadPanic("static file"))??; + + timings.total = total_start.elapsed(); + + self.metrics.record_save_blocks(&timings); + debug!(target: "providers::db", range = ?first_number..=last_block_number, "Appended block data"); + + Ok(()) + }) + } + /// Writes MDBX-only data for a block (indices, lookups, and senders if configured for MDBX). + /// + /// SF data (headers, transactions, senders if SF, receipts if SF) must be written separately. + #[instrument(level = "debug", target = "providers::db", skip_all)] + fn insert_block_mdbx_only( + &self, + block: &RecoveredBlock>, + first_tx_num: TxNumber, + ) -> ProviderResult { + if self.prune_modes.sender_recovery.is_none_or(|m| !m.is_full()) && + EitherWriterDestination::senders(self).is_database() + { let start = Instant::now(); - self.write_trie_updates_sorted(&trie_data.trie_updates)?; - total_write_trie_updates += start.elapsed(); + let tx_nums_iter = std::iter::successors(Some(first_tx_num), |n| Some(n + 1)); + let mut cursor = self.tx.cursor_write::()?; + for (tx_num, sender) in tx_nums_iter.zip(block.senders_iter().copied()) { + cursor.append(tx_num, &sender)?; + } + self.metrics + .record_duration(metrics::Action::InsertTransactionSenders, start.elapsed()); } - // update history indices + let block_number = block.number(); + let tx_count = block.body().transaction_count() as u64; + let start = Instant::now(); - self.update_history_indices(first_number..=last_block_number)?; - let duration_update_history_indices = start.elapsed(); + self.tx.put::(block.hash(), block_number)?; + 
self.metrics.record_duration(metrics::Action::InsertHeaderNumbers, start.elapsed()); - // Update pipeline progress + self.write_block_body_indices(block_number, block.body(), first_tx_num, tx_count)?; + + Ok(StoredBlockBodyIndices { first_tx_num, tx_count }) + } + + /// Writes MDBX block body indices (`BlockBodyIndices`, `TransactionBlocks`, + /// `Ommers`/`Withdrawals`). + fn write_block_body_indices( + &self, + block_number: BlockNumber, + body: &BodyTy, + first_tx_num: TxNumber, + tx_count: u64, + ) -> ProviderResult<()> { + // MDBX: BlockBodyIndices let start = Instant::now(); - self.update_pipeline_stages(last_block_number, false)?; - let duration_update_pipeline_stages = start.elapsed(); - - // Record all metrics at the end - self.metrics.record_duration(metrics::Action::SaveBlocksInsertBlock, total_insert_block); - self.metrics.record_duration(metrics::Action::SaveBlocksWriteState, total_write_state); - self.metrics - .record_duration(metrics::Action::SaveBlocksWriteHashedState, total_write_hashed_state); - self.metrics.record_duration( - metrics::Action::SaveBlocksWriteTrieChangesets, - total_write_trie_changesets, - ); - self.metrics - .record_duration(metrics::Action::SaveBlocksWriteTrieUpdates, total_write_trie_updates); - self.metrics.record_duration( - metrics::Action::SaveBlocksUpdateHistoryIndices, - duration_update_history_indices, - ); - self.metrics.record_duration( - metrics::Action::SaveBlocksUpdatePipelineStages, - duration_update_pipeline_stages, - ); + self.tx + .cursor_write::()? + .append(block_number, &StoredBlockBodyIndices { first_tx_num, tx_count })?; + self.metrics.record_duration(metrics::Action::InsertBlockBodyIndices, start.elapsed()); - debug!(target: "providers::db", range = ?first_number..=last_block_number, "Appended block data"); + // MDBX: TransactionBlocks (last tx -> block mapping) + if tx_count > 0 { + let start = Instant::now(); + self.tx + .cursor_write::()? 
+ .append(first_tx_num + tx_count - 1, &block_number)?; + self.metrics.record_duration(metrics::Action::InsertTransactionBlocks, start.elapsed()); + } + + // MDBX: Ommers/Withdrawals + self.storage.writer().write_block_bodies(self, vec![(block_number, Some(body))])?; Ok(()) } @@ -1727,6 +1906,7 @@ impl StageCheckpointWriter for DatabaseProvider(id.to_string(), checkpoint)?) } + #[instrument(level = "debug", target = "providers::db", skip_all)] fn update_pipeline_stages( &self, block_number: BlockNumber, @@ -1817,24 +1997,31 @@ impl StateWriter { type Receipt = ReceiptTy; + #[instrument(level = "debug", target = "providers::db", skip_all)] fn write_state( &self, execution_outcome: &ExecutionOutcome, is_value_known: OriginalValuesKnown, + config: StateWriteConfig, ) -> ProviderResult<()> { let first_block = execution_outcome.first_block(); - let block_count = execution_outcome.len() as u64; - let last_block = execution_outcome.last_block(); - let block_range = first_block..=last_block; - - let tip = self.last_block_number()?.max(last_block); let (plain_state, reverts) = execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); - self.write_state_reverts(reverts, first_block)?; + self.write_state_reverts(reverts, first_block, config)?; self.write_state_changes(plain_state)?; + if !config.write_receipts { + return Ok(()); + } + + let block_count = execution_outcome.len() as u64; + let last_block = execution_outcome.last_block(); + let block_range = first_block..=last_block; + + let tip = self.last_block_number()?.max(last_block); + // Fetch the first transaction number for each block in the range let block_indices: Vec<_> = self .block_body_indices_range(block_range)? 
@@ -1918,6 +2105,7 @@ impl StateWriter &self, reverts: PlainStateReverts, first_block: BlockNumber, + config: StateWriteConfig, ) -> ProviderResult<()> { // Write storage changes tracing::trace!("Writing storage changes"); @@ -1965,7 +2153,11 @@ impl StateWriter } } - // Write account changes to static files + if !config.write_account_changesets { + return Ok(()); + } + + // Write account changes tracing::debug!(target: "sync::stages::merkle_changesets", ?first_block, "Writing account changes"); for (block_index, account_block_reverts) in reverts.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; @@ -2043,6 +2235,7 @@ impl StateWriter Ok(()) } + #[instrument(level = "debug", target = "providers::db", skip_all)] fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()> { // Write hashed account updates. let mut hashed_accounts_cursor = self.tx_ref().cursor_write::()?; @@ -2336,6 +2529,7 @@ impl TrieWriter for DatabaseProvider /// Writes trie updates to the database with already sorted updates. /// /// Returns the number of entries modified. + #[instrument(level = "debug", target = "providers::db", skip_all)] fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult { if trie_updates.is_empty() { return Ok(0) @@ -2379,6 +2573,7 @@ impl TrieWriter for DatabaseProvider /// the same `TrieUpdates`. /// /// Returns the number of keys written. 
+ #[instrument(level = "debug", target = "providers::db", skip_all)] fn write_trie_changesets( &self, block_number: BlockNumber, @@ -2970,6 +3165,7 @@ impl HistoryWriter for DatabaseProvi ) } + #[instrument(level = "debug", target = "providers::db", skip_all)] fn update_history_indices(&self, range: RangeInclusive) -> ProviderResult<()> { // account history stage { @@ -2987,7 +3183,7 @@ impl HistoryWriter for DatabaseProvi } } -impl BlockExecutionWriter +impl BlockExecutionWriter for DatabaseProvider { fn take_block_and_execution_above( @@ -3030,89 +3226,40 @@ impl BlockExecu } } -impl BlockWriter +impl BlockWriter for DatabaseProvider { type Block = BlockTy; type Receipt = ReceiptTy; - /// Inserts the block into the database, always modifying the following static file segments and - /// tables: - /// * [`StaticFileSegment::Headers`] - /// * [`tables::HeaderNumbers`] - /// * [`tables::BlockBodyIndices`] - /// - /// If there are transactions in the block, the following static file segments and tables will - /// be modified: - /// * [`StaticFileSegment::Transactions`] - /// * [`tables::TransactionBlocks`] - /// - /// If ommers are not empty, this will modify [`BlockOmmers`](tables::BlockOmmers). - /// If withdrawals are not empty, this will modify - /// [`BlockWithdrawals`](tables::BlockWithdrawals). - /// - /// If the provider has __not__ configured full sender pruning, this will modify either: - /// * [`StaticFileSegment::TransactionSenders`] if senders are written to static files - /// * [`tables::TransactionSenders`] if senders are written to the database + /// Inserts the block into the database, writing to both static files and MDBX. /// - /// If the provider has __not__ configured full transaction lookup pruning, this will modify - /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). + /// This is a convenience method primarily used in tests. For production use, + /// prefer [`Self::save_blocks`] which handles execution output and trie data. 
fn insert_block( &self, block: &RecoveredBlock, ) -> ProviderResult { let block_number = block.number(); - let tx_count = block.body().transaction_count() as u64; - - let mut durations_recorder = metrics::DurationsRecorder::new(&self.metrics); - - self.static_file_provider - .get_writer(block_number, StaticFileSegment::Headers)? - .append_header(block.header(), &block.hash())?; - - self.tx.put::(block.hash(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); - - let first_tx_num = self - .tx - .cursor_read::()? - .last()? - .map(|(n, _)| n + 1) - .unwrap_or_default(); - durations_recorder.record_relative(metrics::Action::GetNextTxNum); - let tx_nums_iter = std::iter::successors(Some(first_tx_num), |n| Some(n + 1)); - - if self.prune_modes.sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { - let mut senders_writer = EitherWriter::new_senders(self, block.number())?; - senders_writer.increment_block(block.number())?; - senders_writer - .append_senders(tx_nums_iter.clone().zip(block.senders_iter().copied()))?; - durations_recorder.record_relative(metrics::Action::InsertTransactionSenders); - } - - if self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) { - self.with_rocksdb_batch(|batch| { - let mut writer = EitherWriter::new_transaction_hash_numbers(self, batch)?; - for (tx_num, transaction) in tx_nums_iter.zip(block.body().transactions_iter()) { - let hash = transaction.tx_hash(); - writer.put_transaction_hash_number(*hash, tx_num, false)?; - } - Ok(((), writer.into_raw_rocksdb_batch())) - })?; - durations_recorder.record_relative(metrics::Action::InsertTransactionHashNumbers); - } - - self.append_block_bodies(vec![(block_number, Some(block.body()))])?; - - debug!( - target: "providers::db", - ?block_number, - actions = ?durations_recorder.actions, - "Inserted block" + // Wrap block in ExecutedBlock with empty execution output (no receipts/state/trie) + let executed_block = ExecutedBlock::new( + 
Arc::new(block.clone()), + Arc::new(ExecutionOutcome::new( + Default::default(), + Vec::>>::new(), + block_number, + vec![], + )), + ComputedTrieData::default(), ); - Ok(StoredBlockBodyIndices { first_tx_num, tx_count }) + // Delegate to save_blocks with BlocksOnly mode (skips receipts/state/trie) + self.save_blocks(vec![executed_block], SaveBlocksMode::BlocksOnly)?; + + // Return the body indices + self.block_body_indices(block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number)) } fn append_block_bodies( @@ -3298,7 +3445,7 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertBlock); } - self.write_state(execution_outcome, OriginalValuesKnown::No)?; + self.write_state(execution_outcome, OriginalValuesKnown::No, StateWriteConfig::default())?; durations_recorder.record_relative(metrics::Action::InsertState); // insert hashes and intermediate merkle nodes @@ -3440,17 +3587,28 @@ impl DBProvider for DatabaseProvider self.static_file_provider.commit()?; } else { - self.static_file_provider.commit()?; + // Normal path: finalize() will call sync_all() if not already synced + let mut timings = metrics::CommitTimings::default(); + + let start = Instant::now(); + self.static_file_provider.finalize()?; + timings.sf = start.elapsed(); #[cfg(all(unix, feature = "rocksdb"))] { + let start = Instant::now(); let batches = std::mem::take(&mut *self.pending_rocksdb_batches.lock()); for batch in batches { self.rocksdb_provider.commit_batch(batch)?; } + timings.rocksdb = start.elapsed(); } + let start = Instant::now(); self.tx.commit()?; + timings.mdbx = start.elapsed(); + + self.metrics.record_commit(&timings); } Ok(()) @@ -3523,10 +3681,17 @@ mod tests { .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, crate::OriginalValuesKnown::No, + StateWriteConfig::default(), ) .unwrap(); provider_rw.insert_block(&data.blocks[0].0).unwrap(); - provider_rw.write_state(&data.blocks[0].1, 
crate::OriginalValuesKnown::No).unwrap(); + provider_rw + .write_state( + &data.blocks[0].1, + crate::OriginalValuesKnown::No, + StateWriteConfig::default(), + ) + .unwrap(); provider_rw.commit().unwrap(); let provider = factory.provider().unwrap(); @@ -3549,11 +3714,18 @@ mod tests { .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, crate::OriginalValuesKnown::No, + StateWriteConfig::default(), ) .unwrap(); for i in 0..3 { provider_rw.insert_block(&data.blocks[i].0).unwrap(); - provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); + provider_rw + .write_state( + &data.blocks[i].1, + crate::OriginalValuesKnown::No, + StateWriteConfig::default(), + ) + .unwrap(); } provider_rw.commit().unwrap(); @@ -3579,13 +3751,20 @@ mod tests { .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, crate::OriginalValuesKnown::No, + StateWriteConfig::default(), ) .unwrap(); // insert blocks 1-3 with receipts for i in 0..3 { provider_rw.insert_block(&data.blocks[i].0).unwrap(); - provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); + provider_rw + .write_state( + &data.blocks[i].1, + crate::OriginalValuesKnown::No, + StateWriteConfig::default(), + ) + .unwrap(); } provider_rw.commit().unwrap(); @@ -3610,11 +3789,18 @@ mod tests { .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, crate::OriginalValuesKnown::No, + StateWriteConfig::default(), ) .unwrap(); for i in 0..3 { provider_rw.insert_block(&data.blocks[i].0).unwrap(); - provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); + provider_rw + .write_state( + &data.blocks[i].1, + crate::OriginalValuesKnown::No, + StateWriteConfig::default(), + ) + .unwrap(); } provider_rw.commit().unwrap(); @@ -3673,11 +3859,18 @@ mod tests { .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], 
..Default::default() }, crate::OriginalValuesKnown::No, + StateWriteConfig::default(), ) .unwrap(); for i in 0..3 { provider_rw.insert_block(&data.blocks[i].0).unwrap(); - provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); + provider_rw + .write_state( + &data.blocks[i].1, + crate::OriginalValuesKnown::No, + StateWriteConfig::default(), + ) + .unwrap(); } provider_rw.commit().unwrap(); @@ -4991,7 +5184,9 @@ mod tests { }]], ..Default::default() }; - provider_rw.write_state(&outcome, crate::OriginalValuesKnown::No).unwrap(); + provider_rw + .write_state(&outcome, crate::OriginalValuesKnown::No, StateWriteConfig::default()) + .unwrap(); provider_rw.commit().unwrap(); }; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 2ff34c7d2a4..14f112a27b7 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -10,7 +10,7 @@ pub use database::*; mod static_file; pub use static_file::{ StaticFileAccess, StaticFileJarProvider, StaticFileProvider, StaticFileProviderBuilder, - StaticFileProviderRW, StaticFileProviderRWRefMut, StaticFileWriter, + StaticFileProviderRW, StaticFileProviderRWRefMut, StaticFileWriteCtx, StaticFileWriter, }; mod state; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 59d58ae00e1..718283114f2 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -14,6 +14,7 @@ use alloy_primitives::{b256, keccak256, Address, BlockHash, BlockNumber, TxHash, use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; use parking_lot::RwLock; +use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, NamedChain}; use reth_db::{ lockfile::StorageLock, @@ -24,7 +25,7 @@ use reth_db::{ }; 
use reth_db_api::{ cursor::DbCursorRO, - models::StoredBlockBodyIndices, + models::{AccountBeforeTx, StoredBlockBodyIndices}, table::{Decompress, Table, Value}, tables, transaction::DbTx, @@ -32,7 +33,9 @@ use reth_db_api::{ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::NodePrimitives; -use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; +use reth_primitives_traits::{ + AlloyBlockHeader as _, BlockBody as _, RecoveredBlock, SealedHeader, SignedTransaction, +}; use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileMap, @@ -41,15 +44,16 @@ use reth_static_file_types::{ use reth_storage_api::{ BlockBodyIndicesProvider, ChangeSetReader, DBProvider, StorageSettingsCache, }; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWriterError}; use std::{ collections::BTreeMap, fmt::Debug, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, sync::{atomic::AtomicU64, mpsc, Arc}, + thread, }; -use tracing::{debug, info, trace, warn}; +use tracing::{debug, info, instrument, trace, warn}; /// Alias type for a map that can be queried for block or transaction ranges. It uses `u64` to /// represent either a block or a transaction number end of a static file range. @@ -77,6 +81,25 @@ impl StaticFileAccess { } } +/// Context for static file block writes. +/// +/// Contains target segments and pruning configuration. +#[derive(Debug, Clone, Copy, Default)] +pub struct StaticFileWriteCtx { + /// Whether transaction senders should be written to static files. + pub write_senders: bool, + /// Whether receipts should be written to static files. 
+ pub write_receipts: bool, + /// Whether account changesets should be written to static files. + pub write_account_changesets: bool, + /// The current chain tip block number (for pruning). + pub tip: BlockNumber, + /// The prune mode for receipts, if any. + pub receipts_prune_mode: Option, + /// Whether receipts are prunable (based on storage settings and prune distance). + pub receipts_prunable: bool, +} + /// [`StaticFileProvider`] manages all existing [`StaticFileJarProvider`]. /// /// "Static files" contain immutable chain history data, such as: @@ -504,6 +527,192 @@ impl StaticFileProvider { Ok(()) } + /// Writes headers for all blocks to the static file segment. + #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_headers( + w: &mut StaticFileProviderRWRefMut<'_, N>, + blocks: &[ExecutedBlock], + ) -> ProviderResult<()> { + for block in blocks { + let b = block.recovered_block(); + w.append_header(b.header(), &b.hash())?; + } + Ok(()) + } + + /// Writes transactions for all blocks to the static file segment. + #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_transactions( + w: &mut StaticFileProviderRWRefMut<'_, N>, + blocks: &[ExecutedBlock], + tx_nums: &[TxNumber], + ) -> ProviderResult<()> { + for (block, &first_tx) in blocks.iter().zip(tx_nums) { + let b = block.recovered_block(); + w.increment_block(b.number())?; + for (i, tx) in b.body().transactions().iter().enumerate() { + w.append_transaction(first_tx + i as u64, tx)?; + } + } + Ok(()) + } + + /// Writes transaction senders for all blocks to the static file segment. 
+ #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_transaction_senders( + w: &mut StaticFileProviderRWRefMut<'_, N>, + blocks: &[ExecutedBlock], + tx_nums: &[TxNumber], + ) -> ProviderResult<()> { + for (block, &first_tx) in blocks.iter().zip(tx_nums) { + let b = block.recovered_block(); + w.increment_block(b.number())?; + for (i, sender) in b.senders_iter().enumerate() { + w.append_transaction_sender(first_tx + i as u64, sender)?; + } + } + Ok(()) + } + + /// Writes receipts for all blocks to the static file segment. + #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_receipts( + w: &mut StaticFileProviderRWRefMut<'_, N>, + blocks: &[ExecutedBlock], + tx_nums: &[TxNumber], + ctx: &StaticFileWriteCtx, + ) -> ProviderResult<()> { + for (block, &first_tx) in blocks.iter().zip(tx_nums) { + let block_number = block.recovered_block().number(); + w.increment_block(block_number)?; + + // skip writing receipts if pruning configuration requires us to. + if ctx.receipts_prunable && + ctx.receipts_prune_mode + .is_some_and(|mode| mode.should_prune(block_number, ctx.tip)) + { + continue + } + + for (i, receipt) in block.execution_outcome().receipts.iter().flatten().enumerate() { + w.append_receipt(first_tx + i as u64, receipt)?; + } + } + Ok(()) + } + + /// Writes account changesets for all blocks to the static file segment. 
+ #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_account_changesets( + w: &mut StaticFileProviderRWRefMut<'_, N>, + blocks: &[ExecutedBlock], + ) -> ProviderResult<()> { + for block in blocks { + let block_number = block.recovered_block().number(); + let reverts = block.execution_outcome().bundle.reverts.to_plain_state_reverts(); + + for account_block_reverts in reverts.accounts { + let changeset = account_block_reverts + .into_iter() + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) + .collect::>(); + w.append_account_changeset(changeset, block_number)?; + } + } + Ok(()) + } + + /// Spawns a scoped thread that writes to a static file segment using the provided closure. + /// + /// The closure receives a mutable reference to the segment writer. After the closure completes, + /// `sync_all()` is called to flush writes to disk. + fn spawn_segment_writer<'scope, 'env, F>( + &'env self, + scope: &'scope thread::Scope<'scope, 'env>, + segment: StaticFileSegment, + first_block_number: BlockNumber, + f: F, + ) -> thread::ScopedJoinHandle<'scope, ProviderResult<()>> + where + F: FnOnce(&mut StaticFileProviderRWRefMut<'_, N>) -> ProviderResult<()> + Send + 'env, + { + scope.spawn(move || { + let mut w = self.get_writer(first_block_number, segment)?; + f(&mut w)?; + w.sync_all() + }) + } + + /// Writes all static file data for multiple blocks in parallel per-segment. + /// + /// This spawns separate threads for each segment type and each thread calls `sync_all()` on its + /// writer when done. 
+ #[instrument(level = "debug", target = "providers::db", skip_all)] + pub fn write_blocks_data( + &self, + blocks: &[ExecutedBlock], + tx_nums: &[TxNumber], + ctx: StaticFileWriteCtx, + ) -> ProviderResult<()> { + if blocks.is_empty() { + return Ok(()); + } + + let first_block_number = blocks[0].recovered_block().number(); + + thread::scope(|s| { + let h_headers = + self.spawn_segment_writer(s, StaticFileSegment::Headers, first_block_number, |w| { + Self::write_headers(w, blocks) + }); + + let h_txs = self.spawn_segment_writer( + s, + StaticFileSegment::Transactions, + first_block_number, + |w| Self::write_transactions(w, blocks, tx_nums), + ); + + let h_senders = ctx.write_senders.then(|| { + self.spawn_segment_writer( + s, + StaticFileSegment::TransactionSenders, + first_block_number, + |w| Self::write_transaction_senders(w, blocks, tx_nums), + ) + }); + + let h_receipts = ctx.write_receipts.then(|| { + self.spawn_segment_writer(s, StaticFileSegment::Receipts, first_block_number, |w| { + Self::write_receipts(w, blocks, tx_nums, &ctx) + }) + }); + + let h_account_changesets = ctx.write_account_changesets.then(|| { + self.spawn_segment_writer( + s, + StaticFileSegment::AccountChangeSets, + first_block_number, + |w| Self::write_account_changesets(w, blocks), + ) + }); + + h_headers.join().map_err(|_| StaticFileWriterError::ThreadPanic("headers"))??; + h_txs.join().map_err(|_| StaticFileWriterError::ThreadPanic("transactions"))??; + if let Some(h) = h_senders { + h.join().map_err(|_| StaticFileWriterError::ThreadPanic("senders"))??; + } + if let Some(h) = h_receipts { + h.join().map_err(|_| StaticFileWriterError::ThreadPanic("receipts"))??; + } + if let Some(h) = h_account_changesets { + h.join() + .map_err(|_| StaticFileWriterError::ThreadPanic("account_changesets"))??; + } + Ok(()) + }) + } + /// Gets the [`StaticFileJarProvider`] of the requested segment and start index that can be /// either block or transaction. 
pub fn get_segment_provider( diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index a20dd6a3ffd..aa5b61171ac 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -1,6 +1,7 @@ mod manager; pub use manager::{ - StaticFileAccess, StaticFileProvider, StaticFileProviderBuilder, StaticFileWriter, + StaticFileAccess, StaticFileProvider, StaticFileProviderBuilder, StaticFileWriteCtx, + StaticFileWriter, }; mod jar; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 1d893e4291b..869554cc793 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -206,6 +206,8 @@ pub struct StaticFileProviderRW { metrics: Option>, /// On commit, contains the pruning strategy to apply for the segment. prune_on_commit: Option, + /// Whether `sync_all()` has been called. Used by `finalize()` to avoid redundant syncs. + synced: bool, } impl StaticFileProviderRW { @@ -227,6 +229,7 @@ impl StaticFileProviderRW { reader, metrics, prune_on_commit: None, + synced: false, }; writer.ensure_end_range_consistency()?; @@ -335,12 +338,13 @@ impl StaticFileProviderRW { if self.writer.is_dirty() { self.writer.sync_all().map_err(ProviderError::other)?; } + self.synced = true; Ok(()) } /// Commits configuration to disk and updates the reader index. /// - /// Must be called after [`Self::sync_all`] to complete the commit. + /// If `sync_all()` was not called, this will call it first to ensure data is persisted. /// /// Returns an error if prune is queued (use [`Self::commit`] instead). 
pub fn finalize(&mut self) -> ProviderResult<()> { @@ -348,9 +352,14 @@ impl StaticFileProviderRW { return Err(StaticFileWriterError::FinalizeWithPruneQueued.into()); } if self.writer.is_dirty() { + if !self.synced { + self.writer.sync_all().map_err(ProviderError::other)?; + } + self.writer.finalize().map_err(ProviderError::other)?; self.update_index()?; } + self.synced = false; Ok(()) } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 0c67634dbfc..c361bfc7af8 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -13,7 +13,9 @@ mod tests { use reth_ethereum_primitives::Receipt; use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::{Account, StorageEntry}; - use reth_storage_api::{DatabaseProviderFactory, HashedPostStateProvider, StateWriter}; + use reth_storage_api::{ + DatabaseProviderFactory, HashedPostStateProvider, StateWriteConfig, StateWriter, + }; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, HashedPostState, HashedStorage, StateRoot, StorageRoot, StorageRootProgress, @@ -135,7 +137,7 @@ mod tests { provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); assert_eq!(reverts.storage, [[]]); - provider.write_state_reverts(reverts, 1).expect("Could not write reverts to DB"); + provider.write_state_reverts(reverts, 1, StateWriteConfig::default()).expect("Could not write reverts to DB"); let reth_account_a = account_a.into(); let reth_account_b = account_b.into(); @@ -201,7 +203,7 @@ mod tests { reverts.storage, [[PlainStorageRevert { address: address_b, wiped: true, storage_revert: vec![] }]] ); - provider.write_state_reverts(reverts, 2).expect("Could not write reverts to DB"); + provider.write_state_reverts(reverts, 2, StateWriteConfig::default()).expect("Could not write reverts to DB"); // Check new plain state for account B assert_eq!( @@ -280,7 +282,7 @@ mod tests { let outcome = 
ExecutionOutcome::new(state.take_bundle(), Default::default(), 1, Vec::new()); provider - .write_state(&outcome, OriginalValuesKnown::Yes) + .write_state(&outcome, OriginalValuesKnown::Yes, StateWriteConfig::default()) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -380,7 +382,7 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Default::default(), 2, Vec::new()); provider - .write_state(&outcome, OriginalValuesKnown::Yes) + .write_state(&outcome, OriginalValuesKnown::Yes, StateWriteConfig::default()) .expect("Could not write bundle state to DB"); assert_eq!( @@ -448,7 +450,7 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Default::default(), 0, Vec::new()); provider - .write_state(&outcome, OriginalValuesKnown::Yes) + .write_state(&outcome, OriginalValuesKnown::Yes, StateWriteConfig::default()) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -607,7 +609,7 @@ mod tests { let outcome: ExecutionOutcome = ExecutionOutcome::new(bundle, Default::default(), 1, Vec::new()); provider - .write_state(&outcome, OriginalValuesKnown::Yes) + .write_state(&outcome, OriginalValuesKnown::Yes, StateWriteConfig::default()) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -773,7 +775,7 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Default::default(), 0, Vec::new()); provider - .write_state(&outcome, OriginalValuesKnown::Yes) + .write_state(&outcome, OriginalValuesKnown::Yes, StateWriteConfig::default()) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -822,7 +824,7 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Default::default(), 1, Vec::new()); provider - 
.write_state(&outcome, OriginalValuesKnown::Yes) + .write_state(&outcome, OriginalValuesKnown::Yes, StateWriteConfig::default()) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider diff --git a/crates/storage/storage-api/src/state_writer.rs b/crates/storage/storage-api/src/state_writer.rs index 711b9e569f5..3daab1a85ad 100644 --- a/crates/storage/storage-api/src/state_writer.rs +++ b/crates/storage/storage-api/src/state_writer.rs @@ -12,21 +12,26 @@ pub trait StateWriter { /// Receipt type included into [`ExecutionOutcome`]. type Receipt; - /// Write the state and receipts to the database or static files if `static_file_producer` is - /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. + /// Write the state and optionally receipts to the database. + /// + /// Use `config` to skip writing certain data types when they are written elsewhere. fn write_state( &self, execution_outcome: &ExecutionOutcome, is_value_known: OriginalValuesKnown, + config: StateWriteConfig, ) -> ProviderResult<()>; /// Write state reverts to the database. /// /// NOTE: Reverts will delete all wiped storage from plain state. + /// + /// Use `config` to skip writing certain data types when they are written elsewhere. fn write_state_reverts( &self, reverts: PlainStateReverts, first_block: BlockNumber, + config: StateWriteConfig, ) -> ProviderResult<()>; /// Write state changes to the database. @@ -46,3 +51,20 @@ pub trait StateWriter { block: BlockNumber, ) -> ProviderResult>; } + +/// Configuration for what to write when calling [`StateWriter::write_state`]. +/// +/// Used to skip writing certain data types, when they are being written separately. +#[derive(Debug, Clone, Copy)] +pub struct StateWriteConfig { + /// Whether to write receipts. + pub write_receipts: bool, + /// Whether to write account changesets. 
+ pub write_account_changesets: bool, +} + +impl Default for StateWriteConfig { + fn default() -> Self { + Self { write_receipts: true, write_account_changesets: true } + } +} diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 1ecbe9a3b12..6d8dbc6827c 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -17,7 +17,7 @@ use reth_primitives_traits::{Block as BlockTrait, RecoveredBlock, SealedBlock}; use reth_provider::{ test_utils::create_test_provider_factory_with_chain_spec, BlockWriter, DatabaseProviderFactory, ExecutionOutcome, HeaderProvider, HistoryWriter, OriginalValuesKnown, StateProofProvider, - StateWriter, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, + StateWriteConfig, StateWriter, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, }; use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord, State}; use reth_stateless::{ @@ -325,7 +325,11 @@ fn run_case( // Commit the post state/state diff to the database provider - .write_state(&ExecutionOutcome::single(block.number, output), OriginalValuesKnown::Yes) + .write_state( + &ExecutionOutcome::single(block.number, output), + OriginalValuesKnown::Yes, + StateWriteConfig::default(), + ) .map_err(|err| Error::block_failed(block_number, program_inputs.clone(), err))?; provider From 7d0e7e72de9a00d6cc415def287f2354e9714092 Mon Sep 17 00:00:00 2001 From: YK Date: Thu, 15 Jan 2026 23:22:15 +0800 Subject: [PATCH 031/267] perf(trie): add k-way merge batch optimization for merge_overlay_trie_input (#21080) --- .../engine/tree/src/tree/payload_validator.rs | 80 +++++++++++----- crates/trie/common/src/hashed_state.rs | 75 ++++++++++++++- crates/trie/common/src/updates.rs | 93 ++++++++++++++++++- crates/trie/common/src/utils.rs | 73 ++++++++++++++- 4 files changed, 296 insertions(+), 25 deletions(-) diff --git 
a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index fbc62d7d0e3..746b9077f2f 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -1,5 +1,11 @@ //! Types and traits for validating blocks and payloads. +/// Threshold for switching from `extend_ref` loop to `merge_batch` in `merge_overlay_trie_input`. +/// +/// Benchmarked crossover: `extend_ref` wins up to ~64 blocks, `merge_batch` wins beyond. +/// Using 64 as threshold since they're roughly equal there. +const MERGE_BATCH_THRESHOLD: usize = 64; + use crate::tree::{ cached_state::CachedStateProvider, error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError}, @@ -40,7 +46,10 @@ use reth_provider::{ StateProvider, StateProviderFactory, StateReader, TrieReader, }; use reth_revm::db::State; -use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot, TrieInputSorted}; +use reth_trie::{ + updates::{TrieUpdates, TrieUpdatesSorted}, + HashedPostState, HashedPostStateSorted, StateRoot, TrieInputSorted, +}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use revm_primitives::Address; use std::{ @@ -1012,34 +1021,63 @@ where Ok((input, block_hash)) } - /// Aggregates multiple in-memory blocks into a single [`TrieInputSorted`] by combining their + /// Aggregates in-memory blocks into a single [`TrieInputSorted`] by combining their /// state changes. /// /// The input `blocks` vector is ordered newest -> oldest (see `TreeState::blocks_by_hash`). - /// We iterate it in reverse so we start with the oldest block's trie data and extend forward - /// toward the newest, ensuring newer state takes precedence. + /// + /// Uses `extend_ref` loop for small k, k-way `merge_batch` for large k. + /// See [`MERGE_BATCH_THRESHOLD`] for crossover point. 
fn merge_overlay_trie_input(blocks: &[ExecutedBlock]) -> TrieInputSorted { - let mut input = TrieInputSorted::default(); - let mut blocks_iter = blocks.iter().rev().peekable(); + if blocks.is_empty() { + return TrieInputSorted::default(); + } - if let Some(first) = blocks_iter.next() { + // Single block: return Arc directly without cloning + if blocks.len() == 1 { + let data = blocks[0].trie_data(); + return TrieInputSorted { + state: Arc::clone(&data.hashed_state), + nodes: Arc::clone(&data.trie_updates), + prefix_sets: Default::default(), + }; + } + + if blocks.len() < MERGE_BATCH_THRESHOLD { + // Small k: extend_ref loop is faster + // Iterate oldest->newest so newer values override older ones + let mut blocks_iter = blocks.iter().rev(); + let first = blocks_iter.next().expect("blocks is non-empty"); let data = first.trie_data(); - input.state = data.hashed_state; - input.nodes = data.trie_updates; - - // Only clone and mutate if there are more in-memory blocks. - if blocks_iter.peek().is_some() { - let state_mut = Arc::make_mut(&mut input.state); - let nodes_mut = Arc::make_mut(&mut input.nodes); - for block in blocks_iter { - let data = block.trie_data(); - state_mut.extend_ref(data.hashed_state.as_ref()); - nodes_mut.extend_ref(data.trie_updates.as_ref()); - } + + let mut state = Arc::clone(&data.hashed_state); + let mut nodes = Arc::clone(&data.trie_updates); + let state_mut = Arc::make_mut(&mut state); + let nodes_mut = Arc::make_mut(&mut nodes); + + for block in blocks_iter { + let data = block.trie_data(); + state_mut.extend_ref(data.hashed_state.as_ref()); + nodes_mut.extend_ref(data.trie_updates.as_ref()); } - } - input + TrieInputSorted { state, nodes, prefix_sets: Default::default() } + } else { + // Large k: merge_batch is faster (O(n log k) via k-way merge) + let trie_data: Vec<_> = blocks.iter().map(|b| b.trie_data()).collect(); + + let merged_state = HashedPostStateSorted::merge_batch( + trie_data.iter().map(|d| d.hashed_state.as_ref()), + ); + 
let merged_nodes = + TrieUpdatesSorted::merge_batch(trie_data.iter().map(|d| d.trie_updates.as_ref())); + + TrieInputSorted { + state: Arc::new(merged_state), + nodes: Arc::new(merged_nodes), + prefix_sets: Default::default(), + } + } } /// Spawns a background task to compute and sort trie data for the executed block. diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 5f771aae0ec..283f1d3b69d 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -3,7 +3,7 @@ use core::ops::Not; use crate::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSetMut, TriePrefixSetsMut}, - utils::extend_sorted_vec, + utils::{extend_sorted_vec, kway_merge_sorted}, KeyHasher, MultiProofTargets, Nibbles, }; use alloc::{borrow::Cow, vec::Vec}; @@ -634,6 +634,62 @@ impl HashedPostStateSorted { } } + /// Batch-merge sorted hashed post states. Iterator yields **newest to oldest**. + /// + /// Uses k-way merge for O(n log k) complexity and one-pass accumulation for storages. + pub fn merge_batch<'a>(states: impl IntoIterator) -> Self { + let states: Vec<_> = states.into_iter().collect(); + if states.is_empty() { + return Self::default(); + } + + let accounts = kway_merge_sorted(states.iter().map(|s| s.accounts.as_slice())); + + struct StorageAcc<'a> { + /// Account storage was cleared (e.g., SELFDESTRUCT). + wiped: bool, + /// Stop collecting older slices after seeing a wipe. + sealed: bool, + /// Storage slot slices to merge, ordered newest to oldest. + slices: Vec<&'a [(B256, U256)]>, + } + + let mut acc: B256Map> = B256Map::default(); + + // Accumulate storage slices per address from newest to oldest state. + // Once we see a `wiped` flag, the account was cleared at that point, + // so older storage slots are irrelevant - we "seal" and stop collecting. 
+ for state in &states { + for (addr, storage) in &state.storages { + let entry = acc.entry(*addr).or_insert_with(|| StorageAcc { + wiped: false, + sealed: false, + slices: Vec::new(), + }); + + if entry.sealed { + continue; + } + + entry.slices.push(storage.storage_slots.as_slice()); + if storage.wiped { + entry.wiped = true; + entry.sealed = true; + } + } + } + + let storages = acc + .into_iter() + .map(|(addr, entry)| { + let storage_slots = kway_merge_sorted(entry.slices); + (addr, HashedStorageSorted { wiped: entry.wiped, storage_slots }) + }) + .collect(); + + Self { accounts, storages } + } + /// Clears all accounts and storage data. pub fn clear(&mut self) { self.accounts.clear(); @@ -648,7 +704,7 @@ impl AsRef for HashedPostStateSorted { } /// Sorted hashed storage optimized for iterating during state trie calculation. -#[derive(Clone, Eq, PartialEq, Debug)] +#[derive(Clone, Eq, PartialEq, Debug, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct HashedStorageSorted { /// Sorted collection of updated storage slots. [`U256::ZERO`] indicates a deleted value. @@ -694,6 +750,21 @@ impl HashedStorageSorted { // Extend the sorted non-zero valued slots extend_sorted_vec(&mut self.storage_slots, &other.storage_slots); } + + /// Batch-merge sorted hashed storage. Iterator yields **newest to oldest**. + /// If any update is wiped, prior data is discarded. 
+ pub fn merge_batch<'a>(updates: impl IntoIterator) -> Self { + let updates: Vec<_> = updates.into_iter().collect(); + if updates.is_empty() { + return Self::default(); + } + + let wipe_idx = updates.iter().position(|u| u.wiped); + let relevant = wipe_idx.map_or(&updates[..], |idx| &updates[..=idx]); + let storage_slots = kway_merge_sorted(relevant.iter().map(|u| u.storage_slots.as_slice())); + + Self { wiped: wipe_idx.is_some(), storage_slots } + } } impl From for HashedStorage { diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index f515fc20f67..6214d5ec084 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,4 +1,7 @@ -use crate::{utils::extend_sorted_vec, BranchNodeCompact, HashBuilder, Nibbles}; +use crate::{ + utils::{extend_sorted_vec, kway_merge_sorted}, + BranchNodeCompact, HashBuilder, Nibbles, +}; use alloc::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, vec::Vec, @@ -23,6 +26,15 @@ pub struct TrieUpdates { } impl TrieUpdates { + /// Creates a new `TrieUpdates` with pre-allocated capacity. + pub fn with_capacity(account_nodes: usize, storage_tries: usize) -> Self { + Self { + account_nodes: HashMap::with_capacity_and_hasher(account_nodes, Default::default()), + removed_nodes: HashSet::with_capacity_and_hasher(account_nodes / 4, Default::default()), + storage_tries: B256Map::with_capacity_and_hasher(storage_tries, Default::default()), + } + } + /// Returns `true` if the updates are empty. pub fn is_empty(&self) -> bool { self.account_nodes.is_empty() && @@ -611,6 +623,69 @@ impl TrieUpdatesSorted { self.account_nodes.clear(); self.storage_tries.clear(); } + + /// Batch-merge sorted trie updates. Iterator yields **newest to oldest**. + /// + /// This is more efficient than repeated `extend_ref` calls for large batches, + /// using k-way merge for O(n log k) complexity instead of O(n * k). 
+ pub fn merge_batch<'a>(updates: impl IntoIterator) -> Self { + let updates: Vec<_> = updates.into_iter().collect(); + if updates.is_empty() { + return Self::default(); + } + + // Merge account nodes using k-way merge. Newest (index 0) takes precedence. + let account_nodes = kway_merge_sorted(updates.iter().map(|u| u.account_nodes.as_slice())); + + // Accumulator for collecting storage trie slices per address. + // We process updates newest-to-oldest and stop collecting for an address + // once we hit a "deleted" storage (sealed=true), since older data is irrelevant. + struct StorageAcc<'a> { + /// Storage trie was deleted (account removed or cleared). + is_deleted: bool, + /// Stop collecting older slices after seeing a deletion. + sealed: bool, + /// Storage trie node slices to merge, ordered newest to oldest. + slices: Vec<&'a [(Nibbles, Option)]>, + } + + let mut acc: B256Map> = B256Map::default(); + + // Collect storage slices per address, respecting deletion boundaries + for update in &updates { + for (addr, storage) in &update.storage_tries { + let entry = acc.entry(*addr).or_insert_with(|| StorageAcc { + is_deleted: false, + sealed: false, + slices: Vec::new(), + }); + + // Skip if we already hit a deletion for this address (older data is irrelevant) + if entry.sealed { + continue; + } + + entry.slices.push(storage.storage_nodes.as_slice()); + + // If this storage was deleted, mark as deleted and seal to ignore older updates + if storage.is_deleted { + entry.is_deleted = true; + entry.sealed = true; + } + } + } + + // Merge each address's storage slices using k-way merge + let storage_tries = acc + .into_iter() + .map(|(addr, entry)| { + let storage_nodes = kway_merge_sorted(entry.slices); + (addr, StorageTrieUpdatesSorted { is_deleted: entry.is_deleted, storage_nodes }) + }) + .collect(); + + Self { account_nodes, storage_tries } + } } impl AsRef for TrieUpdatesSorted { @@ -702,6 +777,22 @@ impl StorageTrieUpdatesSorted { extend_sorted_vec(&mut 
self.storage_nodes, &other.storage_nodes); self.is_deleted = self.is_deleted || other.is_deleted; } + + /// Batch-merge sorted storage trie updates. Iterator yields **newest to oldest**. + /// If any update is deleted, older data is discarded. + pub fn merge_batch<'a>(updates: impl IntoIterator) -> Self { + let updates: Vec<_> = updates.into_iter().collect(); + if updates.is_empty() { + return Self::default(); + } + + // Discard updates older than the first deletion since the trie was wiped at that point. + let del_idx = updates.iter().position(|u| u.is_deleted); + let relevant = del_idx.map_or(&updates[..], |idx| &updates[..=idx]); + let storage_nodes = kway_merge_sorted(relevant.iter().map(|u| u.storage_nodes.as_slice())); + + Self { is_deleted: del_idx.is_some(), storage_nodes } + } } /// Excludes empty nibbles from the given iterator. diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs index a70608ea603..7c1d454a6fa 100644 --- a/crates/trie/common/src/utils.rs +++ b/crates/trie/common/src/utils.rs @@ -1,7 +1,33 @@ use alloc::vec::Vec; use core::cmp::Ordering; +use itertools::Itertools; -/// Helper function to extend a sorted vector with another sorted vector. +/// Merge sorted slices into a sorted `Vec`. First occurrence wins for duplicate keys. +/// +/// Callers pass slices in priority order (index 0 = highest priority), so the first +/// slice's value for a key takes precedence over later slices. 
+pub(crate) fn kway_merge_sorted<'a, K, V>( + slices: impl IntoIterator, +) -> Vec<(K, V)> +where + K: Ord + Clone + 'a, + V: Clone + 'a, +{ + slices + .into_iter() + .filter(|s| !s.is_empty()) + .enumerate() + // Merge by reference: (priority, &K, &V) - avoids cloning all elements upfront + .map(|(i, s)| s.iter().map(move |(k, v)| (i, k, v))) + .kmerge_by(|(i1, k1, _), (i2, k2, _)| (k1, i1) < (k2, i2)) + .dedup_by(|(_, k1, _), (_, k2, _)| *k1 == *k2) + // Clone only surviving elements after dedup + .map(|(_, k, v)| (k.clone(), v.clone())) + .collect() +} + +/// Extend a sorted vector with another sorted vector. +/// Values from `other` take precedence for duplicate keys. /// /// Values from `other` take precedence for duplicate keys. pub(crate) fn extend_sorted_vec(target: &mut Vec<(K, V)>, other: &[(K, V)]) @@ -52,4 +78,49 @@ mod tests { extend_sorted_vec(&mut target, &other); assert_eq!(target, vec![(1, "a"), (2, "b"), (3, "c_new")]); } + + #[test] + fn test_kway_merge_sorted_basic() { + let slice1 = vec![(1, "a1"), (3, "c1")]; + let slice2 = vec![(2, "b2"), (3, "c2")]; + let slice3 = vec![(1, "a3"), (4, "d3")]; + + let result = kway_merge_sorted([slice1.as_slice(), slice2.as_slice(), slice3.as_slice()]); + // First occurrence wins: key 1 -> a1 (slice1), key 3 -> c1 (slice1) + assert_eq!(result, vec![(1, "a1"), (2, "b2"), (3, "c1"), (4, "d3")]); + } + + #[test] + fn test_kway_merge_sorted_empty_slices() { + let slice1: Vec<(i32, &str)> = vec![]; + let slice2 = vec![(1, "a")]; + let slice3: Vec<(i32, &str)> = vec![]; + + let result = kway_merge_sorted([slice1.as_slice(), slice2.as_slice(), slice3.as_slice()]); + assert_eq!(result, vec![(1, "a")]); + } + + #[test] + fn test_kway_merge_sorted_all_same_key() { + let slice1 = vec![(5, "first")]; + let slice2 = vec![(5, "middle")]; + let slice3 = vec![(5, "last")]; + + let result = kway_merge_sorted([slice1.as_slice(), slice2.as_slice(), slice3.as_slice()]); + // First occurrence wins (slice1 has highest priority) + 
assert_eq!(result, vec![(5, "first")]); + } + + #[test] + fn test_kway_merge_sorted_single_slice() { + let slice = vec![(1, "a"), (2, "b"), (3, "c")]; + let result = kway_merge_sorted([slice.as_slice()]); + assert_eq!(result, vec![(1, "a"), (2, "b"), (3, "c")]); + } + + #[test] + fn test_kway_merge_sorted_no_slices() { + let result: Vec<(i32, &str)> = kway_merge_sorted(Vec::<&[(i32, &str)]>::new()); + assert!(result.is_empty()); + } } From b1f107b17156a16cada115ff387d582a293548b7 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 15 Jan 2026 15:30:04 +0000 Subject: [PATCH 032/267] feat(reth-bench): add generate-big-block command (#21082) --- Cargo.lock | 454 ++++++------- bin/reth-bench/Cargo.toml | 7 +- bin/reth-bench/src/bench/gas_limit_ramp.rs | 160 +++++ .../src/bench/generate_big_block.rs | 617 ++++++++++++++++++ bin/reth-bench/src/bench/helpers.rs | 196 ++++++ bin/reth-bench/src/bench/mod.rs | 36 + bin/reth-bench/src/bench/new_payload_fcu.rs | 28 +- bin/reth-bench/src/bench/new_payload_only.rs | 7 +- bin/reth-bench/src/bench/output.rs | 55 +- bin/reth-bench/src/bench/replay_payloads.rs | 332 ++++++++++ bin/reth-bench/src/valid_payload.rs | 65 +- crates/chainspec/src/spec.rs | 12 + crates/ethereum/payload/Cargo.toml | 2 +- crates/rpc/rpc-api/Cargo.toml | 1 - crates/rpc/rpc-api/src/testing.rs | 36 +- crates/rpc/rpc/src/testing.rs | 24 +- 16 files changed, 1722 insertions(+), 310 deletions(-) create mode 100644 bin/reth-bench/src/bench/gas_limit_ramp.rs create mode 100644 bin/reth-bench/src/bench/generate_big_block.rs create mode 100644 bin/reth-bench/src/bench/helpers.rs create mode 100644 bin/reth-bench/src/bench/replay_payloads.rs diff --git a/Cargo.lock b/Cargo.lock index 9e8d52a102b..8b9d736e441 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.24" +version = "0.2.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b163ff4acf0eac29af05a911397cc418a76e153467b859398adc26cb9335a611" +checksum = "25db5bcdd086f0b1b9610140a12c59b757397be90bd130d8d836fc8da0815a34" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -448,7 +448,7 @@ dependencies = [ "foldhash 0.2.0", "getrandom 0.3.4", "hashbrown 0.16.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "k256", "keccak-asm", @@ -550,7 +550,7 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -800,7 +800,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -812,11 +812,11 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.12.1", + "indexmap 2.13.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "syn-solidity", "tiny-keccak", ] @@ -833,7 +833,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "syn-solidity", ] @@ -936,9 +936,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b77b56af09ead281337d06b1d036c88e2dc8a2e45da512a532476dbee94912b" +checksum = "428aa0f0e0658ff091f8f667c406e034b431cb10abd39de4f507520968acc499" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -963,7 +963,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1048,7 +1048,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1190,7 +1190,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ 
-1228,7 +1228,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1317,7 +1317,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1390,13 +1390,12 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98ec5f6c2f8bc326c994cb9e241cc257ddaba9afa8555a43cffbb5dd86efaa37" +checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" dependencies = [ "compression-codecs", "compression-core", - "futures-core", "pin-project-lite", "tokio", ] @@ -1434,7 +1433,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1445,7 +1444,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1483,7 +1482,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1565,9 +1564,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d809780667f4410e7c41b07f52439b94d2bdf8528eeedc287fa38d3b7f95d82" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" [[package]] name = "bech32" @@ -1625,7 +1624,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1643,7 +1642,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1755,7 
+1754,7 @@ dependencies = [ "boa_interner", "boa_macros", "boa_string", - "indexmap 2.12.1", + "indexmap 2.13.0", "num-bigint", "rustc-hash", ] @@ -1787,7 +1786,7 @@ dependencies = [ "futures-lite 2.6.1", "hashbrown 0.16.1", "icu_normalizer", - "indexmap 2.12.1", + "indexmap 2.13.0", "intrusive-collections", "itertools 0.14.0", "num-bigint", @@ -1833,7 +1832,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.16.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "once_cell", "phf", "rustc-hash", @@ -1850,7 +1849,7 @@ dependencies = [ "cow-utils", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure", ] @@ -1906,7 +1905,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1995,7 +1994,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2107,10 +2106,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.15" +version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -2145,9 +2145,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "js-sys", @@ -2236,20 +2236,20 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "clap_lex" -version = "0.7.6" +version = "0.7.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cmake" -version = "0.1.54" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] @@ -2415,9 +2415,9 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.2.1" +version = "7.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03b7db8e0b4b2fdad6c551e634134e99ec000e5c8c3b6856c65e8bbaded7a3b" +checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" dependencies = [ "crossterm 0.29.0", "unicode-segmentation", @@ -2440,9 +2440,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.35" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0f7ac3e5b97fdce45e8922fb05cae2c37f7bbd63d30dd94821dacfd8f3f2bf2" +checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" dependencies = [ "brotli", "compression-core", @@ -2541,16 +2541,6 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "cordyceps" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" -dependencies = [ - "loom", - "tracing", -] - [[package]] name = "core-foundation" version = "0.10.1" @@ -2798,7 +2788,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2855,7 +2845,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2870,7 
+2860,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2883,7 +2873,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2894,7 +2884,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2905,7 +2895,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2916,7 +2906,7 @@ checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2948,15 +2938,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" [[package]] name = "data-encoding-macro" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" +checksum = "8142a83c17aa9461d637e649271eae18bf2edd00e91f2e105df36c3c16355bdb" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2964,12 +2954,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" +checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3028,7 +3018,7 @@ checksum = 
"ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3039,7 +3029,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3060,7 +3050,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3070,7 +3060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3092,16 +3082,10 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.113", + "syn 2.0.114", "unicode-xid", ] -[[package]] -name = "diatomic-waker" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" - [[package]] name = "diff" version = "0.1.13" @@ -3224,7 +3208,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3271,7 +3255,7 @@ checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3323,7 +3307,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3431,7 +3415,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3451,7 +3435,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3471,7 +3455,7 @@ checksum = 
"44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3547,7 +3531,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -4070,11 +4054,17 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" + [[package]] name = "fixed-cache" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba59b6c98ba422a13f17ee1305c995cb5742bba7997f5b4d9af61b2ff0ffb213" +checksum = "25d3af83468398d500e9bc19e001812dcb1a11e4d3d6a5956c789aa3c11a8cb5" dependencies = [ "equivalent", ] @@ -4109,7 +4099,7 @@ checksum = "6dc7a9cb3326bafb80642c5ce99b39a2c0702d4bfa8ee8a3e773791a6cbe2407" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -4120,9 +4110,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" dependencies = [ "crc32fast", "miniz_oxide", @@ -4195,19 +4185,6 @@ dependencies = [ "futures-util", ] -[[package]] -name = "futures-buffered" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" -dependencies = [ - "cordyceps", - "diatomic-waker", - "futures-core", - "pin-project-lite", - "spin", -] - [[package]] name = "futures-channel" version = "0.3.31" @@ -4220,16 +4197,14 @@ dependencies = [ [[package]] name = 
"futures-concurrency" -version = "7.6.3" +version = "7.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eb68017df91f2e477ed4bea586c59eaecaa47ed885a770d0444e21e62572cd2" +checksum = "69a9561702beff46b705a8ac9c0803ec4c7fc5d01330a99b1feaf86e206e92ba" dependencies = [ "fixedbitset", - "futures-buffered", "futures-core", "futures-lite 2.6.1", "pin-project", - "slab", "smallvec", ] @@ -4292,7 +4267,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -4380,9 +4355,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "js-sys", @@ -4509,9 +4484,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -4519,7 +4494,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.13.0", "slab", "tokio", "tokio-util", @@ -5039,7 +5014,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5080,9 +5055,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = 
"7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "arbitrary", "equivalent", @@ -5158,7 +5133,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5435,7 +5410,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5580,9 +5555,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.179" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5a2d376baa530d1238d133232d15e239abad80d05838b4b59354e5268af431f" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libgit2-sys" @@ -5816,7 +5791,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5834,13 +5809,13 @@ dependencies = [ [[package]] name = "match-lookup" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.114", ] [[package]] @@ -5895,7 +5870,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5905,7 +5880,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3589659543c04c7dc5526ec858591015b87cd8746583b51b48ef4353f99dbcda" dependencies = [ "base64 0.22.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "metrics", "metrics-util", "quanta", @@ -5937,7 +5912,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.16.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "metrics", "ordered-float", "quanta", @@ -6297,7 +6272,7 @@ 
dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6311,9 +6286,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4b5ecbd0beec843101bffe848217f770e8b8da81d8355b7d6e226f2199b3dc" +checksum = "7b5676b5c379cf5b03da1df2b3061c4a4e2aa691086a56ac923e08c143f53f59" dependencies = [ "alloy-rlp", "arbitrary", @@ -6584,7 +6559,7 @@ dependencies = [ "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", - "prost 0.14.1", + "prost 0.14.3", "reqwest", "thiserror 2.0.17", "tokio", @@ -6600,7 +6575,7 @@ checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", - "prost 0.14.1", + "prost 0.14.3", "tonic", "tonic-prost", ] @@ -6696,7 +6671,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6811,7 +6786,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6840,7 +6815,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6981,7 +6956,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7032,14 +7007,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "proc-macro2" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" 
dependencies = [ "unicode-ident", ] @@ -7127,7 +7102,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7138,7 +7113,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7153,12 +7128,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", - "prost-derive 0.14.1", + "prost-derive 0.14.3", ] [[package]] @@ -7171,20 +7146,20 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "prost-derive" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7285,9 +7260,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.42" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] @@ -7336,7 +7311,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", "serde", ] @@ 
-7367,7 +7342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -7385,14 +7360,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", "serde", @@ -7413,7 +7388,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -7422,14 +7397,14 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f703f4665700daf5512dcca5f43afa6af89f09db47fb56be587f80636bda2d41" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] name = "rapidhash" -version = "4.2.0" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2988730ee014541157f48ce4dcc603940e00915edc3c7f9a8d78092256bb2493" +checksum = "5d8b5b858a440a0bc02625b62dd95131b9201aa9f69f411195dd4a7cfb1de3d7" dependencies = [ "rand 0.9.2", "rustversion", @@ -7515,7 +7490,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "libredox", "thiserror 1.0.69", ] @@ -7526,7 +7501,7 @@ version = "0.5.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "libredox", "thiserror 2.0.17", ] @@ -7548,7 +7523,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7719,6 +7694,7 @@ dependencies = [ name = "reth-bench" version = "1.10.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-json-rpc", "alloy-network", @@ -7741,13 +7717,16 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "reqwest", + "reth-chainspec", "reth-cli-runner", "reth-cli-util", "reth-engine-primitives", + "reth-ethereum-primitives", "reth-fs-util", "reth-node-api", "reth-node-core", "reth-primitives-traits", + "reth-rpc-api", "reth-tracing", "serde", "serde_json", @@ -7997,7 +7976,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -10493,7 +10472,6 @@ dependencies = [ "reth-network-peers", "reth-rpc-eth-api", "reth-trie-common", - "serde", "serde_json", "tokio", ] @@ -11564,7 +11542,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -11684,7 +11662,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.113", + "syn 2.0.114", "unicode-ident", ] @@ -11811,9 +11789,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "log", "once_cell", @@ -12149,16 +12127,16 @@ checksum = 
"d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "serde_json" -version = "1.0.148" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "memchr", "serde", @@ -12208,7 +12186,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.13.0", "schemars 0.9.0", "schemars 1.2.0", "serde_core", @@ -12226,7 +12204,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12498,12 +12476,6 @@ dependencies = [ "sha1", ] -[[package]] -name = "spin" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" - [[package]] name = "spki" version = "0.7.3" @@ -12560,7 +12532,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12572,7 +12544,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12594,9 +12566,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.113" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678faa00651c9eb72dd2020cbdf275d92eccb2400d568e419efdd64838145cb4" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -12612,7 +12584,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12632,7 +12604,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12719,7 +12691,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12730,7 +12702,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "test-case-core", ] @@ -12770,7 +12742,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12818,7 +12790,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12829,7 +12801,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12883,9 +12855,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" dependencies = [ "deranged", "itoa", @@ -12894,22 +12866,22 @@ dependencies = [ "num-conv", "num_threads", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" dependencies = [ 
"num-conv", "time-core", @@ -12985,7 +12957,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -13000,9 +12972,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -13029,9 +13001,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -13078,7 +13050,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_spanned", "toml_datetime 0.6.11", @@ -13092,7 +13064,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -13146,20 +13118,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" dependencies = [ "bytes", - "prost 0.14.1", + "prost 0.14.3", "tonic", ] [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.12.1", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -13245,7 +13217,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -13314,16 +13286,13 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" +checksum = "1ac28f2d093c6c477eaa76b23525478f38de514fa9aeb1285738d4b97a9552fc" dependencies = [ "js-sys", "opentelemetry", - "opentelemetry_sdk", - "rustversion", "smallvec", - "thiserror 2.0.17", "tracing", "tracing-core", "tracing-log", @@ -13400,9 +13369,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.18.3" +version = "0.18.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d722a05fe49b31fef971c4732a7d4aa6a18283d9ba46abddab35f484872947" +checksum = "a4f6fc3baeac5d86ab90c772e9e30620fc653bf1864295029921a15ef478e6a5" dependencies = [ "loom", "once_cell", @@ -13412,9 +13381,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fb391ac70462b3097a755618fbf9c8f95ecc1eb379a414f7b46f202ed10db1f" +checksum = "c5f7c95348f20c1c913d72157b3c6dee6ea3e30b3d19502c5a7f6d3f160dacbf" dependencies = [ "cc", "windows-targets 0.52.6", @@ -13442,7 +13411,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -13530,9 +13499,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" 
[[package]] name = "unicase" -version = "2.8.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" @@ -13605,14 +13574,15 @@ checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae" [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -13723,7 +13693,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -13826,7 +13796,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "wasm-bindgen-shared", ] @@ -14034,7 +14004,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14045,7 +14015,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14056,7 +14026,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14067,7 +14037,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14525,28 +14495,28 @@ checksum = 
"b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14566,7 +14536,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure", ] @@ -14587,7 +14557,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14621,14 +14591,14 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "zmij" -version = "1.0.9" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee2a72b10d087f75fb2e1c2c7343e308fe6970527c22a41caf8372e165ff5c1" +checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" [[package]] name = "zstd" diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index bfc0051fab8..52bb9b036fc 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -17,21 +17,26 @@ workspace = true reth-cli-runner.workspace = true 
reth-cli-util.workspace = true reth-engine-primitives.workspace = true +reth-ethereum-primitives.workspace = true reth-fs-util.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-primitives-traits.workspace = true +reth-rpc-api.workspace = true + reth-tracing.workspace = true +reth-chainspec.workspace = true # alloy alloy-eips.workspace = true alloy-json-rpc.workspace = true +alloy-consensus.workspace = true alloy-network.workspace = true alloy-primitives.workspace = true alloy-provider = { workspace = true, features = ["engine-api", "pubsub", "reqwest-rustls-tls"], default-features = false } alloy-pubsub.workspace = true alloy-rpc-client = { workspace = true, features = ["pubsub"] } -alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["kzg"] } alloy-transport-http.workspace = true alloy-transport-ipc.workspace = true alloy-transport-ws.workspace = true diff --git a/bin/reth-bench/src/bench/gas_limit_ramp.rs b/bin/reth-bench/src/bench/gas_limit_ramp.rs new file mode 100644 index 00000000000..4f0d58dbd96 --- /dev/null +++ b/bin/reth-bench/src/bench/gas_limit_ramp.rs @@ -0,0 +1,160 @@ +//! Benchmarks empty block processing by ramping the block gas limit. 
+ +use crate::{ + authenticated_transport::AuthenticatedTransportConnect, + bench::{ + helpers::{build_payload, prepare_payload_request, rpc_block_to_header}, + output::GasRampPayloadFile, + }, + valid_payload::{call_forkchoice_updated, call_new_payload, payload_to_new_payload}, +}; +use alloy_eips::BlockNumberOrTag; +use alloy_provider::{network::AnyNetwork, Provider, RootProvider}; +use alloy_rpc_client::ClientBuilder; +use alloy_rpc_types_engine::{ExecutionPayload, ForkchoiceState, JwtSecret}; + +use clap::Parser; +use reqwest::Url; +use reth_chainspec::ChainSpec; +use reth_cli_runner::CliContext; +use reth_ethereum_primitives::TransactionSigned; +use reth_primitives_traits::constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK}; +use std::{path::PathBuf, time::Instant}; +use tracing::info; + +/// `reth benchmark gas-limit-ramp` command. +#[derive(Debug, Parser)] +pub struct Command { + /// Number of blocks to generate. + #[arg(long, value_name = "BLOCKS")] + blocks: u64, + + /// The Engine API RPC URL. + #[arg(long = "engine-rpc-url", value_name = "ENGINE_RPC_URL")] + engine_rpc_url: String, + + /// Path to the JWT secret for Engine API authentication. + #[arg(long = "jwt-secret", value_name = "JWT_SECRET")] + jwt_secret: PathBuf, + + /// Output directory for benchmark results and generated payloads. + #[arg(long, value_name = "OUTPUT")] + output: PathBuf, +} + +impl Command { + /// Execute `benchmark gas-limit-ramp` command. 
+ pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { + if self.blocks == 0 { + return Err(eyre::eyre!("--blocks must be greater than 0")); + } + + // Ensure output directory exists + if self.output.is_file() { + return Err(eyre::eyre!("Output path must be a directory")); + } + if !self.output.exists() { + std::fs::create_dir_all(&self.output)?; + info!("Created output directory: {:?}", self.output); + } + + // Set up authenticated provider (used for both Engine API and eth_ methods) + let jwt = std::fs::read_to_string(&self.jwt_secret)?; + let jwt = JwtSecret::from_hex(jwt)?; + let auth_url = Url::parse(&self.engine_rpc_url)?; + + info!("Connecting to Engine RPC at {}", auth_url); + let auth_transport = AuthenticatedTransportConnect::new(auth_url, jwt); + let client = ClientBuilder::default().connect_with(auth_transport).await?; + let provider = RootProvider::::new(client); + + // Get chain spec - required for fork detection + let chain_id = provider.get_chain_id().await?; + let chain_spec = ChainSpec::from_chain_id(chain_id) + .ok_or_else(|| eyre::eyre!("Unsupported chain id: {chain_id}"))?; + + // Fetch the current head block as parent + let parent_block = provider + .get_block_by_number(BlockNumberOrTag::Latest) + .full() + .await? 
+ .ok_or_else(|| eyre::eyre!("Failed to fetch latest block"))?; + + let (mut parent_header, mut parent_hash) = rpc_block_to_header(parent_block); + + let canonical_parent = parent_header.number; + let start_block = canonical_parent + 1; + let end_block = start_block + self.blocks - 1; + + info!(canonical_parent, start_block, end_block, "Starting gas limit ramp benchmark"); + + let mut next_block_number = start_block; + let total_benchmark_duration = Instant::now(); + + while next_block_number <= end_block { + let timestamp = parent_header.timestamp.saturating_add(1); + + let request = prepare_payload_request(&chain_spec, timestamp, parent_hash); + let new_payload_version = request.new_payload_version; + + let (payload, sidecar) = build_payload(&provider, request).await?; + + let mut block = + payload.clone().try_into_block_with_sidecar::(&sidecar)?; + + let max_increase = max_gas_limit_increase(parent_header.gas_limit); + let gas_limit = + parent_header.gas_limit.saturating_add(max_increase).min(MAXIMUM_GAS_LIMIT_BLOCK); + + block.header.gas_limit = gas_limit; + + let block_hash = block.header.hash_slow(); + // Regenerate the payload from the modified block, but keep the original sidecar + // which contains the actual execution requests data (not just the hash) + let (payload, _) = ExecutionPayload::from_block_unchecked(block_hash, &block); + let (version, params) = payload_to_new_payload( + payload, + sidecar, + false, + block.header.withdrawals_root, + Some(new_payload_version), + )?; + + // Save payload to file with version info for replay + let payload_path = + self.output.join(format!("payload_block_{}.json", block.header.number)); + let file = + GasRampPayloadFile { version: version as u8, block_hash, params: params.clone() }; + let payload_json = serde_json::to_string_pretty(&file)?; + std::fs::write(&payload_path, &payload_json)?; + info!(block_number = block.header.number, path = %payload_path.display(), "Saved payload"); + + call_new_payload(&provider, 
version, params).await?; + + let forkchoice_state = ForkchoiceState { + head_block_hash: block_hash, + safe_block_hash: block_hash, + finalized_block_hash: block_hash, + }; + call_forkchoice_updated(&provider, version, forkchoice_state, None).await?; + + parent_header = block.header; + parent_hash = block_hash; + next_block_number += 1; + } + + let final_gas_limit = parent_header.gas_limit; + info!( + total_duration=?total_benchmark_duration.elapsed(), + blocks_processed = self.blocks, + final_gas_limit, + "Benchmark complete" + ); + + Ok(()) + } +} + +const fn max_gas_limit_increase(parent_gas_limit: u64) -> u64 { + (parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR).saturating_sub(1) +} diff --git a/bin/reth-bench/src/bench/generate_big_block.rs b/bin/reth-bench/src/bench/generate_big_block.rs new file mode 100644 index 00000000000..7ddab1125e6 --- /dev/null +++ b/bin/reth-bench/src/bench/generate_big_block.rs @@ -0,0 +1,617 @@ +//! Command for generating large blocks by packing transactions from real blocks. +//! +//! This command fetches transactions from existing blocks and packs them into a single +//! large block using the `testing_buildBlockV1` RPC endpoint. + +use crate::authenticated_transport::AuthenticatedTransportConnect; +use alloy_eips::{BlockNumberOrTag, Typed2718}; +use alloy_primitives::{Bytes, B256}; +use alloy_provider::{ext::EngineApi, network::AnyNetwork, Provider, RootProvider}; +use alloy_rpc_client::ClientBuilder; +use alloy_rpc_types_engine::{ + ExecutionPayloadEnvelopeV4, ExecutionPayloadEnvelopeV5, ForkchoiceState, JwtSecret, + PayloadAttributes, +}; +use alloy_transport::layers::RetryBackoffLayer; +use clap::Parser; +use eyre::Context; +use reqwest::Url; +use reth_cli_runner::CliContext; +use reth_rpc_api::TestingBuildBlockRequestV1; +use std::future::Future; +use tokio::sync::mpsc; +use tracing::{info, warn}; + +/// A single transaction with its gas used and raw encoded bytes. 
+#[derive(Debug, Clone)] +pub struct RawTransaction { + /// The actual gas used by the transaction (from receipt). + pub gas_used: u64, + /// The transaction type (e.g., 3 for EIP-4844 blob txs). + pub tx_type: u8, + /// The raw RLP-encoded transaction bytes. + pub raw: Bytes, +} + +/// Abstraction over sources of transactions for big block generation. +/// +/// Implementors provide transactions from different sources (RPC, database, files, etc.) +pub trait TransactionSource { + /// Fetch transactions from a specific block number. + /// + /// Returns `Ok(None)` if the block doesn't exist. + /// Returns `Ok(Some((transactions, gas_used)))` with the block's transactions and total gas. + fn fetch_block_transactions( + &self, + block_number: u64, + ) -> impl Future, u64)>>> + Send; +} + +/// RPC-based transaction source that fetches from a remote node. +#[derive(Debug)] +pub struct RpcTransactionSource { + provider: RootProvider, +} + +impl RpcTransactionSource { + /// Create a new RPC transaction source. + pub const fn new(provider: RootProvider) -> Self { + Self { provider } + } + + /// Create from an RPC URL with retry backoff. 
+ pub fn from_url(rpc_url: &str) -> eyre::Result { + let client = ClientBuilder::default() + .layer(RetryBackoffLayer::new(10, 800, u64::MAX)) + .http(rpc_url.parse()?); + let provider = RootProvider::::new(client); + Ok(Self { provider }) + } +} + +impl TransactionSource for RpcTransactionSource { + async fn fetch_block_transactions( + &self, + block_number: u64, + ) -> eyre::Result, u64)>> { + // Fetch block and receipts in parallel + let (block, receipts) = tokio::try_join!( + self.provider.get_block_by_number(block_number.into()).full(), + self.provider.get_block_receipts(block_number.into()) + )?; + + let Some(block) = block else { + return Ok(None); + }; + + let Some(receipts) = receipts else { + return Err(eyre::eyre!("Receipts not found for block {}", block_number)); + }; + + let block_gas_used = block.header.gas_used; + + // Convert cumulative gas from receipts to per-tx gas_used + let mut prev_cumulative = 0u64; + let transactions: Vec = block + .transactions + .txns() + .zip(receipts.iter()) + .map(|(tx, receipt)| { + let cumulative = receipt.inner.inner.inner.receipt.cumulative_gas_used; + let gas_used = cumulative - prev_cumulative; + prev_cumulative = cumulative; + + let with_encoded = tx.inner.inner.clone().into_encoded(); + RawTransaction { + gas_used, + tx_type: tx.inner.ty(), + raw: with_encoded.encoded_bytes().clone(), + } + }) + .collect(); + + Ok(Some((transactions, block_gas_used))) + } +} + +/// Collects transactions from a source up to a target gas usage. +#[derive(Debug)] +pub struct TransactionCollector { + source: S, + target_gas: u64, +} + +impl TransactionCollector { + /// Create a new transaction collector. + pub const fn new(source: S, target_gas: u64) -> Self { + Self { source, target_gas } + } + + /// Collect transactions starting from the given block number. + /// + /// Skips blob transactions (type 3) and collects until target gas is reached. 
+ /// Returns the collected raw transaction bytes, total gas used, and the next block number. + pub async fn collect(&self, start_block: u64) -> eyre::Result<(Vec, u64, u64)> { + let mut transactions: Vec = Vec::new(); + let mut total_gas: u64 = 0; + let mut current_block = start_block; + + while total_gas < self.target_gas { + let Some((block_txs, _)) = self.source.fetch_block_transactions(current_block).await? + else { + warn!(block = current_block, "Block not found, stopping"); + break; + }; + + for tx in block_txs { + // Skip blob transactions (EIP-4844, type 3) + if tx.tx_type == 3 { + continue; + } + + if total_gas + tx.gas_used <= self.target_gas { + transactions.push(tx.raw); + total_gas += tx.gas_used; + } + + if total_gas >= self.target_gas { + break; + } + } + + current_block += 1; + + // Stop early if remaining gas is under 1M (close enough to target) + let remaining_gas = self.target_gas.saturating_sub(total_gas); + if remaining_gas < 1_000_000 { + break; + } + } + + info!( + total_txs = transactions.len(), + total_gas, + next_block = current_block, + "Finished collecting transactions" + ); + + Ok((transactions, total_gas, current_block)) + } +} + +/// `reth bench generate-big-block` command +/// +/// Generates a large block by fetching transactions from existing blocks and packing them +/// into a single block using the `testing_buildBlockV1` RPC endpoint. +#[derive(Debug, Parser)] +pub struct Command { + /// The RPC URL to use for fetching blocks (can be an external archive node). + #[arg(long, value_name = "RPC_URL")] + rpc_url: String, + + /// The engine RPC URL (with JWT authentication). + #[arg(long, value_name = "ENGINE_RPC_URL", default_value = "http://localhost:8551")] + engine_rpc_url: String, + + /// The RPC URL for `testing_buildBlockV1` calls (same node as engine, regular RPC port). 
+ #[arg(long, value_name = "TESTING_RPC_URL", default_value = "http://localhost:8545")] + testing_rpc_url: String, + + /// Path to the JWT secret file for engine API authentication. + #[arg(long, value_name = "JWT_SECRET")] + jwt_secret: std::path::PathBuf, + + /// Target gas to pack into the block. + #[arg(long, value_name = "TARGET_GAS", default_value = "30000000")] + target_gas: u64, + + /// Starting block number to fetch transactions from. + /// If not specified, starts from the engine's latest block. + #[arg(long, value_name = "FROM_BLOCK")] + from_block: Option, + + /// Execute the payload (call newPayload + forkchoiceUpdated). + /// If false, only builds the payload and prints it. + #[arg(long, default_value = "false")] + execute: bool, + + /// Number of payloads to generate. Each payload uses the previous as parent. + /// When count == 1, the payload is only generated and saved, not executed. + /// When count > 1, each payload is executed before building the next. + #[arg(long, default_value = "1")] + count: u64, + + /// Number of transaction batches to prefetch in background when count > 1. + /// Higher values reduce latency but use more memory. + #[arg(long, default_value = "4")] + prefetch_buffer: usize, + + /// Output directory for generated payloads. Each payload is saved as `payload_block_N.json`. + #[arg(long, value_name = "OUTPUT_DIR")] + output_dir: std::path::PathBuf, +} + +/// A built payload ready for execution. 
+struct BuiltPayload { + block_number: u64, + envelope: ExecutionPayloadEnvelopeV4, + block_hash: B256, + timestamp: u64, +} + +impl Command { + /// Execute the `generate-big-block` command + pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { + info!(target_gas = self.target_gas, count = self.count, "Generating big block(s)"); + + // Set up authenticated engine provider + let jwt = + std::fs::read_to_string(&self.jwt_secret).wrap_err("Failed to read JWT secret file")?; + let jwt = JwtSecret::from_hex(jwt.trim())?; + let auth_url = Url::parse(&self.engine_rpc_url)?; + + info!("Connecting to Engine RPC at {}", auth_url); + let auth_transport = AuthenticatedTransportConnect::new(auth_url.clone(), jwt); + let auth_client = ClientBuilder::default().connect_with(auth_transport).await?; + let auth_provider = RootProvider::::new(auth_client); + + // Set up testing RPC provider (for testing_buildBlockV1) + info!("Connecting to Testing RPC at {}", self.testing_rpc_url); + let testing_client = ClientBuilder::default() + .layer(RetryBackoffLayer::new(10, 800, u64::MAX)) + .http(self.testing_rpc_url.parse()?); + let testing_provider = RootProvider::::new(testing_client); + + // Get the parent block (latest canonical block) + info!(endpoint = "engine", method = "eth_getBlockByNumber", block = "latest", "RPC call"); + let parent_block = auth_provider + .get_block_by_number(BlockNumberOrTag::Latest) + .await? 
+ .ok_or_else(|| eyre::eyre!("Failed to fetch latest block"))?; + + let parent_hash = parent_block.header.hash; + let parent_number = parent_block.header.number; + let parent_timestamp = parent_block.header.timestamp; + + info!( + parent_hash = %parent_hash, + parent_number = parent_number, + "Using initial parent block" + ); + + // Create output directory + std::fs::create_dir_all(&self.output_dir).wrap_err_with(|| { + format!("Failed to create output directory: {:?}", self.output_dir) + })?; + + let start_block = self.from_block.unwrap_or(parent_number); + + // Use pipelined execution when generating multiple payloads + if self.count > 1 { + self.execute_pipelined( + &auth_provider, + &testing_provider, + start_block, + parent_hash, + parent_timestamp, + ) + .await?; + } else { + // Single payload - collect transactions and build + let tx_source = RpcTransactionSource::from_url(&self.rpc_url)?; + let collector = TransactionCollector::new(tx_source, self.target_gas); + let (transactions, _total_gas, _next_block) = collector.collect(start_block).await?; + + if transactions.is_empty() { + return Err(eyre::eyre!("No transactions collected")); + } + + self.execute_sequential( + &auth_provider, + &testing_provider, + transactions, + parent_hash, + parent_timestamp, + ) + .await?; + } + + info!(count = self.count, output_dir = %self.output_dir.display(), "All payloads generated"); + Ok(()) + } + + /// Sequential execution path for single payload or no-execute mode. 
+ async fn execute_sequential( + &self, + auth_provider: &RootProvider, + testing_provider: &RootProvider, + transactions: Vec, + mut parent_hash: B256, + mut parent_timestamp: u64, + ) -> eyre::Result<()> { + for i in 0..self.count { + info!( + payload = i + 1, + total = self.count, + parent_hash = %parent_hash, + parent_timestamp = parent_timestamp, + "Building payload via testing_buildBlockV1" + ); + + let built = self + .build_payload(testing_provider, &transactions, i, parent_hash, parent_timestamp) + .await?; + + self.save_payload(&built)?; + + if self.execute || self.count > 1 { + info!(payload = i + 1, block_hash = %built.block_hash, "Executing payload (newPayload + FCU)"); + self.execute_payload_v4(auth_provider, built.envelope, parent_hash).await?; + info!(payload = i + 1, "Payload executed successfully"); + } + + parent_hash = built.block_hash; + parent_timestamp = built.timestamp; + } + Ok(()) + } + + /// Pipelined execution - fetches transactions and builds payloads in background. 
+ async fn execute_pipelined(
+ &self,
+ auth_provider: &RootProvider<AnyNetwork>,
+ testing_provider: &RootProvider<AnyNetwork>,
+ start_block: u64,
+ initial_parent_hash: B256,
+ initial_parent_timestamp: u64,
+ ) -> eyre::Result<()> {
+ // Create channel for transaction batches (one batch per payload)
+ let (tx_sender, mut tx_receiver) = mpsc::channel::<Vec<Bytes>>(self.prefetch_buffer);
+
+ // Spawn background task to continuously fetch transaction batches
+ let rpc_url = self.rpc_url.clone();
+ let target_gas = self.target_gas;
+ let count = self.count;
+
+ let fetcher_handle = tokio::spawn(async move {
+ let tx_source = match RpcTransactionSource::from_url(&rpc_url) {
+ Ok(source) => source,
+ Err(e) => {
+ warn!(error = %e, "Failed to create transaction source");
+ return;
+ }
+ };
+
+ let collector = TransactionCollector::new(tx_source, target_gas);
+ let mut current_block = start_block;
+
+ for payload_idx in 0..count {
+ match collector.collect(current_block).await {
+ Ok((transactions, total_gas, next_block)) => {
+ info!(
+ payload = payload_idx + 1,
+ tx_count = transactions.len(),
+ total_gas,
+ blocks = format!("{}..{}", current_block, next_block),
+ "Fetched transactions"
+ );
+ current_block = next_block;
+
+ if tx_sender.send(transactions).await.is_err() {
+ break;
+ }
+ }
+ Err(e) => {
+ warn!(payload = payload_idx + 1, error = %e, "Failed to fetch transactions");
+ break;
+ }
+ }
+ }
+ });
+
+ let mut parent_hash = initial_parent_hash;
+ let mut parent_timestamp = initial_parent_timestamp;
+ let mut pending_build: Option<tokio::task::JoinHandle<eyre::Result<BuiltPayload>>> = None;
+
+ for i in 0..self.count {
+ let is_last = i == self.count - 1;
+
+ // Get current payload (either from pending build or build now)
+ let current_payload = if let Some(handle) = pending_build.take() {
+ handle.await??
+ } else { + // First payload - wait for transactions and build synchronously + let transactions = tx_receiver + .recv() + .await + .ok_or_else(|| eyre::eyre!("Transaction fetcher stopped unexpectedly"))?; + + if transactions.is_empty() { + return Err(eyre::eyre!("No transactions collected for payload {}", i + 1)); + } + + info!( + payload = i + 1, + total = self.count, + parent_hash = %parent_hash, + parent_timestamp = parent_timestamp, + tx_count = transactions.len(), + "Building payload via testing_buildBlockV1" + ); + self.build_payload( + testing_provider, + &transactions, + i, + parent_hash, + parent_timestamp, + ) + .await? + }; + + self.save_payload(¤t_payload)?; + + let current_block_hash = current_payload.block_hash; + let current_timestamp = current_payload.timestamp; + + // Execute current payload first + info!(payload = i + 1, block_hash = %current_block_hash, "Executing payload (newPayload + FCU)"); + self.execute_payload_v4(auth_provider, current_payload.envelope, parent_hash).await?; + info!(payload = i + 1, "Payload executed successfully"); + + // Start building next payload in background (if not last) - AFTER execution + if !is_last { + // Get transactions for next payload (should already be fetched or fetching) + let next_transactions = tx_receiver + .recv() + .await + .ok_or_else(|| eyre::eyre!("Transaction fetcher stopped unexpectedly"))?; + + if next_transactions.is_empty() { + return Err(eyre::eyre!("No transactions collected for payload {}", i + 2)); + } + + let testing_provider = testing_provider.clone(); + let next_index = i + 1; + let total = self.count; + + pending_build = Some(tokio::spawn(async move { + info!( + payload = next_index + 1, + total = total, + parent_hash = %current_block_hash, + parent_timestamp = current_timestamp, + tx_count = next_transactions.len(), + "Building payload via testing_buildBlockV1" + ); + + Self::build_payload_static( + &testing_provider, + &next_transactions, + next_index, + current_block_hash, + 
current_timestamp, + ) + .await + })); + } + + parent_hash = current_block_hash; + parent_timestamp = current_timestamp; + } + + // Clean up the fetcher task + drop(tx_receiver); + let _ = fetcher_handle.await; + + Ok(()) + } + + /// Build a single payload via `testing_buildBlockV1`. + async fn build_payload( + &self, + testing_provider: &RootProvider, + transactions: &[Bytes], + index: u64, + parent_hash: B256, + parent_timestamp: u64, + ) -> eyre::Result { + Self::build_payload_static( + testing_provider, + transactions, + index, + parent_hash, + parent_timestamp, + ) + .await + } + + /// Static version for use in spawned tasks. + async fn build_payload_static( + testing_provider: &RootProvider, + transactions: &[Bytes], + index: u64, + parent_hash: B256, + parent_timestamp: u64, + ) -> eyre::Result { + let request = TestingBuildBlockRequestV1 { + parent_block_hash: parent_hash, + payload_attributes: PayloadAttributes { + timestamp: parent_timestamp + 12, + prev_randao: B256::ZERO, + suggested_fee_recipient: alloy_primitives::Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }, + transactions: transactions.to_vec(), + extra_data: None, + }; + + let total_tx_bytes: usize = transactions.iter().map(|tx| tx.len()).sum(); + info!( + payload = index + 1, + tx_count = transactions.len(), + total_tx_bytes = total_tx_bytes, + parent_hash = %parent_hash, + "Sending to testing_buildBlockV1" + ); + let envelope: ExecutionPayloadEnvelopeV5 = + testing_provider.client().request("testing_buildBlockV1", [request]).await?; + + let v4_envelope = envelope.try_into_v4()?; + + let inner = &v4_envelope.envelope_inner.execution_payload.payload_inner.payload_inner; + let block_hash = inner.block_hash; + let block_number = inner.block_number; + let timestamp = inner.timestamp; + + Ok(BuiltPayload { block_number, envelope: v4_envelope, block_hash, timestamp }) + } + + /// Save a payload to disk. 
+ fn save_payload(&self, payload: &BuiltPayload) -> eyre::Result<()> { + let filename = format!("payload_block_{}.json", payload.block_number); + let filepath = self.output_dir.join(&filename); + let json = serde_json::to_string_pretty(&payload.envelope)?; + std::fs::write(&filepath, &json) + .wrap_err_with(|| format!("Failed to write payload to {:?}", filepath))?; + info!(block_number = payload.block_number, block_hash = %payload.block_hash, path = %filepath.display(), "Payload saved"); + Ok(()) + } + + async fn execute_payload_v4( + &self, + provider: &RootProvider, + envelope: ExecutionPayloadEnvelopeV4, + parent_hash: B256, + ) -> eyre::Result<()> { + let block_hash = + envelope.envelope_inner.execution_payload.payload_inner.payload_inner.block_hash; + + let status = provider + .new_payload_v4( + envelope.envelope_inner.execution_payload, + vec![], + B256::ZERO, + envelope.execution_requests.to_vec(), + ) + .await?; + + if !status.is_valid() { + return Err(eyre::eyre!("Payload rejected: {:?}", status)); + } + + let fcu_state = ForkchoiceState { + head_block_hash: block_hash, + safe_block_hash: parent_hash, + finalized_block_hash: parent_hash, + }; + + let fcu_result = provider.fork_choice_updated_v3(fcu_state, None).await?; + + if !fcu_result.is_valid() { + return Err(eyre::eyre!("FCU rejected: {:?}", fcu_result)); + } + + Ok(()) + } +} diff --git a/bin/reth-bench/src/bench/helpers.rs b/bin/reth-bench/src/bench/helpers.rs new file mode 100644 index 00000000000..f367fd69a1d --- /dev/null +++ b/bin/reth-bench/src/bench/helpers.rs @@ -0,0 +1,196 @@ +//! Common helpers for reth-bench commands. 
+ +use crate::valid_payload::call_forkchoice_updated; +use alloy_consensus::Header; +use alloy_eips::eip4844::kzg_to_versioned_hash; +use alloy_primitives::{Address, B256}; +use alloy_provider::{ext::EngineApi, network::AnyNetwork, RootProvider}; +use alloy_rpc_types_engine::{ + CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, + PayloadAttributes, PayloadId, PraguePayloadFields, +}; +use eyre::OptionExt; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_node_api::EngineApiMessageVersion; +use tracing::debug; + +/// Prepared payload request data for triggering block building. +pub(crate) struct PayloadRequest { + /// The payload attributes for the new block. + pub(crate) attributes: PayloadAttributes, + /// The forkchoice state pointing to the parent block. + pub(crate) forkchoice_state: ForkchoiceState, + /// The engine API version for FCU calls. + pub(crate) fcu_version: EngineApiMessageVersion, + /// The getPayload version to use (1-5). + pub(crate) get_payload_version: u8, + /// The newPayload version to use. + pub(crate) new_payload_version: EngineApiMessageVersion, +} + +/// Prepare payload attributes and forkchoice state for a new block. 
+pub(crate) fn prepare_payload_request( + chain_spec: &ChainSpec, + timestamp: u64, + parent_hash: B256, +) -> PayloadRequest { + let shanghai_active = chain_spec.is_shanghai_active_at_timestamp(timestamp); + let cancun_active = chain_spec.is_cancun_active_at_timestamp(timestamp); + let prague_active = chain_spec.is_prague_active_at_timestamp(timestamp); + let osaka_active = chain_spec.is_osaka_active_at_timestamp(timestamp); + + // FCU version: V3 for Cancun+Prague+Osaka, V2 for Shanghai, V1 otherwise + let fcu_version = if cancun_active { + EngineApiMessageVersion::V3 + } else if shanghai_active { + EngineApiMessageVersion::V2 + } else { + EngineApiMessageVersion::V1 + }; + + // getPayload version: 5 for Osaka, 4 for Prague, 3 for Cancun, 2 for Shanghai, 1 otherwise + // newPayload version: 4 for Prague+Osaka (no V5), 3 for Cancun, 2 for Shanghai, 1 otherwise + let (get_payload_version, new_payload_version) = if osaka_active { + (5, EngineApiMessageVersion::V4) // Osaka uses getPayloadV5 but newPayloadV4 + } else if prague_active { + (4, EngineApiMessageVersion::V4) + } else if cancun_active { + (3, EngineApiMessageVersion::V3) + } else if shanghai_active { + (2, EngineApiMessageVersion::V2) + } else { + (1, EngineApiMessageVersion::V1) + }; + + PayloadRequest { + attributes: PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: shanghai_active.then(Vec::new), + parent_beacon_block_root: cancun_active.then_some(B256::ZERO), + }, + forkchoice_state: ForkchoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash: parent_hash, + }, + fcu_version, + get_payload_version, + new_payload_version, + } +} + +/// Trigger payload building via FCU and retrieve the built payload. +/// +/// This sends a forkchoiceUpdated with payload attributes to start building, +/// then calls getPayload to retrieve the result. 
+pub(crate) async fn build_payload( + provider: &RootProvider, + request: PayloadRequest, +) -> eyre::Result<(ExecutionPayload, ExecutionPayloadSidecar)> { + let fcu_result = call_forkchoice_updated( + provider, + request.fcu_version, + request.forkchoice_state, + Some(request.attributes.clone()), + ) + .await?; + + let payload_id = + fcu_result.payload_id.ok_or_eyre("Payload builder did not return a payload id")?; + + get_payload_with_sidecar( + provider, + request.get_payload_version, + payload_id, + request.attributes.parent_beacon_block_root, + ) + .await +} + +/// Convert an RPC block to a consensus header and block hash. +pub(crate) fn rpc_block_to_header(block: alloy_provider::network::AnyRpcBlock) -> (Header, B256) { + let block_hash = block.header.hash; + let header = block.header.inner.clone().into_header_with_defaults(); + (header, block_hash) +} + +/// Compute versioned hashes from KZG commitments. +fn versioned_hashes_from_commitments( + commitments: &[alloy_primitives::FixedBytes<48>], +) -> Vec { + commitments.iter().map(|c| kzg_to_versioned_hash(c.as_ref())).collect() +} + +/// Fetch an execution payload using the appropriate engine API version. 
+pub(crate) async fn get_payload_with_sidecar(
+ provider: &RootProvider<AnyNetwork>,
+ version: u8,
+ payload_id: PayloadId,
+ parent_beacon_block_root: Option<B256>,
+) -> eyre::Result<(ExecutionPayload, ExecutionPayloadSidecar)> {
+ debug!(get_payload_version = ?version, ?payload_id, "Sending getPayload");
+
+ match version {
+ 1 => {
+ let payload = provider.get_payload_v1(payload_id).await?;
+ Ok((ExecutionPayload::V1(payload), ExecutionPayloadSidecar::none()))
+ }
+ 2 => {
+ let envelope = provider.get_payload_v2(payload_id).await?;
+ let payload = match envelope.execution_payload {
+ alloy_rpc_types_engine::ExecutionPayloadFieldV2::V1(p) => ExecutionPayload::V1(p),
+ alloy_rpc_types_engine::ExecutionPayloadFieldV2::V2(p) => ExecutionPayload::V2(p),
+ };
+ Ok((payload, ExecutionPayloadSidecar::none()))
+ }
+ 3 => {
+ let envelope = provider.get_payload_v3(payload_id).await?;
+ let versioned_hashes =
+ versioned_hashes_from_commitments(&envelope.blobs_bundle.commitments);
+ let cancun_fields = CancunPayloadFields {
+ parent_beacon_block_root: parent_beacon_block_root
+ .ok_or_eyre("parent_beacon_block_root required for V3")?,
+ versioned_hashes,
+ };
+ Ok((
+ ExecutionPayload::V3(envelope.execution_payload),
+ ExecutionPayloadSidecar::v3(cancun_fields),
+ ))
+ }
+ 4 => {
+ let envelope = provider.get_payload_v4(payload_id).await?;
+ let versioned_hashes = versioned_hashes_from_commitments(
+ &envelope.envelope_inner.blobs_bundle.commitments,
+ );
+ let cancun_fields = CancunPayloadFields {
+ parent_beacon_block_root: parent_beacon_block_root
+ .ok_or_eyre("parent_beacon_block_root required for V4")?,
+ versioned_hashes,
+ };
+ let prague_fields = PraguePayloadFields::new(envelope.execution_requests);
+ Ok((
+ ExecutionPayload::V3(envelope.envelope_inner.execution_payload),
+ ExecutionPayloadSidecar::v4(cancun_fields, prague_fields),
+ ))
+ }
+ 5 => {
+ // V5 (Osaka) - no V5 sidecar exists, so reuse the V4 Cancun/Prague sidecar fields
+ let envelope =
provider.get_payload_v5(payload_id).await?; + let versioned_hashes = + versioned_hashes_from_commitments(&envelope.blobs_bundle.commitments); + let cancun_fields = CancunPayloadFields { + parent_beacon_block_root: parent_beacon_block_root + .ok_or_eyre("parent_beacon_block_root required for V5")?, + versioned_hashes, + }; + let prague_fields = PraguePayloadFields::new(envelope.execution_requests); + Ok(( + ExecutionPayload::V3(envelope.execution_payload), + ExecutionPayloadSidecar::v4(cancun_fields, prague_fields), + )) + } + _ => panic!("This tool does not support getPayload versions past v5"), + } +} diff --git a/bin/reth-bench/src/bench/mod.rs b/bin/reth-bench/src/bench/mod.rs index da3ccb1a8bb..fd1d0cccd34 100644 --- a/bin/reth-bench/src/bench/mod.rs +++ b/bin/reth-bench/src/bench/mod.rs @@ -6,9 +6,16 @@ use reth_node_core::args::LogArgs; use reth_tracing::FileWorkerGuard; mod context; +mod gas_limit_ramp; +mod generate_big_block; +pub(crate) mod helpers; +pub use generate_big_block::{ + RawTransaction, RpcTransactionSource, TransactionCollector, TransactionSource, +}; mod new_payload_fcu; mod new_payload_only; mod output; +mod replay_payloads; mod send_payload; /// `reth bench` command @@ -27,6 +34,9 @@ pub enum Subcommands { /// Benchmark which calls `newPayload`, then `forkchoiceUpdated`. NewPayloadFcu(new_payload_fcu::Command), + /// Benchmark which builds empty blocks with a ramped gas limit. + GasLimitRamp(gas_limit_ramp::Command), + /// Benchmark which only calls subsequent `newPayload` calls. NewPayloadOnly(new_payload_only::Command), @@ -41,6 +51,29 @@ pub enum Subcommands { /// `cast block latest --full --json | reth-bench send-payload --rpc-url localhost:5000 /// --jwt-secret $(cat ~/.local/share/reth/mainnet/jwt.hex)` SendPayload(send_payload::Command), + + /// Generate a large block by packing transactions from existing blocks. 
+ /// + /// This command fetches transactions from real blocks and packs them into a single + /// block using the `testing_buildBlockV1` RPC endpoint. + /// + /// Example: + /// + /// `reth-bench generate-big-block --rpc-url http://localhost:8545 --engine-rpc-url + /// http://localhost:8551 --jwt-secret ~/.local/share/reth/mainnet/jwt.hex --target-gas + /// 30000000` + GenerateBigBlock(generate_big_block::Command), + + /// Replay pre-generated payloads from a directory. + /// + /// This command reads payload files from a previous `generate-big-block` run and replays + /// them in sequence using `newPayload` followed by `forkchoiceUpdated`. + /// + /// Example: + /// + /// `reth-bench replay-payloads --payload-dir ./payloads --engine-rpc-url + /// http://localhost:8551 --jwt-secret ~/.local/share/reth/mainnet/jwt.hex` + ReplayPayloads(replay_payloads::Command), } impl BenchmarkCommand { @@ -51,8 +84,11 @@ impl BenchmarkCommand { match self.command { Subcommands::NewPayloadFcu(command) => command.execute(ctx).await, + Subcommands::GasLimitRamp(command) => command.execute(ctx).await, Subcommands::NewPayloadOnly(command) => command.execute(ctx).await, Subcommands::SendPayload(command) => command.execute(ctx).await, + Subcommands::GenerateBigBlock(command) => command.execute(ctx).await, + Subcommands::ReplayPayloads(command) => command.execute(ctx).await, } } diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 5c7f3851996..62e0aef2594 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -13,8 +13,7 @@ use crate::{ bench::{ context::BenchContext, output::{ - CombinedResult, NewPayloadResult, TotalGasOutput, TotalGasRow, COMBINED_OUTPUT_SUFFIX, - GAS_OUTPUT_SUFFIX, + write_benchmark_results, CombinedResult, NewPayloadResult, TotalGasOutput, TotalGasRow, }, }, valid_payload::{block_to_new_payload, call_forkchoice_updated, call_new_payload}, @@ -27,7 +26,6 @@ 
use alloy_rpc_client::RpcClient; use alloy_rpc_types_engine::ForkchoiceState; use alloy_transport_ws::WsConnect; use clap::Parser; -use csv::Writer; use eyre::{Context, OptionExt}; use futures::StreamExt; use humantime::parse_duration; @@ -123,6 +121,7 @@ impl Command { auth_provider, mut next_block, is_optimism, + .. } = BenchContext::new(&self.benchmark, self.rpc_url).await?; let buffer_size = self.rpc_block_buffer_size; @@ -188,6 +187,7 @@ impl Command { result } { let gas_used = block.header.gas_used; + let gas_limit = block.header.gas_limit; let block_number = block.header.number; let transaction_count = block.transactions.len() as u64; @@ -211,6 +211,7 @@ impl Command { let fcu_latency = total_latency - new_payload_result.latency; let combined_result = CombinedResult { block_number, + gas_limit, transaction_count, new_payload_result, fcu_latency, @@ -240,28 +241,11 @@ impl Command { // since the benchmark goal is measuring Ggas/s of newPayload/FCU, not persistence. drop(waiter); - let (gas_output_results, combined_results): (_, Vec) = + let (gas_output_results, combined_results): (Vec, Vec) = results.into_iter().unzip(); - // Write CSV output files if let Some(ref path) = self.benchmark.output { - let output_path = path.join(COMBINED_OUTPUT_SUFFIX); - info!("Writing engine api call latency output to file: {:?}", output_path); - let mut writer = Writer::from_path(&output_path)?; - for result in combined_results { - writer.serialize(result)?; - } - writer.flush()?; - - let output_path = path.join(GAS_OUTPUT_SUFFIX); - info!("Writing total gas output to file: {:?}", output_path); - let mut writer = Writer::from_path(&output_path)?; - for row in &gas_output_results { - writer.serialize(row)?; - } - writer.flush()?; - - info!("Finished writing benchmark output files to {:?}.", path); + write_benchmark_results(path, &gas_output_results, combined_results)?; } let gas_output = TotalGasOutput::new(gas_output_results)?; diff --git 
a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 748ac999a9f..c642f8b23b6 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -49,6 +49,7 @@ impl Command { auth_provider, mut next_block, is_optimism, + .. } = BenchContext::new(&self.benchmark, self.rpc_url).await?; let buffer_size = self.rpc_block_buffer_size; @@ -96,11 +97,7 @@ impl Command { let transaction_count = block.transactions.len() as u64; let gas_used = block.header.gas_used; - debug!( - target: "reth-bench", - number=?block.header.number, - "Sending payload to engine", - ); + debug!(number=?block.header.number, "Sending payload to engine"); let (version, params) = block_to_new_payload(block, is_optimism)?; diff --git a/bin/reth-bench/src/bench/output.rs b/bin/reth-bench/src/bench/output.rs index 17e9ad4a7a9..25a1deaf22c 100644 --- a/bin/reth-bench/src/bench/output.rs +++ b/bin/reth-bench/src/bench/output.rs @@ -1,10 +1,13 @@ //! Contains various benchmark output formats, either for logging or for //! serialization to / from files. +use alloy_primitives::B256; +use csv::Writer; use eyre::OptionExt; use reth_primitives_traits::constants::GIGAGAS; -use serde::{ser::SerializeStruct, Serialize}; -use std::time::Duration; +use serde::{ser::SerializeStruct, Deserialize, Serialize}; +use std::{path::Path, time::Duration}; +use tracing::info; /// This is the suffix for gas output csv files. pub(crate) const GAS_OUTPUT_SUFFIX: &str = "total_gas.csv"; @@ -15,6 +18,17 @@ pub(crate) const COMBINED_OUTPUT_SUFFIX: &str = "combined_latency.csv"; /// This is the suffix for new payload output csv files. pub(crate) const NEW_PAYLOAD_OUTPUT_SUFFIX: &str = "new_payload_latency.csv"; +/// Serialized format for gas ramp payloads on disk. +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct GasRampPayloadFile { + /// Engine API version (1-5). + pub(crate) version: u8, + /// The block hash for FCU. 
+ pub(crate) block_hash: B256, + /// The params to pass to newPayload. + pub(crate) params: serde_json::Value, +} + /// This represents the results of a single `newPayload` call in the benchmark, containing the gas /// used and the `newPayload` latency. #[derive(Debug)] @@ -67,6 +81,8 @@ impl Serialize for NewPayloadResult { pub(crate) struct CombinedResult { /// The block number of the block being processed. pub(crate) block_number: u64, + /// The gas limit of the block. + pub(crate) gas_limit: u64, /// The number of transactions in the block. pub(crate) transaction_count: u64, /// The `newPayload` result. @@ -88,7 +104,7 @@ impl std::fmt::Display for CombinedResult { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, - "Payload {} processed at {:.4} Ggas/s, used {} total gas. Combined gas per second: {:.4} Ggas/s. fcu latency: {:?}, newPayload latency: {:?}", + "Block {} processed at {:.4} Ggas/s, used {} total gas. Combined: {:.4} Ggas/s. fcu: {:?}, newPayload: {:?}", self.block_number, self.new_payload_result.gas_per_second() / GIGAGAS as f64, self.new_payload_result.gas_used, @@ -110,10 +126,11 @@ impl Serialize for CombinedResult { let fcu_latency = self.fcu_latency.as_micros(); let new_payload_latency = self.new_payload_result.latency.as_micros(); let total_latency = self.total_latency.as_micros(); - let mut state = serializer.serialize_struct("CombinedResult", 6)?; + let mut state = serializer.serialize_struct("CombinedResult", 7)?; // flatten the new payload result because this is meant for CSV writing state.serialize_field("block_number", &self.block_number)?; + state.serialize_field("gas_limit", &self.gas_limit)?; state.serialize_field("transaction_count", &self.transaction_count)?; state.serialize_field("gas_used", &self.new_payload_result.gas_used)?; state.serialize_field("new_payload_latency", &new_payload_latency)?; @@ -167,6 +184,36 @@ impl TotalGasOutput { } } +/// Write benchmark results to CSV files. 
+/// +/// Writes two files to the output directory: +/// - `combined_latency.csv`: Per-block latency results +/// - `total_gas.csv`: Per-block gas usage over time +pub(crate) fn write_benchmark_results( + output_dir: &Path, + gas_results: &[TotalGasRow], + combined_results: Vec, +) -> eyre::Result<()> { + let output_path = output_dir.join(COMBINED_OUTPUT_SUFFIX); + info!("Writing engine api call latency output to file: {:?}", output_path); + let mut writer = Writer::from_path(&output_path)?; + for result in combined_results { + writer.serialize(result)?; + } + writer.flush()?; + + let output_path = output_dir.join(GAS_OUTPUT_SUFFIX); + info!("Writing total gas output to file: {:?}", output_path); + let mut writer = Writer::from_path(&output_path)?; + for row in gas_results { + writer.serialize(row)?; + } + writer.flush()?; + + info!("Finished writing benchmark output files to {:?}.", output_dir); + Ok(()) +} + /// This serializes the `time` field of the [`TotalGasRow`] to microseconds. /// /// This is essentially just for the csv writer, which would have headers diff --git a/bin/reth-bench/src/bench/replay_payloads.rs b/bin/reth-bench/src/bench/replay_payloads.rs new file mode 100644 index 00000000000..a2595f81f30 --- /dev/null +++ b/bin/reth-bench/src/bench/replay_payloads.rs @@ -0,0 +1,332 @@ +//! Command for replaying pre-generated payloads from disk. +//! +//! This command reads `ExecutionPayloadEnvelopeV4` files from a directory and replays them +//! in sequence using `newPayload` followed by `forkchoiceUpdated`. 
+ +use crate::{ + authenticated_transport::AuthenticatedTransportConnect, + bench::output::GasRampPayloadFile, + valid_payload::{call_forkchoice_updated, call_new_payload}, +}; +use alloy_primitives::B256; +use alloy_provider::{ext::EngineApi, network::AnyNetwork, Provider, RootProvider}; +use alloy_rpc_client::ClientBuilder; +use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV4, ForkchoiceState, JwtSecret}; +use clap::Parser; +use eyre::Context; +use reqwest::Url; +use reth_cli_runner::CliContext; +use reth_node_api::EngineApiMessageVersion; +use std::path::PathBuf; +use tracing::{debug, info}; + +/// `reth bench replay-payloads` command +/// +/// Replays pre-generated payloads from a directory by calling `newPayload` followed by +/// `forkchoiceUpdated` for each payload in sequence. +#[derive(Debug, Parser)] +pub struct Command { + /// The engine RPC URL (with JWT authentication). + #[arg(long, value_name = "ENGINE_RPC_URL", default_value = "http://localhost:8551")] + engine_rpc_url: String, + + /// Path to the JWT secret file for engine API authentication. + #[arg(long, value_name = "JWT_SECRET")] + jwt_secret: PathBuf, + + /// Directory containing payload files (`payload_block_N.json`). + #[arg(long, value_name = "PAYLOAD_DIR")] + payload_dir: PathBuf, + + /// Optional limit on the number of payloads to replay. + /// If not specified, replays all payloads in the directory. + #[arg(long, value_name = "COUNT")] + count: Option, + + /// Skip the first N payloads. + #[arg(long, value_name = "SKIP", default_value = "0")] + skip: usize, + + /// Optional directory containing gas ramp payloads to replay first. + /// These are replayed before the main payloads to warm up the gas limit. + #[arg(long, value_name = "GAS_RAMP_DIR")] + gas_ramp_dir: Option, +} + +/// A loaded payload ready for execution. +struct LoadedPayload { + /// The index (from filename). + index: u64, + /// The payload envelope. + envelope: ExecutionPayloadEnvelopeV4, + /// The block hash. 
+ block_hash: B256, +} + +/// A gas ramp payload loaded from disk. +struct GasRampPayload { + /// Block number from filename. + block_number: u64, + /// Engine API version for newPayload. + version: EngineApiMessageVersion, + /// The file contents. + file: GasRampPayloadFile, +} + +impl Command { + /// Execute the `replay-payloads` command. + pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { + info!(payload_dir = %self.payload_dir.display(), "Replaying payloads"); + + // Set up authenticated engine provider + let jwt = + std::fs::read_to_string(&self.jwt_secret).wrap_err("Failed to read JWT secret file")?; + let jwt = JwtSecret::from_hex(jwt.trim())?; + let auth_url = Url::parse(&self.engine_rpc_url)?; + + info!("Connecting to Engine RPC at {}", auth_url); + let auth_transport = AuthenticatedTransportConnect::new(auth_url.clone(), jwt); + let auth_client = ClientBuilder::default().connect_with(auth_transport).await?; + let auth_provider = RootProvider::::new(auth_client); + + // Get parent block (latest canonical block) - we need this for the first FCU + let parent_block = auth_provider + .get_block_by_number(alloy_eips::BlockNumberOrTag::Latest) + .await? 
+ .ok_or_else(|| eyre::eyre!("Failed to fetch latest block"))?; + + let initial_parent_hash = parent_block.header.hash; + let initial_parent_number = parent_block.header.number; + + info!( + parent_hash = %initial_parent_hash, + parent_number = initial_parent_number, + "Using initial parent block" + ); + + // Load all payloads upfront to avoid I/O delays between phases + let gas_ramp_payloads = if let Some(ref gas_ramp_dir) = self.gas_ramp_dir { + let payloads = self.load_gas_ramp_payloads(gas_ramp_dir)?; + if payloads.is_empty() { + return Err(eyre::eyre!("No gas ramp payload files found in {:?}", gas_ramp_dir)); + } + info!(count = payloads.len(), "Loaded gas ramp payloads from disk"); + payloads + } else { + Vec::new() + }; + + let payloads = self.load_payloads()?; + if payloads.is_empty() { + return Err(eyre::eyre!("No payload files found in {:?}", self.payload_dir)); + } + info!(count = payloads.len(), "Loaded main payloads from disk"); + + let mut parent_hash = initial_parent_hash; + + // Replay gas ramp payloads first + for (i, payload) in gas_ramp_payloads.iter().enumerate() { + info!( + gas_ramp_payload = i + 1, + total = gas_ramp_payloads.len(), + block_number = payload.block_number, + block_hash = %payload.file.block_hash, + "Executing gas ramp payload (newPayload + FCU)" + ); + + call_new_payload(&auth_provider, payload.version, payload.file.params.clone()).await?; + + let fcu_state = ForkchoiceState { + head_block_hash: payload.file.block_hash, + safe_block_hash: parent_hash, + finalized_block_hash: parent_hash, + }; + call_forkchoice_updated(&auth_provider, payload.version, fcu_state, None).await?; + + info!(gas_ramp_payload = i + 1, "Gas ramp payload executed successfully"); + parent_hash = payload.file.block_hash; + } + + if !gas_ramp_payloads.is_empty() { + info!(count = gas_ramp_payloads.len(), "All gas ramp payloads replayed"); + } + + for (i, payload) in payloads.iter().enumerate() { + info!( + payload = i + 1, + total = payloads.len(), + index 
= payload.index, + block_hash = %payload.block_hash, + "Executing payload (newPayload + FCU)" + ); + + self.execute_payload_v4(&auth_provider, &payload.envelope, parent_hash).await?; + + info!(payload = i + 1, "Payload executed successfully"); + parent_hash = payload.block_hash; + } + + info!(count = payloads.len(), "All payloads replayed successfully"); + Ok(()) + } + + /// Load and parse all payload files from the directory. + fn load_payloads(&self) -> eyre::Result> { + let mut payloads = Vec::new(); + + // Read directory entries + let entries: Vec<_> = std::fs::read_dir(&self.payload_dir) + .wrap_err_with(|| format!("Failed to read directory {:?}", self.payload_dir))? + .filter_map(|e| e.ok()) + .filter(|e| { + e.path().extension().and_then(|s| s.to_str()) == Some("json") && + e.file_name().to_string_lossy().starts_with("payload_") + }) + .collect(); + + // Parse filenames to get indices and sort + let mut indexed_paths: Vec<(u64, PathBuf)> = entries + .into_iter() + .filter_map(|e| { + let name = e.file_name(); + let name_str = name.to_string_lossy(); + // Extract index from "payload_NNN.json" + let index_str = name_str.strip_prefix("payload_")?.strip_suffix(".json")?; + let index: u64 = index_str.parse().ok()?; + Some((index, e.path())) + }) + .collect(); + + indexed_paths.sort_by_key(|(idx, _)| *idx); + + // Apply skip and count + let indexed_paths: Vec<_> = indexed_paths.into_iter().skip(self.skip).collect(); + let indexed_paths: Vec<_> = match self.count { + Some(count) => indexed_paths.into_iter().take(count).collect(), + None => indexed_paths, + }; + + // Load each payload + for (index, path) in indexed_paths { + let content = std::fs::read_to_string(&path) + .wrap_err_with(|| format!("Failed to read {:?}", path))?; + let envelope: ExecutionPayloadEnvelopeV4 = serde_json::from_str(&content) + .wrap_err_with(|| format!("Failed to parse {:?}", path))?; + + let block_hash = + envelope.envelope_inner.execution_payload.payload_inner.payload_inner.block_hash; 
+ + info!( + index = index, + block_hash = %block_hash, + path = %path.display(), + "Loaded payload" + ); + + payloads.push(LoadedPayload { index, envelope, block_hash }); + } + + Ok(payloads) + } + + /// Load and parse gas ramp payload files from a directory. + fn load_gas_ramp_payloads(&self, dir: &PathBuf) -> eyre::Result> { + let mut payloads = Vec::new(); + + let entries: Vec<_> = std::fs::read_dir(dir) + .wrap_err_with(|| format!("Failed to read directory {:?}", dir))? + .filter_map(|e| e.ok()) + .filter(|e| { + e.path().extension().and_then(|s| s.to_str()) == Some("json") && + e.file_name().to_string_lossy().starts_with("payload_block_") + }) + .collect(); + + // Parse filenames to get block numbers and sort + let mut indexed_paths: Vec<(u64, PathBuf)> = entries + .into_iter() + .filter_map(|e| { + let name = e.file_name(); + let name_str = name.to_string_lossy(); + // Extract block number from "payload_block_NNN.json" + let block_str = name_str.strip_prefix("payload_block_")?.strip_suffix(".json")?; + let block_number: u64 = block_str.parse().ok()?; + Some((block_number, e.path())) + }) + .collect(); + + indexed_paths.sort_by_key(|(num, _)| *num); + + for (block_number, path) in indexed_paths { + let content = std::fs::read_to_string(&path) + .wrap_err_with(|| format!("Failed to read {:?}", path))?; + let file: GasRampPayloadFile = serde_json::from_str(&content) + .wrap_err_with(|| format!("Failed to parse {:?}", path))?; + + let version = match file.version { + 1 => EngineApiMessageVersion::V1, + 2 => EngineApiMessageVersion::V2, + 3 => EngineApiMessageVersion::V3, + 4 => EngineApiMessageVersion::V4, + 5 => EngineApiMessageVersion::V5, + v => return Err(eyre::eyre!("Invalid version {} in {:?}", v, path)), + }; + + info!( + block_number, + block_hash = %file.block_hash, + path = %path.display(), + "Loaded gas ramp payload" + ); + + payloads.push(GasRampPayload { block_number, version, file }); + } + + Ok(payloads) + } + + async fn execute_payload_v4( + 
&self, + provider: &RootProvider, + envelope: &ExecutionPayloadEnvelopeV4, + parent_hash: B256, + ) -> eyre::Result<()> { + let block_hash = + envelope.envelope_inner.execution_payload.payload_inner.payload_inner.block_hash; + + debug!( + method = "engine_newPayloadV4", + block_hash = %block_hash, + "Sending newPayload" + ); + + let status = provider + .new_payload_v4( + envelope.envelope_inner.execution_payload.clone(), + vec![], + B256::ZERO, + envelope.execution_requests.to_vec(), + ) + .await?; + + info!(?status, "newPayloadV4 response"); + + if !status.is_valid() { + return Err(eyre::eyre!("Payload rejected: {:?}", status)); + } + + let fcu_state = ForkchoiceState { + head_block_hash: block_hash, + safe_block_hash: parent_hash, + finalized_block_hash: parent_hash, + }; + + debug!(method = "engine_forkchoiceUpdatedV3", ?fcu_state, "Sending forkchoiceUpdated"); + + let fcu_result = provider.fork_choice_updated_v3(fcu_state, None).await?; + + info!(?fcu_result, "forkchoiceUpdatedV3 response"); + + Ok(()) + } +} diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index d253506b22b..76d562f7f42 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -3,15 +3,16 @@ //! before sending additional calls. 
use alloy_eips::eip7685::Requests; +use alloy_primitives::B256; use alloy_provider::{ext::EngineApi, network::AnyRpcBlock, Network, Provider}; use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadInputV2, ForkchoiceState, ForkchoiceUpdated, - PayloadAttributes, PayloadStatus, + ExecutionPayload, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ForkchoiceState, + ForkchoiceUpdated, PayloadAttributes, PayloadStatus, }; use alloy_transport::TransportResult; use op_alloy_rpc_types_engine::OpExecutionPayloadV4; use reth_node_api::EngineApiMessageVersion; -use tracing::error; +use tracing::{debug, error}; /// An extension trait for providers that implement the engine API, to wait for a VALID response. #[async_trait::async_trait] @@ -52,6 +53,13 @@ where fork_choice_state: ForkchoiceState, payload_attributes: Option, ) -> TransportResult { + debug!( + method = "engine_forkchoiceUpdatedV1", + ?fork_choice_state, + ?payload_attributes, + "Sending forkchoiceUpdated" + ); + let mut status = self.fork_choice_updated_v1(fork_choice_state, payload_attributes.clone()).await?; @@ -82,6 +90,13 @@ where fork_choice_state: ForkchoiceState, payload_attributes: Option, ) -> TransportResult { + debug!( + method = "engine_forkchoiceUpdatedV2", + ?fork_choice_state, + ?payload_attributes, + "Sending forkchoiceUpdated" + ); + let mut status = self.fork_choice_updated_v2(fork_choice_state, payload_attributes.clone()).await?; @@ -112,6 +127,13 @@ where fork_choice_state: ForkchoiceState, payload_attributes: Option, ) -> TransportResult { + debug!( + method = "engine_forkchoiceUpdatedV3", + ?fork_choice_state, + ?payload_attributes, + "Sending forkchoiceUpdated" + ); + let mut status = self.fork_choice_updated_v3(fork_choice_state, payload_attributes.clone()).await?; @@ -148,33 +170,51 @@ pub(crate) fn block_to_new_payload( // Convert to execution payload let (payload, sidecar) = ExecutionPayload::from_block_slow(&block); + payload_to_new_payload(payload, sidecar, is_optimism, 
block.withdrawals_root, None) +} +pub(crate) fn payload_to_new_payload( + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + is_optimism: bool, + withdrawals_root: Option, + target_version: Option, +) -> eyre::Result<(EngineApiMessageVersion, serde_json::Value)> { let (version, params) = match payload { ExecutionPayload::V3(payload) => { let cancun = sidecar.cancun().unwrap(); if let Some(prague) = sidecar.prague() { + // Use target version if provided (for Osaka), otherwise default to V4 + let version = target_version.unwrap_or(EngineApiMessageVersion::V4); + if is_optimism { + let withdrawals_root = withdrawals_root.ok_or_else(|| { + eyre::eyre!("Missing withdrawals root for Optimism payload") + })?; ( - EngineApiMessageVersion::V4, + version, serde_json::to_value(( - OpExecutionPayloadV4 { - payload_inner: payload, - withdrawals_root: block.withdrawals_root.unwrap(), - }, + OpExecutionPayloadV4 { payload_inner: payload, withdrawals_root }, cancun.versioned_hashes.clone(), cancun.parent_beacon_block_root, Requests::default(), ))?, ) } else { + // Extract actual Requests from RequestsOrHash + let requests = prague + .requests + .requests() + .cloned() + .ok_or_else(|| eyre::eyre!("Prague sidecar has hash, not requests"))?; ( - EngineApiMessageVersion::V4, + version, serde_json::to_value(( payload, cancun.versioned_hashes.clone(), cancun.parent_beacon_block_root, - prague.requests.requests_hash(), + requests, ))?, ) } @@ -217,6 +257,8 @@ pub(crate) async fn call_new_payload>( ) -> TransportResult<()> { let method = version.method_name(); + debug!(method, "Sending newPayload"); + let mut status: PayloadStatus = provider.client().request(method, ¶ms).await?; while !status.is_valid() { @@ -237,12 +279,15 @@ pub(crate) async fn call_new_payload>( /// Calls the correct `engine_forkchoiceUpdated` method depending on the given /// `EngineApiMessageVersion`, using the provided forkchoice state and payload attributes for the /// actual engine api message call. 
+/// +/// Note: For Prague (V4), we still use forkchoiceUpdatedV3 as there is no V4. pub(crate) async fn call_forkchoice_updated>( provider: P, message_version: EngineApiMessageVersion, forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> TransportResult { + // FCU V3 is used for both Cancun and Prague (there is no FCU V4) match message_version { EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 | EngineApiMessageVersion::V5 => { provider.fork_choice_updated_v3_wait(forkchoice_state, payload_attributes).await diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 65144a9571d..5927c262c9c 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -460,6 +460,18 @@ impl ChainSpec { pub fn builder() -> ChainSpecBuilder { ChainSpecBuilder::default() } + + /// Map a chain ID to a known chain spec, if available. + pub fn from_chain_id(chain_id: u64) -> Option> { + match NamedChain::try_from(chain_id).ok()? { + NamedChain::Mainnet => Some(MAINNET.clone()), + NamedChain::Sepolia => Some(SEPOLIA.clone()), + NamedChain::Holesky => Some(HOLESKY.clone()), + NamedChain::Hoodi => Some(HOODI.clone()), + NamedChain::Dev => Some(DEV.clone()), + _ => None, + } + } } impl ChainSpec { diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index 4326d9b193f..afdb88e6bb7 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -31,8 +31,8 @@ reth-payload-validator.workspace = true # ethereum alloy-rlp.workspace = true -revm.workspace = true alloy-rpc-types-engine.workspace = true +revm.workspace = true # alloy alloy-eips.workspace = true diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 786ea15ab09..9287c174657 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -36,7 +36,6 @@ alloy-serde.workspace = true alloy-rpc-types-beacon.workspace = true alloy-rpc-types-engine.workspace = true 
alloy-genesis.workspace = true -serde = { workspace = true, features = ["derive"] } # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-api/src/testing.rs b/crates/rpc/rpc-api/src/testing.rs index f49380058e2..e7dbeb853d4 100644 --- a/crates/rpc/rpc-api/src/testing.rs +++ b/crates/rpc/rpc-api/src/testing.rs @@ -5,32 +5,24 @@ //! disabled by default and never be exposed on public-facing RPC without an //! explicit operator flag. -use alloy_primitives::{Bytes, B256}; -use alloy_rpc_types_engine::{ - ExecutionPayloadEnvelopeV5, PayloadAttributes as EthPayloadAttributes, -}; +use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV5; use jsonrpsee::proc_macros::rpc; -use serde::{Deserialize, Serialize}; -/// Capability string for `testing_buildBlockV1`. -pub const TESTING_BUILD_BLOCK_V1: &str = "testing_buildBlockV1"; - -/// Request payload for `testing_buildBlockV1`. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TestingBuildBlockRequestV1 { - /// Parent block hash of the block to build. - pub parent_block_hash: B256, - /// Payload attributes (Cancun version). - pub payload_attributes: EthPayloadAttributes, - /// Raw signed transactions to force-include in order. - pub transactions: Vec, - /// Optional extra data for the block header. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub extra_data: Option, -} +pub use alloy_rpc_types_engine::{TestingBuildBlockRequestV1, TESTING_BUILD_BLOCK_V1}; /// Testing RPC interface for building a block in a single call. +/// +/// # Enabling +/// +/// This namespace is disabled by default for security reasons. To enable it, +/// add `testing` to the `--http.api` flag: +/// +/// ```sh +/// reth node --http --http.api eth,testing +/// ``` +/// +/// **Warning:** Never expose this on public-facing RPC endpoints without proper +/// authentication. 
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "testing"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "testing"))] pub trait TestingApi { diff --git a/crates/rpc/rpc/src/testing.rs b/crates/rpc/rpc/src/testing.rs index c1c8a65d1ce..dfaea2bb545 100644 --- a/crates/rpc/rpc/src/testing.rs +++ b/crates/rpc/rpc/src/testing.rs @@ -1,6 +1,18 @@ //! Implementation of the `testing` namespace. //! //! This exposes `testing_buildBlockV1`, intended for non-production/debug use. +//! +//! # Enabling the testing namespace +//! +//! The `testing_` namespace is disabled by default for security reasons. +//! To enable it, add `testing` to the `--http.api` flag when starting the node: +//! +//! ```sh +//! reth node --http --http.api eth,testing +//! ``` +//! +//! **Warning:** This namespace allows building arbitrary blocks. Never expose it +//! on public-facing RPC endpoints without proper authentication. use alloy_consensus::{Header, Transaction}; use alloy_evm::Evm; @@ -94,8 +106,8 @@ where let mut invalid_senders: HashSet = HashSet::default(); - for tx in request.transactions { - let tx: Recovered> = recover_raw_transaction(&tx)?; + for (idx, tx) in request.transactions.iter().enumerate() { + let tx: Recovered> = recover_raw_transaction(tx)?; let sender = tx.signer(); if skip_invalid_transactions && invalid_senders.contains(&sender) { @@ -109,6 +121,7 @@ where if skip_invalid_transactions { debug!( target: "rpc::testing", + tx_idx = idx, ?sender, error = ?err, "Skipping invalid transaction" @@ -116,6 +129,13 @@ where invalid_senders.insert(sender); continue; } + debug!( + target: "rpc::testing", + tx_idx = idx, + ?sender, + error = ?err, + "Transaction execution failed" + ); return Err(Eth::Error::from_eth_err(err)); } }; From e9b079ad624e049c04ed67b238f61ffd45753e79 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 15 Jan 2026 18:33:19 +0000 Subject: [PATCH 033/267] feat: add `rocksdb` to 
`save_blocks` (#21003) Co-authored-by: Sergei Shulepov Co-authored-by: Sergei Shulepov Co-authored-by: yongkangc --- crates/storage/db-api/src/models/metadata.rs | 7 + .../src/providers/database/provider.rs | 58 +++-- .../provider/src/providers/rocksdb/mod.rs | 1 + .../src/providers/rocksdb/provider.rs | 158 ++++++++++++- .../provider/src/providers/rocksdb_stub.rs | 222 +++--------------- 5 files changed, 237 insertions(+), 209 deletions(-) diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs index 6fa9ea6443e..6586c8b7f46 100644 --- a/crates/storage/db-api/src/models/metadata.rs +++ b/crates/storage/db-api/src/models/metadata.rs @@ -101,4 +101,11 @@ impl StorageSettings { self.account_changesets_in_static_files = value; self } + + /// Returns `true` if any tables are configured to be stored in `RocksDB`. + pub const fn any_in_rocksdb(&self) -> bool { + self.transaction_hash_numbers_in_rocksdb || + self.account_history_in_rocksdb || + self.storages_history_in_rocksdb + } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 692bc7737cd..dec302f8f04 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -4,7 +4,7 @@ use crate::{ }, providers::{ database::{chain::ChainStorage, metrics}, - rocksdb::RocksDBProvider, + rocksdb::{PendingRocksDBBatches, RocksDBProvider, RocksDBWriteCtx}, static_file::{StaticFileWriteCtx, StaticFileWriter}, NodeTypesForProvider, StaticFileProvider, }, @@ -188,8 +188,8 @@ pub struct DatabaseProvider { /// `RocksDB` provider rocksdb_provider: RocksDBProvider, /// Pending `RocksDB` batches to be committed at provider commit time. 
- #[cfg(all(unix, feature = "rocksdb"))] - pending_rocksdb_batches: parking_lot::Mutex>>, + #[cfg_attr(not(all(unix, feature = "rocksdb")), allow(dead_code))] + pending_rocksdb_batches: PendingRocksDBBatches, /// Minimum distance from tip required for pruning minimum_pruning_distance: u64, /// Database provider metrics @@ -205,10 +205,10 @@ impl Debug for DatabaseProvider { .field("prune_modes", &self.prune_modes) .field("storage", &self.storage) .field("storage_settings", &self.storage_settings) - .field("rocksdb_provider", &self.rocksdb_provider); - #[cfg(all(unix, feature = "rocksdb"))] - s.field("pending_rocksdb_batches", &""); - s.field("minimum_pruning_distance", &self.minimum_pruning_distance).finish() + .field("rocksdb_provider", &self.rocksdb_provider) + .field("pending_rocksdb_batches", &"") + .field("minimum_pruning_distance", &self.minimum_pruning_distance) + .finish() } } @@ -336,8 +336,7 @@ impl DatabaseProvider { storage, storage_settings, rocksdb_provider, - #[cfg(all(unix, feature = "rocksdb"))] - pending_rocksdb_batches: parking_lot::Mutex::new(Vec::new()), + pending_rocksdb_batches: Default::default(), minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, metrics: metrics::DatabaseProviderMetrics::default(), } @@ -403,6 +402,17 @@ impl DatabaseProvider RocksDBWriteCtx { + RocksDBWriteCtx { + first_block_number: first_block, + prune_tx_lookup: self.prune_modes.transaction_lookup, + storage_settings: self.cached_storage_settings(), + pending_batches: self.pending_rocksdb_batches.clone(), + } + } + /// Writes executed blocks and state to storage. /// /// This method parallelizes static file (SF) writes with MDBX writes. 
@@ -452,6 +462,10 @@ impl DatabaseProvider DatabaseProvider(start.elapsed()) }); + // RocksDB writes + #[cfg(all(unix, feature = "rocksdb"))] + let rocksdb_handle = rocksdb_ctx.storage_settings.any_in_rocksdb().then(|| { + s.spawn(|| { + let start = Instant::now(); + rocksdb_provider.write_blocks_data(&blocks, &tx_nums, rocksdb_ctx)?; + Ok::<_, ProviderError>(start.elapsed()) + }) + }); + // MDBX writes let mdbx_start = Instant::now(); @@ -557,6 +581,12 @@ impl DatabaseProvider DatabaseProvider { storage, storage_settings, rocksdb_provider, - #[cfg(all(unix, feature = "rocksdb"))] - pending_rocksdb_batches: parking_lot::Mutex::new(Vec::new()), + pending_rocksdb_batches: Default::default(), minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, metrics: metrics::DatabaseProviderMetrics::default(), } @@ -3167,14 +3196,13 @@ impl HistoryWriter for DatabaseProvi #[instrument(level = "debug", target = "providers::db", skip_all)] fn update_history_indices(&self, range: RangeInclusive) -> ProviderResult<()> { - // account history stage - { + let storage_settings = self.cached_storage_settings(); + if !storage_settings.account_history_in_rocksdb { let indices = self.changed_accounts_and_blocks_with_range(range.clone())?; self.insert_account_history_index(indices)?; } - // storage history stage - { + if !storage_settings.storages_history_in_rocksdb { let indices = self.changed_storages_and_blocks_with_range(range)?; self.insert_storage_history_index(indices)?; } diff --git a/crates/storage/provider/src/providers/rocksdb/mod.rs b/crates/storage/provider/src/providers/rocksdb/mod.rs index 5c6cf11f320..f9b4ff83041 100644 --- a/crates/storage/provider/src/providers/rocksdb/mod.rs +++ b/crates/storage/provider/src/providers/rocksdb/mod.rs @@ -4,4 +4,5 @@ mod invariants; mod metrics; mod provider; +pub(crate) use provider::{PendingRocksDBBatches, RocksDBWriteCtx}; pub use provider::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksTx}; diff --git 
a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 670ab0ccba0..88f09a9d350 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -1,11 +1,16 @@ use super::metrics::{RocksDBMetrics, RocksDBOperation}; use crate::providers::{needs_prev_shard_check, HistoryInfo}; -use alloy_primitives::{Address, BlockNumber, B256}; +use alloy_consensus::transaction::TxHashRef; +use alloy_primitives::{Address, BlockNumber, TxNumber, B256}; +use parking_lot::Mutex; +use reth_chain_state::ExecutedBlock; use reth_db_api::{ - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + models::{storage_sharded_key::StorageShardedKey, ShardedKey, StorageSettings}, table::{Compress, Decode, Decompress, Encode, Table}, tables, BlockNumberList, DatabaseError, }; +use reth_primitives_traits::BlockBody as _; +use reth_prune_types::PruneMode; use reth_storage_errors::{ db::{DatabaseErrorInfo, DatabaseWriteError, DatabaseWriteOperation, LogLevel}, provider::{ProviderError, ProviderResult}, @@ -16,11 +21,41 @@ use rocksdb::{ OptimisticTransactionOptions, Options, Transaction, WriteBatchWithTransaction, WriteOptions, }; use std::{ + collections::BTreeMap, fmt, path::{Path, PathBuf}, sync::Arc, + thread, time::Instant, }; +use tracing::instrument; + +/// Pending `RocksDB` batches type alias. +pub(crate) type PendingRocksDBBatches = Arc>>>; + +/// Context for `RocksDB` block writes. +#[derive(Clone)] +pub(crate) struct RocksDBWriteCtx { + /// The first block number being written. + pub first_block_number: BlockNumber, + /// The prune mode for transaction lookup, if any. + pub prune_tx_lookup: Option, + /// Storage settings determining what goes to `RocksDB`. + pub storage_settings: StorageSettings, + /// Pending batches to push to after writing. 
+ pub pending_batches: PendingRocksDBBatches, +} + +impl fmt::Debug for RocksDBWriteCtx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksDBWriteCtx") + .field("first_block_number", &self.first_block_number) + .field("prune_tx_lookup", &self.prune_tx_lookup) + .field("storage_settings", &self.storage_settings) + .field("pending_batches", &"") + .finish() + } +} /// Default cache size for `RocksDB` block cache (128 MB). const DEFAULT_CACHE_SIZE: usize = 128 << 20; @@ -474,6 +509,125 @@ impl RocksDBProvider { })) }) } + + /// Writes all `RocksDB` data for multiple blocks in parallel. + /// + /// This handles transaction hash numbers, account history, and storage history based on + /// the provided storage settings. Each operation runs in parallel with its own batch, + /// pushing to `ctx.pending_batches` for later commit. + #[instrument(level = "debug", target = "providers::db", skip_all)] + pub(crate) fn write_blocks_data( + &self, + blocks: &[ExecutedBlock], + tx_nums: &[TxNumber], + ctx: RocksDBWriteCtx, + ) -> ProviderResult<()> { + if !ctx.storage_settings.any_in_rocksdb() { + return Ok(()); + } + + thread::scope(|s| { + let handles: Vec<_> = [ + (ctx.storage_settings.transaction_hash_numbers_in_rocksdb && + ctx.prune_tx_lookup.is_none_or(|m| !m.is_full())) + .then(|| s.spawn(|| self.write_tx_hash_numbers(blocks, tx_nums, &ctx))), + ctx.storage_settings + .account_history_in_rocksdb + .then(|| s.spawn(|| self.write_account_history(blocks, &ctx))), + ctx.storage_settings + .storages_history_in_rocksdb + .then(|| s.spawn(|| self.write_storage_history(blocks, &ctx))), + ] + .into_iter() + .enumerate() + .filter_map(|(i, h)| h.map(|h| (i, h))) + .collect(); + + for (i, handle) in handles { + handle.join().map_err(|_| { + ProviderError::Database(DatabaseError::Other(format!( + "rocksdb write thread {i} panicked" + ))) + })??; + } + + Ok(()) + }) + } + + /// Writes transaction hash to number mappings for the given blocks. 
+ #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_tx_hash_numbers( + &self, + blocks: &[ExecutedBlock], + tx_nums: &[TxNumber], + ctx: &RocksDBWriteCtx, + ) -> ProviderResult<()> { + let mut batch = self.batch(); + for (block, &first_tx_num) in blocks.iter().zip(tx_nums) { + let body = block.recovered_block().body(); + let mut tx_num = first_tx_num; + for transaction in body.transactions_iter() { + batch.put::(*transaction.tx_hash(), &tx_num)?; + tx_num += 1; + } + } + ctx.pending_batches.lock().push(batch.into_inner()); + Ok(()) + } + + /// Writes account history indices for the given blocks. + #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_account_history( + &self, + blocks: &[ExecutedBlock], + ctx: &RocksDBWriteCtx, + ) -> ProviderResult<()> { + let mut batch = self.batch(); + let mut account_history: BTreeMap> = BTreeMap::new(); + for (block_idx, block) in blocks.iter().enumerate() { + let block_number = ctx.first_block_number + block_idx as u64; + let bundle = &block.execution_outcome().bundle; + for &address in bundle.state().keys() { + account_history.entry(address).or_default().push(block_number); + } + } + for (address, blocks) in account_history { + let key = ShardedKey::new(address, u64::MAX); + let value = BlockNumberList::new_pre_sorted(blocks); + batch.put::(key, &value)?; + } + ctx.pending_batches.lock().push(batch.into_inner()); + Ok(()) + } + + /// Writes storage history indices for the given blocks. 
+ #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_storage_history( + &self, + blocks: &[ExecutedBlock], + ctx: &RocksDBWriteCtx, + ) -> ProviderResult<()> { + let mut batch = self.batch(); + let mut storage_history: BTreeMap<(Address, B256), Vec> = BTreeMap::new(); + for (block_idx, block) in blocks.iter().enumerate() { + let block_number = ctx.first_block_number + block_idx as u64; + let bundle = &block.execution_outcome().bundle; + for (&address, account) in bundle.state() { + for &slot in account.storage.keys() { + let key = B256::new(slot.to_be_bytes()); + storage_history.entry((address, key)).or_default().push(block_number); + } + } + } + for ((address, slot), blocks) in storage_history { + let key = StorageShardedKey::new(address, slot, u64::MAX); + let value = BlockNumberList::new_pre_sorted(blocks); + batch.put::(key, &value)?; + } + ctx.pending_batches.lock().push(batch.into_inner()); + Ok(()) + } } /// Handle for building a batch of operations atomically. diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs index 5fac73eca77..0160ef87021 100644 --- a/crates/storage/provider/src/providers/rocksdb_stub.rs +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -2,28 +2,42 @@ //! //! This module provides placeholder types that allow the code to compile when `RocksDB` is not //! available (either on non-Unix platforms or when the `rocksdb` feature is not enabled). -//! Operations will produce errors if actually attempted. +//! All method calls are cfg-guarded in the calling code, so only type definitions are needed here. 
-use reth_db_api::table::{Encode, Table}; -use reth_storage_errors::{ - db::LogLevel, - provider::{ProviderError::UnsupportedProvider, ProviderResult}, -}; -use std::path::Path; +use alloy_primitives::BlockNumber; +use parking_lot::Mutex; +use reth_db_api::models::StorageSettings; +use reth_prune_types::PruneMode; +use reth_storage_errors::{db::LogLevel, provider::ProviderResult}; +use std::{path::Path, sync::Arc}; + +/// Pending `RocksDB` batches type alias (stub - uses unit type). +pub(crate) type PendingRocksDBBatches = Arc>>; + +/// Context for `RocksDB` block writes (stub). +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub(crate) struct RocksDBWriteCtx { + /// The first block number being written. + pub first_block_number: BlockNumber, + /// The prune mode for transaction lookup, if any. + pub prune_tx_lookup: Option, + /// Storage settings determining what goes to `RocksDB`. + pub storage_settings: StorageSettings, + /// Pending batches (stub - unused). + pub pending_batches: PendingRocksDBBatches, +} /// A stub `RocksDB` provider. /// /// This type exists to allow code to compile when `RocksDB` is not available (either on non-Unix -/// platforms or when the `rocksdb` feature is not enabled). When using this stub, the -/// `transaction_hash_numbers_in_rocksdb` flag should be set to `false` to ensure all operations -/// route to MDBX instead. +/// platforms or when the `rocksdb` feature is not enabled). All method calls on `RocksDBProvider` +/// are cfg-guarded in the calling code, so this stub only provides type definitions. #[derive(Debug, Clone)] pub struct RocksDBProvider; impl RocksDBProvider { /// Creates a new stub `RocksDB` provider. - /// - /// On non-Unix platforms, this returns an error indicating `RocksDB` is not supported. pub fn new(_path: impl AsRef) -> ProviderResult { Ok(Self) } @@ -33,130 +47,22 @@ impl RocksDBProvider { RocksDBBuilder::new(path) } - /// Get a value from `RocksDB` (stub implementation). 
- pub fn get(&self, _key: T::Key) -> ProviderResult> { - Err(UnsupportedProvider) - } - - /// Get a value from `RocksDB` using pre-encoded key (stub implementation). - pub const fn get_encoded( - &self, - _key: &::Encoded, - ) -> ProviderResult> { - Err(UnsupportedProvider) - } - - /// Put a value into `RocksDB` (stub implementation). - pub fn put(&self, _key: T::Key, _value: &T::Value) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Put a value into `RocksDB` using pre-encoded key (stub implementation). - pub const fn put_encoded( - &self, - _key: &::Encoded, - _value: &T::Value, - ) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Delete a value from `RocksDB` (stub implementation). - pub fn delete(&self, _key: T::Key) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Write a batch of operations (stub implementation). - pub fn write_batch(&self, _f: F) -> ProviderResult<()> - where - F: FnOnce(&mut RocksDBBatch) -> ProviderResult<()>, - { - Err(UnsupportedProvider) - } - - /// Creates a new transaction (stub implementation). - pub const fn tx(&self) -> RocksTx { - RocksTx - } - - /// Creates a new batch for atomic writes (stub implementation). - pub const fn batch(&self) -> RocksDBBatch { - RocksDBBatch - } - - /// Gets the first key-value pair from a table (stub implementation). - pub const fn first(&self) -> ProviderResult> { - Ok(None) - } - - /// Gets the last key-value pair from a table (stub implementation). - pub const fn last(&self) -> ProviderResult> { - Ok(None) - } - - /// Creates an iterator for the specified table (stub implementation). - /// - /// Returns an empty iterator. This is consistent with `first()` and `last()` returning - /// `Ok(None)` - the stub behaves as if the database is empty rather than unavailable. - pub const fn iter(&self) -> ProviderResult> { - Ok(RocksDBIter { _marker: std::marker::PhantomData }) - } - /// Check consistency of `RocksDB` tables (stub implementation). 
/// /// Returns `None` since there is no `RocksDB` data to check when the feature is disabled. pub const fn check_consistency( &self, _provider: &Provider, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } } -/// A stub batch writer for `RocksDB` on non-Unix platforms. +/// A stub batch writer for `RocksDB`. #[derive(Debug)] pub struct RocksDBBatch; -impl RocksDBBatch { - /// Puts a value into the batch (stub implementation). - pub fn put(&self, _key: T::Key, _value: &T::Value) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Puts a value into the batch using pre-encoded key (stub implementation). - pub const fn put_encoded( - &self, - _key: &::Encoded, - _value: &T::Value, - ) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Deletes a value from the batch (stub implementation). - pub fn delete(&self, _key: T::Key) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Commits the batch (stub implementation). - pub const fn commit(self) -> ProviderResult<()> { - Err(UnsupportedProvider) - } -} - -/// A stub iterator for `RocksDB` (non-transactional). -#[derive(Debug)] -pub struct RocksDBIter<'a, T> { - _marker: std::marker::PhantomData<(&'a (), T)>, -} - -impl Iterator for RocksDBIter<'_, T> { - type Item = ProviderResult<(T::Key, T::Value)>; - - fn next(&mut self) -> Option { - None - } -} - -/// A stub builder for `RocksDB` on non-Unix platforms. +/// A stub builder for `RocksDB`. #[derive(Debug)] pub struct RocksDBBuilder; @@ -167,7 +73,7 @@ impl RocksDBBuilder { } /// Adds a column family for a specific table type (stub implementation). - pub const fn with_table(self) -> Self { + pub const fn with_table(self) -> Self { self } @@ -205,71 +111,3 @@ impl RocksDBBuilder { /// A stub transaction for `RocksDB`. #[derive(Debug)] pub struct RocksTx; - -impl RocksTx { - /// Gets a value from the specified table (stub implementation). 
- pub fn get(&self, _key: T::Key) -> ProviderResult> { - Err(UnsupportedProvider) - } - - /// Gets a value using pre-encoded key (stub implementation). - pub const fn get_encoded( - &self, - _key: &::Encoded, - ) -> ProviderResult> { - Err(UnsupportedProvider) - } - - /// Puts a value into the specified table (stub implementation). - pub fn put(&self, _key: T::Key, _value: &T::Value) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Puts a value using pre-encoded key (stub implementation). - pub const fn put_encoded( - &self, - _key: &::Encoded, - _value: &T::Value, - ) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Deletes a value from the specified table (stub implementation). - pub fn delete(&self, _key: T::Key) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Creates an iterator for the specified table (stub implementation). - pub const fn iter(&self) -> ProviderResult> { - Err(UnsupportedProvider) - } - - /// Creates an iterator starting from the given key (stub implementation). - pub fn iter_from(&self, _key: T::Key) -> ProviderResult> { - Err(UnsupportedProvider) - } - - /// Commits the transaction (stub implementation). - pub const fn commit(self) -> ProviderResult<()> { - Err(UnsupportedProvider) - } - - /// Rolls back the transaction (stub implementation). - pub const fn rollback(self) -> ProviderResult<()> { - Err(UnsupportedProvider) - } -} - -/// A stub iterator for `RocksDB` transactions. 
-#[derive(Debug)] -pub struct RocksTxIter<'a, T> { - _marker: std::marker::PhantomData<(&'a (), T)>, -} - -impl Iterator for RocksTxIter<'_, T> { - type Item = ProviderResult<(T::Key, T::Value)>; - - fn next(&mut self) -> Option { - None - } -} From 079f59c2bef9dcac59bdc0255d976dd0d0e7a8a9 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 15 Jan 2026 19:10:20 +0000 Subject: [PATCH 034/267] perf: reserve in extend_sorted_vec (#21109) --- crates/trie/common/src/utils.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs index 7c1d454a6fa..9ee876cada6 100644 --- a/crates/trie/common/src/utils.rs +++ b/crates/trie/common/src/utils.rs @@ -40,6 +40,7 @@ where if other.is_empty() { return; } + target.reserve(other.len()); let mut other_iter = other.iter().peekable(); let initial_len = target.len(); From 26cd132631bbd1e49f5a04e94ca48e91eae52d1f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 15 Jan 2026 19:19:16 +0000 Subject: [PATCH 035/267] fix(reth-bench): use requests hash (#21111) --- bin/reth-bench/src/valid_payload.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index 76d562f7f42..3680211fdc0 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -203,11 +203,7 @@ pub(crate) fn payload_to_new_payload( ) } else { // Extract actual Requests from RequestsOrHash - let requests = prague - .requests - .requests() - .cloned() - .ok_or_else(|| eyre::eyre!("Prague sidecar has hash, not requests"))?; + let requests = prague.requests.requests_hash(); ( version, serde_json::to_value(( From ec3323bba0135b08054bcf0c485d671e67c4bbf5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 16 Jan 2026 00:27:11 +0100 Subject: [PATCH 036/267] refactor(chain-state): extract blocks_to_chain 
helper (#21110) --- crates/chain-state/src/in_memory.rs | 75 ++++++++++++++++------------- 1 file changed, 42 insertions(+), 33 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 9a79bcf437b..7ffd939c83c 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -942,37 +942,35 @@ impl> NewCanonicalChain { pub fn to_chain_notification(&self) -> CanonStateNotification { match self { Self::Commit { new } => { - let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { - chain.append_block( - exec.recovered_block().clone(), - exec.execution_outcome().clone(), - exec.trie_updates(), - exec.hashed_state(), - ); - chain - })); - CanonStateNotification::Commit { new } + CanonStateNotification::Commit { new: Arc::new(Self::blocks_to_chain(new)) } } - Self::Reorg { new, old } => { - let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { - chain.append_block( - exec.recovered_block().clone(), - exec.execution_outcome().clone(), - exec.trie_updates(), - exec.hashed_state(), - ); - chain - })); - let old = Arc::new(old.iter().fold(Chain::default(), |mut chain, exec| { + Self::Reorg { new, old } => CanonStateNotification::Reorg { + new: Arc::new(Self::blocks_to_chain(new)), + old: Arc::new(Self::blocks_to_chain(old)), + }, + } + } + + /// Converts a slice of executed blocks into a [`Chain`]. + fn blocks_to_chain(blocks: &[ExecutedBlock]) -> Chain { + match blocks { + [] => Chain::default(), + [first, rest @ ..] 
=> { + let mut chain = Chain::from_block( + first.recovered_block().clone(), + first.execution_outcome().clone(), + first.trie_updates(), + first.hashed_state(), + ); + for exec in rest { chain.append_block( exec.recovered_block().clone(), exec.execution_outcome().clone(), exec.trie_updates(), exec.hashed_state(), ); - chain - })); - CanonStateNotification::Reorg { new, old } + } + chain } } } @@ -1543,12 +1541,6 @@ mod tests { let block2a = test_block_builder.get_executed_block_with_number(2, block1.recovered_block.hash()); - let sample_execution_outcome = ExecutionOutcome { - receipts: vec![vec![], vec![]], - requests: vec![Requests::default(), Requests::default()], - ..Default::default() - }; - // Test commit notification let chain_commit = NewCanonicalChain::Commit { new: vec![block0.clone(), block1.clone()] }; @@ -1562,12 +1554,20 @@ mod tests { expected_hashed_state.insert(0, block0.hashed_state()); expected_hashed_state.insert(1, block1.hashed_state()); + // Build expected execution outcome (first_block matches first block number) + let commit_execution_outcome = ExecutionOutcome { + receipts: vec![vec![], vec![]], + requests: vec![Requests::default(), Requests::default()], + first_block: 0, + ..Default::default() + }; + assert_eq!( chain_commit.to_chain_notification(), CanonStateNotification::Commit { new: Arc::new(Chain::new( vec![block0.recovered_block().clone(), block1.recovered_block().clone()], - sample_execution_outcome.clone(), + commit_execution_outcome, expected_trie_updates, expected_hashed_state )) @@ -1600,18 +1600,27 @@ mod tests { new_hashed_state.insert(1, block1a.hashed_state()); new_hashed_state.insert(2, block2a.hashed_state()); + // Build expected execution outcome for reorg chains (first_block matches first block + // number) + let reorg_execution_outcome = ExecutionOutcome { + receipts: vec![vec![], vec![]], + requests: vec![Requests::default(), Requests::default()], + first_block: 1, + ..Default::default() + }; + assert_eq!( 
chain_reorg.to_chain_notification(), CanonStateNotification::Reorg { old: Arc::new(Chain::new( vec![block1.recovered_block().clone(), block2.recovered_block().clone()], - sample_execution_outcome.clone(), + reorg_execution_outcome.clone(), old_trie_updates, old_hashed_state )), new: Arc::new(Chain::new( vec![block1a.recovered_block().clone(), block2a.recovered_block().clone()], - sample_execution_outcome, + reorg_execution_outcome, new_trie_updates, new_hashed_state )) From e25411c32b1f9c3e987e705af8d094218b31273f Mon Sep 17 00:00:00 2001 From: YK Date: Fri, 16 Jan 2026 08:17:22 +0800 Subject: [PATCH 037/267] =?UTF-8?q?perf(trie):=20fix=20extend=5Fsorted=5Fv?= =?UTF-8?q?ec=20O(n=20log=20n)=20=E2=86=92=20O(n+m)=20merge=20(#21098)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crates/trie/common/src/utils.rs | 113 ++++++++++++++++++++++++-------- 1 file changed, 86 insertions(+), 27 deletions(-) diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs index 9ee876cada6..6d6f134a3ac 100644 --- a/crates/trie/common/src/utils.rs +++ b/crates/trie/common/src/utils.rs @@ -26,46 +26,56 @@ where .collect() } -/// Extend a sorted vector with another sorted vector. -/// Values from `other` take precedence for duplicate keys. -/// +/// Extend a sorted vector with another sorted vector using 2 pointer merge. /// Values from `other` take precedence for duplicate keys. 
pub(crate) fn extend_sorted_vec(target: &mut Vec<(K, V)>, other: &[(K, V)]) where K: Clone + Ord, V: Clone, { - let cmp = |a: &(K, V), b: &(K, V)| a.0.cmp(&b.0); - if other.is_empty() { return; } - target.reserve(other.len()); - - let mut other_iter = other.iter().peekable(); - let initial_len = target.len(); - for i in 0..initial_len { - while let Some(other_item) = other_iter.peek() { - let target_item = &mut target[i]; - match cmp(other_item, target_item) { - Ordering::Less => { - target.push(other_iter.next().unwrap().clone()); - } - Ordering::Equal => { - target_item.1 = other_iter.next().unwrap().1.clone(); - break; - } - Ordering::Greater => { - break; - } + + if target.is_empty() { + target.extend_from_slice(other); + return; + } + + // Fast path: non-overlapping ranges - just append + if target.last().map(|(k, _)| k) < other.first().map(|(k, _)| k) { + target.extend_from_slice(other); + return; + } + + // Move ownership of target to avoid cloning owned elements + let left = core::mem::take(target); + let mut out = Vec::with_capacity(left.len() + other.len()); + + let mut a = left.into_iter().peekable(); + let mut b = other.iter().peekable(); + + while let (Some(aa), Some(bb)) = (a.peek(), b.peek()) { + match aa.0.cmp(&bb.0) { + Ordering::Less => { + out.push(a.next().unwrap()); + } + Ordering::Greater => { + out.push(b.next().unwrap().clone()); + } + Ordering::Equal => { + // `other` takes precedence for duplicate keys - reuse key from `a` + let (k, _) = a.next().unwrap(); + out.push((k, b.next().unwrap().1.clone())); } } } - target.extend(other_iter.cloned()); - if target.len() > initial_len { - target.sort_by(cmp); - } + // Drain remaining: `a` moves, `b` clones + out.extend(a); + out.extend(b.cloned()); + + *target = out; } #[cfg(test)] @@ -80,6 +90,55 @@ mod tests { assert_eq!(target, vec![(1, "a"), (2, "b"), (3, "c_new")]); } + #[test] + fn test_extend_sorted_vec_empty_target() { + let mut target: Vec<(i32, &str)> = vec![]; + let other = vec![(1, 
"a"), (2, "b")]; + extend_sorted_vec(&mut target, &other); + assert_eq!(target, vec![(1, "a"), (2, "b")]); + } + + #[test] + fn test_extend_sorted_vec_empty_other() { + let mut target = vec![(1, "a"), (2, "b")]; + let other: Vec<(i32, &str)> = vec![]; + extend_sorted_vec(&mut target, &other); + assert_eq!(target, vec![(1, "a"), (2, "b")]); + } + + #[test] + fn test_extend_sorted_vec_all_duplicates() { + let mut target = vec![(1, "old1"), (2, "old2"), (3, "old3")]; + let other = vec![(1, "new1"), (2, "new2"), (3, "new3")]; + extend_sorted_vec(&mut target, &other); + // other takes precedence + assert_eq!(target, vec![(1, "new1"), (2, "new2"), (3, "new3")]); + } + + #[test] + fn test_extend_sorted_vec_interleaved() { + let mut target = vec![(1, "a"), (3, "c"), (5, "e")]; + let other = vec![(2, "b"), (4, "d"), (6, "f")]; + extend_sorted_vec(&mut target, &other); + assert_eq!(target, vec![(1, "a"), (2, "b"), (3, "c"), (4, "d"), (5, "e"), (6, "f")]); + } + + #[test] + fn test_extend_sorted_vec_other_all_smaller() { + let mut target = vec![(5, "e"), (6, "f")]; + let other = vec![(1, "a"), (2, "b")]; + extend_sorted_vec(&mut target, &other); + assert_eq!(target, vec![(1, "a"), (2, "b"), (5, "e"), (6, "f")]); + } + + #[test] + fn test_extend_sorted_vec_other_all_larger() { + let mut target = vec![(1, "a"), (2, "b")]; + let other = vec![(5, "e"), (6, "f")]; + extend_sorted_vec(&mut target, &other); + assert_eq!(target, vec![(1, "a"), (2, "b"), (5, "e"), (6, "f")]); + } + #[test] fn test_kway_merge_sorted_basic() { let slice1 = vec![(1, "a1"), (3, "c1")]; From a74cb9cbc38a5590b2596c67e64a3c7642024aca Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 16 Jan 2026 02:06:31 +0100 Subject: [PATCH 038/267] feat(trie): in-memory trie changesets (#20997) --- Cargo.lock | 9 + crates/cli/commands/src/stage/drop.rs | 6 +- crates/config/src/config.rs | 7 - crates/engine/service/Cargo.toml | 3 + crates/engine/service/src/service.rs | 8 + crates/engine/tree/Cargo.toml | 2 + 
crates/engine/tree/benches/state_root_task.rs | 5 +- crates/engine/tree/src/persistence.rs | 1 + crates/engine/tree/src/tree/mod.rs | 49 +- .../tree/src/tree/payload_processor/mod.rs | 3 +- .../src/tree/payload_processor/multiproof.rs | 9 +- .../engine/tree/src/tree/payload_validator.rs | 188 ++-- crates/engine/tree/src/tree/tests.rs | 6 + crates/node/builder/Cargo.toml | 2 + crates/node/builder/src/launch/common.rs | 12 +- crates/node/builder/src/launch/engine.rs | 19 +- crates/node/builder/src/rpc.rs | 5 + crates/node/core/src/args/pruning.rs | 7 +- crates/node/core/src/args/stage.rs | 5 - crates/optimism/node/Cargo.toml | 2 + crates/optimism/node/src/rpc.rs | 3 +- crates/prune/prune/src/db_ext.rs | 1 + crates/prune/prune/src/segments/mod.rs | 4 +- crates/prune/prune/src/segments/set.rs | 7 +- crates/prune/prune/src/segments/user/mod.rs | 2 - crates/prune/types/src/lib.rs | 5 +- crates/prune/types/src/segment.rs | 8 +- crates/prune/types/src/target.rs | 40 +- crates/stages/stages/src/sets.rs | 10 +- crates/stages/stages/src/stages/mod.rs | 3 - crates/stages/types/src/checkpoints.rs | 25 +- crates/stages/types/src/id.rs | 5 +- crates/stages/types/src/lib.rs | 4 +- .../db-api/src/tables/codecs/fuzz/mod.rs | 2 +- .../provider/src/changesets_utils/mod.rs | 3 - .../provider/src/changesets_utils/trie.rs | 147 --- .../src/providers/blockchain_provider.rs | 17 +- .../provider/src/providers/consistent.rs | 16 +- .../src/providers/database/metrics.rs | 4 - .../provider/src/providers/database/mod.rs | 16 + .../src/providers/database/provider.rs | 499 +---------- .../provider/src/providers/state/overlay.rs | 67 +- .../storage/provider/src/test_utils/mock.rs | 20 +- crates/storage/provider/src/traits/full.rs | 14 +- crates/storage/storage-api/src/noop.rs | 20 +- crates/storage/storage-api/src/trie.rs | 14 - crates/trie/db/Cargo.toml | 14 +- crates/trie/db/src/changesets.rs | 841 ++++++++++++++++++ crates/trie/db/src/lib.rs | 2 + crates/trie/parallel/benches/root.rs | 3 +- 
crates/trie/parallel/src/proof.rs | 4 +- crates/trie/parallel/src/proof_task.rs | 7 +- crates/trie/parallel/src/root.rs | 7 +- crates/trie/trie/src/changesets.rs | 476 ++++++++++ crates/trie/trie/src/lib.rs | 3 + .../docs/pages/cli/op-reth/stage/drop.mdx | 23 +- .../vocs/docs/pages/cli/op-reth/stage/run.mdx | 23 +- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 23 +- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 23 +- 59 files changed, 1768 insertions(+), 985 deletions(-) delete mode 100644 crates/storage/provider/src/changesets_utils/trie.rs create mode 100644 crates/trie/db/src/changesets.rs create mode 100644 crates/trie/trie/src/changesets.rs diff --git a/Cargo.lock b/Cargo.lock index 8b9d736e441..c42bb019e39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8411,6 +8411,7 @@ dependencies = [ name = "reth-engine-service" version = "1.10.0" dependencies = [ + "alloy-eips", "futures", "pin-project", "reth-chainspec", @@ -8432,6 +8433,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", + "reth-trie-db", "tokio", "tokio-stream", ] @@ -8495,6 +8497,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-db", "reth-trie-parallel", "reth-trie-sparse", "reth-trie-sparse-parallel", @@ -9387,6 +9390,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "secp256k1 0.30.0", "serde_json", "tempfile", @@ -9876,6 +9880,7 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "reth-trie-common", + "reth-trie-db", "revm", "serde", "serde_json", @@ -11194,14 +11199,18 @@ dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", + "metrics", + "parking_lot", "proptest", "proptest-arbitrary-interop", "reth-chainspec", "reth-db", "reth-db-api", "reth-execution-errors", + "reth-metrics", "reth-primitives-traits", "reth-provider", + "reth-stages-types", "reth-storage-api", "reth-storage-errors", "reth-trie", diff --git a/crates/cli/commands/src/stage/drop.rs 
b/crates/cli/commands/src/stage/drop.rs index 64106ae8956..46bad48fd97 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -15,7 +15,7 @@ use reth_db_common::{ use reth_node_api::{HeaderTy, ReceiptTy, TxTy}; use reth_node_core::args::StageEnum; use reth_provider::{ - DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, TrieWriter, + DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; use reth_prune::PruneSegment; use reth_stages::StageId; @@ -167,10 +167,6 @@ impl Command { None, )?; } - StageEnum::MerkleChangeSets => { - provider_rw.clear_trie_changesets()?; - reset_stage_checkpoint(tx, StageId::MerkleChangeSets)?; - } StageEnum::AccountHistory | StageEnum::StorageHistory => { tx.clear::()?; tx.clear::()?; diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 8a88718f8c1..3f979f7d65a 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -550,7 +550,6 @@ impl PruneConfig { /// - `Option` fields: set from `other` only if `self` is `None`. /// - `block_interval`: set from `other` only if `self.block_interval == /// DEFAULT_BLOCK_INTERVAL`. - /// - `merkle_changesets`: always set from `other`. /// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty. 
pub fn merge(&mut self, other: Self) { let Self { @@ -563,7 +562,6 @@ impl PruneConfig { account_history, storage_history, bodies_history, - merkle_changesets, receipts_log_filter, }, } = other; @@ -580,8 +578,6 @@ impl PruneConfig { self.segments.account_history = self.segments.account_history.or(account_history); self.segments.storage_history = self.segments.storage_history.or(storage_history); self.segments.bodies_history = self.segments.bodies_history.or(bodies_history); - // Merkle changesets is not optional; always take the value from `other` - self.segments.merkle_changesets = merkle_changesets; if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { self.segments.receipts_log_filter = receipts_log_filter; @@ -1091,7 +1087,6 @@ receipts = { distance = 16384 } account_history: None, storage_history: Some(PruneMode::Before(5000)), bodies_history: None, - merkle_changesets: PruneMode::Before(0), receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([( Address::random(), PruneMode::Full, @@ -1108,7 +1103,6 @@ receipts = { distance = 16384 } account_history: Some(PruneMode::Distance(2000)), storage_history: Some(PruneMode::Distance(3000)), bodies_history: None, - merkle_changesets: PruneMode::Distance(10000), receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ (Address::random(), PruneMode::Distance(1000)), (Address::random(), PruneMode::Before(2000)), @@ -1127,7 +1121,6 @@ receipts = { distance = 16384 } assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000))); assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000))); assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000))); - assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000)); assert_eq!(config1.segments.receipts_log_filter, original_filter); } diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index 6c7b746c741..33468dafdbd 100644 --- 
a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -25,6 +25,7 @@ reth-tasks.workspace = true reth-node-types.workspace = true reth-chainspec.workspace = true reth-engine-primitives.workspace = true +reth-trie-db.workspace = true # async futures.workspace = true @@ -40,6 +41,8 @@ reth-evm-ethereum.workspace = true reth-exex-types.workspace = true reth-primitives-traits.workspace = true reth-node-ethereum.workspace = true +reth-trie-db.workspace = true +alloy-eips.workspace = true tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index cd4fbd6b00c..496f994fc43 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -26,6 +26,7 @@ use reth_provider::{ use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; use reth_tasks::TaskSpawner; +use reth_trie_db::ChangesetCache; use std::{ pin::Pin, sync::Arc, @@ -84,6 +85,7 @@ where tree_config: TreeConfig, sync_metrics_tx: MetricEventsSender, evm_config: C, + changeset_cache: ChangesetCache, ) -> Self where V: EngineValidator, @@ -109,6 +111,7 @@ where tree_config, engine_kind, evm_config, + changeset_cache, ); let engine_handler = EngineApiRequestHandler::new(to_tree_tx, from_tree); @@ -156,6 +159,7 @@ mod tests { }; use reth_prune::Pruner; use reth_tasks::TokioTaskExecutor; + use reth_trie_db::ChangesetCache; use std::sync::Arc; use tokio::sync::{mpsc::unbounded_channel, watch}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -188,6 +192,8 @@ mod tests { let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); let evm_config = EthEvmConfig::new(chain_spec.clone()); + let changeset_cache = ChangesetCache::new(); + let engine_validator = BasicEngineValidator::new( blockchain_db.clone(), consensus.clone(), @@ -195,6 +201,7 @@ mod tests { engine_payload_validator, 
TreeConfig::default(), Box::new(NoopInvalidBlockHook::default()), + changeset_cache.clone(), ); let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); @@ -214,6 +221,7 @@ mod tests { TreeConfig::default(), sync_metrics_tx, evm_config, + changeset_cache, ); } } diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index e877c83536c..ea679e4e404 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -34,6 +34,7 @@ reth-trie-parallel.workspace = true reth-trie-sparse = { workspace = true, features = ["std", "metrics"] } reth-trie-sparse-parallel = { workspace = true, features = ["std"] } reth-trie.workspace = true +reth-trie-db.workspace = true # alloy alloy-evm.workspace = true @@ -133,6 +134,7 @@ test-utils = [ "reth-static-file", "reth-tracing", "reth-trie/test-utils", + "reth-trie-db/test-utils", "reth-trie-sparse/test-utils", "reth-prune-types?/test-utils", "reth-trie-parallel/test-utils", diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index cfd17a8ecfc..6db51361363 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -239,7 +239,10 @@ fn bench_state_root(c: &mut Criterion) { std::convert::identity, ), StateProviderBuilder::new(provider.clone(), genesis_hash, None), - OverlayStateProviderFactory::new(provider), + OverlayStateProviderFactory::new( + provider, + reth_trie_db::ChangesetCache::new(), + ), &TreeConfig::default(), None, ); diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 314d0eba9de..d867e91ca29 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -159,6 +159,7 @@ where self.metrics.save_blocks_block_count.record(block_count as f64); self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); + Ok(last_block) } } diff --git a/crates/engine/tree/src/tree/mod.rs 
b/crates/engine/tree/src/tree/mod.rs index 8a222cf411f..c0eb40d337c 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -30,11 +30,13 @@ use reth_payload_primitives::{ }; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ - BlockReader, DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StateProviderBox, - StateProviderFactory, StateReader, TransactionVariant, TrieReader, + BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, HashedPostStateProvider, + ProviderError, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; +use reth_trie_db::ChangesetCache; use revm::state::EvmState; use state::TreeState; use std::{fmt::Debug, ops, sync::Arc, time::Instant}; @@ -271,6 +273,8 @@ where engine_kind: EngineApiKind, /// The EVM configuration. evm_config: C, + /// Changeset cache for in-memory trie changesets + changeset_cache: ChangesetCache, } impl std::fmt::Debug @@ -295,6 +299,7 @@ where .field("metrics", &self.metrics) .field("engine_kind", &self.engine_kind) .field("evm_config", &self.evm_config) + .field("changeset_cache", &self.changeset_cache) .finish() } } @@ -307,11 +312,12 @@ where + StateProviderFactory + StateReader + HashedPostStateProvider - + TrieReader + Clone + 'static, -

::Provider: - BlockReader, +

::Provider: BlockReader + + StageCheckpointReader + + ChangeSetReader + + BlockNumReader, C: ConfigureEvm + 'static, T: PayloadTypes>, V: EngineValidator, @@ -331,6 +337,7 @@ where config: TreeConfig, engine_kind: EngineApiKind, evm_config: C, + changeset_cache: ChangesetCache, ) -> Self { let (incoming_tx, incoming) = crossbeam_channel::unbounded(); @@ -351,6 +358,7 @@ where incoming_tx, engine_kind, evm_config, + changeset_cache, } } @@ -370,6 +378,7 @@ where config: TreeConfig, kind: EngineApiKind, evm_config: C, + changeset_cache: ChangesetCache, ) -> (Sender, N::Block>>, UnboundedReceiver>) { let best_block_number = provider.best_block_number().unwrap_or(0); @@ -401,6 +410,7 @@ where config, kind, evm_config, + changeset_cache, ); let incoming = task.incoming_tx.clone(); std::thread::Builder::new().name("Engine Task".to_string()).spawn(|| task.run()).unwrap(); @@ -1365,6 +1375,21 @@ where debug!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, elapsed=?start_time.elapsed(), "Finished persisting, calling finish"); self.persistence_state.finish(last_persisted_block_hash, last_persisted_block_number); + + // Evict trie changesets for blocks below the finalized block, but keep at least 64 blocks + if let Some(finalized) = self.canonical_in_memory_state.get_finalized_num_hash() { + let min_threshold = last_persisted_block_number.saturating_sub(64); + let eviction_threshold = finalized.number.min(min_threshold); + debug!( + target: "engine::tree", + last_persisted = last_persisted_block_number, + finalized_number = finalized.number, + eviction_threshold, + "Evicting changesets below threshold" + ); + self.changeset_cache.evict(eviction_threshold); + } + self.on_new_persisted_block()?; Ok(()) } @@ -1818,6 +1843,7 @@ where /// or the database. If the required historical data (such as trie change sets) has been /// pruned for a given block, this operation will return an error. On archive nodes, it /// can retrieve any block. 
+ #[instrument(level = "debug", target = "engine::tree", skip(self))] fn canonical_block_by_hash(&self, hash: B256) -> ProviderResult>> { trace!(target: "engine::tree", ?hash, "Fetching executed block by hash"); // check memory first @@ -1835,7 +1861,18 @@ where .get_state(block.header().number())? .ok_or_else(|| ProviderError::StateForNumberNotFound(block.header().number()))?; let hashed_state = self.provider.hashed_post_state(execution_output.state()); - let trie_updates = self.provider.get_block_trie_updates(block.number())?; + + debug!( + target: "engine::tree", + number = ?block.number(), + "computing block trie updates", + ); + let db_provider = self.provider.database_provider_ro()?; + let trie_updates = reth_trie_db::compute_block_trie_updates( + &self.changeset_cache, + &db_provider, + block.number(), + )?; let sorted_hashed_state = Arc::new(hashed_state.into_sorted()); let sorted_trie_updates = Arc::new(trie_updates); diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index a9e5a961ad6..3df06652d37 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -886,6 +886,7 @@ mod tests { use reth_revm::db::BundleState; use reth_testing_utils::generators; use reth_trie::{test_utils::state_root, HashedPostState}; + use reth_trie_db::ChangesetCache; use revm_primitives::{Address, HashMap, B256, KECCAK_EMPTY, U256}; use revm_state::{AccountInfo, AccountStatus, EvmState, EvmStorageSlot}; use std::sync::Arc; @@ -1141,7 +1142,7 @@ mod tests { std::convert::identity, ), StateProviderBuilder::new(provider_factory.clone(), genesis_hash, None), - OverlayStateProviderFactory::new(provider_factory), + OverlayStateProviderFactory::new(provider_factory, ChangesetCache::new()), &TreeConfig::default(), None, // No BAL for test ); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs 
b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index db44b1f98d6..a61ef525363 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1318,9 +1318,10 @@ mod tests { use reth_provider::{ providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, LatestStateProvider, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, TrieReader, + PruneCheckpointReader, StageCheckpointReader, StateProviderBox, }; use reth_trie::MultiProof; + use reth_trie_db::ChangesetCache; use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use revm_primitives::{B256, U256}; use std::sync::{Arc, OnceLock}; @@ -1341,7 +1342,6 @@ mod tests { where F: DatabaseProviderFactory< Provider: BlockReader - + TrieReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader @@ -1351,7 +1351,8 @@ mod tests { + 'static, { let rt_handle = get_test_runtime_handle(); - let overlay_factory = OverlayStateProviderFactory::new(factory); + let changeset_cache = ChangesetCache::new(); + let overlay_factory = OverlayStateProviderFactory::new(factory, changeset_cache); let task_ctx = ProofTaskCtx::new(overlay_factory); let proof_handle = ProofWorkerHandle::new(rt_handle, task_ctx, 1, 1, false); let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); @@ -1363,7 +1364,7 @@ mod tests { fn create_cached_provider(factory: F) -> CachedStateProvider where F: DatabaseProviderFactory< - Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader, > + Clone + Send + 'static, diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 746b9077f2f..056b413e7ac 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ 
b/crates/engine/tree/src/tree/payload_validator.rs @@ -43,13 +43,14 @@ use reth_provider::{ providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, DatabaseProviderROFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, - StateProvider, StateProviderFactory, StateReader, TrieReader, + StateProvider, StateProviderFactory, StateReader, }; use reth_revm::db::State; use reth_trie::{ updates::{TrieUpdates, TrieUpdatesSorted}, HashedPostState, HashedPostStateSorted, StateRoot, TrieInputSorted, }; +use reth_trie_db::ChangesetCache; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use revm_primitives::Address; use std::{ @@ -138,6 +139,8 @@ where metrics: EngineApiMetrics, /// Validator for the payload. validator: V, + /// Changeset cache for in-memory trie changesets + changeset_cache: ChangesetCache, } impl BasicEngineValidator @@ -145,7 +148,6 @@ where N: NodePrimitives, P: DatabaseProviderFactory< Provider: BlockReader - + TrieReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader @@ -169,6 +171,7 @@ where validator: V, config: TreeConfig, invalid_block_hook: Box>, + changeset_cache: ChangesetCache, ) -> Self { let precompile_cache_map = PrecompileCacheMap::default(); let payload_processor = PayloadProcessor::new( @@ -188,6 +191,7 @@ where invalid_block_hook, metrics: EngineApiMetrics::default(), validator, + changeset_cache, } } @@ -427,13 +431,33 @@ where .map_err(Box::::from)) .map(Arc::new); + // Compute trie input from ancestors once, before spawning payload processor. + // This will be extended with the current block's hashed state after execution. 
+ let trie_input_start = Instant::now(); + let (trie_input, block_hash_for_overlay) = + ensure_ok!(self.compute_trie_input(parent_hash, ctx.state())); + + self.metrics + .block_validation + .trie_input_duration + .record(trie_input_start.elapsed().as_secs_f64()); + + // Create overlay factory for payload processor (StateRootTask path needs it for + // multiproofs) + let overlay_factory = { + let TrieInputSorted { nodes, state, .. } = &trie_input; + OverlayStateProviderFactory::new(self.provider.clone(), self.changeset_cache.clone()) + .with_block_hash(Some(block_hash_for_overlay)) + .with_trie_overlay(Some(Arc::clone(nodes))) + .with_hashed_state_overlay(Some(Arc::clone(state))) + }; + // Spawn the appropriate processor based on strategy let mut handle = ensure_ok!(self.spawn_payload_processor( env.clone(), txs, provider_builder, - parent_hash, - ctx.state(), + overlay_factory.clone(), strategy, block_access_list, )); @@ -494,11 +518,7 @@ where } StateRootStrategy::Parallel => { debug!(target: "engine::tree::payload_validator", "Using parallel state root algorithm"); - match self.compute_state_root_parallel( - block.parent_hash(), - &hashed_state, - ctx.state(), - ) { + match self.compute_state_root_parallel(overlay_factory.clone(), &hashed_state) { Ok(result) => { let elapsed = root_time.elapsed(); info!( @@ -534,7 +554,7 @@ where } let (root, updates) = ensure_ok_post_block!( - self.compute_state_root_serial(block.parent_hash(), &hashed_state, ctx.state()), + self.compute_state_root_serial(overlay_factory.clone(), &hashed_state), block ); (root, updates, root_time.elapsed()) @@ -571,7 +591,14 @@ where // Terminate prewarming task with the shared execution outcome handle.terminate_caching(Some(Arc::clone(&execution_outcome))); - Ok(self.spawn_deferred_trie_task(block, execution_outcome, &ctx, hashed_state, trie_output)) + Ok(self.spawn_deferred_trie_task( + block, + execution_outcome, + &ctx, + hashed_state, + trie_output, + overlay_factory, + )) } /// Return 
sealed block header from database or in-memory state by hash. @@ -670,6 +697,10 @@ where /// Compute state root for the given hashed post state in parallel. /// + /// Uses an overlay factory which provides the state of the parent block, along with the + /// [`HashedPostState`] containing the changes of this block, to compute the state root and + /// trie updates for this block. + /// /// # Returns /// /// Returns `Ok(_)` if computed successfully. @@ -677,58 +708,39 @@ where #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, - parent_hash: B256, + overlay_factory: OverlayStateProviderFactory
<P>
, hashed_state: &HashedPostState, - state: &EngineApiTreeState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { - let (mut input, block_hash) = self.compute_trie_input(parent_hash, state)?; - - // Extend state overlay with current block's sorted state. - input.prefix_sets.extend(hashed_state.construct_prefix_sets()); - let sorted_hashed_state = hashed_state.clone_into_sorted(); - Arc::make_mut(&mut input.state).extend_ref(&sorted_hashed_state); - - let TrieInputSorted { nodes, state, prefix_sets: prefix_sets_mut } = input; - - let factory = OverlayStateProviderFactory::new(self.provider.clone()) - .with_block_hash(Some(block_hash)) - .with_trie_overlay(Some(nodes)) - .with_hashed_state_overlay(Some(state)); - - // The `hashed_state` argument is already taken into account as part of the overlay, but we + // The `hashed_state` argument will be taken into account as part of the overlay, but we // need to use the prefix sets which were generated from it to indicate to the // ParallelStateRoot which parts of the trie need to be recomputed. - let prefix_sets = prefix_sets_mut.freeze(); - - ParallelStateRoot::new(factory, prefix_sets).incremental_root_with_updates() + let prefix_sets = hashed_state.construct_prefix_sets().freeze(); + let overlay_factory = + overlay_factory.with_extended_hashed_state_overlay(hashed_state.clone_into_sorted()); + ParallelStateRoot::new(overlay_factory, prefix_sets).incremental_root_with_updates() } /// Compute state root for the given hashed post state in serial. + /// + /// Uses an overlay factory which provides the state of the parent block, along with the + /// [`HashedPostState`] containing the changes of this block, to compute the state root and + /// trie updates for this block. fn compute_state_root_serial( &self, - parent_hash: B256, + overlay_factory: OverlayStateProviderFactory
<P>
, hashed_state: &HashedPostState, - state: &EngineApiTreeState, ) -> ProviderResult<(B256, TrieUpdates)> { - let (mut input, block_hash) = self.compute_trie_input(parent_hash, state)?; - - // Extend state overlay with current block's sorted state. - input.prefix_sets.extend(hashed_state.construct_prefix_sets()); - let sorted_hashed_state = hashed_state.clone_into_sorted(); - Arc::make_mut(&mut input.state).extend_ref(&sorted_hashed_state); - - let TrieInputSorted { nodes, state, .. } = input; - let prefix_sets = hashed_state.construct_prefix_sets(); - - let factory = OverlayStateProviderFactory::new(self.provider.clone()) - .with_block_hash(Some(block_hash)) - .with_trie_overlay(Some(nodes)) - .with_hashed_state_overlay(Some(state)); + // The `hashed_state` argument will be taken into account as part of the overlay, but we + // need to use the prefix sets which were generated from it to indicate to the + // StateRoot which parts of the trie need to be recomputed. + let prefix_sets = hashed_state.construct_prefix_sets().freeze(); + let overlay_factory = + overlay_factory.with_extended_hashed_state_overlay(hashed_state.clone_into_sorted()); - let provider = factory.database_provider_ro()?; + let provider = overlay_factory.database_provider_ro()?; Ok(StateRoot::new(&provider, &provider) - .with_prefix_sets(prefix_sets.freeze()) + .with_prefix_sets(prefix_sets) .root_with_updates()?) } @@ -811,6 +823,11 @@ where /// /// The method handles strategy fallbacks if the preferred approach fails, ensuring /// block execution always completes with a valid state root. + /// + /// # Arguments + /// + /// * `overlay_factory` - Pre-computed overlay factory for multiproof generation + /// (`StateRootTask`) #[allow(clippy::too_many_arguments)] #[instrument( level = "debug", @@ -823,8 +840,7 @@ where env: ExecutionEnv, txs: T, provider_builder: StateProviderBuilder, - parent_hash: B256, - state: &EngineApiTreeState, + overlay_factory: OverlayStateProviderFactory
<P>
, strategy: StateRootStrategy, block_access_list: Option>, ) -> Result< @@ -837,32 +853,14 @@ where > { match strategy { StateRootStrategy::StateRootTask => { - // Compute trie input - let trie_input_start = Instant::now(); - let (trie_input, block_hash) = self.compute_trie_input(parent_hash, state)?; - - // Create OverlayStateProviderFactory with sorted trie data for multiproofs - let TrieInputSorted { nodes, state, .. } = trie_input; - - let multiproof_provider_factory = - OverlayStateProviderFactory::new(self.provider.clone()) - .with_block_hash(Some(block_hash)) - .with_trie_overlay(Some(nodes)) - .with_hashed_state_overlay(Some(state)); - - // Record trie input duration including OverlayStateProviderFactory setup - self.metrics - .block_validation - .trie_input_duration - .record(trie_input_start.elapsed().as_secs_f64()); - let spawn_start = Instant::now(); + // Use the pre-computed overlay factory for multiproofs let handle = self.payload_processor.spawn( env, txs, provider_builder, - multiproof_provider_factory, + overlay_factory, &self.config, block_access_list, ); @@ -1103,6 +1101,7 @@ where ctx: &TreeCtx<'_, N>, hashed_state: HashedPostState, trie_output: TrieUpdates, + overlay_factory: OverlayStateProviderFactory
<P>
, ) -> ExecutedBlock { // Capture parent hash and ancestor overlays for deferred trie input construction. let (anchor_hash, overlay_blocks) = ctx @@ -1126,9 +1125,21 @@ where let deferred_handle_task = deferred_trie_data.clone(); let block_validation_metrics = self.metrics.block_validation.clone(); + // Capture block info and cache handle for changeset computation + let block_hash = block.hash(); + let block_number = block.number(); + let changeset_cache = self.changeset_cache.clone(); + // Spawn background task to compute trie data. Calling `wait_cloned` will compute from // the stored inputs and cache the result, so subsequent calls return immediately. let compute_trie_input_task = move || { + let _span = debug_span!( + target: "engine::tree::payload_validator", + "compute_trie_input_task", + block_number + ) + .entered(); + let result = panic::catch_unwind(AssertUnwindSafe(|| { let compute_start = Instant::now(); let computed = deferred_handle_task.wait_cloned(); @@ -1151,6 +1162,40 @@ where .anchored_overlay_hashed_state_size .record(anchored.trie_input.state.total_len() as f64); } + + // Compute and cache changesets using the computed trie_updates + let changeset_start = Instant::now(); + + // Get a provider from the overlay factory for trie cursor access + let changeset_result = + overlay_factory.database_provider_ro().and_then(|provider| { + reth_trie::changesets::compute_trie_changesets( + &provider, + &computed.trie_updates, + ) + .map_err(ProviderError::Database) + }); + + match changeset_result { + Ok(changesets) => { + debug!( + target: "engine::tree::changeset", + ?block_number, + elapsed = ?changeset_start.elapsed(), + "Computed and caching changesets" + ); + + changeset_cache.insert(block_hash, block_number, Arc::new(changesets)); + } + Err(e) => { + warn!( + target: "engine::tree::changeset", + ?block_number, + ?e, + "Failed to compute changesets in deferred trie task" + ); + } + } })); if result.is_err() { @@ -1247,7 +1292,6 @@ impl EngineValidator 
for BasicEngineValidator(&self) -> eyre::Result> + pub async fn create_provider_factory( + &self, + changeset_cache: ChangesetCache, + ) -> eyre::Result> where N: ProviderNodeTypes, Evm: ConfigureEvm + 'static, @@ -500,7 +504,8 @@ where static_file_provider, rocksdb_provider, )? - .with_prune_modes(self.prune_modes()); + .with_prune_modes(self.prune_modes()) + .with_changeset_cache(changeset_cache); // Keep MDBX, static files, and RocksDB aligned. If any check fails, unwind to the // earliest consistent block. @@ -593,12 +598,13 @@ where /// Creates a new [`ProviderFactory`] and attaches it to the launch context. pub async fn with_provider_factory( self, + changeset_cache: ChangesetCache, ) -> eyre::Result, ProviderFactory>>> where N: ProviderNodeTypes, Evm: ConfigureEvm + 'static, { - let factory = self.create_provider_factory::().await?; + let factory = self.create_provider_factory::(changeset_cache).await?; let ctx = LaunchContextWith { inner: self.inner, attachment: self.attachment.map_right(|_| factory), diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index e9dd5344c4a..dcefbeeab68 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -37,6 +37,7 @@ use reth_provider::{ use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; +use reth_trie_db::ChangesetCache; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -87,6 +88,9 @@ impl EngineNodeLauncher { } = target; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; + // Create changeset cache that will be shared across the engine + let changeset_cache = ChangesetCache::new(); + // setup the launch context let ctx = ctx .with_configured_globals(engine_tree_config.reserved_cpu_cores()) @@ -98,8 +102,8 @@ impl EngineNodeLauncher { .attach(database.clone()) // ensure certain settings take effect .with_adjusted_configs() - // Create the provider factory - .with_provider_factory::<_, >::Evm>().await? + // Create the provider factory with changeset cache + .with_provider_factory::<_, >::Evm>(changeset_cache.clone()).await? .inspect(|ctx| { info!(target: "reth::cli", "Database opened"); match ctx.provider_factory().storage_settings() { @@ -204,7 +208,7 @@ impl EngineNodeLauncher { // Build the engine validator with all required components let engine_validator = validator_builder .clone() - .build_tree_validator(&add_ons_ctx, engine_tree_config.clone()) + .build_tree_validator(&add_ons_ctx, engine_tree_config.clone(), changeset_cache.clone()) .await?; // Create the consensus engine stream with optional reorg @@ -214,7 +218,13 @@ impl EngineNodeLauncher { .maybe_reorg( ctx.blockchain_db().clone(), ctx.components().evm_config().clone(), - || validator_builder.build_tree_validator(&add_ons_ctx, engine_tree_config.clone()), + || async { + // Create a separate cache for reorg validator (not shared with main engine) + let reorg_cache = ChangesetCache::new(); + validator_builder + .build_tree_validator(&add_ons_ctx, engine_tree_config.clone(), reorg_cache) + .await + }, node_config.debug.reorg_frequency, node_config.debug.reorg_depth, ) @@ -239,6 +249,7 @@ impl EngineNodeLauncher { engine_tree_config, ctx.sync_metrics_tx(), ctx.components().evm_config().clone(), + changeset_cache, ); info!(target: "reth::cli", "Consensus engine initialized"); diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 360019e0ea3..c2097ce474c 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ 
-3,6 +3,7 @@ pub use jsonrpsee::server::middleware::rpc::{RpcService, RpcServiceBuilder}; pub use reth_engine_tree::tree::{BasicEngineValidator, EngineValidator}; pub use reth_rpc_builder::{middleware::RethRpcMiddleware, Identity, Stack}; +pub use reth_trie_db::ChangesetCache; use crate::{ invalid_block_hook::InvalidBlockHookExt, ConfigureEngineEvm, ConsensusEngineEvent, @@ -1288,6 +1289,7 @@ pub trait EngineValidatorBuilder: Send + Sync + Clone self, ctx: &AddOnsContext<'_, Node>, tree_config: TreeConfig, + changeset_cache: ChangesetCache, ) -> impl Future> + Send; } @@ -1335,10 +1337,12 @@ where self, ctx: &AddOnsContext<'_, Node>, tree_config: TreeConfig, + changeset_cache: ChangesetCache, ) -> eyre::Result { let validator = self.payload_validator_builder.build(ctx).await?; let data_dir = ctx.config.datadir.clone().resolve_datadir(ctx.config.chain.chain()); let invalid_block_hook = ctx.create_invalid_block_hook(&data_dir).await?; + Ok(BasicEngineValidator::new( ctx.node.provider().clone(), std::sync::Arc::new(ctx.node.consensus().clone()), @@ -1346,6 +1350,7 @@ where validator, tree_config, invalid_block_hook, + changeset_cache, )) } } diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 82a081228a6..0bd65253724 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -5,10 +5,7 @@ use alloy_primitives::{Address, BlockNumber}; use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthereumHardforks; use reth_config::config::PruneConfig; -use reth_prune_types::{ - PruneMode, PruneModes, ReceiptsLogPruneConfig, MERKLE_CHANGESETS_RETENTION_BLOCKS, - MINIMUM_PRUNING_DISTANCE, -}; +use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; use std::{collections::BTreeMap, ops::Not}; /// Parameters for pruning and full node @@ -143,7 +140,6 @@ impl PruningArgs { .ethereum_fork_activation(EthereumHardfork::Paris) .block_number() 
.map(PruneMode::Before), - merkle_changesets: PruneMode::Distance(MERKLE_CHANGESETS_RETENTION_BLOCKS), receipts_log_filter: Default::default(), }, } @@ -160,7 +156,6 @@ impl PruningArgs { account_history: Some(PruneMode::Distance(10064)), storage_history: Some(PruneMode::Distance(10064)), bodies_history: Some(PruneMode::Distance(10064)), - merkle_changesets: PruneMode::Distance(MERKLE_CHANGESETS_RETENTION_BLOCKS), receipts_log_filter: Default::default(), }, } diff --git a/crates/node/core/src/args/stage.rs b/crates/node/core/src/args/stage.rs index 7718fb85605..337f5a4a60b 100644 --- a/crates/node/core/src/args/stage.rs +++ b/crates/node/core/src/args/stage.rs @@ -38,11 +38,6 @@ pub enum StageEnum { /// /// Handles Merkle tree-related computations and data processing. Merkle, - /// The merkle changesets stage within the pipeline. - /// - /// Handles Merkle trie changesets for storage and accounts. - #[value(name = "merkle-changesets")] - MerkleChangeSets, /// The transaction lookup stage within the pipeline. /// /// Deals with the retrieval and processing of transactions. 
diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 7e79b3a8869..062b9c8c810 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -81,6 +81,7 @@ reth-revm = { workspace = true, features = ["std"] } reth-rpc.workspace = true reth-rpc-eth-types.workspace = true reth-stages-types.workspace = true +reth-trie-db.workspace = true alloy-network.workspace = true alloy-op-hardforks.workspace = true @@ -125,6 +126,7 @@ test-utils = [ "reth-optimism-primitives/arbitrary", "reth-primitives-traits/test-utils", "reth-trie-common/test-utils", + "reth-trie-db/test-utils", "reth-stages-types/test-utils", ] reth-codec = ["reth-optimism-primitives/reth-codec"] diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index e2a8e5c489a..9030935d64f 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -24,6 +24,7 @@ //! use reth_rpc::TraceApi; //! use reth_rpc_eth_types::{EthConfig, EthStateCache}; //! use reth_tasks::{pool::BlockingTaskGuard, TaskManager}; +//! use reth_trie_db::ChangesetCache; //! use std::sync::Arc; //! //! #[tokio::main] @@ -37,7 +38,7 @@ //! .with_loaded_toml_config(sepolia) //! .unwrap() //! .attach(Arc::new(db)) -//! .with_provider_factory::<_, OpEvmConfig>() +//! .with_provider_factory::<_, OpEvmConfig>(ChangesetCache::new()) //! .await //! .unwrap() //! .with_genesis() diff --git a/crates/prune/prune/src/db_ext.rs b/crates/prune/prune/src/db_ext.rs index ee1b3cec948..642063378a2 100644 --- a/crates/prune/prune/src/db_ext.rs +++ b/crates/prune/prune/src/db_ext.rs @@ -127,6 +127,7 @@ pub(crate) trait DbTxPruneExt: DbTxMut + DbTx { /// Prune a DUPSORT table for the specified key range. /// /// Returns number of rows pruned. 
+ #[expect(unused)] fn prune_dupsort_table_with_range( &self, keys: impl RangeBounds + Clone + Debug, diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 04eaaceed10..5a09fd41c93 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -17,8 +17,8 @@ pub use set::SegmentSet; use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ - AccountHistory, Bodies, MerkleChangeSets, Receipts as UserReceipts, ReceiptsByLogs, - SenderRecovery, StorageHistory, TransactionLookup, + AccountHistory, Bodies, Receipts as UserReceipts, ReceiptsByLogs, SenderRecovery, + StorageHistory, TransactionLookup, }; /// Prunes data from static files for a given segment. diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 479ab4f25b0..f5ceae63256 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -1,6 +1,6 @@ use crate::segments::{ - user::ReceiptsByLogs, AccountHistory, Bodies, MerkleChangeSets, Segment, SenderRecovery, - StorageHistory, TransactionLookup, UserReceipts, + user::ReceiptsByLogs, AccountHistory, Bodies, Segment, SenderRecovery, StorageHistory, + TransactionLookup, UserReceipts, }; use alloy_eips::eip2718::Encodable2718; use reth_db_api::{table::Value, transaction::DbTxMut}; @@ -67,15 +67,12 @@ where account_history, storage_history, bodies_history, - merkle_changesets, receipts_log_filter, } = prune_modes; Self::default() // Bodies - run first since file deletion is fast .segment_opt(bodies_history.map(Bodies::new)) - // Merkle changesets - .segment(MerkleChangeSets::new(merkle_changesets)) // Account history .segment_opt(account_history.map(AccountHistory::new)) // Storage history diff --git a/crates/prune/prune/src/segments/user/mod.rs b/crates/prune/prune/src/segments/user/mod.rs index b993d3f2616..96031a94c22 100644 --- a/crates/prune/prune/src/segments/user/mod.rs +++ 
b/crates/prune/prune/src/segments/user/mod.rs @@ -1,7 +1,6 @@ mod account_history; mod bodies; mod history; -mod merkle_change_sets; mod receipts; mod receipts_by_logs; mod sender_recovery; @@ -10,7 +9,6 @@ mod transaction_lookup; pub use account_history::AccountHistory; pub use bodies::Bodies; -pub use merkle_change_sets::MerkleChangeSets; pub use receipts::Receipts; pub use receipts_by_logs::ReceiptsByLogs; pub use sender_recovery::SenderRecovery; diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 8233a3487ba..315063278b2 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -30,10 +30,7 @@ pub use pruner::{ SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; -pub use target::{ - PruneModes, UnwindTargetPrunedError, MERKLE_CHANGESETS_RETENTION_BLOCKS, - MINIMUM_PRUNING_DISTANCE, -}; +pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE}; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. #[derive(Debug, Clone, PartialEq, Eq, Default)] diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index b731a7efa92..d3643b2ee8d 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -1,6 +1,6 @@ #![allow(deprecated)] // necessary to all defining deprecated `PruneSegment` variants -use crate::{MERKLE_CHANGESETS_RETENTION_BLOCKS, MINIMUM_PRUNING_DISTANCE}; +use crate::MINIMUM_PRUNING_DISTANCE; use derive_more::Display; use strum::{EnumIter, IntoEnumIterator}; use thiserror::Error; @@ -36,6 +36,8 @@ pub enum PruneSegment { #[strum(disabled)] /// Prune segment responsible for the `Transactions` table. Transactions, + #[deprecated = "Variant indexes cannot be changed"] + #[strum(disabled)] /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and /// `StoragesTrieChangeSets` table. 
MerkleChangeSets, @@ -67,10 +69,9 @@ impl PruneSegment { Self::ContractLogs | Self::AccountHistory | Self::StorageHistory => { MINIMUM_PRUNING_DISTANCE } - Self::MerkleChangeSets => MERKLE_CHANGESETS_RETENTION_BLOCKS, #[expect(deprecated)] #[expect(clippy::match_same_arms)] - Self::Headers | Self::Transactions => 0, + Self::Headers | Self::Transactions | Self::MerkleChangeSets => 0, } } @@ -127,6 +128,7 @@ mod tests { { assert!(!segments.contains(&PruneSegment::Headers)); assert!(!segments.contains(&PruneSegment::Transactions)); + assert!(!segments.contains(&PruneSegment::MerkleChangeSets)); } } } diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index 5eee7e5aba7..92a01fc2e5b 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -36,17 +36,8 @@ pub enum HistoryType { StorageHistory, } -/// Default number of blocks to retain for merkle changesets. -/// This is used by both the `MerkleChangeSets` stage and the pruner segment. -pub const MERKLE_CHANGESETS_RETENTION_BLOCKS: u64 = 128; - -/// Default pruning mode for merkle changesets -const fn default_merkle_changesets_mode() -> PruneMode { - PruneMode::Distance(MERKLE_CHANGESETS_RETENTION_BLOCKS) -} - /// Pruning configuration for every segment of the data that can be pruned. -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, Default)] #[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "serde"), serde(default))] pub struct PruneModes { @@ -81,10 +72,6 @@ pub struct PruneModes { /// Bodies History pruning configuration. #[cfg_attr(any(test, feature = "serde"), serde(skip_serializing_if = "Option::is_none",))] pub bodies_history: Option, - /// Merkle Changesets pruning configuration for `AccountsTrieChangeSets` and - /// `StoragesTrieChangeSets`. 
- #[cfg_attr(any(test, feature = "serde"), serde(default = "default_merkle_changesets_mode"))] - pub merkle_changesets: PruneMode, /// Receipts pruning configuration by retaining only those receipts that contain logs emitted /// by the specified addresses, discarding others. This setting is overridden by `receipts`. /// @@ -97,21 +84,6 @@ pub struct PruneModes { pub receipts_log_filter: ReceiptsLogPruneConfig, } -impl Default for PruneModes { - fn default() -> Self { - Self { - sender_recovery: None, - transaction_lookup: None, - receipts: None, - account_history: None, - storage_history: None, - bodies_history: None, - merkle_changesets: default_merkle_changesets_mode(), - receipts_log_filter: ReceiptsLogPruneConfig::default(), - } - } -} - impl PruneModes { /// Sets pruning to all targets. pub fn all() -> Self { @@ -122,7 +94,6 @@ impl PruneModes { account_history: Some(PruneMode::Full), storage_history: Some(PruneMode::Full), bodies_history: Some(PruneMode::Full), - merkle_changesets: PruneMode::Full, receipts_log_filter: Default::default(), } } @@ -135,16 +106,7 @@ impl PruneModes { /// Migrates deprecated prune mode values to their new defaults. /// /// Returns `true` if any migration was performed. 
- /// - /// Currently migrates: - /// - `merkle_changesets`: `Distance(n)` where `n < 128` or `n == 10064` -> `Distance(128)` pub const fn migrate(&mut self) -> bool { - if let PruneMode::Distance(d) = self.merkle_changesets && - (d < MERKLE_CHANGESETS_RETENTION_BLOCKS || d == MINIMUM_PRUNING_DISTANCE) - { - self.merkle_changesets = PruneMode::Distance(MERKLE_CHANGESETS_RETENTION_BLOCKS); - return true; - } false } diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index f946a622ca0..2c1948307eb 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -39,9 +39,9 @@ use crate::{ stages::{ AccountHashingStage, BodyStage, EraImportSource, EraStage, ExecutionStage, FinishStage, - HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleChangeSets, - MerkleStage, PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, - StorageHashingStage, TransactionLookupStage, + HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, + PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, StorageHashingStage, + TransactionLookupStage, }, StageSet, StageSetBuilder, }; @@ -76,7 +76,6 @@ use tokio::sync::watch; /// - [`AccountHashingStage`] /// - [`StorageHashingStage`] /// - [`MerkleStage`] (execute) -/// - [`MerkleChangeSets`] /// - [`TransactionLookupStage`] /// - [`IndexStorageHistoryStage`] /// - [`IndexAccountHistoryStage`] @@ -401,7 +400,6 @@ where /// - [`AccountHashingStage`] /// - [`StorageHashingStage`] /// - [`MerkleStage`] (execute) -/// - [`MerkleChangeSets`] #[derive(Debug, Default)] #[non_exhaustive] pub struct HashingStages { @@ -414,7 +412,6 @@ where MerkleStage: Stage, AccountHashingStage: Stage, StorageHashingStage: Stage, - MerkleChangeSets: Stage, { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() @@ -431,7 +428,6 @@ where self.stages_config.merkle.rebuild_threshold, self.stages_config.merkle.incremental_threshold, )) - 
.add_stage(MerkleChangeSets::new()) } } diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index d6747a39607..8249d749147 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -16,8 +16,6 @@ mod index_account_history; mod index_storage_history; /// Stage for computing state root. mod merkle; -/// Stage for computing merkle changesets. -mod merkle_changesets; mod prune; /// The sender recovery stage. mod sender_recovery; @@ -34,7 +32,6 @@ pub use headers::*; pub use index_account_history::*; pub use index_storage_history::*; pub use merkle::*; -pub use merkle_changesets::*; pub use prune::*; pub use sender_recovery::*; pub use tx_lookup::*; diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index c21412ea436..4f62cdcb251 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -290,6 +290,9 @@ pub struct IndexHistoryCheckpoint { } /// Saves the progress of `MerkleChangeSets` stage. +/// +/// Note: This type is only kept for backward compatibility with the Compact codec. +/// The `MerkleChangeSets` stage has been removed. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] @@ -401,9 +404,6 @@ impl StageCheckpoint { StageId::IndexStorageHistory | StageId::IndexAccountHistory => { StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint::default()) } - StageId::MerkleChangeSets => { - StageUnitCheckpoint::MerkleChangeSets(MerkleChangeSetsCheckpoint::default()) - } _ => return self, }); _ = self.stage_checkpoint.map(|mut checkpoint| checkpoint.set_block_range(from, to)); @@ -456,6 +456,9 @@ pub enum StageUnitCheckpoint { /// Saves the progress of Index History stage. 
IndexHistory(IndexHistoryCheckpoint), /// Saves the progress of `MerkleChangeSets` stage. + /// + /// Note: This variant is only kept for backward compatibility with the Compact codec. + /// The `MerkleChangeSets` stage has been removed. MerkleChangeSets(MerkleChangeSetsCheckpoint), } @@ -467,8 +470,7 @@ impl StageUnitCheckpoint { Self::Account(AccountHashingCheckpoint { block_range, .. }) | Self::Storage(StorageHashingCheckpoint { block_range, .. }) | Self::Execution(ExecutionCheckpoint { block_range, .. }) | - Self::IndexHistory(IndexHistoryCheckpoint { block_range, .. }) | - Self::MerkleChangeSets(MerkleChangeSetsCheckpoint { block_range, .. }) => { + Self::IndexHistory(IndexHistoryCheckpoint { block_range, .. }) => { let old_range = *block_range; *block_range = CheckpointBlockRange { from, to }; @@ -492,7 +494,7 @@ macro_rules! stage_unit_checkpoints { impl StageCheckpoint { $( #[doc = $fn_get_doc] -pub const fn $fn_get_name(&self) -> Option<$checkpoint_ty> { + pub const fn $fn_get_name(&self) -> Option<$checkpoint_ty> { match self.stage_checkpoint { Some(StageUnitCheckpoint::$enum_variant(checkpoint)) => Some(checkpoint), _ => None, @@ -500,7 +502,7 @@ pub const fn $fn_get_name(&self) -> Option<$checkpoint_ty> { } #[doc = $fn_build_doc] -pub const fn $fn_build_name( + pub const fn $fn_build_name( mut self, checkpoint: $checkpoint_ty, ) -> Self { @@ -566,15 +568,6 @@ stage_unit_checkpoints!( index_history_stage_checkpoint, /// Sets the stage checkpoint to index history. with_index_history_stage_checkpoint - ), - ( - 6, - MerkleChangeSets, - MerkleChangeSetsCheckpoint, - /// Returns the merkle changesets stage checkpoint, if any. - merkle_changesets_stage_checkpoint, - /// Sets the stage checkpoint to merkle changesets. 
- with_merkle_changesets_stage_checkpoint ) ); diff --git a/crates/stages/types/src/id.rs b/crates/stages/types/src/id.rs index 8c0a91c8731..78d7e0ec1b6 100644 --- a/crates/stages/types/src/id.rs +++ b/crates/stages/types/src/id.rs @@ -25,7 +25,6 @@ pub enum StageId { TransactionLookup, IndexStorageHistory, IndexAccountHistory, - MerkleChangeSets, Prune, Finish, /// Other custom stage with a provided string identifier. @@ -40,7 +39,7 @@ static ENCODED_STAGE_IDS: OnceLock>> = OnceLock::new(); impl StageId { /// All supported Stages - pub const ALL: [Self; 16] = [ + pub const ALL: [Self; 15] = [ Self::Era, Self::Headers, Self::Bodies, @@ -54,7 +53,6 @@ impl StageId { Self::TransactionLookup, Self::IndexStorageHistory, Self::IndexAccountHistory, - Self::MerkleChangeSets, Self::Prune, Self::Finish, ]; @@ -90,7 +88,6 @@ impl StageId { Self::TransactionLookup => "TransactionLookup", Self::IndexAccountHistory => "IndexAccountHistory", Self::IndexStorageHistory => "IndexStorageHistory", - Self::MerkleChangeSets => "MerkleChangeSets", Self::Prune => "Prune", Self::Finish => "Finish", Self::Other(s) => s, diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 83585fee7ce..4e30ce27cd7 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -18,8 +18,8 @@ pub use id::StageId; mod checkpoints; pub use checkpoints::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, - HeadersCheckpoint, IndexHistoryCheckpoint, MerkleChangeSetsCheckpoint, MerkleCheckpoint, - StageCheckpoint, StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint, + HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint, + StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint, }; mod execution; diff --git a/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs b/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs index cc62004dbb3..47f597d1d53 100644 --- 
a/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs @@ -5,7 +5,7 @@ mod inputs; /// Fuzzer generates a random instance of the object and proceeds to encode and decode it. It then /// makes sure that it matches the original object. /// -/// Some types like [`IntegerList`] might have some restrictions on how they're fuzzed. For example, +/// Some types like `IntegerList` might have some restrictions on how they're fuzzed. For example, /// the list is assumed to be sorted before creating the object. macro_rules! impl_fuzzer_with_input { ($(($name:tt, $input_type:tt, $encode:tt, $encode_method:tt, $decode:tt, $decode_method:tt)),+) => { diff --git a/crates/storage/provider/src/changesets_utils/mod.rs b/crates/storage/provider/src/changesets_utils/mod.rs index 3b65825264b..9bccd29c4cb 100644 --- a/crates/storage/provider/src/changesets_utils/mod.rs +++ b/crates/storage/provider/src/changesets_utils/mod.rs @@ -2,6 +2,3 @@ mod state_reverts; pub use state_reverts::StorageRevertsIter; - -mod trie; -pub use trie::*; diff --git a/crates/storage/provider/src/changesets_utils/trie.rs b/crates/storage/provider/src/changesets_utils/trie.rs deleted file mode 100644 index cc14b516b30..00000000000 --- a/crates/storage/provider/src/changesets_utils/trie.rs +++ /dev/null @@ -1,147 +0,0 @@ -use itertools::{merge_join_by, EitherOrBoth}; -use reth_db_api::DatabaseError; -use reth_trie::{trie_cursor::TrieCursor, BranchNodeCompact, Nibbles}; -use std::cmp::{Ord, Ordering}; - -/// Combines a sorted iterator of trie node paths and a storage trie cursor into a new -/// iterator which produces the current values of all given paths in the same order. -#[derive(Debug)] -pub struct StorageTrieCurrentValuesIter<'cursor, P, C> { - /// Sorted iterator of node paths which we want the values of. - paths: P, - /// Storage trie cursor. 
- cursor: &'cursor mut C, - /// Current value at the cursor, allows us to treat the cursor as a peekable iterator. - cursor_current: Option<(Nibbles, BranchNodeCompact)>, -} - -impl<'cursor, P, C> StorageTrieCurrentValuesIter<'cursor, P, C> -where - P: Iterator, - C: TrieCursor, -{ - /// Instantiate a [`StorageTrieCurrentValuesIter`] from a sorted paths iterator and a cursor. - pub fn new(paths: P, cursor: &'cursor mut C) -> Result { - let mut new_self = Self { paths, cursor, cursor_current: None }; - new_self.seek_cursor(Nibbles::default())?; - Ok(new_self) - } - - fn seek_cursor(&mut self, path: Nibbles) -> Result<(), DatabaseError> { - self.cursor_current = self.cursor.seek(path)?; - Ok(()) - } -} - -impl<'cursor, P, C> Iterator for StorageTrieCurrentValuesIter<'cursor, P, C> -where - P: Iterator, - C: TrieCursor, -{ - type Item = Result<(Nibbles, Option), DatabaseError>; - - fn next(&mut self) -> Option { - let Some(curr_path) = self.paths.next() else { - // If there are no more paths then there is no further possible output. - return None - }; - - // If the path is ahead of the cursor then seek the cursor forward to catch up. The cursor - // will seek either to `curr_path` or beyond it. - if self.cursor_current.as_ref().is_some_and(|(cursor_path, _)| curr_path > *cursor_path) && - let Err(err) = self.seek_cursor(curr_path) - { - return Some(Err(err)) - } - - // If there is a path but the cursor is empty then that path has no node. - if self.cursor_current.is_none() { - return Some(Ok((curr_path, None))) - } - - let (cursor_path, cursor_node) = - self.cursor_current.as_mut().expect("already checked for None"); - - // There is both a path and a cursor value, compare their paths. - match curr_path.cmp(cursor_path) { - Ordering::Less => { - // If the path is behind the cursor then there is no value for that - // path, produce None. 
- Some(Ok((curr_path, None))) - } - Ordering::Equal => { - // If the target path and cursor's path match then there is a value for that path, - // return the value. We don't seek the cursor here, that will be handled on the - // next call to `next` after checking that `paths` isn't None. - let cursor_node = core::mem::take(cursor_node); - Some(Ok((*cursor_path, Some(cursor_node)))) - } - Ordering::Greater => { - panic!("cursor was seeked to {curr_path:?}, but produced a node at a lower path {cursor_path:?}") - } - } - } -} - -/// Returns an iterator which produces the values to be inserted into the `StoragesTrieChangeSets` -/// table for an account whose storage was wiped during a block. It is expected that this is called -/// prior to inserting the block's trie updates. -/// -/// ## Arguments -/// -/// - `curr_values_of_changed` is an iterator over the current values of all trie nodes modified by -/// the block, ordered by path. -/// - `all_nodes` is an iterator over all existing trie nodes for the account, ordered by path. -/// -/// ## Returns -/// -/// An iterator of trie node paths and a `Some(node)` (indicating the node was wiped) or a `None` -/// (indicating the node was modified in the block but didn't previously exist. The iterator's -/// results will be ordered by path. 
-pub fn storage_trie_wiped_changeset_iter( - curr_values_of_changed: impl Iterator< - Item = Result<(Nibbles, Option), DatabaseError>, - >, - all_nodes: impl Iterator>, -) -> Result< - impl Iterator), DatabaseError>>, - DatabaseError, -> { - let all_nodes = all_nodes.map(|e| e.map(|(nibbles, node)| (nibbles, Some(node)))); - - let merged = merge_join_by(curr_values_of_changed, all_nodes, |a, b| match (a, b) { - (Err(_), _) => Ordering::Less, - (_, Err(_)) => Ordering::Greater, - (Ok(a), Ok(b)) => a.0.cmp(&b.0), - }); - - Ok(merged.map(|either_or| match either_or { - EitherOrBoth::Left(changed) => { - // A path of a changed node (given in `paths`) which was not found in the database (or - // there's an error). The current value of this path must be None, otherwise it would - // have also been returned by the `all_nodes` iter. - debug_assert!( - changed.as_ref().is_err() || changed.as_ref().is_ok_and(|(_, node)| node.is_none()), - "changed node is Some but wasn't returned by `all_nodes` iterator: {changed:?}", - ); - changed - } - EitherOrBoth::Right(wiped) => { - // A node was found in the db (indicating it was wiped) but was not given in `paths`. - // Return it as-is. - wiped - } - EitherOrBoth::Both(changed, _wiped) => { - // A path of a changed node (given in `paths`) was found with a previous value in the - // database. The changed node must have a value which is equal to the one found by the - // `all_nodes` iterator. If the changed node had no previous value (None) it wouldn't - // be returned by `all_nodes` and so would be in the Left branch. - // - // Due to the ordering closure passed to `merge_join_by` it's not possible for either - // value to be an error here. 
- debug_assert!(changed.is_ok(), "unreachable error condition: {changed:?}"); - debug_assert_eq!(*changed.as_ref().unwrap(), _wiped.unwrap()); - changed - } - })) -} diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index e12095ff446..0e290f4aecd 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -9,7 +9,7 @@ use crate::{ HashedPostStateProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, RocksDBProviderFactory, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TrieReader, + TransactionVariant, TransactionsProvider, }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; @@ -29,7 +29,7 @@ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; -use reth_trie::{updates::TrieUpdatesSorted, HashedPostState, KeccakKeyHasher}; +use reth_trie::{HashedPostState, KeccakKeyHasher}; use revm_database::BundleState; use std::{ ops::{RangeBounds, RangeInclusive}, @@ -768,19 +768,6 @@ impl StateReader for BlockchainProvider { } } -impl TrieReader for BlockchainProvider { - fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { - self.consistent_provider()?.trie_reverts(from) - } - - fn get_block_trie_updates( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - self.consistent_provider()?.get_block_trie_updates(block_number) - } -} - #[cfg(test)] mod tests { use crate::{ diff --git a/crates/storage/provider/src/providers/consistent.rs 
b/crates/storage/provider/src/providers/consistent.rs index 335417a16a6..b4eb5769c6b 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -5,7 +5,7 @@ use crate::{ BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, TrieReader, + TransactionsProvider, }; use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{ @@ -30,7 +30,6 @@ use reth_storage_api::{ StateProviderBox, StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::TrieUpdatesSorted; use revm_database::states::PlainStorageRevert; use std::{ ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, @@ -1559,19 +1558,6 @@ impl StateReader for ConsistentProvider { } } -impl TrieReader for ConsistentProvider { - fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { - self.storage_provider.trie_reverts(from) - } - - fn get_block_trie_updates( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - self.storage_provider.get_block_trie_updates(block_number) - } -} - #[cfg(test)] mod tests { use crate::{ diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 45186da71db..0888b34d9f1 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -82,8 +82,6 @@ pub(crate) struct DatabaseProviderMetrics { save_blocks_write_state: Histogram, /// Duration of `write_hashed_state` in `save_blocks` save_blocks_write_hashed_state: Histogram, - /// Duration of `write_trie_changesets` in `save_blocks` - save_blocks_write_trie_changesets: Histogram, /// Duration of 
`write_trie_updates` in `save_blocks` save_blocks_write_trie_updates: Histogram, /// Duration of `update_history_indices` in `save_blocks` @@ -110,7 +108,6 @@ pub(crate) struct SaveBlocksTimings { pub insert_block: Duration, pub write_state: Duration, pub write_hashed_state: Duration, - pub write_trie_changesets: Duration, pub write_trie_updates: Duration, pub update_history_indices: Duration, pub update_pipeline_stages: Duration, @@ -153,7 +150,6 @@ impl DatabaseProviderMetrics { self.save_blocks_insert_block.record(timings.insert_block); self.save_blocks_write_state.record(timings.write_state); self.save_blocks_write_hashed_state.record(timings.write_hashed_state); - self.save_blocks_write_trie_changesets.record(timings.write_trie_changesets); self.save_blocks_write_trie_updates.record(timings.write_trie_updates); self.save_blocks_update_history_indices.record(timings.update_history_indices); self.save_blocks_update_pipeline_stages.record(timings.update_pipeline_stages); diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 99c81755b47..387f8a86935 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -33,6 +33,7 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; +use reth_trie_db::ChangesetCache; use revm_database::BundleState; use std::{ ops::{RangeBounds, RangeInclusive}, @@ -74,6 +75,8 @@ pub struct ProviderFactory { storage_settings: Arc>, /// `RocksDB` provider rocksdb_provider: RocksDBProvider, + /// Changeset cache for trie unwinding + changeset_cache: ChangesetCache, } impl ProviderFactory>> { @@ -104,6 +107,7 @@ impl ProviderFactory { Default::default(), Arc::new(RwLock::new(legacy_settings)), rocksdb_provider.clone(), + ChangesetCache::new(), ) .storage_settings()? 
.unwrap_or(legacy_settings); @@ -116,6 +120,7 @@ impl ProviderFactory { storage: Default::default(), storage_settings: Arc::new(RwLock::new(storage_settings)), rocksdb_provider, + changeset_cache: ChangesetCache::new(), }) } } @@ -127,6 +132,12 @@ impl ProviderFactory { self } + /// Sets the changeset cache for an existing [`ProviderFactory`]. + pub fn with_changeset_cache(mut self, changeset_cache: ChangesetCache) -> Self { + self.changeset_cache = changeset_cache; + self + } + /// Returns reference to the underlying database. pub const fn db_ref(&self) -> &N::DB { &self.db @@ -197,6 +208,7 @@ impl ProviderFactory { self.storage.clone(), self.storage_settings.clone(), self.rocksdb_provider.clone(), + self.changeset_cache.clone(), )) } @@ -214,6 +226,7 @@ impl ProviderFactory { self.storage.clone(), self.storage_settings.clone(), self.rocksdb_provider.clone(), + self.changeset_cache.clone(), ))) } @@ -623,6 +636,7 @@ where storage, storage_settings, rocksdb_provider, + changeset_cache, } = self; f.debug_struct("ProviderFactory") .field("db", &db) @@ -632,6 +646,7 @@ where .field("storage", &storage) .field("storage_settings", &*storage_settings.read()) .field("rocksdb_provider", &rocksdb_provider) + .field("changeset_cache", &changeset_cache) .finish() } } @@ -646,6 +661,7 @@ impl Clone for ProviderFactory { storage: self.storage.clone(), storage_settings: self.storage_settings.clone(), rocksdb_provider: self.rocksdb_provider.clone(), + changeset_cache: self.changeset_cache.clone(), } } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index dec302f8f04..1f4637d33f1 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,7 +1,5 @@ use crate::{ - changesets_utils::{ - storage_trie_wiped_changeset_iter, StorageRevertsIter, StorageTrieCurrentValuesIter, - }, + changesets_utils::StorageRevertsIter, 
providers::{ database::{chain::ChainStorage, metrics}, rocksdb::{PendingRocksDBBatches, RocksDBProvider, RocksDBWriteCtx}, @@ -20,7 +18,7 @@ use crate::{ PruneCheckpointReader, PruneCheckpointWriter, RawRocksDBBatch, RevertsInit, RocksBatchArg, RocksDBProviderFactory, RocksTxRefArg, StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieReader, TrieWriter, + TransactionsProvider, TransactionsProviderExt, TrieWriter, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta, TxHashRef}, @@ -29,7 +27,7 @@ use alloy_consensus::{ use alloy_eips::BlockHashOrNumber; use alloy_primitives::{ keccak256, - map::{hash_map, B256Map, HashMap, HashSet}, + map::{hash_map, HashMap, HashSet}, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, }; use itertools::Itertools; @@ -66,16 +64,12 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::{ProviderResult, StaticFileWriterError}; use reth_trie::{ - trie_cursor::{ - InMemoryTrieCursor, InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory, - TrieCursorIter, - }, + changesets::storage_trie_wiped_changeset_iter, + trie_cursor::{InMemoryTrieCursor, TrieCursor, TrieCursorIter, TrieStorageCursor}, updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, HashedPostStateSorted, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, }; -use reth_trie_db::{ - DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory, -}; +use reth_trie_db::{ChangesetCache, DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}; use revm_database::states::{ PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset, }; @@ -187,6 +181,8 @@ pub struct DatabaseProvider { storage_settings: Arc>, /// `RocksDB` provider rocksdb_provider: RocksDBProvider, + /// Changeset cache for trie unwinding + changeset_cache: ChangesetCache, /// Pending 
`RocksDB` batches to be committed at provider commit time. #[cfg_attr(not(all(unix, feature = "rocksdb")), allow(dead_code))] pending_rocksdb_batches: PendingRocksDBBatches, @@ -206,6 +202,7 @@ impl Debug for DatabaseProvider { .field("storage", &self.storage) .field("storage_settings", &self.storage_settings) .field("rocksdb_provider", &self.rocksdb_provider) + .field("changeset_cache", &self.changeset_cache) .field("pending_rocksdb_batches", &"") .field("minimum_pruning_distance", &self.minimum_pruning_distance) .finish() @@ -319,6 +316,7 @@ impl> ChainSpe impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. + #[allow(clippy::too_many_arguments)] pub fn new_rw( tx: TX, chain_spec: Arc, @@ -327,6 +325,7 @@ impl DatabaseProvider { storage: Arc, storage_settings: Arc>, rocksdb_provider: RocksDBProvider, + changeset_cache: ChangesetCache, ) -> Self { Self { tx, @@ -336,6 +335,7 @@ impl DatabaseProvider { storage, storage_settings, rocksdb_provider, + changeset_cache, pending_rocksdb_batches: Default::default(), minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, metrics: metrics::DatabaseProviderMetrics::default(), @@ -529,7 +529,6 @@ impl DatabaseProvider DatabaseProvider DatabaseProvider DatabaseProvider { /// Creates a provider with an inner read-only transaction. 
+ #[allow(clippy::too_many_arguments)] pub fn new( tx: TX, chain_spec: Arc, @@ -842,6 +848,7 @@ impl DatabaseProvider { storage: Arc, storage_settings: Arc>, rocksdb_provider: RocksDBProvider, + changeset_cache: ChangesetCache, ) -> Self { Self { tx, @@ -851,6 +858,7 @@ impl DatabaseProvider { storage, storage_settings, rocksdb_provider, + changeset_cache, pending_rocksdb_batches: Default::default(), minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, metrics: metrics::DatabaseProviderMetrics::default(), @@ -2187,7 +2195,7 @@ impl StateWriter } // Write account changes - tracing::debug!(target: "sync::stages::merkle_changesets", ?first_block, "Writing account changes"); + tracing::trace!(?first_block, "Writing account changes"); for (block_index, account_block_reverts) in reverts.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; let changeset = account_block_reverts @@ -2680,127 +2688,6 @@ impl TrieWriter for DatabaseProvider } } -impl TrieReader for DatabaseProvider { - fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { - let tx = self.tx_ref(); - - // Read account trie changes directly into a Vec - data is already sorted by nibbles - // within each block, and we want the oldest (first) version of each node sorted by path. - let mut account_nodes = Vec::new(); - let mut seen_account_keys = HashSet::new(); - let mut accounts_cursor = tx.cursor_dup_read::()?; - - for entry in accounts_cursor.walk_range(from..)? { - let (_, TrieChangeSetsEntry { nibbles, node }) = entry?; - // Only keep the first (oldest) version of each node - if seen_account_keys.insert(nibbles.0) { - account_nodes.push((nibbles.0, node)); - } - } - - account_nodes.sort_by_key(|(path, _)| *path); - - // Read storage trie changes - data is sorted by (block, hashed_address, nibbles) - // Keep track of seen (address, nibbles) pairs to only keep the oldest version per address, - // sorted by path. 
- let mut storage_tries = B256Map::>::default(); - let mut seen_storage_keys = HashSet::new(); - let mut storages_cursor = tx.cursor_dup_read::()?; - - // Create storage range starting from `from` block - let storage_range_start = BlockNumberHashedAddress((from, B256::ZERO)); - - for entry in storages_cursor.walk_range(storage_range_start..)? { - let ( - BlockNumberHashedAddress((_, hashed_address)), - TrieChangeSetsEntry { nibbles, node }, - ) = entry?; - - // Only keep the first (oldest) version of each node for this address - if seen_storage_keys.insert((hashed_address, nibbles.0)) { - storage_tries.entry(hashed_address).or_default().push((nibbles.0, node)); - } - } - - // Convert to StorageTrieUpdatesSorted - let storage_tries = storage_tries - .into_iter() - .map(|(address, mut nodes)| { - nodes.sort_by_key(|(path, _)| *path); - (address, StorageTrieUpdatesSorted { storage_nodes: nodes, is_deleted: false }) - }) - .collect(); - - Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) - } - - fn get_block_trie_updates( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - let tx = self.tx_ref(); - - // Step 1: Get the trie reverts for the state after the target block - let reverts = self.trie_reverts(block_number + 1)?; - - // Step 2: Create an InMemoryTrieCursorFactory with the reverts - // This gives us the trie state as it was after the target block was processed - let db_cursor_factory = DatabaseTrieCursorFactory::new(tx); - let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts); - - // Step 3: Collect all account trie nodes that changed in the target block - let mut account_nodes = Vec::new(); - - // Walk through all account trie changes for this block - let mut accounts_trie_cursor = tx.cursor_dup_read::()?; - let mut account_cursor = cursor_factory.account_trie_cursor()?; - - for entry in accounts_trie_cursor.walk_dup(Some(block_number), None)? { - let (_, TrieChangeSetsEntry { nibbles, .. 
}) = entry?; - // Look up the current value of this trie node using the overlay cursor - let node_value = account_cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); - account_nodes.push((nibbles.0, node_value)); - } - - // Step 4: Collect all storage trie nodes that changed in the target block - let mut storage_tries = B256Map::default(); - let mut storages_trie_cursor = tx.cursor_dup_read::()?; - let storage_range_start = BlockNumberHashedAddress((block_number, B256::ZERO)); - let storage_range_end = BlockNumberHashedAddress((block_number + 1, B256::ZERO)); - - let mut current_hashed_address = None; - let mut storage_cursor = None; - - for entry in storages_trie_cursor.walk_range(storage_range_start..storage_range_end)? { - let ( - BlockNumberHashedAddress((_, hashed_address)), - TrieChangeSetsEntry { nibbles, .. }, - ) = entry?; - - // Check if we need to create a new storage cursor for a different account - if current_hashed_address != Some(hashed_address) { - storage_cursor = Some(cursor_factory.storage_trie_cursor(hashed_address)?); - current_hashed_address = Some(hashed_address); - } - - // Look up the current value of this storage trie node - let cursor = - storage_cursor.as_mut().expect("storage_cursor was just initialized above"); - let node_value = cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); - storage_tries - .entry(hashed_address) - .or_insert_with(|| StorageTrieUpdatesSorted { - storage_nodes: Vec::new(), - is_deleted: false, - }) - .storage_nodes - .push((nibbles.0, node_value)); - } - - Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) - } -} - impl StorageTrieWriter for DatabaseProvider { /// Writes storage trie updates from the given storage trie map with already sorted updates. 
/// @@ -2843,22 +2730,11 @@ impl StorageTrieWriter for DatabaseP let mut changeset_cursor = self.tx_ref().cursor_dup_write::()?; + let curr_values_cursor = self.tx_ref().cursor_dup_read::()?; - // We hold two cursors to the same table because we use them simultaneously when an - // account's storage is wiped. We keep them outside the for-loop so they can be re-used - // between accounts. - let changed_curr_values_cursor = self.tx_ref().cursor_dup_read::()?; - let wiped_nodes_cursor = self.tx_ref().cursor_dup_read::()?; - - // DatabaseStorageTrieCursor requires ownership of the cursor. The easiest way to deal with - // this is to create this outer variable with an initial dummy account, and overwrite it on - // every loop for every real account. - let mut changed_curr_values_cursor = DatabaseStorageTrieCursor::new( - changed_curr_values_cursor, - B256::default(), // Will be set per iteration - ); - let mut wiped_nodes_cursor = DatabaseStorageTrieCursor::new( - wiped_nodes_cursor, + // Wrap the cursor in DatabaseStorageTrieCursor + let mut db_storage_cursor = DatabaseStorageTrieCursor::new( + curr_values_cursor, B256::default(), // Will be set per iteration ); @@ -2868,43 +2744,22 @@ impl StorageTrieWriter for DatabaseP for (hashed_address, storage_trie_updates) in storage_tries { let changeset_key = BlockNumberHashedAddress((block_number, *hashed_address)); - // Update the hashed address for the cursors - changed_curr_values_cursor = - DatabaseStorageTrieCursor::new(changed_curr_values_cursor.cursor, *hashed_address); + // Update the hashed address for the cursor + db_storage_cursor.set_hashed_address(*hashed_address); // Get the overlay updates, or use empty updates let overlay = updates_overlay.unwrap_or(&empty_updates); // Wrap the cursor in InMemoryTrieCursor with the overlay - let mut in_memory_changed_cursor = InMemoryTrieCursor::new_storage( - &mut changed_curr_values_cursor, - overlay, - *hashed_address, - ); + let mut in_memory_storage_cursor = + 
InMemoryTrieCursor::new_storage(&mut db_storage_cursor, overlay, *hashed_address); - // Create an iterator which produces the current values of all updated paths, or None if - // they are currently unset. - let curr_values_of_changed = StorageTrieCurrentValuesIter::new( - storage_trie_updates.storage_nodes.iter().map(|e| e.0), - &mut in_memory_changed_cursor, - )?; + let changed_paths = storage_trie_updates.storage_nodes.iter().map(|e| e.0); if storage_trie_updates.is_deleted() { - // Create an iterator that starts from the beginning of the storage trie for this - // account - wiped_nodes_cursor = - DatabaseStorageTrieCursor::new(wiped_nodes_cursor.cursor, *hashed_address); - - // Wrap the wiped nodes cursor in InMemoryTrieCursor with the overlay - let mut in_memory_wiped_cursor = InMemoryTrieCursor::new_storage( - &mut wiped_nodes_cursor, - overlay, - *hashed_address, - ); - - let all_nodes = TrieCursorIter::new(&mut in_memory_wiped_cursor); + let all_nodes = TrieCursorIter::new(&mut in_memory_storage_cursor); - for wiped in storage_trie_wiped_changeset_iter(curr_values_of_changed, all_nodes)? { + for wiped in storage_trie_wiped_changeset_iter(changed_paths, all_nodes)? 
{ let (path, node) = wiped?; num_written += 1; changeset_cursor.append_dup( @@ -2913,8 +2768,8 @@ impl StorageTrieWriter for DatabaseP )?; } } else { - for curr_value in curr_values_of_changed { - let (path, node) = curr_value?; + for path in changed_paths { + let node = in_memory_storage_cursor.seek_exact(path)?.map(|(_, node)| node); num_written += 1; changeset_cursor.append_dup( changeset_key, @@ -3672,6 +3527,7 @@ mod tests { test_utils::{blocks::BlockchainTestData, create_test_provider_factory}, BlockWriter, }; + use alloy_primitives::map::B256Map; use reth_ethereum_primitives::Receipt; use reth_testing_utils::generators::{self, random_block, BlockParams}; use reth_trie::Nibbles; @@ -4913,279 +4769,6 @@ mod tests { provider_rw.commit().unwrap(); } - #[test] - fn test_get_block_trie_updates() { - use reth_db_api::models::BlockNumberHashedAddress; - use reth_trie::{BranchNodeCompact, StorageTrieEntry}; - - let factory = create_test_provider_factory(); - let provider_rw = factory.provider_rw().unwrap(); - - let target_block = 2u64; - let next_block = 3u64; - - // Create test nibbles and nodes for accounts - let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); - let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); - let account_nibbles3 = Nibbles::from_nibbles([0x9, 0xa, 0xb, 0xc]); - - let node1 = BranchNodeCompact::new( - 0b1111_1111_0000_0000, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - let node2 = BranchNodeCompact::new( - 0b0000_0000_1111_1111, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - let node3 = BranchNodeCompact::new( - 0b1010_1010_1010_1010, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - // Pre-populate AccountsTrie with nodes that will be the final state - { - let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); - cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap(); - 
cursor.insert(StoredNibbles(account_nibbles2), &node2).unwrap(); - // account_nibbles3 will be deleted (not in final state) - } - - // Insert trie changesets for target_block - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - // nibbles1 was updated in target_block (old value stored) - cursor - .append_dup( - target_block, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(account_nibbles1), - node: Some(BranchNodeCompact::new( - 0b1111_0000_0000_0000, // old value - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - )), - }, - ) - .unwrap(); - // nibbles2 was created in target_block (no old value) - cursor - .append_dup( - target_block, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(account_nibbles2), - node: None, - }, - ) - .unwrap(); - } - - // Insert trie changesets for next_block (to test overlay) - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - // nibbles3 was deleted in next_block (old value stored) - cursor - .append_dup( - next_block, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(account_nibbles3), - node: Some(node3), - }, - ) - .unwrap(); - } - - // Storage trie updates - let storage_address1 = B256::from([1u8; 32]); - let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); - let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); - - let storage_node1 = BranchNodeCompact::new( - 0b1111_1111_1111_0000, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - let storage_node2 = BranchNodeCompact::new( - 0b0101_0101_0101_0101, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - // Pre-populate StoragesTrie with final state - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - cursor - .upsert( - storage_address1, - &StorageTrieEntry { - nibbles: StoredNibblesSubKey(storage_nibbles1), - node: storage_node1.clone(), - }, - ) - .unwrap(); - // storage_nibbles2 was deleted in next_block, so 
it's not in final state - } - - // Insert storage trie changesets for target_block - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - let key = BlockNumberHashedAddress((target_block, storage_address1)); - - // storage_nibbles1 was updated - cursor - .append_dup( - key, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles1), - node: Some(BranchNodeCompact::new( - 0b0000_0000_1111_1111, // old value - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - )), - }, - ) - .unwrap(); - - // storage_nibbles2 was created - cursor - .append_dup( - key, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles2), - node: None, - }, - ) - .unwrap(); - } - - // Insert storage trie changesets for next_block (to test overlay) - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - let key = BlockNumberHashedAddress((next_block, storage_address1)); - - // storage_nibbles2 was deleted in next_block - cursor - .append_dup( - key, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles2), - node: Some(BranchNodeCompact::new( - 0b0101_0101_0101_0101, // value that was deleted - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - )), - }, - ) - .unwrap(); - } - - provider_rw.commit().unwrap(); - - // Now test get_block_trie_updates - let provider = factory.provider().unwrap(); - let result = provider.get_block_trie_updates(target_block).unwrap(); - - // Verify account trie updates - assert_eq!(result.account_nodes_ref().len(), 2, "Should have 2 account trie updates"); - - // Check nibbles1 - should have the current value (node1) - let nibbles1_update = result - .account_nodes_ref() - .iter() - .find(|(n, _)| n == &account_nibbles1) - .expect("Should find nibbles1"); - assert!(nibbles1_update.1.is_some(), "nibbles1 should have a value"); - assert_eq!( - nibbles1_update.1.as_ref().unwrap().state_mask, - node1.state_mask, - "nibbles1 should have current value" - 
); - - // Check nibbles2 - should have the current value (node2) - let nibbles2_update = result - .account_nodes_ref() - .iter() - .find(|(n, _)| n == &account_nibbles2) - .expect("Should find nibbles2"); - assert!(nibbles2_update.1.is_some(), "nibbles2 should have a value"); - assert_eq!( - nibbles2_update.1.as_ref().unwrap().state_mask, - node2.state_mask, - "nibbles2 should have current value" - ); - - // nibbles3 should NOT be in the result (it was changed in next_block, not target_block) - assert!( - !result.account_nodes_ref().iter().any(|(n, _)| n == &account_nibbles3), - "nibbles3 should not be in target_block updates" - ); - - // Verify storage trie updates - assert_eq!(result.storage_tries_ref().len(), 1, "Should have 1 storage trie"); - let storage_updates = result - .storage_tries_ref() - .get(&storage_address1) - .expect("Should have storage updates for address1"); - - assert_eq!(storage_updates.storage_nodes.len(), 2, "Should have 2 storage node updates"); - - // Check storage_nibbles1 - should have current value - let storage1_update = storage_updates - .storage_nodes - .iter() - .find(|(n, _)| n == &storage_nibbles1) - .expect("Should find storage_nibbles1"); - assert!(storage1_update.1.is_some(), "storage_nibbles1 should have a value"); - assert_eq!( - storage1_update.1.as_ref().unwrap().state_mask, - storage_node1.state_mask, - "storage_nibbles1 should have current value" - ); - - // Check storage_nibbles2 - was created in target_block, will be deleted in next_block - // So it should have a value (the value that will be deleted) - let storage2_update = storage_updates - .storage_nodes - .iter() - .find(|(n, _)| n == &storage_nibbles2) - .expect("Should find storage_nibbles2"); - assert!( - storage2_update.1.is_some(), - "storage_nibbles2 should have a value (the node that will be deleted in next block)" - ); - assert_eq!( - storage2_update.1.as_ref().unwrap().state_mask, - storage_node2.state_mask, - "storage_nibbles2 should have the value that 
was created and will be deleted" - ); - } - #[test] fn test_prunable_receipts_logic() { let insert_blocks = diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 181dcf312cd..5c7877f7b14 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -8,7 +8,7 @@ use reth_prune_types::PruneSegment; use reth_stages_types::StageId; use reth_storage_api::{ BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, - DatabaseProviderROFactory, PruneCheckpointReader, StageCheckpointReader, TrieReader, + DatabaseProviderROFactory, PruneCheckpointReader, StageCheckpointReader, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -17,7 +17,7 @@ use reth_trie::{ HashedPostStateSorted, KeccakKeyHasher, }; use reth_trie_db::{ - DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory, + ChangesetCache, DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory, }; use std::{ collections::{hash_map::Entry, HashMap}, @@ -67,6 +67,8 @@ pub struct OverlayStateProviderFactory { trie_overlay: Option>, /// Optional hashed state overlay hashed_state_overlay: Option>, + /// Changeset cache handle for retrieving trie changesets + changeset_cache: ChangesetCache, /// Metrics for tracking provider operations metrics: OverlayStateProviderMetrics, /// A cache which maps `db_tip -> Overlay`. 
If the db tip changes during usage of the factory @@ -76,12 +78,13 @@ pub struct OverlayStateProviderFactory { impl OverlayStateProviderFactory { /// Create a new overlay state provider factory - pub fn new(factory: F) -> Self { + pub fn new(factory: F, changeset_cache: ChangesetCache) -> Self { Self { factory, block_hash: None, trie_overlay: None, hashed_state_overlay: None, + changeset_cache, metrics: OverlayStateProviderMetrics::default(), overlay_cache: Default::default(), } @@ -112,13 +115,22 @@ impl OverlayStateProviderFactory { self.hashed_state_overlay = hashed_state_overlay; self } + + /// Extends the existing hashed state overlay with the given [`HashedPostStateSorted`]. + pub fn with_extended_hashed_state_overlay(mut self, other: HashedPostStateSorted) -> Self { + if let Some(overlay) = self.hashed_state_overlay.as_mut() { + Arc::make_mut(overlay).extend_ref(&other); + } else { + self.hashed_state_overlay = Some(Arc::new(other)) + } + self + } } impl OverlayStateProviderFactory where F: DatabaseProviderFactory, - F::Provider: TrieReader - + StageCheckpointReader + F::Provider: StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + DBProvider @@ -144,7 +156,7 @@ where /// the DB are currently synced to. fn get_db_tip_block_number(&self, provider: &F::Provider) -> ProviderResult { provider - .get_stage_checkpoint(StageId::MerkleChangeSets)? + .get_stage_checkpoint(StageId::Finish)? .as_ref() .map(|chk| chk.block_number) .ok_or_else(|| ProviderError::InsufficientChangesets { requested: 0, available: 0..=0 }) @@ -153,7 +165,6 @@ where /// Returns whether or not it is required to collect reverts, and validates that there are /// sufficient changesets to revert to the requested block number if so. /// - /// Returns an error if the `MerkleChangeSets` checkpoint doesn't cover the requested block. /// Takes into account both the stage checkpoint and the prune checkpoint to determine the /// available data range. 
fn reverts_required( @@ -168,18 +179,10 @@ where return Ok(false) } - // Get the MerkleChangeSets prune checkpoints, which will be used to determine the lower - // bound. - let prune_checkpoint = provider.get_prune_checkpoint(PruneSegment::MerkleChangeSets)?; - - // Extract the lower bound from prune checkpoint if available. - // - // If not available we assume pruning has never ran and so there is no lower bound. This - // should not generally happen, since MerkleChangeSets always have pruning enabled, but when - // starting a new node from scratch (e.g. in a test case or benchmark) it can surface. - // + // Check account history prune checkpoint to determine the lower bound of available data. // The prune checkpoint's block_number is the highest pruned block, so data is available - // starting from the next block + // starting from the next block. + let prune_checkpoint = provider.get_prune_checkpoint(PruneSegment::AccountHistory)?; let lower_bound = prune_checkpoint .and_then(|chk| chk.block_number) .map(|block_number| block_number + 1) @@ -223,16 +226,32 @@ where self.get_requested_block_number(provider)? && self.reverts_required(provider, db_tip_block, from_block)? 
{ - // Collect trie reverts + debug!( + target: "providers::state::overlay", + block_hash = ?self.block_hash, + from_block, + db_tip_block, + range_start = from_block + 1, + range_end = db_tip_block, + "Collecting trie reverts for overlay state provider" + ); + + // Collect trie reverts using changeset cache let mut trie_reverts = { let _guard = debug_span!(target: "providers::state::overlay", "Retrieving trie reverts") .entered(); let start = Instant::now(); - let res = provider.trie_reverts(from_block + 1)?; + + // Use changeset cache to retrieve and accumulate reverts to restore state after + // from_block + let accumulated_reverts = self + .changeset_cache + .get_or_compute_range(provider, (from_block + 1)..=db_tip_block)?; + retrieve_trie_reverts_duration = start.elapsed(); - res + accumulated_reverts }; // Collect state reverts @@ -361,11 +380,7 @@ where impl DatabaseProviderROFactory for OverlayStateProviderFactory where F: DatabaseProviderFactory, - F::Provider: TrieReader - + StageCheckpointReader - + PruneCheckpointReader - + BlockNumReader - + ChangeSetReader, + F::Provider: StageCheckpointReader + PruneCheckpointReader + BlockNumReader + ChangeSetReader, { type Provider = OverlayStateProvider; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index f5c40978a73..fba585cc793 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -34,13 +34,12 @@ use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider, - StorageRootProvider, TrieReader, + StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::{TrieUpdates, TrieUpdatesSorted}, - AccountProof, HashedPostState, 
HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, - StorageProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; use std::{ collections::BTreeMap, @@ -1001,19 +1000,6 @@ impl StateReader for MockEthProvider< } } -impl TrieReader for MockEthProvider { - fn trie_reverts(&self, _from: BlockNumber) -> ProviderResult { - Ok(TrieUpdatesSorted::default()) - } - - fn get_block_trie_updates( - &self, - _block_number: BlockNumber, - ) -> ProviderResult { - Ok(TrieUpdatesSorted::default()) - } -} - impl CanonStateSubscriptions for MockEthProvider { diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 737685799e5..67c633559c9 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -4,7 +4,7 @@ use crate::{ AccountReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, HashedPostStateProvider, PruneCheckpointReader, RocksDBProviderFactory, StageCheckpointReader, StateProviderFactory, StateReader, - StaticFileProviderFactory, TrieReader, + StaticFileProviderFactory, }; use reth_chain_state::{ CanonStateSubscriptions, ForkChoiceSubscriptions, PersistedBlockSubscriptions, @@ -17,11 +17,7 @@ use std::fmt::Debug; pub trait FullProvider: DatabaseProviderFactory< DB = N::DB, - Provider: BlockReader - + TrieReader - + StageCheckpointReader - + PruneCheckpointReader - + ChangeSetReader, + Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader, > + NodePrimitivesProvider + StaticFileProviderFactory + RocksDBProviderFactory @@ -50,11 +46,7 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory< DB = N::DB, - Provider: BlockReader - + TrieReader - + StageCheckpointReader - + PruneCheckpointReader - + ChangeSetReader, + Provider: BlockReader + 
StageCheckpointReader + PruneCheckpointReader + ChangeSetReader, > + NodePrimitivesProvider + StaticFileProviderFactory + RocksDBProviderFactory diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 8e912c23a40..beb9d23165b 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -6,7 +6,7 @@ use crate::{ HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - StorageRootProvider, TransactionVariant, TransactionsProvider, TrieReader, + StorageRootProvider, TransactionVariant, TransactionsProvider, }; #[cfg(feature = "db-api")] @@ -35,9 +35,8 @@ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie_common::{ - updates::{TrieUpdates, TrieUpdatesSorted}, - AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, - StorageProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; /// Supports various api interfaces for testing purposes. 
@@ -646,19 +645,6 @@ impl DBProvider for NoopProvider TrieReader for NoopProvider { - fn trie_reverts(&self, _from: BlockNumber) -> ProviderResult { - Ok(TrieUpdatesSorted::default()) - } - - fn get_block_trie_updates( - &self, - _block_number: BlockNumber, - ) -> ProviderResult { - Ok(TrieUpdatesSorted::default()) - } -} - #[cfg(feature = "db-api")] impl DatabaseProviderFactory for NoopProvider diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 45ee5ce8036..50ea5a05670 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -89,20 +89,6 @@ pub trait StateProofProvider { fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult>; } -/// Trie Reader -#[auto_impl::auto_impl(&, Box)] -pub trait TrieReader: Send { - /// Returns the [`TrieUpdatesSorted`] for reverting the trie database to its state prior to the - /// given block and onwards having been processed. - fn trie_reverts(&self, from: BlockNumber) -> ProviderResult; - - /// Returns the trie updates that were applied by the specified block. 
- fn get_block_trie_updates( - &self, - block_number: BlockNumber, - ) -> ProviderResult; -} - /// Trie Writer #[auto_impl::auto_impl(&, Box)] pub trait TrieWriter: Send { diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index d430c756338..a6c4d743031 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -17,12 +17,21 @@ reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-db-api.workspace = true reth-trie.workspace = true +reth-trie-common.workspace = true reth-storage-api = { workspace = true, features = ["db-api"] } reth-storage-errors.workspace = true +reth-stages-types.workspace = true +reth-metrics = { workspace = true, optional = true } # alloy alloy-primitives.workspace = true +# misc +parking_lot.workspace = true + +# metrics +metrics = { workspace = true, optional = true } + # tracing tracing.workspace = true @@ -50,11 +59,13 @@ serde_json.workspace = true similar-asserts.workspace = true [features] -metrics = ["reth-trie/metrics"] +metrics = ["reth-trie/metrics", "dep:reth-metrics", "dep:metrics"] serde = [ "similar-asserts/serde", "alloy-consensus/serde", "alloy-primitives/serde", + "parking_lot/serde", + "reth-stages-types/serde", "reth-storage-api/serde", "reth-trie/serde", "reth-trie-common/serde", @@ -69,5 +80,6 @@ test-utils = [ "reth-db/test-utils", "reth-db-api/test-utils", "reth-provider/test-utils", + "reth-stages-types/test-utils", "reth-trie/test-utils", ] diff --git a/crates/trie/db/src/changesets.rs b/crates/trie/db/src/changesets.rs new file mode 100644 index 00000000000..efc7fb62e87 --- /dev/null +++ b/crates/trie/db/src/changesets.rs @@ -0,0 +1,841 @@ +//! Trie changeset computation and caching utilities. +//! +//! This module provides functionality to compute trie changesets for a given block, +//! which represent the old trie node values before the block was processed. +//! +//! 
It also provides an efficient in-memory cache for these changesets, which is essential for: +//! - **Reorg support**: Quickly access changesets to revert blocks during chain reorganizations +//! - **Memory efficiency**: Automatic eviction ensures bounded memory usage + +use crate::{DatabaseHashedPostState, DatabaseStateRoot, DatabaseTrieCursorFactory}; +use alloy_primitives::{map::B256Map, BlockNumber, B256}; +use parking_lot::RwLock; +use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_trie::{ + changesets::compute_trie_changesets, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory}, + HashedPostStateSorted, KeccakKeyHasher, StateRoot, TrieInputSorted, +}; +use reth_trie_common::updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}; +use std::{ + collections::{BTreeMap, HashMap}, + ops::RangeInclusive, + sync::Arc, + time::Instant, +}; +use tracing::debug; + +#[cfg(feature = "metrics")] +use reth_metrics::{ + metrics::{Counter, Gauge}, + Metrics, +}; + +/// Computes trie changesets for a block. +/// +/// # Algorithm +/// +/// For block N: +/// 1. Query cumulative `HashedPostState` revert for block N-1 (from db tip to after N-1) +/// 2. Use that to calculate cumulative `TrieUpdates` revert for block N-1 +/// 3. Query per-block `HashedPostState` revert for block N +/// 4. Create prefix sets from the per-block revert (step 3) +/// 5. Create overlay with cumulative trie updates and cumulative state revert for N-1 +/// 6. Calculate trie updates for block N using the overlay and per-block `HashedPostState`. +/// 7. 
Compute changesets using the N-1 overlay and the newly calculated trie updates for N +/// +/// # Arguments +/// +/// * `provider` - Database provider with changeset access +/// * `block_number` - Block number to compute changesets for +/// +/// # Returns +/// +/// Changesets (old trie node values) for the specified block +/// +/// # Errors +/// +/// Returns error if: +/// - Block number exceeds database tip (based on Finish stage checkpoint) +/// - Database access fails +/// - State root computation fails +pub fn compute_block_trie_changesets( + provider: &Provider, + block_number: BlockNumber, +) -> Result +where + Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, +{ + debug!( + target: "trie::changeset_cache", + block_number, + "Computing block trie changesets from database state" + ); + + // Step 1: Collect/calculate state reverts + + // This is just the changes from this specific block + let individual_state_revert = HashedPostStateSorted::from_reverts::( + provider, + block_number..=block_number, + )?; + + // This reverts all changes from db tip back to just after block was processed + let cumulative_state_revert = + HashedPostStateSorted::from_reverts::(provider, (block_number + 1)..)?; + + // This reverts all changes from db tip back to just after block-1 was processed + let mut cumulative_state_revert_prev = cumulative_state_revert.clone(); + cumulative_state_revert_prev.extend_ref(&individual_state_revert); + + // Step 2: Calculate cumulative trie updates revert for block-1 + // This gives us the trie state as it was after block-1 was processed + let prefix_sets_prev = cumulative_state_revert_prev.construct_prefix_sets(); + let input_prev = TrieInputSorted::new( + Arc::default(), + Arc::new(cumulative_state_revert_prev), + prefix_sets_prev, + ); + + let cumulative_trie_updates_prev = + StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input_prev) + .map_err(ProviderError::other)? 
+ .1 + .into_sorted(); + + // Step 2: Create prefix sets from individual revert (only paths changed by this block) + let prefix_sets = individual_state_revert.construct_prefix_sets(); + + // Step 3: Calculate trie updates for block + // Use cumulative trie updates for block-1 as the node overlay and cumulative state for block + let input = TrieInputSorted::new( + Arc::new(cumulative_trie_updates_prev.clone()), + Arc::new(cumulative_state_revert), + prefix_sets, + ); + + let trie_updates = StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input) + .map_err(ProviderError::other)? + .1 + .into_sorted(); + + // Step 4: Compute changesets using cumulative trie updates for block-1 as overlay + // Create an overlay cursor factory that has the trie state from after block-1 + let db_cursor_factory = DatabaseTrieCursorFactory::new(provider.tx_ref()); + let overlay_factory = + InMemoryTrieCursorFactory::new(db_cursor_factory, &cumulative_trie_updates_prev); + + let changesets = + compute_trie_changesets(&overlay_factory, &trie_updates).map_err(ProviderError::other)?; + + debug!( + target: "trie::changeset_cache", + block_number, + num_account_nodes = changesets.account_nodes_ref().len(), + num_storage_tries = changesets.storage_tries_ref().len(), + "Computed block trie changesets successfully" + ); + + Ok(changesets) +} + +/// Computes block trie updates using the changeset cache. +/// +/// # Algorithm +/// +/// For block N: +/// 1. Get cumulative trie reverts from block N+1 to db tip using the cache +/// 2. Create an overlay cursor factory with these reverts (representing trie state after block N) +/// 3. Walk through account trie changesets for block N +/// 4. For each changed path, look up the current value using the overlay cursor +/// 5. Walk through storage trie changesets for block N +/// 6. For each changed path, look up the current value using the overlay cursor +/// 7. 
Return the collected trie updates +/// +/// # Arguments +/// +/// * `cache` - Handle to the changeset cache for retrieving trie reverts +/// * `provider` - Database provider for accessing changesets and block data +/// * `block_number` - Block number to compute trie updates for +/// +/// # Returns +/// +/// Trie updates representing the state of trie nodes after the block was processed +/// +/// # Errors +/// +/// Returns error if: +/// - Block number exceeds database tip +/// - Database access fails +/// - Cache retrieval fails +pub fn compute_block_trie_updates( + cache: &ChangesetCache, + provider: &Provider, + block_number: BlockNumber, +) -> ProviderResult +where + Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, +{ + let tx = provider.tx_ref(); + + // Get the database tip block number + let db_tip_block = provider + .get_stage_checkpoint(reth_stages_types::StageId::Finish)? + .as_ref() + .map(|chk| chk.block_number) + .ok_or_else(|| ProviderError::InsufficientChangesets { + requested: block_number, + available: 0..=0, + })?; + + // Step 1: Get the block hash for the target block + let block_hash = provider.block_hash(block_number)?.ok_or_else(|| { + ProviderError::other(std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("block hash not found for block number {}", block_number), + )) + })?; + + // Step 2: Get the trie changesets for the target block from cache + let changesets = cache.get_or_compute(block_hash, block_number, provider)?; + + // Step 3: Get the trie reverts for the state after the target block using the cache + let reverts = cache.get_or_compute_range(provider, (block_number + 1)..=db_tip_block)?; + + // Step 4: Create an InMemoryTrieCursorFactory with the reverts + // This gives us the trie state as it was after the target block was processed + let db_cursor_factory = DatabaseTrieCursorFactory::new(tx); + let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts); + + // Step 5: 
Collect all account trie nodes that changed in the target block + let mut account_nodes = Vec::new(); + let mut account_cursor = cursor_factory.account_trie_cursor()?; + + // Iterate over the account nodes from the changesets + for (nibbles, _old_node) in changesets.account_nodes_ref() { + // Look up the current value of this trie node using the overlay cursor + let node_value = account_cursor.seek_exact(*nibbles)?.map(|(_, node)| node); + account_nodes.push((*nibbles, node_value)); + } + + // Step 6: Collect all storage trie nodes that changed in the target block + let mut storage_tries = B256Map::default(); + + // Iterate over the storage tries from the changesets + for (hashed_address, storage_changeset) in changesets.storage_tries_ref() { + let mut storage_cursor = cursor_factory.storage_trie_cursor(*hashed_address)?; + let mut storage_nodes = Vec::new(); + + // Iterate over the storage nodes for this account + for (nibbles, _old_node) in storage_changeset.storage_nodes_ref() { + // Look up the current value of this storage trie node + let node_value = storage_cursor.seek_exact(*nibbles)?.map(|(_, node)| node); + storage_nodes.push((*nibbles, node_value)); + } + + storage_tries.insert( + *hashed_address, + StorageTrieUpdatesSorted { storage_nodes, is_deleted: storage_changeset.is_deleted }, + ); + } + + Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) +} + +/// Thread-safe changeset cache. +/// +/// This type wraps a shared, mutable reference to the cache inner. +/// The `RwLock` enables concurrent reads while ensuring exclusive access for writes. +#[derive(Debug, Clone)] +pub struct ChangesetCache { + inner: Arc>, +} + +impl Default for ChangesetCache { + fn default() -> Self { + Self::new() + } +} + +impl ChangesetCache { + /// Creates a new cache. + /// + /// The cache has no capacity limit and relies on explicit eviction + /// via the `evict()` method to manage memory usage. 
+ pub fn new() -> Self { + Self { inner: Arc::new(RwLock::new(ChangesetCacheInner::new())) } + } + + /// Retrieves changesets for a block by hash. + /// + /// Returns `None` if the block is not in the cache (either evicted or never computed). + /// Updates hit/miss metrics accordingly. + pub fn get(&self, block_hash: &B256) -> Option> { + self.inner.read().get(block_hash) + } + + /// Inserts changesets for a block into the cache. + /// + /// This method does not perform any eviction. Eviction must be explicitly + /// triggered by calling `evict()`. + /// + /// # Arguments + /// + /// * `block_hash` - Hash of the block + /// * `block_number` - Block number for tracking and eviction + /// * `changesets` - Trie changesets to cache + pub fn insert(&self, block_hash: B256, block_number: u64, changesets: Arc) { + self.inner.write().insert(block_hash, block_number, changesets) + } + + /// Evicts changesets for blocks below the given block number. + /// + /// This should be called after blocks are persisted to the database to free + /// memory for changesets that are no longer needed in the cache. + /// + /// # Arguments + /// + /// * `up_to_block` - Evict blocks with number < this value. Blocks with number >= this value + /// are retained. + pub fn evict(&self, up_to_block: BlockNumber) { + self.inner.write().evict(up_to_block) + } + + /// Gets changesets from cache, or computes them on-the-fly if missing. + /// + /// This is the primary API for retrieving changesets. On cache miss, + /// it computes changesets from the database state and populates the cache. + /// + /// # Arguments + /// + /// * `block_hash` - Hash of the block to get changesets for + /// * `block_number` - Block number (for cache insertion and logging) + /// * `provider` - Database provider for DB access + /// + /// # Returns + /// + /// Changesets for the block, either from cache or computed on-the-fly + pub fn get_or_compute

( + &self, + block_hash: B256, + block_number: u64, + provider: &P, + ) -> ProviderResult> + where + P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + { + // Try cache first (with read lock) + { + let cache = self.inner.read(); + if let Some(changesets) = cache.get(&block_hash) { + debug!( + target: "trie::changeset_cache", + ?block_hash, + block_number, + "Changeset cache HIT" + ); + return Ok(changesets); + } + } + + // Cache miss - compute from database + debug!( + target: "trie::changeset_cache", + ?block_hash, + block_number, + "Changeset cache MISS, computing from database" + ); + + let start = Instant::now(); + + // Compute changesets + let changesets = + compute_block_trie_changesets(provider, block_number).map_err(ProviderError::other)?; + + let changesets = Arc::new(changesets); + let elapsed = start.elapsed(); + + debug!( + target: "trie::changeset_cache", + ?elapsed, + block_number, + ?block_hash, + "Changeset computed from database and inserting into cache" + ); + + // Store in cache (with write lock) + { + let mut cache = self.inner.write(); + cache.insert(block_hash, block_number, Arc::clone(&changesets)); + } + + debug!( + target: "trie::changeset_cache", + ?block_hash, + block_number, + "Changeset successfully cached" + ); + + Ok(changesets) + } + + /// Gets or computes accumulated trie reverts for a range of blocks. + /// + /// This method retrieves and accumulates all trie changesets (reverts) for the specified + /// block range (inclusive). The changesets are accumulated in reverse order (newest to oldest) + /// so that older values take precedence when there are conflicts. 
+ /// + /// # Arguments + /// + /// * `provider` - Database provider for DB access and block lookups + /// * `range` - Block range to accumulate reverts for (inclusive) + /// + /// # Returns + /// + /// Accumulated trie reverts for all blocks in the specified range + /// + /// # Errors + /// + /// Returns error if: + /// - Any block in the range is beyond the database tip + /// - Database access fails + /// - Block hash lookup fails + /// - Changeset computation fails + pub fn get_or_compute_range

( + &self, + provider: &P, + range: RangeInclusive, + ) -> ProviderResult + where + P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + { + // Get the database tip block number + let db_tip_block = provider + .get_stage_checkpoint(reth_stages_types::StageId::Finish)? + .as_ref() + .map(|chk| chk.block_number) + .ok_or_else(|| ProviderError::InsufficientChangesets { + requested: *range.start(), + available: 0..=0, + })?; + + let start_block = *range.start(); + let end_block = *range.end(); + + // If range end is beyond the tip, return an error + if end_block > db_tip_block { + return Err(ProviderError::InsufficientChangesets { + requested: end_block, + available: 0..=db_tip_block, + }); + } + + let timer = Instant::now(); + + debug!( + target: "trie::changeset_cache", + start_block, + end_block, + db_tip_block, + "Starting get_or_compute_range" + ); + + // Use changeset cache to retrieve and accumulate reverts block by block. + // Iterate in reverse order (newest to oldest) so that older changesets + // take precedence when there are conflicting updates. + let mut accumulated_reverts = TrieUpdatesSorted::default(); + + for block_number in range.rev() { + // Get the block hash for this block number + let block_hash = provider.block_hash(block_number)?.ok_or_else(|| { + ProviderError::other(std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("block hash not found for block number {}", block_number), + )) + })?; + + debug!( + target: "trie::changeset_cache", + block_number, + ?block_hash, + "Looked up block hash for block number in range" + ); + + // Get changesets from cache (or compute on-the-fly) + let changesets = self.get_or_compute(block_hash, block_number, provider)?; + + // Overlay this block's changesets on top of accumulated reverts. + // Since we iterate newest to oldest, older values are added last + // and overwrite any conflicting newer values (oldest changeset values take + // precedence). 
+ accumulated_reverts.extend_ref(&changesets); + } + + let elapsed = timer.elapsed(); + + let num_account_nodes = accumulated_reverts.account_nodes_ref().len(); + let num_storage_tries = accumulated_reverts.storage_tries_ref().len(); + + debug!( + target: "trie::changeset_cache", + ?elapsed, + start_block, + end_block, + num_blocks = end_block.saturating_sub(start_block).saturating_add(1), + num_account_nodes, + num_storage_tries, + "Finished accumulating trie reverts for block range" + ); + + Ok(accumulated_reverts) + } +} + +/// In-memory cache for trie changesets with explicit eviction policy. +/// +/// Holds changesets for blocks that have been validated but not yet persisted. +/// Keyed by block hash for fast lookup during reorgs. Eviction is controlled +/// explicitly by the engine API tree handler when persistence completes. +/// +/// ## Eviction Policy +/// +/// Unlike traditional caches with automatic eviction, this cache requires explicit +/// eviction calls. The engine API tree handler calls `evict(block_number)` after +/// blocks are persisted to the database, ensuring changesets remain available +/// until their corresponding blocks are safely on disk. +/// +/// ## Metrics +/// +/// The cache maintains several metrics for observability: +/// - `hits`: Number of successful cache lookups +/// - `misses`: Number of failed cache lookups +/// - `evictions`: Number of blocks evicted +/// - `size`: Current number of cached blocks +#[derive(Debug)] +struct ChangesetCacheInner { + /// Cache entries: block hash -> (block number, changesets) + entries: HashMap)>, + + /// Block number to hashes mapping for eviction + block_numbers: BTreeMap>, + + /// Metrics for monitoring cache behavior + #[cfg(feature = "metrics")] + metrics: ChangesetCacheMetrics, +} + +#[cfg(feature = "metrics")] +/// Metrics for the changeset cache. +/// +/// These metrics provide visibility into cache performance and help identify +/// potential issues like high miss rates. 
+#[derive(Metrics, Clone)] +#[metrics(scope = "trie.changeset_cache")] +struct ChangesetCacheMetrics { + /// Cache hit counter + hits: Counter, + + /// Cache miss counter + misses: Counter, + + /// Eviction counter + evictions: Counter, + + /// Current cache size (number of entries) + size: Gauge, +} + +impl Default for ChangesetCacheInner { + fn default() -> Self { + Self::new() + } +} + +impl ChangesetCacheInner { + /// Creates a new empty changeset cache. + /// + /// The cache has no capacity limit and relies on explicit eviction + /// via the `evict()` method to manage memory usage. + fn new() -> Self { + Self { + entries: HashMap::new(), + block_numbers: BTreeMap::new(), + #[cfg(feature = "metrics")] + metrics: Default::default(), + } + } + + fn get(&self, block_hash: &B256) -> Option> { + match self.entries.get(block_hash) { + Some((_, changesets)) => { + #[cfg(feature = "metrics")] + self.metrics.hits.increment(1); + Some(Arc::clone(changesets)) + } + None => { + #[cfg(feature = "metrics")] + self.metrics.misses.increment(1); + None + } + } + } + + fn insert(&mut self, block_hash: B256, block_number: u64, changesets: Arc) { + debug!( + target: "trie::changeset_cache", + ?block_hash, + block_number, + cache_size_before = self.entries.len(), + "Inserting changeset into cache" + ); + + // Insert the entry + self.entries.insert(block_hash, (block_number, changesets)); + + // Add block hash to block_numbers mapping + self.block_numbers.entry(block_number).or_default().push(block_hash); + + // Update size metric + #[cfg(feature = "metrics")] + self.metrics.size.set(self.entries.len() as f64); + + debug!( + target: "trie::changeset_cache", + ?block_hash, + block_number, + cache_size_after = self.entries.len(), + "Changeset inserted into cache" + ); + } + + fn evict(&mut self, up_to_block: BlockNumber) { + debug!( + target: "trie::changeset_cache", + up_to_block, + cache_size_before = self.entries.len(), + "Starting cache eviction" + ); + + // Find all block numbers 
that should be evicted (< up_to_block) + let blocks_to_evict: Vec = + self.block_numbers.range(..up_to_block).map(|(num, _)| *num).collect(); + + // Remove entries for each block number below threshold + #[cfg(feature = "metrics")] + let mut evicted_count = 0; + #[cfg(not(feature = "metrics"))] + let mut evicted_count = 0; + + for block_number in &blocks_to_evict { + if let Some(hashes) = self.block_numbers.remove(block_number) { + debug!( + target: "trie::changeset_cache", + block_number, + num_hashes = hashes.len(), + "Evicting block from cache" + ); + for hash in hashes { + if self.entries.remove(&hash).is_some() { + evicted_count += 1; + } + } + } + } + + debug!( + target: "trie::changeset_cache", + up_to_block, + evicted_count, + cache_size_after = self.entries.len(), + "Finished cache eviction" + ); + + // Update metrics if we evicted anything + #[cfg(feature = "metrics")] + if evicted_count > 0 { + self.metrics.evictions.increment(evicted_count as u64); + self.metrics.size.set(self.entries.len() as f64); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::map::B256Map; + + // Helper function to create empty TrieUpdatesSorted for testing + fn create_test_changesets() -> Arc { + Arc::new(TrieUpdatesSorted::new(vec![], B256Map::default())) + } + + #[test] + fn test_insert_and_retrieve_single_entry() { + let mut cache = ChangesetCacheInner::new(); + let hash = B256::random(); + let changesets = create_test_changesets(); + + cache.insert(hash, 100, Arc::clone(&changesets)); + + // Should be able to retrieve it + let retrieved = cache.get(&hash); + assert!(retrieved.is_some()); + assert_eq!(cache.entries.len(), 1); + } + + #[test] + fn test_insert_multiple_entries() { + let mut cache = ChangesetCacheInner::new(); + + // Insert 10 blocks + let mut hashes = Vec::new(); + for i in 0..10 { + let hash = B256::random(); + cache.insert(hash, 100 + i, create_test_changesets()); + hashes.push(hash); + } + + // Should be able to retrieve all + 
assert_eq!(cache.entries.len(), 10); + for hash in &hashes { + assert!(cache.get(hash).is_some()); + } + } + + #[test] + fn test_eviction_when_explicitly_called() { + let mut cache = ChangesetCacheInner::new(); + + // Insert 15 blocks (0-14) + let mut hashes = Vec::new(); + for i in 0..15 { + let hash = B256::random(); + cache.insert(hash, i, create_test_changesets()); + hashes.push((i, hash)); + } + + // All blocks should be present (no automatic eviction) + assert_eq!(cache.entries.len(), 15); + + // Explicitly evict blocks < 4 + cache.evict(4); + + // Blocks 0-3 should be evicted + assert_eq!(cache.entries.len(), 11); // blocks 4-14 = 11 blocks + + // Verify blocks 0-3 are evicted + for i in 0..4 { + assert!(cache.get(&hashes[i as usize].1).is_none(), "Block {} should be evicted", i); + } + + // Verify blocks 4-14 are still present + for i in 4..15 { + assert!(cache.get(&hashes[i as usize].1).is_some(), "Block {} should be present", i); + } + } + + #[test] + fn test_eviction_with_persistence_watermark() { + let mut cache = ChangesetCacheInner::new(); + + // Insert blocks 100-165 + let mut hashes = std::collections::HashMap::new(); + for i in 100..=165 { + let hash = B256::random(); + cache.insert(hash, i, create_test_changesets()); + hashes.insert(i, hash); + } + + // All blocks should be present (no automatic eviction) + assert_eq!(cache.entries.len(), 66); + + // Simulate persistence up to block 164, with 64-block retention window + // Eviction threshold = 164 - 64 = 100 + cache.evict(100); + + // Blocks 100-165 should remain (66 blocks) + assert_eq!(cache.entries.len(), 66); + + // Simulate persistence up to block 165 + // Eviction threshold = 165 - 64 = 101 + cache.evict(101); + + // Blocks 101-165 should remain (65 blocks) + assert_eq!(cache.entries.len(), 65); + assert!(cache.get(&hashes[&100]).is_none()); + assert!(cache.get(&hashes[&101]).is_some()); + } + + #[test] + fn test_out_of_order_inserts_with_explicit_eviction() { + let mut cache = 
ChangesetCacheInner::new(); + + // Insert blocks in random order + let hash_10 = B256::random(); + cache.insert(hash_10, 10, create_test_changesets()); + + let hash_5 = B256::random(); + cache.insert(hash_5, 5, create_test_changesets()); + + let hash_15 = B256::random(); + cache.insert(hash_15, 15, create_test_changesets()); + + let hash_3 = B256::random(); + cache.insert(hash_3, 3, create_test_changesets()); + + // All blocks should be present (no automatic eviction) + assert_eq!(cache.entries.len(), 4); + + // Explicitly evict blocks < 5 + cache.evict(5); + + assert!(cache.get(&hash_3).is_none(), "Block 3 should be evicted"); + assert!(cache.get(&hash_5).is_some(), "Block 5 should be present"); + assert!(cache.get(&hash_10).is_some(), "Block 10 should be present"); + assert!(cache.get(&hash_15).is_some(), "Block 15 should be present"); + } + + #[test] + fn test_multiple_blocks_same_number() { + let mut cache = ChangesetCacheInner::new(); + + // Insert multiple blocks with same number (side chains) + let hash_1a = B256::random(); + let hash_1b = B256::random(); + cache.insert(hash_1a, 100, create_test_changesets()); + cache.insert(hash_1b, 100, create_test_changesets()); + + // Both should be retrievable + assert!(cache.get(&hash_1a).is_some()); + assert!(cache.get(&hash_1b).is_some()); + assert_eq!(cache.entries.len(), 2); + } + + #[test] + fn test_eviction_removes_all_side_chains() { + let mut cache = ChangesetCacheInner::new(); + + // Insert multiple blocks at same height (side chains) + let hash_10a = B256::random(); + let hash_10b = B256::random(); + let hash_10c = B256::random(); + cache.insert(hash_10a, 10, create_test_changesets()); + cache.insert(hash_10b, 10, create_test_changesets()); + cache.insert(hash_10c, 10, create_test_changesets()); + + let hash_20 = B256::random(); + cache.insert(hash_20, 20, create_test_changesets()); + + assert_eq!(cache.entries.len(), 4); + + // Evict blocks < 15 - should remove all three side chains at height 10 + 
cache.evict(15); + + assert_eq!(cache.entries.len(), 1); + assert!(cache.get(&hash_10a).is_none()); + assert!(cache.get(&hash_10b).is_none()); + assert!(cache.get(&hash_10c).is_none()); + assert!(cache.get(&hash_20).is_some()); + } +} diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index 6610e7d0dc1..f509702c2e0 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -2,6 +2,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +mod changesets; +pub use changesets::*; mod hashed_cursor; mod prefix_set; mod proof; diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index 53719892748..f07fce527a4 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -38,7 +38,8 @@ pub fn calculate_state_root(c: &mut Criterion) { provider_rw.commit().unwrap(); } - let factory = OverlayStateProviderFactory::new(provider_factory.clone()); + let changeset_cache = reth_trie_db::ChangesetCache::new(); + let factory = OverlayStateProviderFactory::new(provider_factory.clone(), changeset_cache); // state root group.bench_function(BenchmarkId::new("sync root", size), |b| { diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index d821c7fdc25..7bf936bad3a 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -322,7 +322,9 @@ mod tests { let rt = Runtime::new().unwrap(); - let factory = reth_provider::providers::OverlayStateProviderFactory::new(factory); + let changeset_cache = reth_trie_db::ChangesetCache::new(); + let factory = + reth_provider::providers::OverlayStateProviderFactory::new(factory, changeset_cache); let task_ctx = ProofTaskCtx::new(factory); let proof_worker_handle = ProofWorkerHandle::new(rt.handle().clone(), task_ctx, 1, 1, false); diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 7c95091b9c0..c6c0d895559 100644 --- 
a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -1726,8 +1726,11 @@ mod tests { runtime.block_on(async { let handle = tokio::runtime::Handle::current(); let provider_factory = create_test_provider_factory(); - let factory = - reth_provider::providers::OverlayStateProviderFactory::new(provider_factory); + let changeset_cache = reth_trie_db::ChangesetCache::new(); + let factory = reth_provider::providers::OverlayStateProviderFactory::new( + provider_factory, + changeset_cache, + ); let ctx = test_ctx(factory); let proof_handle = ProofWorkerHandle::new(handle.clone(), ctx, 5, 3, false); diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 74f1295e466..dec55705f38 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -298,8 +298,11 @@ mod tests { #[tokio::test] async fn random_parallel_root() { let factory = create_test_provider_factory(); - let mut overlay_factory = - reth_provider::providers::OverlayStateProviderFactory::new(factory.clone()); + let changeset_cache = reth_trie_db::ChangesetCache::new(); + let mut overlay_factory = reth_provider::providers::OverlayStateProviderFactory::new( + factory.clone(), + changeset_cache, + ); let mut rng = rand::rng(); let mut state = (0..100) diff --git a/crates/trie/trie/src/changesets.rs b/crates/trie/trie/src/changesets.rs new file mode 100644 index 00000000000..6f0bc85f80d --- /dev/null +++ b/crates/trie/trie/src/changesets.rs @@ -0,0 +1,476 @@ +//! Trie changeset computation. +//! +//! This module provides functionality to compute trie changesets from trie updates. +//! Changesets represent the old values of trie nodes before a block was applied, +//! enabling reorgs by reverting blocks to their previous state. +//! +//! ## Overview +//! +//! When a block is executed, the trie is updated with new node values. To support +//! chain reorganizations, we need to preserve the old values that existed before +//! 
the block was applied. These old values are called "changesets". +//! +//! ## Usage +//! +//! The primary function is `compute_trie_changesets`, which takes: +//! - A `TrieCursorFactory` for reading current trie state +//! - `TrieUpdatesSorted` containing the new node values +//! +//! And returns `TrieUpdatesSorted` containing the old node values. + +use crate::trie_cursor::TrieCursorIter; +use alloy_primitives::{map::B256Map, B256}; +use itertools::{merge_join_by, EitherOrBoth}; +use reth_storage_errors::db::DatabaseError; +use reth_trie_common::{ + updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, + BranchNodeCompact, Nibbles, +}; +use std::cmp::Ordering; + +use crate::trie_cursor::{TrieCursor, TrieCursorFactory, TrieStorageCursor}; + +/// Result type for changeset operations. +pub type ChangesetResult = Result; + +/// Computes trie changesets by looking up current node values from the trie. +/// +/// Takes the new trie updates and queries the trie for the old values of +/// changed nodes. Returns changesets representing the state before the block +/// was applied, suitable for reorg operations. 
+/// +/// # Arguments +/// +/// * `factory` - Trie cursor factory for reading current trie state +/// * `trie_updates` - New trie node values produced by state root computation +/// +/// # Returns +/// +/// `TrieUpdatesSorted` containing old node values (before this block) +pub fn compute_trie_changesets( + factory: &Factory, + trie_updates: &TrieUpdatesSorted, +) -> ChangesetResult +where + Factory: TrieCursorFactory, +{ + // Compute account trie changesets + let account_nodes = compute_account_changesets(factory, trie_updates)?; + + // Compute storage trie changesets + let mut storage_tries = B256Map::default(); + + // Create storage cursor once and reuse it for all addresses + let mut storage_cursor = factory.storage_trie_cursor(B256::default())?; + + for (hashed_address, storage_updates) in trie_updates.storage_tries_ref() { + storage_cursor.set_hashed_address(*hashed_address); + + let storage_changesets = if storage_updates.is_deleted() { + // Handle wiped storage + compute_wiped_storage_changesets(&mut storage_cursor, storage_updates)? + } else { + // Handle normal storage updates + compute_storage_changesets(&mut storage_cursor, storage_updates)? + }; + + if !storage_changesets.is_empty() { + storage_tries.insert( + *hashed_address, + StorageTrieUpdatesSorted { + is_deleted: storage_updates.is_deleted(), + storage_nodes: storage_changesets, + }, + ); + } + } + + // Build and return the result + Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) +} + +/// Computes account trie changesets. +/// +/// Looks up the current value for each changed account node path and returns +/// a vector of (path, `old_node`) pairs. The result is already sorted since +/// `trie_updates.account_nodes_ref()` is sorted. 
+fn compute_account_changesets( + factory: &Factory, + trie_updates: &TrieUpdatesSorted, +) -> ChangesetResult)>> +where + Factory: TrieCursorFactory, +{ + let mut cursor = factory.account_trie_cursor()?; + let mut account_changesets = Vec::with_capacity(trie_updates.account_nodes_ref().len()); + + // For each changed account node, look up its current value + // The input is already sorted, so the output will be sorted + for (path, _new_node) in trie_updates.account_nodes_ref() { + let old_node = cursor.seek_exact(*path)?.map(|(_path, node)| node); + account_changesets.push((*path, old_node)); + } + + Ok(account_changesets) +} + +/// Computes storage trie changesets for a single account. +/// +/// Looks up the current value for each changed storage node path and returns +/// a vector of (path, `old_node`) pairs. The result is already sorted since +/// `storage_updates.storage_nodes` is sorted. +/// +/// # Arguments +/// +/// * `cursor` - Reusable storage trie cursor. The hashed address will be set before use. +/// * `hashed_address` - The hashed address of the account +/// * `storage_updates` - Storage trie updates for this account +fn compute_storage_changesets( + cursor: &mut impl TrieStorageCursor, + storage_updates: &StorageTrieUpdatesSorted, +) -> ChangesetResult)>> { + let mut storage_changesets = Vec::with_capacity(storage_updates.storage_nodes.len()); + + // For each changed storage node, look up its current value + // The input is already sorted, so the output will be sorted + for (path, _new_node) in &storage_updates.storage_nodes { + let old_node = cursor.seek_exact(*path)?.map(|(_path, node)| node); + storage_changesets.push((*path, old_node)); + } + + Ok(storage_changesets) +} + +/// Handles wiped storage trie changeset computation. +/// +/// When an account's storage is completely wiped (e.g., account is destroyed), +/// we need to capture not just the changed nodes, but ALL existing nodes in +/// the storage trie, since they all will be deleted. 
+/// +/// This uses an iterator-based approach to avoid allocating an intermediate Vec. +/// It merges two sorted iterators: +/// - Current values of changed paths +/// - All existing nodes in the storage trie +/// +/// # Arguments +/// +/// * `changed_cursor` - Cursor for looking up changed node values +/// * `wiped_cursor` - Cursor for iterating all nodes in the storage trie +/// * `hashed_address` - The hashed address of the account +/// * `storage_updates` - Storage trie updates for this account +fn compute_wiped_storage_changesets( + cursor: &mut impl TrieStorageCursor, + storage_updates: &StorageTrieUpdatesSorted, +) -> ChangesetResult)>> { + // Set the hashed address for this account's storage trie + // Create an iterator that yields all nodes in the storage trie + let all_nodes = TrieCursorIter::new(cursor); + + // Merge the two sorted iterators + let merged = storage_trie_wiped_changeset_iter( + storage_updates.storage_nodes.iter().map(|e| e.0), + all_nodes, + )?; + + // Collect into a Vec + let mut storage_changesets = Vec::new(); + for result in merged { + storage_changesets.push(result?); + } + + Ok(storage_changesets) +} + +/// Returns an iterator which produces the changeset values for an account whose storage was wiped +/// during a block. +/// +/// ## Arguments +/// +/// - `curr_values_of_changed` is an iterator over the current values of all trie nodes modified by +/// the block, ordered by path. +/// - `all_nodes` is an iterator over all existing trie nodes for the account, ordered by path. +/// +/// ## Returns +/// +/// An iterator of trie node paths and a `Some(node)` (indicating the node was wiped) or a `None` +/// (indicating the node was modified in the block but didn't previously exist). The iterator's +/// results will be ordered by path. 
+pub fn storage_trie_wiped_changeset_iter( + changed_paths: impl Iterator, + all_nodes: impl Iterator>, +) -> Result< + impl Iterator), DatabaseError>>, + DatabaseError, +> { + let all_nodes = all_nodes.map(|e| e.map(|(nibbles, node)| (nibbles, Some(node)))); + + let merged = merge_join_by(changed_paths, all_nodes, |a, b| match (a, b) { + (_, Err(_)) => Ordering::Greater, + (a, Ok(b)) => a.cmp(&b.0), + }); + + Ok(merged.map(|either_or| match either_or { + EitherOrBoth::Left(changed) => { + // A path of a changed node which was not found in the database. The current value of + // this path must be None, otherwise it would have also been returned by the `all_nodes` + // iter. + Ok((changed, None)) + } + EitherOrBoth::Right(wiped) => { + // A node was found in the db (indicating it was wiped) but was not a changed node. + // Return it as-is. + wiped + } + EitherOrBoth::Both(_changed, wiped) => { + // A path of a changed node was found with a previous value in the database. If the + // changed node had no previous value (None) it wouldn't be returned by `all_nodes` and + // so would be in the Left branch. + // + // Due to the ordering closure passed to `merge_join_by` it's not possible for wrapped + // to be an error here. 
+ debug_assert!(wiped.is_ok(), "unreachable error condition: {wiped:?}"); + wiped + } + })) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::trie_cursor::mock::MockTrieCursorFactory; + use alloy_primitives::map::B256Map; + use reth_trie_common::updates::StorageTrieUpdatesSorted; + use std::collections::BTreeMap; + + #[test] + fn test_empty_updates() { + // Create an empty mock factory + // Note: We need to include B256::default() in storage_tries because + // compute_trie_changesets creates cursors for it upfront + let mut storage_tries = B256Map::default(); + storage_tries.insert(B256::default(), BTreeMap::new()); + let factory = MockTrieCursorFactory::new(BTreeMap::new(), storage_tries); + + // Create empty updates + let updates = TrieUpdatesSorted::new(vec![], B256Map::default()); + + // Compute changesets + let changesets = compute_trie_changesets(&factory, &updates).unwrap(); + + // Should produce empty changesets + assert!(changesets.account_nodes_ref().is_empty()); + assert!(changesets.storage_tries_ref().is_empty()); + } + + #[test] + fn test_account_changesets() { + // Create some initial account trie state + let path1 = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let path2 = Nibbles::from_nibbles([0x4, 0x5, 0x6]); + // tree_mask and hash_mask must be subsets of state_mask + let node1 = BranchNodeCompact::new(0b1111, 0b1010, 0, vec![], None); + let node2 = BranchNodeCompact::new(0b1111, 0b1100, 0, vec![], None); + + let mut account_nodes = BTreeMap::new(); + account_nodes.insert(path1, node1.clone()); + account_nodes.insert(path2, node2); + + // Need to include B256::default() for cursor creation + let mut storage_tries = B256Map::default(); + storage_tries.insert(B256::default(), BTreeMap::new()); + let factory = MockTrieCursorFactory::new(account_nodes, storage_tries); + + // Create updates that modify path1 and add a new path3 + let path3 = Nibbles::from_nibbles([0x7, 0x8, 0x9]); + let new_node1 = BranchNodeCompact::new(0b1111, 0b0001, 0, 
vec![], None); + let new_node3 = BranchNodeCompact::new(0b1111, 0b0000, 0, vec![], None); + + let updates = TrieUpdatesSorted::new( + vec![(path1, Some(new_node1)), (path3, Some(new_node3))], + B256Map::default(), + ); + + // Compute changesets + let changesets = compute_trie_changesets(&factory, &updates).unwrap(); + + // Check account changesets + assert_eq!(changesets.account_nodes_ref().len(), 2); + + // path1 should have the old node1 value + assert_eq!(changesets.account_nodes_ref()[0].0, path1); + assert_eq!(changesets.account_nodes_ref()[0].1, Some(node1)); + + // path3 should have None (it didn't exist before) + assert_eq!(changesets.account_nodes_ref()[1].0, path3); + assert_eq!(changesets.account_nodes_ref()[1].1, None); + } + + #[test] + fn test_storage_changesets() { + let hashed_address = B256::from([1u8; 32]); + + // Create some initial storage trie state + let path1 = Nibbles::from_nibbles([0x1, 0x2]); + let path2 = Nibbles::from_nibbles([0x3, 0x4]); + let node1 = BranchNodeCompact::new(0b1111, 0b0011, 0, vec![], None); + let node2 = BranchNodeCompact::new(0b1111, 0b0101, 0, vec![], None); + + let mut storage_nodes = BTreeMap::new(); + storage_nodes.insert(path1, node1.clone()); + storage_nodes.insert(path2, node2); + + let mut storage_tries = B256Map::default(); + storage_tries.insert(B256::default(), BTreeMap::new()); // For cursor creation + storage_tries.insert(hashed_address, storage_nodes); + + let factory = MockTrieCursorFactory::new(BTreeMap::new(), storage_tries); + + // Create updates that modify path1 and add a new path3 + let path3 = Nibbles::from_nibbles([0x5, 0x6]); + let new_node1 = BranchNodeCompact::new(0b1111, 0b1000, 0, vec![], None); + let new_node3 = BranchNodeCompact::new(0b1111, 0b0000, 0, vec![], None); + + let mut storage_updates = B256Map::default(); + storage_updates.insert( + hashed_address, + StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![(path1, Some(new_node1)), (path3, Some(new_node3))], + }, + 
); + + let updates = TrieUpdatesSorted::new(vec![], storage_updates); + + // Compute changesets + let changesets = compute_trie_changesets(&factory, &updates).unwrap(); + + // Check storage changesets + assert_eq!(changesets.storage_tries_ref().len(), 1); + let storage_changesets = changesets.storage_tries_ref().get(&hashed_address).unwrap(); + assert!(!storage_changesets.is_deleted); + assert_eq!(storage_changesets.storage_nodes.len(), 2); + + // path1 should have the old node1 value + assert_eq!(storage_changesets.storage_nodes[0].0, path1); + assert_eq!(storage_changesets.storage_nodes[0].1, Some(node1)); + + // path3 should have None (it didn't exist before) + assert_eq!(storage_changesets.storage_nodes[1].0, path3); + assert_eq!(storage_changesets.storage_nodes[1].1, None); + } + + #[test] + fn test_wiped_storage() { + let hashed_address = B256::from([2u8; 32]); + + // Create initial storage trie with multiple nodes + let path1 = Nibbles::from_nibbles([0x1, 0x2]); + let path2 = Nibbles::from_nibbles([0x3, 0x4]); + let path3 = Nibbles::from_nibbles([0x5, 0x6]); + let node1 = BranchNodeCompact::new(0b1111, 0b0011, 0, vec![], None); + let node2 = BranchNodeCompact::new(0b1111, 0b0101, 0, vec![], None); + let node3 = BranchNodeCompact::new(0b1111, 0b1001, 0, vec![], None); + + let mut storage_nodes = BTreeMap::new(); + storage_nodes.insert(path1, node1.clone()); + storage_nodes.insert(path2, node2.clone()); + storage_nodes.insert(path3, node3.clone()); + + let mut storage_tries = B256Map::default(); + storage_tries.insert(B256::default(), BTreeMap::new()); // For cursor creation + storage_tries.insert(hashed_address, storage_nodes); + + let factory = MockTrieCursorFactory::new(BTreeMap::new(), storage_tries); + + // Create updates that modify path1 and mark storage as wiped + let new_node1 = BranchNodeCompact::new(0b1111, 0b1000, 0, vec![], None); + + let mut storage_updates = B256Map::default(); + storage_updates.insert( + hashed_address, + 
StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![(path1, Some(new_node1))], + }, + ); + + let updates = TrieUpdatesSorted::new(vec![], storage_updates); + + // Compute changesets + let changesets = compute_trie_changesets(&factory, &updates).unwrap(); + + // Check storage changesets + assert_eq!(changesets.storage_tries_ref().len(), 1); + let storage_changesets = changesets.storage_tries_ref().get(&hashed_address).unwrap(); + assert!(storage_changesets.is_deleted); + + // Should include all three nodes (changed path1 + wiped path2 and path3) + assert_eq!(storage_changesets.storage_nodes.len(), 3); + + // All paths should be present in sorted order + assert_eq!(storage_changesets.storage_nodes[0].0, path1); + assert_eq!(storage_changesets.storage_nodes[1].0, path2); + assert_eq!(storage_changesets.storage_nodes[2].0, path3); + + // All should have their old values + assert_eq!(storage_changesets.storage_nodes[0].1, Some(node1)); + assert_eq!(storage_changesets.storage_nodes[1].1, Some(node2)); + assert_eq!(storage_changesets.storage_nodes[2].1, Some(node3)); + } + + #[test] + fn test_wiped_storage_with_new_path() { + let hashed_address = B256::from([3u8; 32]); + + // Create initial storage trie with two nodes + let path1 = Nibbles::from_nibbles([0x1, 0x2]); + let path3 = Nibbles::from_nibbles([0x5, 0x6]); + let node1 = BranchNodeCompact::new(0b1111, 0b0011, 0, vec![], None); + let node3 = BranchNodeCompact::new(0b1111, 0b1001, 0, vec![], None); + + let mut storage_nodes = BTreeMap::new(); + storage_nodes.insert(path1, node1.clone()); + storage_nodes.insert(path3, node3.clone()); + + let mut storage_tries = B256Map::default(); + storage_tries.insert(B256::default(), BTreeMap::new()); // For cursor creation + storage_tries.insert(hashed_address, storage_nodes); + + let factory = MockTrieCursorFactory::new(BTreeMap::new(), storage_tries); + + // Create updates that add a new path2 that didn't exist before + let path2 = Nibbles::from_nibbles([0x3, 
0x4]); + let new_node2 = BranchNodeCompact::new(0b1111, 0b0101, 0, vec![], None); + + let mut storage_updates = B256Map::default(); + storage_updates.insert( + hashed_address, + StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![(path2, Some(new_node2))], + }, + ); + + let updates = TrieUpdatesSorted::new(vec![], storage_updates); + + // Compute changesets + let changesets = compute_trie_changesets(&factory, &updates).unwrap(); + + // Check storage changesets + let storage_changesets = changesets.storage_tries_ref().get(&hashed_address).unwrap(); + assert!(storage_changesets.is_deleted); + + // Should include all three paths: existing path1, new path2, existing path3 + assert_eq!(storage_changesets.storage_nodes.len(), 3); + + // Check sorted order + assert_eq!(storage_changesets.storage_nodes[0].0, path1); + assert_eq!(storage_changesets.storage_nodes[1].0, path2); + assert_eq!(storage_changesets.storage_nodes[2].0, path3); + + // path1 and path3 have old values, path2 has None (didn't exist) + assert_eq!(storage_changesets.storage_nodes[0].1, Some(node1)); + assert_eq!(storage_changesets.storage_nodes[1].1, None); + assert_eq!(storage_changesets.storage_nodes[2].1, Some(node3)); + } +} diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index aef322fb7cc..90c54fbce28 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -38,6 +38,9 @@ pub mod proof_v2; /// Trie witness generation. pub mod witness; +/// Trie changeset computation. +pub mod changesets; + /// The implementation of the Merkle Patricia Trie. 
mod trie; pub use trie::{StateRoot, StorageRoot, TrieType}; diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx index d1f90a84676..25df549b934 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -131,18 +131,17 @@ Static Files: Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within the pipeline - - merkle: The merkle stage within the pipeline - - merkle-changesets: The merkle changesets stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Logging: --log.stdout.format diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx index 7f813ba789c..b5c5af7729d 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx +++ 
b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -162,18 +162,17 @@ Static Files: The name of the stage to run Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within the pipeline - - merkle: The merkle stage within the pipeline - - merkle-changesets: The merkle changesets stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Networking: -d, --disable-discovery diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 4b460cadd85..8e8135c5858 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -131,18 +131,17 @@ Static Files: Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the 
pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within the pipeline - - merkle: The merkle stage within the pipeline - - merkle-changesets: The merkle changesets stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Logging: --log.stdout.format diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 131c2b04c2c..4ad13bc3fce 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -162,18 +162,17 @@ Static Files: The name of the stage to run Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within 
the pipeline - - merkle: The merkle stage within the pipeline - - merkle-changesets: The merkle changesets stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Networking: -d, --disable-discovery From 86c414081a74587f508e3eab36fafdf80cb1b0a7 Mon Sep 17 00:00:00 2001 From: rakita Date: Fri, 16 Jan 2026 15:56:27 +0100 Subject: [PATCH 039/267] feat: stagging revm v34.0.0 (#20627) Co-authored-by: Matthias Seitz --- Cargo.lock | 101 +++++++----------- Cargo.toml | 31 +++--- .../engine/invalid-block-hooks/src/witness.rs | 2 + crates/engine/tree/Cargo.toml | 2 +- crates/engine/tree/benches/channel_perf.rs | 2 + crates/engine/tree/benches/state_root_task.rs | 3 + crates/engine/tree/src/tree/metrics.rs | 2 + .../tree/src/tree/payload_processor/bal.rs | 52 +++++---- .../tree/src/tree/payload_processor/mod.rs | 2 + .../src/tree/payload_processor/multiproof.rs | 6 ++ crates/ethereum/evm/src/lib.rs | 12 ++- crates/ethereum/evm/tests/execute.rs | 3 + crates/ethereum/payload/src/lib.rs | 4 +- crates/evm/evm/src/execute.rs | 19 +++- crates/evm/evm/src/lib.rs | 2 +- .../execution-types/src/execution_outcome.rs | 18 +++- crates/optimism/evm/src/lib.rs | 22 ++-- 
crates/optimism/node/tests/it/builder.rs | 6 +- crates/optimism/payload/src/builder.rs | 2 +- crates/primitives-traits/src/account.rs | 14 ++- crates/rpc/rpc-eth-api/src/helpers/call.rs | 12 ++- .../rpc/rpc-eth-api/src/helpers/estimate.rs | 8 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 7 +- crates/rpc/rpc-eth-types/src/error/api.rs | 11 +- crates/rpc/rpc-eth-types/src/error/mod.rs | 26 ++++- crates/stateless/src/witness_db.rs | 1 + crates/storage/errors/Cargo.toml | 2 + crates/storage/errors/src/provider.rs | 12 ++- crates/trie/common/src/hashed_state.rs | 2 + 29 files changed, 235 insertions(+), 151 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c42bb019e39..0153d42fae3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -249,12 +249,13 @@ dependencies = [ [[package]] name = "alloy-eip7928" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "926b2c0d34e641cf8b17bf54ce50fda16715b9f68ad878fa6128bae410c6f890" +checksum = "6adac476434bf024279164dcdca299309f0c7d1e3557024eb7a83f8d9d01c6b5" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", "serde", ] @@ -286,9 +287,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.25.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ccc4c702c840148af1ce784cc5c6ed9274a020ef32417c5b1dbeab8c317673" +checksum = "249337d8316a9ab983784597adfad66b78047ec9522d8b510185bc968c272618" dependencies = [ "alloy-consensus", "alloy-eips", @@ -403,9 +404,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.25.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f640da852f93ddaa3b9a602b7ca41d80e0023f77a67b68aaaf511c32f1fe0ce" +checksum = "cc9fc7cbe100f7b094428b488225c9aedb835f28a31b74f474ee1c41878e58c6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -1491,12 +1492,6 @@ version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" -[[package]] -name = "az" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" - [[package]] name = "backon" version = "1.6.0" @@ -4461,16 +4456,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "gmp-mpfr-sys" -version = "1.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f8970a75c006bb2f8ae79c6768a116dd215fa8346a87aed99bf9d82ca43394" -dependencies = [ - "libc", - "windows-sys 0.60.2", -] - [[package]] name = "group" version = "0.13.0" @@ -6488,9 +6473,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "14.1.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1475a779c73999fc803778524042319691b31f3d6699d2b560c4ed8be1db802a" +checksum = "79c92b75162c2ed1661849fa51683b11254a5b661798360a2c24be918edafd40" dependencies = [ "auto_impl", "revm", @@ -10968,6 +10953,7 @@ dependencies = [ "reth-prune-types", "reth-static-file-types", "revm-database-interface", + "revm-state", "thiserror 2.0.17", ] @@ -11325,9 +11311,9 @@ dependencies = [ [[package]] name = "revm" -version = "33.1.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c85ed0028f043f87b3c88d4a4cb6f0a76440085523b6a8afe5ff003cf418054" +checksum = "c2aabdebaa535b3575231a88d72b642897ae8106cf6b0d12eafc6bfdf50abfc7" dependencies = [ "revm-bytecode", "revm-context", @@ -11344,9 +11330,9 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "7.1.1" +version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2c6b5e6e8dd1e28a4a60e5f46615d4ef0809111c9e63208e55b5c7058200fb0" +checksum = "74d1e5c1eaa44d39d537f668bc5c3409dc01e5c8be954da6c83370bbdf006457" dependencies = [ "bitvec", "phf", 
@@ -11356,9 +11342,9 @@ dependencies = [ [[package]] name = "revm-context" -version = "12.1.0" +version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f038f0c9c723393ac897a5df9140b21cfa98f5753a2cb7d0f28fa430c4118abf" +checksum = "892ff3e6a566cf8d72ffb627fdced3becebbd9ba64089c25975b9b028af326a5" dependencies = [ "bitvec", "cfg-if", @@ -11373,9 +11359,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "13.1.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431c9a14e4ef1be41ae503708fd02d974f80ef1f2b6b23b5e402e8d854d1b225" +checksum = "57f61cc6d23678c4840af895b19f8acfbbd546142ec8028b6526c53cc1c16c98" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11389,9 +11375,9 @@ dependencies = [ [[package]] name = "revm-database" -version = "9.0.6" +version = "10.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "980d8d6bba78c5dd35b83abbb6585b0b902eb25ea4448ed7bfba6283b0337191" +checksum = "529528d0b05fe646be86223032c3e77aa8b05caa2a35447d538c55965956a511" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11403,22 +11389,23 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "8.0.5" +version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cce03e3780287b07abe58faf4a7f5d8be7e81321f93ccf3343c8f7755602bae" +checksum = "b7bf93ac5b91347c057610c0d96e923db8c62807e03f036762d03e981feddc1d" dependencies = [ "auto_impl", "either", "revm-primitives", "revm-state", "serde", + "thiserror 2.0.17", ] [[package]] name = "revm-handler" -version = "14.1.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d44f8f6dbeec3fecf9fe55f78ef0a758bdd92ea46cd4f1ca6e2a946b32c367f3" +checksum = "0cd0e43e815a85eded249df886c4badec869195e70cdd808a13cfca2794622d2" dependencies = [ "auto_impl", "derive-where", @@ -11435,9 +11422,9 @@ dependencies = [ 
[[package]] name = "revm-inspector" -version = "14.1.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5617e49216ce1ca6c8826bcead0386bc84f49359ef67cde6d189961735659f93" +checksum = "4f3ccad59db91ef93696536a0dbaf2f6f17cfe20d4d8843ae118edb7e97947ef" dependencies = [ "auto_impl", "either", @@ -11453,9 +11440,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.33.2" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01def7351cd9af844150b8e88980bcd11304f33ce23c3d7c25f2a8dab87c1345" +checksum = "4a1ce3f52a052d78cc251714d57bf05dc8bc75e269677de11805d3153300a2cd" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -11473,9 +11460,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "31.1.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26ec36405f7477b9dccdc6caa3be19adf5662a7a0dffa6270cdb13a090c077e5" +checksum = "11406408597bc249392d39295831c4b641b3a6f5c471a7c41104a7a1e3564c07" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11486,9 +11473,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a62958af953cc4043e93b5be9b8497df84cc3bd612b865c49a7a7dfa26a84e2" +checksum = "50c1285c848d240678bf69cb0f6179ff5a4aee6fc8e921d89708087197a0aff3" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11504,16 +11491,15 @@ dependencies = [ "p256", "revm-primitives", "ripemd", - "rug", "secp256k1 0.31.1", "sha2", ] [[package]] name = "revm-primitives" -version = "21.0.2" +version = "22.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29e161db429d465c09ba9cbff0df49e31049fe6b549e28eb0b7bd642fcbd4412" +checksum = "ba580c56a8ec824a64f8a1683577876c2e1dbe5247044199e9b881421ad5dcf9" dependencies = [ "alloy-primitives", "num_enum", 
@@ -11523,10 +11509,11 @@ dependencies = [ [[package]] name = "revm-state" -version = "8.1.1" +version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d8be953b7e374dbdea0773cf360debed8df394ea8d82a8b240a6b5da37592fc" +checksum = "311720d4f0f239b041375e7ddafdbd20032a33b7bae718562ea188e188ed9fd3" dependencies = [ + "alloy-eip7928", "bitflags 2.10.0", "revm-bytecode", "revm-primitives", @@ -11675,18 +11662,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "rug" -version = "1.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad2e973fe3c3214251a840a621812a4f40468da814b1a3d6947d433c2af11f" -dependencies = [ - "az", - "gmp-mpfr-sys", - "libc", - "libm", -] - [[package]] name = "ruint" version = "1.17.2" diff --git a/Cargo.toml b/Cargo.toml index a4fc2e292ec..068a8f9ef62 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -473,22 +473,22 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "33.1.0", default-features = false } -revm-bytecode = { version = "7.1.1", default-features = false } -revm-database = { version = "9.0.5", default-features = false } -revm-state = { version = "8.1.1", default-features = false } -revm-primitives = { version = "21.0.2", default-features = false } -revm-interpreter = { version = "31.1.0", default-features = false } -revm-database-interface = { version = "8.0.5", default-features = false } -op-revm = { version = "14.1.0", default-features = false } -revm-inspectors = "0.33.2" +revm = { version = "34.0.0", default-features = false } +revm-bytecode = { version = "8.0.0", default-features = false } +revm-database = { version = "10.0.0", default-features = false } +revm-state = { version = "9.0.0", default-features = false } +revm-primitives = { version = "22.0.0", default-features = false } +revm-interpreter = { version = "32.0.0", default-features = false } 
+revm-database-interface = { version = "9.0.0", default-features = false } +op-revm = { version = "15.0.0", default-features = false } +revm-inspectors = "0.34.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.3" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-eip7928 = { version = "0.1.0", default-features = false } -alloy-evm = { version = "0.25.1", default-features = false } +alloy-eip7928 = { version = "0.3.0", default-features = false } +alloy-evm = { version = "0.26.0", default-features = false } alloy-primitives = { version = "1.5.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.5.0" @@ -526,7 +526,7 @@ alloy-transport-ipc = { version = "1.4.3", default-features = false } alloy-transport-ws = { version = "1.4.3", default-features = false } # op -alloy-op-evm = { version = "0.25.0", default-features = false } +alloy-op-evm = { version = "0.26.0", default-features = false } alloy-op-hardforks = "0.4.4" op-alloy-rpc-types = { version = "0.23.1", default-features = false } op-alloy-rpc-types-engine = { version = "0.23.1", default-features = false } @@ -747,7 +747,7 @@ vergen-git2 = "1.0.5" # networking ipnet = "2.11" -# [patch.crates-io] +[patch.crates-io] # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } @@ -792,3 +792,8 @@ ipnet = "2.11" # alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } # alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } + +# revm-inspectors = { git = 
"https://github.com/paradigmxyz/revm-inspectors", rev = "3020ea8" } + +# alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "072c248" } +# alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "072c248" } diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 25d15a87227..c068220da8b 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -448,12 +448,14 @@ mod tests { nonce: account.nonce, code_hash: account.bytecode_hash.unwrap_or_default(), code: None, + account_id: None, }), original_info: (i == 0).then(|| AccountInfo { balance: account.balance.checked_div(U256::from(2)).unwrap_or(U256::ZERO), nonce: 0, code_hash: account.bytecode_hash.unwrap_or_default(), code: None, + account_id: None, }), storage, status: AccountStatus::default(), diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index ea679e4e404..006233c1908 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -95,7 +95,7 @@ reth-tracing.workspace = true reth-node-ethereum.workspace = true reth-e2e-test-utils.workspace = true -# alloy +# revm revm-state.workspace = true assert_matches.workspace = true diff --git a/crates/engine/tree/benches/channel_perf.rs b/crates/engine/tree/benches/channel_perf.rs index 41dd651c890..1bc5d7ceacb 100644 --- a/crates/engine/tree/benches/channel_perf.rs +++ b/crates/engine/tree/benches/channel_perf.rs @@ -26,7 +26,9 @@ fn create_bench_state(num_accounts: usize) -> EvmState { nonce: 10, code_hash: B256::from_slice(&rng.random::<[u8; 32]>()), code: Default::default(), + account_id: None, }, + original_info: Box::new(AccountInfo::default()), storage, status: AccountStatus::empty(), transaction_id: 0, diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index 6db51361363..f271e18811b 100644 --- 
a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -62,6 +62,7 @@ fn create_bench_state_updates(params: &BenchParams) -> Vec { storage: HashMap::default(), status: AccountStatus::SelfDestructed, transaction_id: 0, + original_info: Box::new(AccountInfo::default()), } } else { RevmAccount { @@ -70,6 +71,7 @@ fn create_bench_state_updates(params: &BenchParams) -> Vec { nonce: rng.random::(), code_hash: KECCAK_EMPTY, code: Some(Default::default()), + account_id: None, }, storage: (0..rng.random_range(0..=params.storage_slots_per_account)) .map(|_| { @@ -84,6 +86,7 @@ fn create_bench_state_updates(params: &BenchParams) -> Vec { }) .collect(), status: AccountStatus::Touched, + original_info: Box::new(AccountInfo::default()), transaction_id: 0, } }; diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 2308ab85c36..ac4bb50044f 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -580,7 +580,9 @@ mod tests { nonce: 10, code_hash: B256::random(), code: Default::default(), + account_id: None, }, + original_info: Box::new(AccountInfo::default()), storage, status: AccountStatus::default(), transaction_id: 0, diff --git a/crates/engine/tree/src/tree/payload_processor/bal.rs b/crates/engine/tree/src/tree/payload_processor/bal.rs index 415da6874e6..1353e1b6283 100644 --- a/crates/engine/tree/src/tree/payload_processor/bal.rs +++ b/crates/engine/tree/src/tree/payload_processor/bal.rs @@ -101,7 +101,7 @@ impl<'a> Iterator for BALSlotIter<'a> { return None; } - return Some((address, slot)); + return Some((address, StorageKey::from(slot))); } // Move to next account @@ -177,13 +177,11 @@ where let mut storage_map = HashedStorage::new(false); for slot_changes in &account_changes.storage_changes { - let hashed_slot = keccak256(slot_changes.slot); + let hashed_slot = keccak256(slot_changes.slot.to_be_bytes::<32>()); // Get the last change for 
this slot if let Some(last_change) = slot_changes.changes.last() { - storage_map - .storage - .insert(hashed_slot, U256::from_be_bytes(last_change.new_value.0)); + storage_map.storage.insert(hashed_slot, last_change.new_value); } } @@ -237,8 +235,8 @@ mod tests { let provider = StateProviderTest::default(); let address = Address::random(); - let slot = StorageKey::random(); - let value = B256::random(); + let slot = U256::random(); + let value = U256::random(); let slot_changes = SlotChanges { slot, changes: vec![StorageChange::new(0, value)] }; @@ -258,10 +256,10 @@ mod tests { assert!(result.storages.contains_key(&hashed_address)); let storage = result.storages.get(&hashed_address).unwrap(); - let hashed_slot = keccak256(slot); + let hashed_slot = keccak256(slot.to_be_bytes::<32>()); let stored_value = storage.storage.get(&hashed_slot).unwrap(); - assert_eq!(*stored_value, U256::from_be_bytes(value.0)); + assert_eq!(*stored_value, value); } #[test] @@ -392,15 +390,15 @@ mod tests { let provider = StateProviderTest::default(); let address = Address::random(); - let slot = StorageKey::random(); + let slot = U256::random(); // Multiple changes to the same slot - should take the last one let slot_changes = SlotChanges { slot, changes: vec![ - StorageChange::new(0, B256::from(U256::from(100).to_be_bytes::<32>())), - StorageChange::new(1, B256::from(U256::from(200).to_be_bytes::<32>())), - StorageChange::new(2, B256::from(U256::from(300).to_be_bytes::<32>())), + StorageChange::new(0, U256::from(100)), + StorageChange::new(1, U256::from(200)), + StorageChange::new(2, U256::from(300)), ], }; @@ -418,7 +416,7 @@ mod tests { let hashed_address = keccak256(address); let storage = result.storages.get(&hashed_address).unwrap(); - let hashed_slot = keccak256(slot); + let hashed_slot = keccak256(slot.to_be_bytes::<32>()); let stored_value = storage.storage.get(&hashed_slot).unwrap(); @@ -438,15 +436,15 @@ mod tests { address: addr1, storage_changes: vec![ SlotChanges { - slot: 
StorageKey::from(U256::from(100)), - changes: vec![StorageChange::new(0, B256::ZERO)], + slot: U256::from(100), + changes: vec![StorageChange::new(0, U256::ZERO)], }, SlotChanges { - slot: StorageKey::from(U256::from(101)), - changes: vec![StorageChange::new(0, B256::ZERO)], + slot: U256::from(101), + changes: vec![StorageChange::new(0, U256::ZERO)], }, ], - storage_reads: vec![StorageKey::from(U256::from(102))], + storage_reads: vec![U256::from(102)], balance_changes: vec![], nonce_changes: vec![], code_changes: vec![], @@ -456,10 +454,10 @@ mod tests { let account2 = AccountChanges { address: addr2, storage_changes: vec![SlotChanges { - slot: StorageKey::from(U256::from(200)), - changes: vec![StorageChange::new(0, B256::ZERO)], + slot: U256::from(200), + changes: vec![StorageChange::new(0, U256::ZERO)], }], - storage_reads: vec![StorageKey::from(U256::from(201))], + storage_reads: vec![U256::from(201)], balance_changes: vec![], nonce_changes: vec![], code_changes: vec![], @@ -470,15 +468,15 @@ mod tests { address: addr3, storage_changes: vec![ SlotChanges { - slot: StorageKey::from(U256::from(300)), - changes: vec![StorageChange::new(0, B256::ZERO)], + slot: U256::from(300), + changes: vec![StorageChange::new(0, U256::ZERO)], }, SlotChanges { - slot: StorageKey::from(U256::from(301)), - changes: vec![StorageChange::new(0, B256::ZERO)], + slot: U256::from(301), + changes: vec![StorageChange::new(0, U256::ZERO)], }, ], - storage_reads: vec![StorageKey::from(U256::from(302))], + storage_reads: vec![U256::from(302)], balance_changes: vec![], nonce_changes: vec![], code_changes: vec![], diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 3df06652d37..6ba285c3bc4 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -1059,7 +1059,9 @@ mod tests { nonce: rng.random::(), code_hash: KECCAK_EMPTY, code: 
Some(Default::default()), + account_id: None, }, + original_info: Box::new(AccountInfo::default()), storage, status: AccountStatus::Touched, transaction_id: 0, diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index a61ef525363..d3907720b5d 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1811,7 +1811,9 @@ mod tests { nonce: 1, code_hash: Default::default(), code: Default::default(), + account_id: None, }, + original_info: Box::new(revm_state::AccountInfo::default()), transaction_id: Default::default(), storage: Default::default(), status: revm_state::AccountStatus::Touched, @@ -1828,7 +1830,9 @@ mod tests { nonce: 2, code_hash: Default::default(), code: Default::default(), + account_id: None, }, + original_info: Box::new(revm_state::AccountInfo::default()), transaction_id: Default::default(), storage: Default::default(), status: revm_state::AccountStatus::Touched, @@ -1930,7 +1934,9 @@ mod tests { nonce: 1, code_hash: Default::default(), code: Default::default(), + account_id: None, }, + original_info: Box::new(revm_state::AccountInfo::default()), transaction_id: Default::default(), storage: Default::default(), status: revm_state::AccountStatus::Touched, diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index dbc686fe4f3..be7d1601740 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -188,6 +188,7 @@ where block: &'a SealedBlock, ) -> Result, Self::Error> { Ok(EthBlockExecutionCtx { + tx_count_hint: Some(block.transaction_count()), parent_hash: block.header().parent_hash, parent_beacon_block_root: block.header().parent_beacon_block_root, ommers: &block.body().ommers, @@ -202,6 +203,7 @@ where attributes: Self::NextBlockEnvCtx, ) -> Result, Self::Error> { Ok(EthBlockExecutionCtx { + tx_count_hint: None, parent_hash: parent.hash(), 
parent_beacon_block_root: attributes.parent_beacon_block_root, ommers: &[], @@ -238,8 +240,9 @@ where revm_spec_by_timestamp_and_block_number(self.chain_spec(), timestamp, block_number); // configure evm env based on parent block - let mut cfg_env = - CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec); + let mut cfg_env = CfgEnv::new() + .with_chain_id(self.chain_spec().chain().id()) + .with_spec_and_mainnet_gas_params(spec); if let Some(blob_params) = &blob_params { cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx); @@ -280,6 +283,7 @@ where payload: &'a ExecutionData, ) -> Result, Self::Error> { Ok(EthBlockExecutionCtx { + tx_count_hint: Some(payload.payload.transactions().len()), parent_hash: payload.parent_hash(), parent_beacon_block_root: payload.sidecar.parent_beacon_block_root(), ommers: &[], @@ -407,7 +411,7 @@ mod tests { let db = CacheDB::>::default(); let evm_env = EvmEnv { - cfg_env: CfgEnv::new().with_spec(SpecId::CONSTANTINOPLE), + cfg_env: CfgEnv::new().with_spec_and_mainnet_gas_params(SpecId::CONSTANTINOPLE), ..Default::default() }; @@ -474,7 +478,7 @@ mod tests { let db = CacheDB::>::default(); let evm_env = EvmEnv { - cfg_env: CfgEnv::new().with_spec(SpecId::CONSTANTINOPLE), + cfg_env: CfgEnv::new().with_spec_and_mainnet_gas_params(SpecId::CONSTANTINOPLE), ..Default::default() }; diff --git a/crates/ethereum/evm/tests/execute.rs b/crates/ethereum/evm/tests/execute.rs index 61e0c1c4b66..c7d0a083376 100644 --- a/crates/ethereum/evm/tests/execute.rs +++ b/crates/ethereum/evm/tests/execute.rs @@ -38,6 +38,7 @@ fn create_database_with_beacon_root_contract() -> CacheDB { code_hash: keccak256(BEACON_ROOTS_CODE.clone()), nonce: 1, code: Some(Bytecode::new_raw(BEACON_ROOTS_CODE.clone())), + account_id: None, }; db.insert_account_info(BEACON_ROOTS_ADDRESS, beacon_root_contract_account); @@ -53,6 +54,7 @@ fn create_database_with_withdrawal_requests_contract() -> CacheDB { balance: U256::ZERO, code_hash: 
keccak256(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), code: Some(Bytecode::new_raw(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone())), + account_id: None, }; db.insert_account_info( @@ -339,6 +341,7 @@ fn create_database_with_block_hashes(latest_block: u64) -> CacheDB { code_hash: keccak256(HISTORY_STORAGE_CODE.clone()), code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), nonce: 1, + account_id: None, }; db.insert_account_info(HISTORY_STORAGE_ADDRESS, blockhashes_contract_account); diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index a2baad0410c..e77e7a63245 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -155,7 +155,7 @@ where let state_provider = client.state_by_block_hash(parent_header.hash())?; let state = StateProviderDatabase::new(state_provider.as_ref()); let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); + State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); let mut builder = evm_config .builder_for_next_block( @@ -247,7 +247,7 @@ where limit: MAX_RLP_BLOCK_SIZE, }, ); - continue; + continue } // There's only limited amount of blob space available per block, so we need to check if diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index fca8f6241d5..e70db5296b0 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -741,6 +741,7 @@ mod tests { nonce, code_hash: KECCAK_EMPTY, code: None, + account_id: None, }; state.insert_account(addr, account_info); state @@ -777,8 +778,13 @@ mod tests { let mut state = setup_state_with_account(addr1, 100, 1); - let account2 = - AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + let account2 = AccountInfo { + balance: U256::from(200), + nonce: 1, + code_hash: KECCAK_EMPTY, + code: None, + account_id: None, + }; state.insert_account(addr2, account2); let mut 
increments = HashMap::default(); @@ -799,8 +805,13 @@ mod tests { let mut state = setup_state_with_account(addr1, 100, 1); - let account2 = - AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + let account2 = AccountInfo { + balance: U256::from(200), + nonce: 1, + code_hash: KECCAK_EMPTY, + code: None, + account_id: None, + }; state.insert_account(addr2, account2); let mut increments = HashMap::default(); diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index 2ed79d7297b..e5bd089255a 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -399,7 +399,7 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// // Complete block building /// let outcome = builder.finish(state_provider)?; /// ``` - fn builder_for_next_block<'a, DB: Database>( + fn builder_for_next_block<'a, DB: Database + 'a>( &'a self, db: &'a mut State, parent: &'a SealedHeader<::BlockHeader>, diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 1b68be35af9..6df354219ea 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -934,10 +934,20 @@ mod tests { let address3 = Address::random(); // Set up account info with some changes - let account_info1 = - AccountInfo { nonce: 1, balance: U256::from(100), code_hash: B256::ZERO, code: None }; - let account_info2 = - AccountInfo { nonce: 2, balance: U256::from(200), code_hash: B256::ZERO, code: None }; + let account_info1 = AccountInfo { + nonce: 1, + balance: U256::from(100), + code_hash: B256::ZERO, + code: None, + account_id: None, + }; + let account_info2 = AccountInfo { + nonce: 2, + balance: U256::from(200), + code_hash: B256::ZERO, + code: None, + account_id: None, + }; // Set up the bundle state with these accounts let mut bundle_state = BundleState::default(); diff --git a/crates/optimism/evm/src/lib.rs 
b/crates/optimism/evm/src/lib.rs index fa3cf87696e..1dbd8c7e385 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -230,7 +230,9 @@ where let spec = revm_spec_by_timestamp_after_bedrock(self.chain_spec(), timestamp); - let cfg_env = CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec); + let cfg_env = CfgEnv::new() + .with_chain_id(self.chain_spec().chain().id()) + .with_spec_and_mainnet_gas_params(spec); let blob_excess_gas_and_price = spec .into_eth_spec() @@ -362,7 +364,8 @@ mod tests { let db = CacheDB::>::default(); // Create a custom configuration environment with a chain ID of 111 - let cfg = CfgEnv::new().with_chain_id(111).with_spec(OpSpecId::default()); + let cfg = + CfgEnv::new().with_chain_id(111).with_spec_and_mainnet_gas_params(OpSpecId::default()); let evm_env = EvmEnv { cfg_env: cfg.clone(), ..Default::default() }; @@ -400,8 +403,10 @@ mod tests { let db = CacheDB::>::default(); - let evm_env = - EvmEnv { cfg_env: CfgEnv::new().with_spec(OpSpecId::ECOTONE), ..Default::default() }; + let evm_env = EvmEnv { + cfg_env: CfgEnv::new().with_spec_and_mainnet_gas_params(OpSpecId::ECOTONE), + ..Default::default() + }; let evm = evm_config.evm_with_env(db, evm_env.clone()); @@ -427,7 +432,8 @@ mod tests { let evm_config = test_evm_config(); let db = CacheDB::>::default(); - let cfg = CfgEnv::new().with_chain_id(111).with_spec(OpSpecId::default()); + let cfg = + CfgEnv::new().with_chain_id(111).with_spec_and_mainnet_gas_params(OpSpecId::default()); let block = BlockEnv::default(); let evm_env = EvmEnv { block_env: block, cfg_env: cfg.clone() }; @@ -463,8 +469,10 @@ mod tests { let evm_config = test_evm_config(); let db = CacheDB::>::default(); - let evm_env = - EvmEnv { cfg_env: CfgEnv::new().with_spec(OpSpecId::ECOTONE), ..Default::default() }; + let evm_env = EvmEnv { + cfg_env: CfgEnv::new().with_spec_and_mainnet_gas_params(OpSpecId::ECOTONE), + ..Default::default() + }; let evm = 
evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {}); diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index b495fdb47ce..3d4eda33f75 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -19,7 +19,7 @@ use reth_optimism_node::{args::RollupArgs, OpEvmConfig, OpExecutorBuilder, OpNod use reth_optimism_primitives::OpPrimitives; use reth_provider::providers::BlockchainProvider; use revm::{ - context::{BlockEnv, Cfg, ContextTr, TxEnv}, + context::{BlockEnv, ContextTr, TxEnv}, context_interface::result::EVMError, inspector::NoOpInspector, interpreter::interpreter::EthInterpreter, @@ -103,7 +103,7 @@ fn test_setup_custom_precompiles() { input: EvmEnv, ) -> Self::Evm { let mut op_evm = OpEvmFactory::default().create_evm(db, input); - *op_evm.components_mut().2 = UniPrecompiles::precompiles(op_evm.ctx().cfg().spec()); + *op_evm.components_mut().2 = UniPrecompiles::precompiles(*op_evm.ctx().cfg().spec()); op_evm } @@ -119,7 +119,7 @@ fn test_setup_custom_precompiles() { ) -> Self::Evm { let mut op_evm = OpEvmFactory::default().create_evm_with_inspector(db, input, inspector); - *op_evm.components_mut().2 = UniPrecompiles::precompiles(op_evm.ctx().cfg().spec()); + *op_evm.components_mut().2 = UniPrecompiles::precompiles(*op_evm.ctx().cfg().spec()); op_evm } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 99bc07065ae..05d156ab3b4 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -634,7 +634,7 @@ where if sequencer_tx.value().is_eip4844() { return Err(PayloadBuilderError::other( OpPayloadBuilderError::BlobTransactionRejected, - )) + )); } // Convert the transaction to a [RecoveredTx]. 
This is diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 8c4a496dabd..99c148ae2d8 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -238,12 +238,15 @@ impl From for AccountInfo { nonce: reth_acc.nonce, code_hash: reth_acc.bytecode_hash.unwrap_or(KECCAK_EMPTY), code: None, + account_id: None, } } } #[cfg(test)] mod tests { + use std::sync::Arc; + use super::*; use alloy_primitives::{hex_literal::hex, B256, U256}; use reth_codecs::Compact; @@ -304,11 +307,12 @@ mod tests { assert_eq!(len, 17); let mut buf = vec![]; - let bytecode = Bytecode(RevmBytecode::LegacyAnalyzed(LegacyAnalyzedBytecode::new( - Bytes::from(&hex!("ff00")), - 2, - JumpTable::from_slice(&[0], 2), - ))); + let bytecode = + Bytecode(RevmBytecode::LegacyAnalyzed(Arc::new(LegacyAnalyzedBytecode::new( + Bytes::from(&hex!("ff00")), + 2, + JumpTable::from_slice(&[0], 2), + )))); let len = bytecode.to_compact(&mut buf); assert_eq!(len, 16); diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 0ad6c7ba0c7..359ce9fd0fa 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -25,7 +25,11 @@ use reth_evm::{ }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; -use reth_revm::{cancelled::CancelOnDrop, database::StateProviderDatabase, db::State}; +use reth_revm::{ + cancelled::CancelOnDrop, + database::StateProviderDatabase, + db::{bal::EvmDatabaseError, State}, +}; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ cache::db::StateProviderTraitObjWrapper, @@ -508,7 +512,7 @@ pub trait Call: tx_env: TxEnvFor, ) -> Result>, Self::Error> where - DB: Database + fmt::Debug, + DB: Database> + fmt::Debug, { let mut evm = self.evm_config().evm_with_env(db, evm_env); let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?; @@ -526,7 +530,7 @@ pub trait Call: 
inspector: I, ) -> Result>, Self::Error> where - DB: Database + fmt::Debug, + DB: Database> + fmt::Debug, I: InspectorFor, { let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector); @@ -703,7 +707,7 @@ pub trait Call: target_tx_hash: B256, ) -> Result where - DB: Database + DatabaseCommit + core::fmt::Debug, + DB: Database> + DatabaseCommit + core::fmt::Debug, I: IntoIterator>>, { let mut evm = self.evm_config().evm_with_env(db, evm_env); diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index fa9dec303a8..e5f46d24afc 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -12,7 +12,7 @@ use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor}; use reth_revm::{ database::{EvmStateProvider, StateProviderDatabase}, - db::State, + db::{bal::EvmDatabaseError, State}, }; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ @@ -165,7 +165,7 @@ pub trait EstimateCall: Call { return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance { gas_limit: tx_env.gas_limit(), } - .into_eth_err()) + .into_eth_err()); } // Propagate other results (successful or other errors). 
ethres => ethres?, @@ -186,7 +186,7 @@ pub trait EstimateCall: Call { } else { // the transaction did revert Err(Self::Error::from_revert(output)) - } + }; } }; @@ -313,7 +313,7 @@ pub trait EstimateCall: Call { max_gas_limit: u64, ) -> Result where - DB: Database, + DB: Database>, EthApiError: From, { let req_gas_limit = tx_env.gas_limit(); diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index ef6443c382b..13ac2479158 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -13,7 +13,10 @@ use reth_evm::{ Evm, EvmEnvFor, EvmFor, HaltReasonFor, InspectorFor, TxEnvFor, }; use reth_primitives_traits::{BlockBody, Recovered, RecoveredBlock}; -use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_revm::{ + database::StateProviderDatabase, + db::{bal::EvmDatabaseError, State}, +}; use reth_rpc_eth_types::{cache::db::StateCacheDb, EthApiError}; use reth_storage_api::{ProviderBlock, ProviderTx}; use revm::{context::Block, context_interface::result::ResultAndState, DatabaseCommit}; @@ -32,7 +35,7 @@ pub trait Trace: LoadState> + Call { inspector: I, ) -> Result>, Self::Error> where - DB: Database, + DB: Database>, I: InspectorFor, { let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector); diff --git a/crates/rpc/rpc-eth-types/src/error/api.rs b/crates/rpc/rpc-eth-types/src/error/api.rs index 744314ecb01..9417d04f5a8 100644 --- a/crates/rpc/rpc-eth-types/src/error/api.rs +++ b/crates/rpc/rpc-eth-types/src/error/api.rs @@ -5,6 +5,7 @@ use crate::{simulate::EthSimulateError, EthApiError, RevertError}; use alloy_primitives::Bytes; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, EvmErrorFor, HaltReasonFor}; +use reth_revm::db::bal::EvmDatabaseError; use revm::{context::result::ExecutionResult, context_interface::result::HaltReason}; use super::RpcInvalidTransactionError; @@ -110,10 +111,12 @@ impl AsEthApiError for 
EthApiError { /// Helper trait to convert from revm errors. pub trait FromEvmError: - From> + FromEvmHalt> + FromRevert + From>> + + FromEvmHalt> + + FromRevert { /// Converts from EVM error to this type. - fn from_evm_err(err: EvmErrorFor) -> Self { + fn from_evm_err(err: EvmErrorFor>) -> Self { err.into() } @@ -131,7 +134,9 @@ pub trait FromEvmError: impl FromEvmError for T where - T: From> + FromEvmHalt> + FromRevert, + T: From>> + + FromEvmHalt> + + FromRevert, Evm: ConfigureEvm, { } diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index b2dcc12cea0..c7e39527b2e 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -11,6 +11,7 @@ pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; use core::time::Duration; use reth_errors::{BlockExecutionError, BlockValidationError, RethError}; use reth_primitives_traits::transaction::{error::InvalidTransactionError, signed::RecoveryError}; +use reth_revm::db::bal::EvmDatabaseError; use reth_rpc_convert::{CallFeesError, EthTxEnvError, TransactionConversionError}; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, @@ -19,8 +20,11 @@ use reth_transaction_pool::error::{ Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, }; -use revm::context_interface::result::{ - EVMError, HaltReason, InvalidHeader, InvalidTransaction, OutOfGasError, +use revm::{ + context_interface::result::{ + EVMError, HaltReason, InvalidHeader, InvalidTransaction, OutOfGasError, + }, + state::bal::BalError, }; use revm_inspectors::tracing::{DebugInspectorError, MuxError}; use std::convert::Infallible; @@ -404,6 +408,24 @@ impl From for EthApiError { } } +impl From> for EthApiError +where + E: Into, +{ + fn from(value: EvmDatabaseError) -> Self { + match value { + 
EvmDatabaseError::Bal(err) => err.into(), + EvmDatabaseError::Database(err) => err.into(), + } + } +} + +impl From for EthApiError { + fn from(err: BalError) -> Self { + Self::EvmCustom(format!("bal error: {:?}", err)) + } +} + #[cfg(feature = "js-tracer")] impl From for EthApiError { fn from(error: revm_inspectors::tracing::js::JsInspectorError) -> Self { diff --git a/crates/stateless/src/witness_db.rs b/crates/stateless/src/witness_db.rs index 466b4de30b1..86ced518048 100644 --- a/crates/stateless/src/witness_db.rs +++ b/crates/stateless/src/witness_db.rs @@ -76,6 +76,7 @@ where nonce: account.nonce, code_hash: account.code_hash, code: None, + account_id: None, }) }) } diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index ac390343c50..d349c5aa6aa 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -26,6 +26,7 @@ derive_more.workspace = true thiserror.workspace = true revm-database-interface.workspace = true +revm-state.workspace = true [features] default = ["std"] @@ -39,4 +40,5 @@ std = [ "revm-database-interface/std", "reth-prune-types/std", "reth-static-file-types/std", + "revm-state/std", ] diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index c6d5a2e2609..13f8a0faa95 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -6,7 +6,8 @@ use derive_more::Display; use reth_primitives_traits::{transaction::signed::RecoveryError, GotExpected}; use reth_prune_types::PruneSegmentError; use reth_static_file_types::StaticFileSegment; -use revm_database_interface::DBErrorMarker; +use revm_database_interface::{bal::EvmDatabaseError, DBErrorMarker}; +use revm_state::bal::BalError; /// Provider result type. pub type ProviderResult = Result; @@ -17,6 +18,9 @@ pub enum ProviderError { /// Database error. #[error(transparent)] Database(#[from] DatabaseError), + /// BAL error. 
+ #[error("BAL error:{_0}")] + Bal(BalError), /// Pruning error. #[error(transparent)] Pruning(#[from] PruneSegmentError), @@ -207,6 +211,12 @@ impl From for ProviderError { } } +impl From for EvmDatabaseError { + fn from(error: ProviderError) -> Self { + Self::Database(error) + } +} + /// A root mismatch error at a given block height. #[derive(Clone, Debug, PartialEq, Eq, Display)] #[display("root mismatch at #{block_number} ({block_hash}): {root}")] diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 283f1d3b69d..410fdf00998 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -993,6 +993,7 @@ mod tests { nonce: 42, code_hash: B256::random(), code: Some(Bytecode::new_raw(Bytes::from(vec![1, 2]))), + account_id: None, }; let mut storage = StorageWithOriginalValues::default(); @@ -1037,6 +1038,7 @@ mod tests { nonce: 1, code_hash: B256::random(), code: None, + account_id: None, }; // Create hashed accounts with addresses. 
From e313de818b2f27ffaf56635185408d15a922b7e4 Mon Sep 17 00:00:00 2001 From: figtracer <1gusredo@gmail.com> Date: Fri, 16 Jan 2026 15:40:47 +0000 Subject: [PATCH 040/267] chore(provider): pre alloc tx hashes (#21114) --- crates/storage/provider/src/providers/database/provider.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1f4637d33f1..8a5e04b3c7c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -493,7 +493,9 @@ impl DatabaseProvider Date: Fri, 16 Jan 2026 17:08:30 +0100 Subject: [PATCH 041/267] perf(cli): use available_parallelism as default for re-execute (#21010) --- crates/cli/commands/src/re_execute.rs | 16 ++++++++++------ docs/vocs/docs/pages/cli/op-reth/re-execute.mdx | 4 +--- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 4 +--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/crates/cli/commands/src/re_execute.rs b/crates/cli/commands/src/re_execute.rs index 8223ca01208..41b6afdbc30 100644 --- a/crates/cli/commands/src/re_execute.rs +++ b/crates/cli/commands/src/re_execute.rs @@ -42,9 +42,9 @@ pub struct Command { #[arg(long)] to: Option, - /// Number of tasks to run in parallel - #[arg(long, default_value = "10")] - num_tasks: u64, + /// Number of tasks to run in parallel. Defaults to the number of available CPUs. + #[arg(long)] + num_tasks: Option, /// Continues with execution when an invalid block is encountered and collects these blocks. 
#[arg(long)] @@ -84,12 +84,16 @@ impl } }; + let num_tasks = self.num_tasks.unwrap_or_else(|| { + std::thread::available_parallelism().map(|n| n.get() as u64).unwrap_or(10) + }); + let total_blocks = max_block - min_block; let total_gas = calculate_gas_used_from_headers( &provider_factory.static_file_provider(), min_block..=max_block, )?; - let blocks_per_task = total_blocks / self.num_tasks; + let blocks_per_task = total_blocks / num_tasks; let db_at = { let provider_factory = provider_factory.clone(); @@ -107,10 +111,10 @@ impl let _guard = cancellation.drop_guard(); let mut tasks = JoinSet::new(); - for i in 0..self.num_tasks { + for i in 0..num_tasks { let start_block = min_block + i * blocks_per_task; let end_block = - if i == self.num_tasks - 1 { max_block } else { start_block + blocks_per_task }; + if i == num_tasks - 1 { max_block } else { start_block + blocks_per_task }; // Spawn thread executing blocks let provider_factory = provider_factory.clone(); diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx index e0759212e37..247f8ead687 100644 --- a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -138,9 +138,7 @@ Static Files: The height to end at. Defaults to the latest block --num-tasks - Number of tasks to run in parallel - - [default: 10] + Number of tasks to run in parallel. Defaults to the number of available CPUs --skip-invalid-blocks Continues with execution when an invalid block is encountered and collects these blocks diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 6cbacf37e4f..238f07c5655 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -138,9 +138,7 @@ Static Files: The height to end at. 
Defaults to the latest block --num-tasks - Number of tasks to run in parallel - - [default: 10] + Number of tasks to run in parallel. Defaults to the number of available CPUs --skip-invalid-blocks Continues with execution when an invalid block is encountered and collects these blocks From 5e178f6ac67f178bcd47a9203ad975c1b3319017 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 16 Jan 2026 17:24:45 +0100 Subject: [PATCH 042/267] chore(deps): update alloy-evm and alloy-op-evm to 0.26.3 (#21126) --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- crates/engine/tree/src/tree/metrics.rs | 11 ++++++++--- crates/ethereum/evm/src/test_utils.rs | 12 +++++++++++- examples/custom-beacon-withdrawals/src/main.rs | 4 ++++ examples/custom-node/src/evm/executor.rs | 4 ++++ 6 files changed, 33 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0153d42fae3..0cf63c85e4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -287,9 +287,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.26.0" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249337d8316a9ab983784597adfad66b78047ec9522d8b510185bc968c272618" +checksum = "a96827207397445a919a8adc49289b53cc74e48e460411740bba31cec2fc307d" dependencies = [ "alloy-consensus", "alloy-eips", @@ -404,9 +404,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.26.0" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9fc7cbe100f7b094428b488225c9aedb835f28a31b74f474ee1c41878e58c6" +checksum = "54dc5c46a92fc7267055a174d30efb34e2599a0047102a4d38a025ae521435ba" dependencies = [ "alloy-consensus", "alloy-eips", diff --git a/Cargo.toml b/Cargo.toml index 068a8f9ef62..fa6ae2f84ff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -488,7 +488,7 @@ alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.3" alloy-eip2124 = { version = "0.2.0", default-features = false } alloy-eip7928 = { 
version = "0.3.0", default-features = false } -alloy-evm = { version = "0.26.0", default-features = false } +alloy-evm = { version = "0.26.3", default-features = false } alloy-primitives = { version = "1.5.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.5.0" @@ -526,7 +526,7 @@ alloy-transport-ipc = { version = "1.4.3", default-features = false } alloy-transport-ws = { version = "1.4.3", default-features = false } # op -alloy-op-evm = { version = "0.26.0", default-features = false } +alloy-op-evm = { version = "0.26.3", default-features = false } alloy-op-hardforks = "0.4.4" op-alloy-rpc-types = { version = "0.23.1", default-features = false } op-alloy-rpc-types-engine = { version = "0.23.1", default-features = false } diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index ac4bb50044f..5acf81104ed 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -408,12 +408,13 @@ mod tests { /// A simple mock executor for testing that doesn't require complex EVM setup struct MockExecutor { state: EvmState, + receipts: Vec, hook: Option>, } impl MockExecutor { fn new(state: EvmState) -> Self { - Self { state, hook: None } + Self { state, receipts: vec![], hook: None } } } @@ -495,12 +496,16 @@ mod tests { self.hook = hook; } + fn evm_mut(&mut self) -> &mut Self::Evm { + panic!("Mock executor evm_mut() not implemented") + } + fn evm(&self) -> &Self::Evm { panic!("Mock executor evm() not implemented") } - fn evm_mut(&mut self) -> &mut Self::Evm { - panic!("Mock executor evm_mut() not implemented") + fn receipts(&self) -> &[Self::Receipt] { + &self.receipts } } diff --git a/crates/ethereum/evm/src/test_utils.rs b/crates/ethereum/evm/src/test_utils.rs index fe791b9f5fd..cf32d9e6bd1 100644 --- a/crates/ethereum/evm/src/test_utils.rs +++ b/crates/ethereum/evm/src/test_utils.rs 
@@ -65,7 +65,12 @@ impl BlockExecutorFactory for MockEvmConfig { DB: Database + 'a, I: Inspector<::Context<&'a mut State>> + 'a, { - MockExecutor { result: self.exec_results.lock().pop().unwrap(), evm, hook: None } + MockExecutor { + result: self.exec_results.lock().pop().unwrap(), + evm, + hook: None, + receipts: Vec::new(), + } } } @@ -76,6 +81,7 @@ pub struct MockExecutor<'a, DB: Database, I> { evm: EthEvm<&'a mut State, I, PrecompilesMap>, #[debug(skip)] hook: Option>, + receipts: Vec, } impl<'a, DB: Database, I: Inspector>>> BlockExecutor @@ -89,6 +95,10 @@ impl<'a, DB: Database, I: Inspector>>> BlockExec Ok(()) } + fn receipts(&self) -> &[Self::Receipt] { + &self.receipts + } + fn execute_transaction_without_commit( &mut self, _tx: impl ExecutableTx, diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 1d93226dd6a..d1e59384c5c 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -201,6 +201,10 @@ where self.inner.apply_pre_execution_changes() } + fn receipts(&self) -> &[Self::Receipt] { + self.inner.receipts() + } + fn execute_transaction_without_commit( &mut self, tx: impl ExecutableTx, diff --git a/examples/custom-node/src/evm/executor.rs b/examples/custom-node/src/evm/executor.rs index 5288e1d67a5..575b4949c0a 100644 --- a/examples/custom-node/src/evm/executor.rs +++ b/examples/custom-node/src/evm/executor.rs @@ -37,6 +37,10 @@ where self.inner.apply_pre_execution_changes() } + fn receipts(&self) -> &[Self::Receipt] { + self.inner.receipts() + } + fn execute_transaction_without_commit( &mut self, tx: impl ExecutableTx, From 80eb0d0fb64cd1d875cf2a4d47b1f64026cb86d6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 16 Jan 2026 17:07:19 +0000 Subject: [PATCH 043/267] refactor: use `BlockExecutionOutcome` in `ExecutedBlock` (#21123) --- crates/chain-state/src/in_memory.rs | 56 ++++++------ crates/chain-state/src/test_utils.rs | 
24 +++--- crates/engine/tree/src/tree/mod.rs | 20 +++-- .../tree/src/tree/payload_processor/mod.rs | 12 +-- .../src/tree/payload_processor/prewarm.rs | 12 +-- .../engine/tree/src/tree/payload_validator.rs | 13 ++- crates/engine/tree/src/tree/tests.rs | 6 +- crates/evm/execution-types/src/execute.rs | 35 ++++++++ crates/optimism/flashblocks/src/worker.rs | 14 +-- crates/optimism/payload/src/builder.rs | 10 +-- crates/payload/primitives/src/traits.rs | 4 +- .../rpc-eth-api/src/helpers/pending_block.rs | 10 +-- crates/rpc/rpc-eth-types/src/pending_block.rs | 4 +- .../src/providers/blockchain_provider.rs | 30 +++++-- .../provider/src/providers/consistent.rs | 26 +++--- .../src/providers/database/provider.rs | 33 ++++--- .../src/providers/rocksdb/provider.rs | 4 +- .../src/providers/static_file/manager.rs | 4 +- .../storage/storage-api/src/state_writer.rs | 85 +++++++++++++++++-- 19 files changed, 264 insertions(+), 138 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 7ffd939c83c..7f2f328b191 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -10,7 +10,7 @@ use alloy_primitives::{map::HashMap, BlockNumber, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_ethereum_primitives::EthPrimitives; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives_traits::{ BlockBody as _, IndexedTx, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, @@ -18,7 +18,7 @@ use reth_primitives_traits::{ }; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, TrieInputSorted}; -use std::{collections::BTreeMap, ops::Deref, sync::Arc, time::Instant}; +use std::{collections::BTreeMap, sync::Arc, time::Instant}; use tokio::sync::{broadcast, watch}; /// 
Size of the broadcast channel used to notify canonical state events. @@ -648,7 +648,7 @@ impl BlockState { } /// Returns the `Receipts` of executed block that determines the state. - pub fn receipts(&self) -> &Vec> { + pub fn receipts(&self) -> &Vec { &self.block.execution_outcome().receipts } @@ -659,15 +659,7 @@ impl BlockState { /// /// This clones the vector of receipts. To avoid it, use [`Self::executed_block_receipts_ref`]. pub fn executed_block_receipts(&self) -> Vec { - let receipts = self.receipts(); - - debug_assert!( - receipts.len() <= 1, - "Expected at most one block's worth of receipts, found {}", - receipts.len() - ); - - receipts.first().cloned().unwrap_or_default() + self.receipts().clone() } /// Returns a slice of `Receipt` of executed block that determines the state. @@ -675,15 +667,7 @@ impl BlockState { /// has only one element corresponding to the executed block associated to /// the state. pub fn executed_block_receipts_ref(&self) -> &[N::Receipt] { - let receipts = self.receipts(); - - debug_assert!( - receipts.len() <= 1, - "Expected at most one block's worth of receipts, found {}", - receipts.len() - ); - - receipts.first().map(|receipts| receipts.deref()).unwrap_or_default() + self.receipts() } /// Returns an iterator over __parent__ `BlockStates`. @@ -767,7 +751,7 @@ pub struct ExecutedBlock { /// Recovered Block pub recovered_block: Arc>, /// Block's execution outcome. - pub execution_output: Arc>, + pub execution_output: Arc>, /// Deferred trie data produced by execution. /// /// This allows deferring the computation of the trie data which can be expensive. 
@@ -779,7 +763,15 @@ impl Default for ExecutedBlock { fn default() -> Self { Self { recovered_block: Default::default(), - execution_output: Default::default(), + execution_output: Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: Default::default(), + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: Default::default(), + }), trie_data: DeferredTrieData::ready(ComputedTrieData::default()), } } @@ -800,7 +792,7 @@ impl ExecutedBlock { /// payload builders). This is the safe default path. pub fn new( recovered_block: Arc>, - execution_output: Arc>, + execution_output: Arc>, trie_data: ComputedTrieData, ) -> Self { Self { recovered_block, execution_output, trie_data: DeferredTrieData::ready(trie_data) } @@ -822,7 +814,7 @@ impl ExecutedBlock { /// Use [`Self::new()`] instead when trie data is already computed and available immediately. pub const fn with_deferred_trie_data( recovered_block: Arc>, - execution_output: Arc>, + execution_output: Arc>, trie_data: DeferredTrieData, ) -> Self { Self { recovered_block, execution_output, trie_data } @@ -842,7 +834,7 @@ impl ExecutedBlock { /// Returns a reference to the block's execution outcome #[inline] - pub fn execution_outcome(&self) -> &ExecutionOutcome { + pub fn execution_outcome(&self) -> &BlockExecutionOutput { &self.execution_output } @@ -958,14 +950,20 @@ impl> NewCanonicalChain { [first, rest @ ..] 
=> { let mut chain = Chain::from_block( first.recovered_block().clone(), - first.execution_outcome().clone(), + ExecutionOutcome::from(( + first.execution_outcome().clone(), + first.block_number(), + )), first.trie_updates(), first.hashed_state(), ); for exec in rest { chain.append_block( exec.recovered_block().clone(), - exec.execution_outcome().clone(), + ExecutionOutcome::from(( + exec.execution_outcome().clone(), + exec.block_number(), + )), exec.trie_updates(), exec.hashed_state(), ); @@ -1264,7 +1262,7 @@ mod tests { let state = BlockState::new(block); - assert_eq!(state.receipts(), &receipts); + assert_eq!(state.receipts(), receipts.first().unwrap()); } #[test] diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 2be8de2d78a..73bad27d79f 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -3,10 +3,7 @@ use crate::{ CanonStateSubscriptions, ComputedTrieData, }; use alloy_consensus::{Header, SignableTransaction, TxEip1559, TxReceipt, EMPTY_ROOT_HASH}; -use alloy_eips::{ - eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, INITIAL_BASE_FEE}, - eip7685::Requests, -}; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, INITIAL_BASE_FEE}; use alloy_primitives::{Address, BlockNumber, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; @@ -16,7 +13,7 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_ethereum_primitives::{ Block, BlockBody, EthPrimitives, Receipt, Transaction, TransactionSigned, }; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; use reth_primitives_traits::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, Account, NodePrimitives, Recovered, RecoveredBlock, SealedBlock, SealedHeader, @@ -201,7 +198,7 @@ impl TestBlockBuilder { fn get_executed_block( &mut self, 
block_number: BlockNumber, - receipts: Vec>, + mut receipts: Vec>, parent_hash: B256, ) -> ExecutedBlock { let block = self.generate_random_block(block_number, parent_hash); @@ -209,12 +206,15 @@ impl TestBlockBuilder { let trie_data = ComputedTrieData::default(); ExecutedBlock::new( Arc::new(RecoveredBlock::new_sealed(block, senders)), - Arc::new(ExecutionOutcome::new( - BundleState::default(), - receipts, - block_number, - vec![Requests::default()], - )), + Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: receipts.pop().unwrap_or_default(), + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: BundleState::default(), + }), trie_data, ) } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index c0eb40d337c..6796e098d1a 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -30,9 +30,9 @@ use reth_payload_primitives::{ }; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ - BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, HashedPostStateProvider, - ProviderError, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, - TransactionVariant, + BlockExecutionOutput, BlockExecutionResult, BlockNumReader, BlockReader, ChangeSetReader, + DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StageCheckpointReader, + StateProviderBox, StateProviderFactory, StateReader, TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; @@ -1856,7 +1856,7 @@ where .sealed_block_with_senders(hash.into(), TransactionVariant::WithHash)? .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))? .split_sealed(); - let execution_output = self + let mut execution_output = self .provider .get_state(block.header().number())? 
.ok_or_else(|| ProviderError::StateForNumberNotFound(block.header().number()))?; @@ -1880,9 +1880,19 @@ where let trie_data = ComputedTrieData::without_trie_input(sorted_hashed_state, sorted_trie_updates); + let execution_output = Arc::new(BlockExecutionOutput { + state: execution_output.bundle, + result: BlockExecutionResult { + receipts: execution_output.receipts.pop().unwrap_or_default(), + requests: execution_output.requests.pop().unwrap_or_default(), + gas_used: block.gas_used(), + blob_gas_used: block.blob_gas_used().unwrap_or_default(), + }, + }); + Ok(Some(ExecutedBlock::new( Arc::new(RecoveredBlock::new_sealed(block, senders)), - Arc::new(execution_output), + execution_output, trie_data, ))) } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 6ba285c3bc4..73924117059 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -28,10 +28,10 @@ use reth_evm::{ ConfigureEvm, EvmEnvFor, ExecutableTxIterator, ExecutableTxTuple, OnStateHook, SpecFor, TxEnvFor, }; -use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - BlockReader, DatabaseProviderROFactory, StateProvider, StateProviderFactory, StateReader, + BlockExecutionOutput, BlockReader, DatabaseProviderROFactory, StateProvider, + StateProviderFactory, StateReader, }; use reth_revm::{db::BundleState, state::EvmState}; use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; @@ -665,12 +665,12 @@ impl PayloadHandle { /// Terminates the entire caching task. /// - /// If the [`ExecutionOutcome`] is provided it will update the shared cache using its + /// If the [`BlockExecutionOutput`] is provided it will update the shared cache using its /// bundle state. Using `Arc` allows sharing with the main execution /// path without cloning the expensive `BundleState`. 
pub(super) fn terminate_caching( &mut self, - execution_outcome: Option>>, + execution_outcome: Option>>, ) { self.prewarm_handle.terminate_caching(execution_outcome) } @@ -707,11 +707,11 @@ impl CacheTaskHandle { /// Terminates the entire pre-warming task. /// - /// If the [`ExecutionOutcome`] is provided it will update the shared cache using its + /// If the [`BlockExecutionOutput`] is provided it will update the shared cache using its /// bundle state. Using `Arc` avoids cloning the expensive `BundleState`. pub(super) fn terminate_caching( &mut self, - execution_outcome: Option>>, + execution_outcome: Option>>, ) { if let Some(tx) = self.to_prewarm_task.take() { let event = PrewarmTaskEvent::Terminate { execution_outcome }; diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 835cffe9e38..99c689dd196 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -30,10 +30,12 @@ use alloy_primitives::{keccak256, map::B256Set, B256}; use crossbeam_channel::Sender as CrossbeamSender; use metrics::{Counter, Gauge, Histogram}; use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, SpecFor}; -use reth_execution_types::ExecutionOutcome; use reth_metrics::Metrics; use reth_primitives_traits::NodePrimitives; -use reth_provider::{AccountReader, BlockReader, StateProvider, StateProviderFactory, StateReader}; +use reth_provider::{ + AccountReader, BlockExecutionOutput, BlockReader, StateProvider, StateProviderFactory, + StateReader, +}; use reth_revm::{database::StateProviderDatabase, state::EvmState}; use reth_trie::MultiProofTargets; use std::{ @@ -259,7 +261,7 @@ where /// /// This method is called from `run()` only after all execution tasks are complete. 
#[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] - fn save_cache(self, execution_outcome: Arc>) { + fn save_cache(self, execution_outcome: Arc>) { let start = Instant::now(); let Self { execution_cache, ctx: PrewarmContext { env, metrics, saved_cache, .. }, .. } = @@ -277,7 +279,7 @@ where // Insert state into cache while holding the lock // Access the BundleState through the shared ExecutionOutcome - if new_cache.cache().insert_state(execution_outcome.state()).is_err() { + if new_cache.cache().insert_state(&execution_outcome.state).is_err() { // Clear the cache on error to prevent having a polluted cache *cached = None; debug!(target: "engine::caching", "cleared execution cache on update error"); @@ -810,7 +812,7 @@ pub(super) enum PrewarmTaskEvent { Terminate { /// The final execution outcome. Using `Arc` allows sharing with the main execution /// path without cloning the expensive `BundleState`. - execution_outcome: Option>>, + execution_outcome: Option>>, }, /// The outcome of a pre-warm task Outcome { diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 056b413e7ac..2d2dbe1cb72 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -41,9 +41,9 @@ use reth_primitives_traits::{ }; use reth_provider::{ providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader, - ChangeSetReader, DatabaseProviderFactory, DatabaseProviderROFactory, ExecutionOutcome, - HashedPostStateProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, - StateProvider, StateProviderFactory, StateReader, + ChangeSetReader, DatabaseProviderFactory, DatabaseProviderROFactory, HashedPostStateProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider, + StateProviderFactory, StateReader, }; use reth_revm::db::State; use reth_trie::{ @@ -376,7 +376,6 @@ 
where } let parent_hash = input.parent_hash(); - let block_num_hash = input.num_hash(); trace!(target: "engine::tree::payload_validator", "Fetching block state provider"); let _enter = @@ -586,7 +585,7 @@ where // Create ExecutionOutcome and wrap in Arc for sharing with both the caching task // and the deferred trie task. This avoids cloning the expensive BundleState. - let execution_outcome = Arc::new(ExecutionOutcome::from((output, block_num_hash.number))); + let execution_outcome = Arc::new(output); // Terminate prewarming task with the shared execution outcome handle.terminate_caching(Some(Arc::clone(&execution_outcome))); @@ -1097,7 +1096,7 @@ where fn spawn_deferred_trie_task( &self, block: RecoveredBlock, - execution_outcome: Arc>, + execution_outcome: Arc>, ctx: &TreeCtx<'_, N>, hashed_state: HashedPostState, trie_output: TrieUpdates, @@ -1344,7 +1343,7 @@ where fn on_inserted_executed_block(&self, block: ExecutedBlock) { self.payload_processor.on_inserted_executed_block( block.recovered_block.block_with_parent(), - block.execution_output.state(), + &block.execution_output.state, ); } } diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index d5d1ef5adcb..adfc62ef4bc 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -27,7 +27,7 @@ use reth_ethereum_engine_primitives::EthEngineTypes; use reth_ethereum_primitives::{Block, EthPrimitives}; use reth_evm_ethereum::MockEvmConfig; use reth_primitives_traits::Block as _; -use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome}; +use reth_provider::test_utils::MockEthProvider; use std::{ collections::BTreeMap, str::FromStr, @@ -838,7 +838,7 @@ fn test_tree_state_on_new_head_deep_fork() { for block in &chain_a { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock::new( Arc::new(block.clone()), - Arc::new(ExecutionOutcome::default()), + Arc::new(BlockExecutionOutput::default()), empty_trie_data(), )); } @@ -847,7 
+847,7 @@ fn test_tree_state_on_new_head_deep_fork() { for block in &chain_b { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock::new( Arc::new(block.clone()), - Arc::new(ExecutionOutcome::default()), + Arc::new(BlockExecutionOutput::default()), empty_trie_data(), )); } diff --git a/crates/evm/execution-types/src/execute.rs b/crates/evm/execution-types/src/execute.rs index b014df07522..452ebae2b3a 100644 --- a/crates/evm/execution-types/src/execute.rs +++ b/crates/evm/execution-types/src/execute.rs @@ -1,3 +1,5 @@ +use alloy_primitives::{Address, B256, U256}; +use reth_primitives_traits::{Account, Bytecode}; use revm::database::BundleState; pub use alloy_evm::block::BlockExecutionResult; @@ -23,3 +25,36 @@ pub struct BlockExecutionOutput { /// The changed state of the block after execution. pub state: BundleState, } + +impl BlockExecutionOutput { + /// Return bytecode if known. + pub fn bytecode(&self, code_hash: &B256) -> Option { + self.state.bytecode(code_hash).map(Bytecode) + } + + /// Get account if account is known. + pub fn account(&self, address: &Address) -> Option> { + self.state.account(address).map(|a| a.info.as_ref().map(Into::into)) + } + + /// Get storage if value is known. + /// + /// This means that depending on status we can potentially return `U256::ZERO`. 
+ pub fn storage(&self, address: &Address, storage_key: U256) -> Option { + self.state.account(address).and_then(|a| a.storage_slot(storage_key)) + } +} + +impl Default for BlockExecutionOutput { + fn default() -> Self { + Self { + result: BlockExecutionResult { + receipts: Default::default(), + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: Default::default(), + } + } +} diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 7d9ab860a58..e1b29c27296 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -8,10 +8,8 @@ use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, ConfigureEvm, }; -use reth_execution_types::ExecutionOutcome; -use reth_primitives_traits::{ - AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, -}; +use reth_execution_types::BlockExecutionOutput; +use reth_primitives_traits::{BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered}; use reth_revm::{cached::CachedReads, database::StateProviderDatabase, db::State}; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{noop::NoopProvider, BlockReaderIdExt, StateProviderFactory}; @@ -112,12 +110,8 @@ where builder.finish(NoopProvider::default())? 
}; - let execution_outcome = ExecutionOutcome::new( - state.take_bundle(), - vec![execution_result.receipts], - block.number(), - vec![execution_result.requests], - ); + let execution_outcome = + BlockExecutionOutput { state: state.take_bundle(), result: execution_result }; let pending_block = PendingBlock::with_executed_block( Instant::now() + Duration::from_secs(1), diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 05d156ab3b4..1cb766db099 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -18,7 +18,7 @@ use reth_evm::{ op_revm::{constants::L1_BLOCK_CONTRACT, L1BlockInfo}, ConfigureEvm, Database, }; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::BlockExecutionOutput; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{transaction::OpTransaction, L2_TO_L1_MESSAGE_PASSER_ADDRESS}; use reth_optimism_txpool::{ @@ -375,12 +375,8 @@ impl OpBuilder<'_, Txs> { let sealed_block = Arc::new(block.sealed_block().clone()); debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header(), "sealed built block"); - let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), - vec![execution_result.receipts], - block.number(), - Vec::new(), - ); + let execution_outcome = + BlockExecutionOutput { state: db.take_bundle(), result: execution_result }; // create the executed block data let executed: BuiltPayloadExecutedBlock = BuiltPayloadExecutedBlock { diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 726122743ea..fa102c85e2c 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -11,7 +11,7 @@ use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadI use core::fmt; use either::Either; use reth_chain_state::ComputedTrieData; -use reth_execution_types::ExecutionOutcome; 
+use reth_execution_types::BlockExecutionOutput; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_trie_common::{ updates::{TrieUpdates, TrieUpdatesSorted}, @@ -27,7 +27,7 @@ pub struct BuiltPayloadExecutedBlock { /// Recovered Block pub recovered_block: Arc>, /// Block's execution outcome. - pub execution_output: Arc>, + pub execution_output: Arc>, /// Block's hashed state. /// /// Supports both unsorted and sorted variants so payload builders can avoid cloning in order diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 3fad2ae01f2..dc6222f9df1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -12,7 +12,7 @@ use reth_chain_state::{BlockState, ComputedTrieData, ExecutedBlock}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ - execute::{BlockBuilder, BlockBuilderOutcome, ExecutionOutcome}, + execute::{BlockBuilder, BlockBuilderOutcome, BlockExecutionOutput}, ConfigureEvm, Evm, NextBlockEnvAttributes, }; use reth_primitives_traits::{transaction::error::InvalidTransactionError, HeaderTy, SealedHeader}; @@ -363,12 +363,8 @@ pub trait LoadPendingBlock: let BlockBuilderOutcome { execution_result, block, hashed_state, trie_updates } = builder.finish(NoopProvider::default()).map_err(Self::Error::from_eth_err)?; - let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), - vec![execution_result.receipts], - block.number(), - vec![execution_result.requests], - ); + let execution_outcome = + BlockExecutionOutput { state: db.take_bundle(), result: execution_result }; Ok(ExecutedBlock::new( block.into(), diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 92a4ad6bde9..0cc01eee15a 100644 --- 
a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -99,9 +99,7 @@ impl PendingBlock { pub fn with_executed_block(expires_at: Instant, executed_block: ExecutedBlock) -> Self { Self { expires_at, - receipts: Arc::new( - executed_block.execution_output.receipts.iter().flatten().cloned().collect(), - ), + receipts: Arc::new(executed_block.execution_output.receipts.clone()), executed_block, } } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 0e290f4aecd..58ec1e25571 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -790,7 +790,9 @@ mod tests { use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_ethereum_primitives::{Block, Receipt}; - use reth_execution_types::{Chain, ExecutionOutcome}; + use reth_execution_types::{ + BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome, + }; use reth_primitives_traits::{RecoveredBlock, SealedBlock, SignerRecoverable}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, @@ -909,8 +911,15 @@ mod tests { .map(|block| { let senders = block.senders().expect("failed to recover senders"); let block_receipts = receipts.get(block.number as usize).unwrap().clone(); - let execution_outcome = - ExecutionOutcome { receipts: vec![block_receipts], ..Default::default() }; + let execution_outcome = BlockExecutionOutput { + result: BlockExecutionResult { + receipts: block_receipts, + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: BundleState::default(), + }; ExecutedBlock { recovered_block: Arc::new(RecoveredBlock::new_sealed( @@ -979,8 +988,7 @@ mod tests { state.parent_state_chain().last().expect("qed").block(); let num_hash = 
lowest_memory_block.recovered_block().num_hash(); - let mut execution_output = (*lowest_memory_block.execution_output).clone(); - execution_output.first_block = lowest_memory_block.recovered_block().number; + let execution_output = (*lowest_memory_block.execution_output).clone(); lowest_memory_block.execution_output = Arc::new(execution_output); // Push to disk @@ -1708,8 +1716,8 @@ mod tests { block.clone(), senders, )), - execution_output: Arc::new(ExecutionOutcome { - bundle: BundleState::new( + execution_output: Arc::new(BlockExecutionOutput { + state: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) }), @@ -1718,8 +1726,12 @@ mod tests { })], [], ), - first_block: first_in_memory_block, - ..Default::default() + result: BlockExecutionResult { + receipts: Default::default(), + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, }), ..Default::default() } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index b4eb5769c6b..e0c503eae01 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1307,7 +1307,7 @@ impl StorageChangeSetReader for ConsistentProvider { let changesets = state .block() .execution_output - .bundle + .state .reverts .clone() .to_plain_state_reverts() @@ -1360,7 +1360,7 @@ impl ChangeSetReader for ConsistentProvider { let changesets = state .block_ref() .execution_output - .bundle + .state .reverts .clone() .to_plain_state_reverts() @@ -1406,7 +1406,7 @@ impl ChangeSetReader for ConsistentProvider { let changeset = state .block_ref() .execution_output - .bundle + .state .reverts .clone() .to_plain_state_reverts() @@ -1460,7 +1460,7 @@ impl ChangeSetReader for ConsistentProvider { let block_changesets = state .block_ref() .execution_output - .bundle + .state .reverts .clone() .to_plain_state_reverts() @@ 
-1508,7 +1508,7 @@ impl ChangeSetReader for ConsistentProvider { count += state .block_ref() .execution_output - .bundle + .state .reverts .clone() .to_plain_state_reverts() @@ -1551,7 +1551,7 @@ impl StateReader for ConsistentProvider { ) -> ProviderResult>> { if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { let state = state.block_ref().execution_outcome().clone(); - Ok(Some(state)) + Ok(Some(ExecutionOutcome::from((state, block)))) } else { Self::get_state(self, block..=block) } @@ -1571,7 +1571,7 @@ mod tests { use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; use reth_db_api::models::AccountBeforeTx; use reth_ethereum_primitives::Block; - use reth_execution_types::ExecutionOutcome; + use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; use reth_primitives_traits::{RecoveredBlock, SealedBlock}; use reth_storage_api::{BlockReader, BlockSource, ChangeSetReader}; use reth_testing_utils::generators::{ @@ -1883,8 +1883,8 @@ mod tests { block.clone(), senders, )), - execution_output: Arc::new(ExecutionOutcome { - bundle: BundleState::new( + execution_output: Arc::new(BlockExecutionOutput { + state: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) }), @@ -1893,8 +1893,12 @@ mod tests { })], [], ), - first_block: first_in_memory_block, - ..Default::default() + result: BlockExecutionResult { + receipts: Default::default(), + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, }), ..Default::default() } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8a5e04b3c7c..3ec5bd28cb4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -47,7 +47,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, BlockNumberList, 
PlainAccountState, PlainStorageState, }; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives_traits::{ Account, Block as _, BlockBody as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry, @@ -60,7 +60,7 @@ use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ BlockBodyIndicesProvider, BlockBodyReader, MetadataProvider, MetadataWriter, NodePrimitivesProvider, StateProvider, StateWriteConfig, StorageChangeSetReader, - StorageSettingsCache, TryIntoHistoricalStateProvider, + StorageSettingsCache, TryIntoHistoricalStateProvider, WriteStateInput, }; use reth_storage_errors::provider::{ProviderResult, StaticFileWriterError}; use reth_trie::{ @@ -537,7 +537,10 @@ impl DatabaseProvider StateWriter type Receipt = ReceiptTy; #[instrument(level = "debug", target = "providers::db", skip_all)] - fn write_state( + fn write_state<'a>( &self, - execution_outcome: &ExecutionOutcome, + execution_outcome: impl Into>, is_value_known: OriginalValuesKnown, config: StateWriteConfig, ) -> ProviderResult<()> { + let execution_outcome = execution_outcome.into(); let first_block = execution_outcome.first_block(); let (plain_state, reverts) = - execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); + execution_outcome.state().to_plain_state_and_reverts(is_value_known); self.write_state_reverts(reverts, first_block, config)?; self.write_state_changes(plain_state)?; @@ -2101,7 +2105,7 @@ impl StateWriter } for (idx, (receipts, first_tx_index)) in - execution_outcome.receipts.iter().zip(block_indices).enumerate() + execution_outcome.receipts().zip(block_indices).enumerate() { let block_number = first_block + idx as u64; @@ -3130,12 +3134,15 @@ impl BlockWriter // Wrap block in ExecutedBlock with empty execution output (no receipts/state/trie) let executed_block = 
ExecutedBlock::new( Arc::new(block.clone()), - Arc::new(ExecutionOutcome::new( - Default::default(), - Vec::>>::new(), - block_number, - vec![], - )), + Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: Default::default(), + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: Default::default(), + }), ComputedTrieData::default(), ); diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 88f09a9d350..626e73ad05d 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -587,7 +587,7 @@ impl RocksDBProvider { let mut account_history: BTreeMap> = BTreeMap::new(); for (block_idx, block) in blocks.iter().enumerate() { let block_number = ctx.first_block_number + block_idx as u64; - let bundle = &block.execution_outcome().bundle; + let bundle = &block.execution_outcome().state; for &address in bundle.state().keys() { account_history.entry(address).or_default().push(block_number); } @@ -612,7 +612,7 @@ impl RocksDBProvider { let mut storage_history: BTreeMap<(Address, B256), Vec> = BTreeMap::new(); for (block_idx, block) in blocks.iter().enumerate() { let block_number = ctx.first_block_number + block_idx as u64; - let bundle = &block.execution_outcome().bundle; + let bundle = &block.execution_outcome().state; for (&address, account) in bundle.state() { for &slot in account.storage.keys() { let key = B256::new(slot.to_be_bytes()); diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 718283114f2..b835a4491de 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -594,7 +594,7 @@ impl StaticFileProvider { continue } - for (i, receipt) in 
block.execution_outcome().receipts.iter().flatten().enumerate() { + for (i, receipt) in block.execution_outcome().receipts.iter().enumerate() { w.append_receipt(first_tx + i as u64, receipt)?; } } @@ -609,7 +609,7 @@ impl StaticFileProvider { ) -> ProviderResult<()> { for block in blocks { let block_number = block.recovered_block().number(); - let reverts = block.execution_outcome().bundle.reverts.to_plain_state_reverts(); + let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts(); for account_block_reverts in reverts.accounts { let changeset = account_block_reverts diff --git a/crates/storage/storage-api/src/state_writer.rs b/crates/storage/storage-api/src/state_writer.rs index 3daab1a85ad..f2c193559b9 100644 --- a/crates/storage/storage-api/src/state_writer.rs +++ b/crates/storage/storage-api/src/state_writer.rs @@ -1,23 +1,98 @@ +use alloc::vec::Vec; +use alloy_consensus::transaction::Either; use alloy_primitives::BlockNumber; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::HashedPostStateSorted; use revm_database::{ states::{PlainStateReverts, StateChangeset}, - OriginalValuesKnown, + BundleState, OriginalValuesKnown, }; +/// A helper type used as input to [`StateWriter`] for writing execution outcome for one or many +/// blocks. +#[derive(Debug)] +pub enum WriteStateInput<'a, R> { + /// A single block execution outcome. + Single { + /// The execution outcome. + outcome: &'a BlockExecutionOutput, + /// Block number + block: BlockNumber, + }, + /// Multiple block execution outcomes. + Multiple(&'a ExecutionOutcome), +} + +impl<'a, R> WriteStateInput<'a, R> { + /// Number of blocks in the execution outcome. + pub const fn len(&self) -> usize { + match self { + Self::Single { .. } => 1, + Self::Multiple(outcome) => outcome.len(), + } + } + + /// Returns true if the execution outcome is empty. 
+ pub const fn is_empty(&self) -> bool { + match self { + Self::Single { outcome, .. } => outcome.result.receipts.is_empty(), + Self::Multiple(outcome) => outcome.is_empty(), + } + } + + /// Number of the first block. + pub const fn first_block(&self) -> BlockNumber { + match self { + Self::Single { block, .. } => *block, + Self::Multiple(outcome) => outcome.first_block(), + } + } + + /// Number of the last block. + pub const fn last_block(&self) -> BlockNumber { + match self { + Self::Single { block, .. } => *block, + Self::Multiple(outcome) => outcome.last_block(), + } + } + + /// Returns a reference to the [`BundleState`]. + pub const fn state(&self) -> &BundleState { + match self { + Self::Single { outcome, .. } => &outcome.state, + Self::Multiple(outcome) => &outcome.bundle, + } + } + + /// Returns an iterator over receipt sets for each block. + pub fn receipts(&self) -> impl Iterator> { + match self { + Self::Single { outcome, .. } => { + Either::Left(core::iter::once(&outcome.result.receipts)) + } + Self::Multiple(outcome) => Either::Right(outcome.receipts.iter()), + } + } +} + +impl<'a, R> From<&'a ExecutionOutcome> for WriteStateInput<'a, R> { + fn from(outcome: &'a ExecutionOutcome) -> Self { + Self::Multiple(outcome) + } +} + /// A trait specifically for writing state changes or reverts pub trait StateWriter { /// Receipt type included into [`ExecutionOutcome`]. - type Receipt; + type Receipt: 'static; /// Write the state and optionally receipts to the database. /// /// Use `config` to skip writing certain data types when they are written elsewhere. 
- fn write_state( + fn write_state<'a>( &self, - execution_outcome: &ExecutionOutcome, + execution_outcome: impl Into>, is_value_known: OriginalValuesKnown, config: StateWriteConfig, ) -> ProviderResult<()>; From 1be9fab5bf20e75d61456d8c85be3607f0227ad3 Mon Sep 17 00:00:00 2001 From: YK Date: Sat, 17 Jan 2026 01:33:48 +0800 Subject: [PATCH 044/267] perf: Optimize multiproof sequencer `add_proof` (#21129) --- .../src/tree/payload_processor/multiproof.rs | 29 +++++++++++-------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index d3907720b5d..b5f1272b67e 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -141,22 +141,27 @@ impl ProofSequencer { /// Adds a proof with the corresponding state update and returns all sequential proofs and state /// updates if we have a continuous sequence fn add_proof(&mut self, sequence: u64, update: SparseTrieUpdate) -> Vec { - if sequence >= self.next_to_deliver { - self.pending_proofs.insert(sequence, update); - } - - let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len()); - let mut current_sequence = self.next_to_deliver; + // Optimization: fast path for in-order delivery to avoid BTreeMap overhead. + // If this is the expected sequence, return it immediately without buffering. 
+ if sequence == self.next_to_deliver { + let mut consecutive_proofs = Vec::with_capacity(1); + consecutive_proofs.push(update); + self.next_to_deliver += 1; + + // Check if we have subsequent proofs in the pending buffer + while let Some(pending) = self.pending_proofs.remove(&self.next_to_deliver) { + consecutive_proofs.push(pending); + self.next_to_deliver += 1; + } - // keep collecting proofs and state updates as long as we have consecutive sequence numbers - while let Some(pending) = self.pending_proofs.remove(¤t_sequence) { - consecutive_proofs.push(pending); - current_sequence += 1; + return consecutive_proofs; } - self.next_to_deliver += consecutive_proofs.len() as u64; + if sequence > self.next_to_deliver { + self.pending_proofs.insert(sequence, update); + } - consecutive_proofs + Vec::new() } /// Returns true if we still have pending proofs From 13c32625bc368f53090b72966f4833870e508d5f Mon Sep 17 00:00:00 2001 From: YK Date: Sat, 17 Jan 2026 01:44:43 +0800 Subject: [PATCH 045/267] feat(storage): add EitherReader for routing history queries to MDBX or RocksDB (#21063) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- .../src/providers/database/provider.rs | 21 +- crates/storage/provider/src/providers/mod.rs | 4 +- .../src/providers/rocksdb/invariants.rs | 170 +++++++-- .../src/providers/rocksdb/provider.rs | 358 +++++++++++++++--- .../src/providers/state/historical.rs | 144 ++++--- .../provider/src/traits/rocksdb_provider.rs | 20 +- 6 files changed, 555 insertions(+), 162 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 3ec5bd28cb4..af644a47a9b 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -16,7 +16,7 @@ use crate::{ HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, 
LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RawRocksDBBatch, RevertsInit, RocksBatchArg, - RocksDBProviderFactory, RocksTxRefArg, StageCheckpointReader, StateProviderBox, StateWriter, + RocksDBProviderFactory, StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, }; @@ -889,25 +889,6 @@ impl DatabaseProvider { pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } - - /// Executes a closure with a `RocksDB` transaction for reading. - /// - /// This helper encapsulates all the cfg-gated `RocksDB` transaction handling for reads. - fn with_rocksdb_tx(&self, f: F) -> ProviderResult - where - F: FnOnce(RocksTxRefArg<'_>) -> ProviderResult, - { - #[cfg(all(unix, feature = "rocksdb"))] - let rocksdb = self.rocksdb_provider(); - #[cfg(all(unix, feature = "rocksdb"))] - let rocksdb_tx = rocksdb.tx(); - #[cfg(all(unix, feature = "rocksdb"))] - let rocksdb_tx_ref = &rocksdb_tx; - #[cfg(not(all(unix, feature = "rocksdb")))] - let rocksdb_tx_ref = (); - - f(rocksdb_tx_ref) - } } impl DatabaseProvider { diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 14f112a27b7..1047e58c063 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -16,8 +16,8 @@ pub use static_file::{ mod state; pub use state::{ historical::{ - history_info, needs_prev_shard_check, HistoricalStateProvider, HistoricalStateProviderRef, - HistoryInfo, LowestAvailableBlocks, + compute_history_rank, history_info, needs_prev_shard_check, HistoricalStateProvider, + HistoricalStateProviderRef, HistoryInfo, LowestAvailableBlocks, }, latest::{LatestStateProvider, LatestStateProviderRef}, overlay::{OverlayStateProvider, OverlayStateProviderFactory}, diff --git 
a/crates/storage/provider/src/providers/rocksdb/invariants.rs b/crates/storage/provider/src/providers/rocksdb/invariants.rs index 7a5c5f9db30..75be8ca5adf 100644 --- a/crates/storage/provider/src/providers/rocksdb/invariants.rs +++ b/crates/storage/provider/src/providers/rocksdb/invariants.rs @@ -164,16 +164,7 @@ impl RocksDBProvider { self.prune_transaction_hash_numbers_in_range(provider, 0..=highest_tx)?; } (None, None) => { - // Both MDBX and static files are empty. - // If checkpoint says we should have data, that's an inconsistency. - if checkpoint > 0 { - tracing::warn!( - target: "reth::providers::rocksdb", - checkpoint, - "Checkpoint set but no transaction data exists, unwind needed" - ); - return Ok(Some(0)); - } + // Both MDBX and static files are empty, nothing to check. } } @@ -263,16 +254,27 @@ impl RocksDBProvider { } // Find the max highest_block_number (excluding u64::MAX sentinel) across all - // entries + // entries. Also track if we found any non-sentinel entries. let mut max_highest_block = 0u64; + let mut found_non_sentinel = false; for result in self.iter::()? { let (key, _) = result?; let highest = key.sharded_key.highest_block_number; - if highest != u64::MAX && highest > max_highest_block { - max_highest_block = highest; + if highest != u64::MAX { + found_non_sentinel = true; + if highest > max_highest_block { + max_highest_block = highest; + } } } + // If all entries are sentinel entries (u64::MAX), treat as first-run scenario. + // This means no completed shards exist (only sentinel shards with + // highest_block_number=u64::MAX), so no actual history has been indexed. 
+ if !found_non_sentinel { + return Ok(None); + } + // If any entry has highest_block > checkpoint, prune excess if max_highest_block > checkpoint { tracing::info!( @@ -296,11 +298,7 @@ impl RocksDBProvider { Ok(None) } None => { - // Empty RocksDB table - if checkpoint > 0 { - // Stage says we should have data but we don't - return Ok(Some(0)); - } + // Empty RocksDB table, nothing to check. Ok(None) } } @@ -377,16 +375,27 @@ impl RocksDBProvider { } // Find the max highest_block_number (excluding u64::MAX sentinel) across all - // entries + // entries. Also track if we found any non-sentinel entries. let mut max_highest_block = 0u64; + let mut found_non_sentinel = false; for result in self.iter::()? { let (key, _) = result?; let highest = key.highest_block_number; - if highest != u64::MAX && highest > max_highest_block { - max_highest_block = highest; + if highest != u64::MAX { + found_non_sentinel = true; + if highest > max_highest_block { + max_highest_block = highest; + } } } + // If all entries are sentinel entries (u64::MAX), treat as first-run scenario. + // This means no completed shards exist (only sentinel shards with + // highest_block_number=u64::MAX), so no actual history has been indexed. + if !found_non_sentinel { + return Ok(None); + } + // If any entry has highest_block > checkpoint, prune excess if max_highest_block > checkpoint { tracing::info!( @@ -413,11 +422,7 @@ impl RocksDBProvider { Ok(None) } None => { - // Empty RocksDB table - if checkpoint > 0 { - // Stage says we should have data but we don't - return Ok(Some(0)); - } + // Empty RocksDB table, nothing to check. 
Ok(None) } } @@ -542,7 +547,7 @@ mod tests { } #[test] - fn test_check_consistency_empty_rocksdb_with_checkpoint_needs_unwind() { + fn test_check_consistency_empty_rocksdb_with_checkpoint_is_first_run() { let temp_dir = TempDir::new().unwrap(); let rocksdb = RocksDBBuilder::new(temp_dir.path()) .with_table::() @@ -566,10 +571,10 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // RocksDB is empty but checkpoint says block 100 was processed - // This means RocksDB is missing data and we need to unwind to rebuild + // RocksDB is empty but checkpoint says block 100 was processed. + // This is treated as a first-run/migration scenario - no unwind needed. let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, Some(0), "Should require unwind to block 0 to rebuild RocksDB"); + assert_eq!(result, None, "Empty data with checkpoint is treated as first run"); } #[test] @@ -650,7 +655,7 @@ mod tests { } #[test] - fn test_check_consistency_storages_history_empty_with_checkpoint_needs_unwind() { + fn test_check_consistency_storages_history_empty_with_checkpoint_is_first_run() { let temp_dir = TempDir::new().unwrap(); let rocksdb = RocksDBBuilder::new(temp_dir.path()) .with_table::() @@ -674,9 +679,10 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // RocksDB is empty but checkpoint says block 100 was processed + // RocksDB is empty but checkpoint says block 100 was processed. + // This is treated as a first-run/migration scenario - no unwind needed. 
let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, Some(0), "Should require unwind to block 0 to rebuild StoragesHistory"); + assert_eq!(result, None, "Empty RocksDB with checkpoint is treated as first run"); } #[test] @@ -978,6 +984,97 @@ mod tests { ); } + #[test] + fn test_check_consistency_storages_history_sentinel_only_with_checkpoint_is_first_run() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Insert ONLY sentinel entries (highest_block_number = u64::MAX) + // This simulates a scenario where history tracking started but no shards were completed + let key_sentinel_1 = StorageShardedKey::new(Address::ZERO, B256::ZERO, u64::MAX); + let key_sentinel_2 = StorageShardedKey::new(Address::random(), B256::random(), u64::MAX); + let block_list = BlockNumberList::new_pre_sorted([10, 20, 30]); + rocksdb.put::(key_sentinel_1, &block_list).unwrap(); + rocksdb.put::(key_sentinel_2, &block_list).unwrap(); + + // Verify entries exist (not empty table) + assert!(rocksdb.first::().unwrap().is_some()); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + // Set a checkpoint indicating we should have processed up to block 100 + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(100)) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // RocksDB has only sentinel entries (no completed shards) but checkpoint is set. + // This is treated as a first-run/migration scenario - no unwind needed. 
+ let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!( + result, None, + "Sentinel-only entries with checkpoint should be treated as first run" + ); + } + + #[test] + fn test_check_consistency_accounts_history_sentinel_only_with_checkpoint_is_first_run() { + use reth_db_api::models::ShardedKey; + + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Insert ONLY sentinel entries (highest_block_number = u64::MAX) + let key_sentinel_1 = ShardedKey::new(Address::ZERO, u64::MAX); + let key_sentinel_2 = ShardedKey::new(Address::random(), u64::MAX); + let block_list = BlockNumberList::new_pre_sorted([10, 20, 30]); + rocksdb.put::(key_sentinel_1, &block_list).unwrap(); + rocksdb.put::(key_sentinel_2, &block_list).unwrap(); + + // Verify entries exist (not empty table) + assert!(rocksdb.first::().unwrap().is_some()); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_account_history_in_rocksdb(true), + ); + + // Set a checkpoint indicating we should have processed up to block 100 + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint(StageId::IndexAccountHistory, StageCheckpoint::new(100)) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // RocksDB has only sentinel entries (no completed shards) but checkpoint is set. + // This is treated as a first-run/migration scenario - no unwind needed. 
+ let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!( + result, None, + "Sentinel-only entries with checkpoint should be treated as first run" + ); + } + #[test] fn test_check_consistency_storages_history_behind_checkpoint_single_entry() { use reth_db_api::models::storage_sharded_key::StorageShardedKey; @@ -1135,7 +1232,7 @@ mod tests { } #[test] - fn test_check_consistency_accounts_history_empty_with_checkpoint_needs_unwind() { + fn test_check_consistency_accounts_history_empty_with_checkpoint_is_first_run() { let temp_dir = TempDir::new().unwrap(); let rocksdb = RocksDBBuilder::new(temp_dir.path()) .with_table::() @@ -1159,9 +1256,10 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // RocksDB is empty but checkpoint says block 100 was processed + // RocksDB is empty but checkpoint says block 100 was processed. + // This is treated as a first-run/migration scenario - no unwind needed. let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, Some(0), "Should require unwind to block 0 to rebuild AccountsHistory"); + assert_eq!(result, None, "Empty RocksDB with checkpoint is treated as first run"); } #[test] diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 626e73ad05d..cc427fcb8b8 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -1,11 +1,15 @@ use super::metrics::{RocksDBMetrics, RocksDBOperation}; -use crate::providers::{needs_prev_shard_check, HistoryInfo}; +use crate::providers::{compute_history_rank, needs_prev_shard_check, HistoryInfo}; use alloy_consensus::transaction::TxHashRef; use alloy_primitives::{Address, BlockNumber, TxNumber, B256}; +use itertools::Itertools; use parking_lot::Mutex; use reth_chain_state::ExecutedBlock; use reth_db_api::{ - models::{storage_sharded_key::StorageShardedKey, ShardedKey, 
StorageSettings}, + models::{ + sharded_key::NUM_OF_INDICES_IN_SHARD, storage_sharded_key::StorageShardedKey, ShardedKey, + StorageSettings, + }, table::{Compress, Decode, Decompress, Encode, Table}, tables, BlockNumberList, DatabaseError, }; @@ -592,10 +596,10 @@ impl RocksDBProvider { account_history.entry(address).or_default().push(block_number); } } - for (address, blocks) in account_history { - let key = ShardedKey::new(address, u64::MAX); - let value = BlockNumberList::new_pre_sorted(blocks); - batch.put::(key, &value)?; + + // Write account history using proper shard append logic + for (address, indices) in account_history { + batch.append_account_history_shard(address, indices)?; } ctx.pending_batches.lock().push(batch.into_inner()); Ok(()) @@ -620,10 +624,10 @@ impl RocksDBProvider { } } } - for ((address, slot), blocks) in storage_history { - let key = StorageShardedKey::new(address, slot, u64::MAX); - let value = BlockNumberList::new_pre_sorted(blocks); - batch.put::(key, &value)?; + + // Write storage history using proper shard append logic + for ((address, slot), indices) in storage_history { + batch.append_storage_history_shard(address, slot, indices)?; } ctx.pending_batches.lock().push(batch.into_inner()); Ok(()) @@ -714,6 +718,129 @@ impl<'a> RocksDBBatch<'a> { pub fn into_inner(self) -> WriteBatchWithTransaction { self.inner } + + /// Appends indices to an account history shard with proper shard management. + /// + /// Loads the existing shard (if any), appends new indices, and rechunks into + /// multiple shards if needed (respecting `NUM_OF_INDICES_IN_SHARD` limit). + /// + /// # Requirements + /// + /// - The `indices` MUST be strictly increasing and contain no duplicates. + /// - This method MUST only be called once per address per batch. The batch reads existing + /// shards from committed DB state, not from pending writes. Calling twice for the same + /// address will cause the second call to overwrite the first. 
+ pub fn append_account_history_shard( + &mut self, + address: Address, + indices: impl IntoIterator, + ) -> ProviderResult<()> { + let indices: Vec = indices.into_iter().collect(); + + if indices.is_empty() { + return Ok(()); + } + + debug_assert!( + indices.windows(2).all(|w| w[0] < w[1]), + "indices must be strictly increasing: {:?}", + indices + ); + + let last_key = ShardedKey::new(address, u64::MAX); + let last_shard_opt = self.provider.get::(last_key.clone())?; + let mut last_shard = last_shard_opt.unwrap_or_else(BlockNumberList::empty); + + last_shard.append(indices).map_err(ProviderError::other)?; + + // Fast path: all indices fit in one shard + if last_shard.len() <= NUM_OF_INDICES_IN_SHARD as u64 { + self.put::(last_key, &last_shard)?; + return Ok(()); + } + + // Slow path: rechunk into multiple shards + let chunks = last_shard.iter().chunks(NUM_OF_INDICES_IN_SHARD); + let mut chunks_peekable = chunks.into_iter().peekable(); + + while let Some(chunk) = chunks_peekable.next() { + let shard = BlockNumberList::new_pre_sorted(chunk); + let highest_block_number = if chunks_peekable.peek().is_some() { + shard.iter().next_back().expect("`chunks` does not return empty list") + } else { + u64::MAX + }; + + self.put::( + ShardedKey::new(address, highest_block_number), + &shard, + )?; + } + + Ok(()) + } + + /// Appends indices to a storage history shard with proper shard management. + /// + /// Loads the existing shard (if any), appends new indices, and rechunks into + /// multiple shards if needed (respecting `NUM_OF_INDICES_IN_SHARD` limit). + /// + /// # Requirements + /// + /// - The `indices` MUST be strictly increasing and contain no duplicates. + /// - This method MUST only be called once per (address, `storage_key`) pair per batch. The + /// batch reads existing shards from committed DB state, not from pending writes. Calling + /// twice for the same key will cause the second call to overwrite the first. 
+ pub fn append_storage_history_shard( + &mut self, + address: Address, + storage_key: B256, + indices: impl IntoIterator, + ) -> ProviderResult<()> { + let indices: Vec = indices.into_iter().collect(); + + if indices.is_empty() { + return Ok(()); + } + + debug_assert!( + indices.windows(2).all(|w| w[0] < w[1]), + "indices must be strictly increasing: {:?}", + indices + ); + + let last_key = StorageShardedKey::last(address, storage_key); + let last_shard_opt = self.provider.get::(last_key.clone())?; + let mut last_shard = last_shard_opt.unwrap_or_else(BlockNumberList::empty); + + last_shard.append(indices).map_err(ProviderError::other)?; + + // Fast path: all indices fit in one shard + if last_shard.len() <= NUM_OF_INDICES_IN_SHARD as u64 { + self.put::(last_key, &last_shard)?; + return Ok(()); + } + + // Slow path: rechunk into multiple shards + let chunks = last_shard.iter().chunks(NUM_OF_INDICES_IN_SHARD); + let mut chunks_peekable = chunks.into_iter().peekable(); + + while let Some(chunk) = chunks_peekable.next() { + let shard = BlockNumberList::new_pre_sorted(chunk); + let highest_block_number = if chunks_peekable.peek().is_some() { + shard.iter().next_back().expect("`chunks` does not return empty list") + } else { + u64::MAX + }; + + self.put::( + StorageShardedKey::new(address, storage_key, highest_block_number), + &shard, + )?; + } + + Ok(()) + } } /// `RocksDB` transaction wrapper providing MDBX-like semantics. @@ -901,6 +1028,16 @@ impl<'db> RocksTx<'db> { where T: Table, { + // History may be pruned if a lowest available block is set. 
+ let is_maybe_pruned = lowest_available_block_number.is_some(); + let fallback = || { + Ok(if is_maybe_pruned { + HistoryInfo::MaybeInPlainState + } else { + HistoryInfo::NotYetWritten + }) + }; + let cf = self.provider.0.db.cf_handle(T::NAME).ok_or_else(|| { ProviderError::Database(DatabaseError::Other(format!( "column family not found: {}", @@ -918,53 +1055,28 @@ impl<'db> RocksTx<'db> { if !iter.valid() { // No shard found at or after target block. - return if lowest_available_block_number.is_some() { - // The key may have been written, but due to pruning we may not have changesets - // and history, so we need to make a plain state lookup. - Ok(HistoryInfo::MaybeInPlainState) - } else { - // The key has not been written to at all. - Ok(HistoryInfo::NotYetWritten) - }; + // + // (MaybeInPlainState) The key may have been written, but due to pruning we may not have + // changesets and history, so we need to make a plain state lookup. + // (HistoryInfo::NotYetWritten) The key has not been written to at all. + return fallback(); } // Check if the found key matches our target entity. let Some(key_bytes) = iter.key() else { - return if lowest_available_block_number.is_some() { - Ok(HistoryInfo::MaybeInPlainState) - } else { - Ok(HistoryInfo::NotYetWritten) - }; + return fallback(); }; if !key_matches(key_bytes)? { // The found key is for a different entity. - return if lowest_available_block_number.is_some() { - Ok(HistoryInfo::MaybeInPlainState) - } else { - Ok(HistoryInfo::NotYetWritten) - }; + return fallback(); } // Decompress the block list for this shard. let Some(value_bytes) = iter.value() else { - return if lowest_available_block_number.is_some() { - Ok(HistoryInfo::MaybeInPlainState) - } else { - Ok(HistoryInfo::NotYetWritten) - }; + return fallback(); }; let chunk = BlockNumberList::decompress(value_bytes)?; - - // Get the rank of the first entry before or equal to our block. 
- let mut rank = chunk.rank(block_number); - - // Adjust the rank, so that we have the rank of the first entry strictly before our - // block (not equal to it). - if rank.checked_sub(1).and_then(|r| chunk.select(r)) == Some(block_number) { - rank -= 1; - } - - let found_block = chunk.select(rank); + let (rank, found_block) = compute_history_rank(&chunk, block_number); // Lazy check for previous shard - only called when needed. // If we can step to a previous shard for this same key, history already exists, @@ -1103,7 +1215,11 @@ mod tests { use crate::providers::HistoryInfo; use alloy_primitives::{Address, TxHash, B256}; use reth_db_api::{ - models::{sharded_key::ShardedKey, storage_sharded_key::StorageShardedKey, IntegerList}, + models::{ + sharded_key::{ShardedKey, NUM_OF_INDICES_IN_SHARD}, + storage_sharded_key::StorageShardedKey, + IntegerList, + }, table::Table, tables, }; @@ -1452,4 +1568,156 @@ mod tests { tx.rollback().unwrap(); } + + #[test] + fn test_account_history_shard_split_at_boundary() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + let limit = NUM_OF_INDICES_IN_SHARD; + + // Add exactly NUM_OF_INDICES_IN_SHARD + 1 indices to trigger a split + let indices: Vec = (0..=(limit as u64)).collect(); + let mut batch = provider.batch(); + batch.append_account_history_shard(address, indices).unwrap(); + batch.commit().unwrap(); + + // Should have 2 shards: one completed shard and one sentinel shard + let completed_key = ShardedKey::new(address, (limit - 1) as u64); + let sentinel_key = ShardedKey::new(address, u64::MAX); + + let completed_shard = provider.get::(completed_key).unwrap(); + let sentinel_shard = provider.get::(sentinel_key).unwrap(); + + assert!(completed_shard.is_some(), "completed shard should exist"); + assert!(sentinel_shard.is_some(), "sentinel shard should exist"); + + let completed_shard = 
completed_shard.unwrap(); + let sentinel_shard = sentinel_shard.unwrap(); + + assert_eq!(completed_shard.len(), limit as u64, "completed shard should be full"); + assert_eq!(sentinel_shard.len(), 1, "sentinel shard should have 1 element"); + } + + #[test] + fn test_account_history_multiple_shard_splits() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x43; 20]); + let limit = NUM_OF_INDICES_IN_SHARD; + + // First batch: add NUM_OF_INDICES_IN_SHARD indices + let first_batch_indices: Vec = (0..limit as u64).collect(); + let mut batch = provider.batch(); + batch.append_account_history_shard(address, first_batch_indices).unwrap(); + batch.commit().unwrap(); + + // Should have just a sentinel shard (exactly at limit, not over) + let sentinel_key = ShardedKey::new(address, u64::MAX); + let shard = provider.get::(sentinel_key.clone()).unwrap(); + assert!(shard.is_some()); + assert_eq!(shard.unwrap().len(), limit as u64); + + // Second batch: add another NUM_OF_INDICES_IN_SHARD + 1 indices (causing 2 more shards) + let second_batch_indices: Vec = (limit as u64..=(2 * limit) as u64).collect(); + let mut batch = provider.batch(); + batch.append_account_history_shard(address, second_batch_indices).unwrap(); + batch.commit().unwrap(); + + // Now we should have: 2 completed shards + 1 sentinel shard + let first_completed = ShardedKey::new(address, (limit - 1) as u64); + let second_completed = ShardedKey::new(address, (2 * limit - 1) as u64); + + assert!( + provider.get::(first_completed).unwrap().is_some(), + "first completed shard should exist" + ); + assert!( + provider.get::(second_completed).unwrap().is_some(), + "second completed shard should exist" + ); + assert!( + provider.get::(sentinel_key).unwrap().is_some(), + "sentinel shard should exist" + ); + } + + #[test] + fn test_storage_history_shard_split_at_boundary() { + let temp_dir = 
TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x44; 20]); + let slot = B256::from([0x55; 32]); + let limit = NUM_OF_INDICES_IN_SHARD; + + // Add exactly NUM_OF_INDICES_IN_SHARD + 1 indices to trigger a split + let indices: Vec = (0..=(limit as u64)).collect(); + let mut batch = provider.batch(); + batch.append_storage_history_shard(address, slot, indices).unwrap(); + batch.commit().unwrap(); + + // Should have 2 shards: one completed shard and one sentinel shard + let completed_key = StorageShardedKey::new(address, slot, (limit - 1) as u64); + let sentinel_key = StorageShardedKey::new(address, slot, u64::MAX); + + let completed_shard = provider.get::(completed_key).unwrap(); + let sentinel_shard = provider.get::(sentinel_key).unwrap(); + + assert!(completed_shard.is_some(), "completed shard should exist"); + assert!(sentinel_shard.is_some(), "sentinel shard should exist"); + + let completed_shard = completed_shard.unwrap(); + let sentinel_shard = sentinel_shard.unwrap(); + + assert_eq!(completed_shard.len(), limit as u64, "completed shard should be full"); + assert_eq!(sentinel_shard.len(), 1, "sentinel shard should have 1 element"); + } + + #[test] + fn test_storage_history_multiple_shard_splits() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x46; 20]); + let slot = B256::from([0x57; 32]); + let limit = NUM_OF_INDICES_IN_SHARD; + + // First batch: add NUM_OF_INDICES_IN_SHARD indices + let first_batch_indices: Vec = (0..limit as u64).collect(); + let mut batch = provider.batch(); + batch.append_storage_history_shard(address, slot, first_batch_indices).unwrap(); + batch.commit().unwrap(); + + // Should have just a sentinel shard (exactly at limit, not over) + let sentinel_key = StorageShardedKey::new(address, slot, u64::MAX); + let shard 
= provider.get::(sentinel_key.clone()).unwrap(); + assert!(shard.is_some()); + assert_eq!(shard.unwrap().len(), limit as u64); + + // Second batch: add another NUM_OF_INDICES_IN_SHARD + 1 indices (causing 2 more shards) + let second_batch_indices: Vec = (limit as u64..=(2 * limit) as u64).collect(); + let mut batch = provider.batch(); + batch.append_storage_history_shard(address, slot, second_batch_indices).unwrap(); + batch.commit().unwrap(); + + // Now we should have: 2 completed shards + 1 sentinel shard + let first_completed = StorageShardedKey::new(address, slot, (limit - 1) as u64); + let second_completed = StorageShardedKey::new(address, slot, (2 * limit - 1) as u64); + + assert!( + provider.get::(first_completed).unwrap().is_some(), + "first completed shard should exist" + ); + assert!( + provider.get::(second_completed).unwrap().is_some(), + "second completed shard should exist" + ); + assert!( + provider.get::(sentinel_key).unwrap().is_some(), + "sentinel shard should exist" + ); + } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index f9bc61c7eb3..62c74898195 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,12 +1,11 @@ use crate::{ - AccountReader, BlockHashReader, ChangeSetReader, HashedPostStateProvider, ProviderError, - StateProvider, StateRootProvider, + AccountReader, BlockHashReader, ChangeSetReader, EitherReader, HashedPostStateProvider, + ProviderError, RocksDBProviderFactory, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, table::Table, tables, transaction::DbTx, @@ -14,7 +13,8 @@ use reth_db_api::{ }; use reth_primitives_traits::{Account, 
Bytecode}; use reth_storage_api::{ - BlockNumReader, BytecodeReader, DBProvider, StateProofProvider, StorageRootProvider, + BlockNumReader, BytecodeReader, DBProvider, NodePrimitivesProvider, StateProofProvider, + StorageRootProvider, StorageSettingsCache, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -127,38 +127,47 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> Self { provider, block_number, lowest_available_blocks } } - /// Lookup an account in the `AccountsHistory` table - pub fn account_history_lookup(&self, address: Address) -> ProviderResult { + /// Lookup an account in the `AccountsHistory` table using `EitherReader`. + pub fn account_history_lookup(&self, address: Address) -> ProviderResult + where + Provider: StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, + { if !self.lowest_available_blocks.is_account_history_available(self.block_number) { return Err(ProviderError::StateAtBlockPruned(self.block_number)) } - // history key to search IntegerList of block number changesets. - let history_key = ShardedKey::new(address, self.block_number); - self.history_info_lookup::( - history_key, - |key| key.key == address, - self.lowest_available_blocks.account_history_block_number, - ) + self.provider.with_rocksdb_tx(|rocks_tx_ref| { + let mut reader = EitherReader::new_accounts_history(self.provider, rocks_tx_ref)?; + reader.account_history_info( + address, + self.block_number, + self.lowest_available_blocks.account_history_block_number, + ) + }) } - /// Lookup a storage key in the `StoragesHistory` table + /// Lookup a storage key in the `StoragesHistory` table using `EitherReader`. 
pub fn storage_history_lookup( &self, address: Address, storage_key: StorageKey, - ) -> ProviderResult { + ) -> ProviderResult + where + Provider: StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, + { if !self.lowest_available_blocks.is_storage_history_available(self.block_number) { return Err(ProviderError::StateAtBlockPruned(self.block_number)) } - // history key to search IntegerList of block number changesets. - let history_key = StorageShardedKey::new(address, storage_key, self.block_number); - self.history_info_lookup::( - history_key, - |key| key.address == address && key.sharded_key.key == storage_key, - self.lowest_available_blocks.storage_history_block_number, - ) + self.provider.with_rocksdb_tx(|rocks_tx_ref| { + let mut reader = EitherReader::new_storages_history(self.provider, rocks_tx_ref)?; + reader.storage_history_info( + address, + storage_key, + self.block_number, + self.lowest_available_blocks.storage_history_block_number, + ) + }) } /// Checks and returns `true` if distance to historical block exceeds the provided limit. @@ -204,25 +213,6 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?) } - fn history_info_lookup( - &self, - key: K, - key_filter: impl Fn(&K) -> bool, - lowest_available_block_number: Option, - ) -> ProviderResult - where - T: Table, - { - let mut cursor = self.tx().cursor_read::()?; - history_info::( - &mut cursor, - key, - self.block_number, - key_filter, - lowest_available_block_number, - ) - } - /// Set the lowest block number at which the account history is available. 
pub const fn with_lowest_available_account_history_block_number( mut self, @@ -248,8 +238,14 @@ impl HistoricalStateProviderRef<'_, Provi } } -impl AccountReader - for HistoricalStateProviderRef<'_, Provider> +impl< + Provider: DBProvider + + BlockNumReader + + ChangeSetReader + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider, + > AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult> { @@ -404,8 +400,15 @@ impl HashedPostStateProvider for HistoricalStateProviderRef<'_, Provid } } -impl StateProvider - for HistoricalStateProviderRef<'_, Provider> +impl< + Provider: DBProvider + + BlockNumReader + + BlockHashReader + + ChangeSetReader + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider, + > StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. fn storage( @@ -495,7 +498,7 @@ impl HistoricalStatePro } // Delegates all provider impls to [HistoricalStateProviderRef] -reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); +reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. @@ -525,6 +528,32 @@ impl LowestAvailableBlocks { } } +/// Computes the rank and finds the next modification block in a history shard. 
+/// +/// Given a `block_number`, this function returns: +/// - `rank`: The number of entries strictly before `block_number` in the shard +/// - `found_block`: The block number at position `rank` (i.e., the first block >= `block_number` +/// where a modification occurred), or `None` if `rank` is out of bounds +/// +/// The rank is adjusted when `block_number` exactly matches an entry in the shard, +/// so that `found_block` always returns the modification at or after the target. +/// +/// This logic is shared between MDBX cursor-based lookups and `RocksDB` iterator lookups. +#[inline] +pub fn compute_history_rank( + chunk: &reth_db_api::BlockNumberList, + block_number: BlockNumber, +) -> (u64, Option) { + let mut rank = chunk.rank(block_number); + // `rank(block_number)` returns count of entries <= block_number. + // We want the first entry >= block_number, so if block_number is in the shard, + // we need to step back one position to point at it (not past it). + if rank.checked_sub(1).and_then(|r| chunk.select(r)) == Some(block_number) { + rank -= 1; + } + (rank, chunk.select(rank)) +} + /// Checks if a previous shard lookup is needed to determine if we're before the first write. /// /// Returns `true` when `rank == 0` (first entry in shard) and the found block doesn't match @@ -557,16 +586,7 @@ where // index, the first chunk for the next key will be returned so we filter out chunks that // have a different key. if let Some(chunk) = cursor.seek(key)?.filter(|(k, _)| key_filter(k)).map(|x| x.1) { - // Get the rank of the first entry before or equal to our block. - let mut rank = chunk.rank(block_number); - - // Adjust the rank, so that we have the rank of the first entry strictly before our - // block (not equal to it). 
- if rank.checked_sub(1).and_then(|r| chunk.select(r)) == Some(block_number) { - rank -= 1; - } - - let found_block = chunk.select(rank); + let (rank, found_block) = compute_history_rank(&chunk, block_number); // If our block is before the first entry in the index chunk and this first entry // doesn't equal to our block, it might be before the first write ever. To check, we @@ -598,7 +618,8 @@ mod tests { use crate::{ providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, - AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, + AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, RocksDBProviderFactory, + StateProvider, }; use alloy_primitives::{address, b256, Address, B256, U256}; use reth_db_api::{ @@ -610,6 +631,7 @@ mod tests { use reth_primitives_traits::{Account, StorageEntry}; use reth_storage_api::{ BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, + NodePrimitivesProvider, StorageSettingsCache, }; use reth_storage_errors::provider::ProviderError; @@ -621,7 +643,13 @@ mod tests { const fn assert_state_provider() {} #[expect(dead_code)] const fn assert_historical_state_provider< - T: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader, + T: DBProvider + + BlockNumReader + + BlockHashReader + + ChangeSetReader + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider, >() { assert_state_provider::>(); } diff --git a/crates/storage/provider/src/traits/rocksdb_provider.rs b/crates/storage/provider/src/traits/rocksdb_provider.rs index 9d2186677de..3394fa16f67 100644 --- a/crates/storage/provider/src/traits/rocksdb_provider.rs +++ b/crates/storage/provider/src/traits/rocksdb_provider.rs @@ -1,4 +1,5 @@ -use crate::providers::RocksDBProvider; +use crate::{either_writer::RocksTxRefArg, providers::RocksDBProvider}; +use reth_storage_errors::provider::ProviderResult; /// `RocksDB` provider factory. 
/// @@ -13,4 +14,21 @@ pub trait RocksDBProviderFactory { /// commits, ensuring atomicity across all storage backends. #[cfg(all(unix, feature = "rocksdb"))] fn set_pending_rocksdb_batch(&self, batch: rocksdb::WriteBatchWithTransaction); + + /// Executes a closure with a `RocksDB` transaction for reading. + /// + /// This helper encapsulates all the cfg-gated `RocksDB` transaction handling for reads. + fn with_rocksdb_tx(&self, f: F) -> ProviderResult + where + F: FnOnce(RocksTxRefArg<'_>) -> ProviderResult, + { + #[cfg(all(unix, feature = "rocksdb"))] + { + let rocksdb = self.rocksdb_provider(); + let tx = rocksdb.tx(); + f(&tx) + } + #[cfg(not(all(unix, feature = "rocksdb")))] + f(()) + } } From 905bb95f8bff2aee999197f511f99b4aebf2dddb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 16 Jan 2026 19:25:04 +0100 Subject: [PATCH 046/267] perf(engine): defer trie overlay computation with LazyOverlay (#21133) --- crates/chain-state/src/lazy_overlay.rs | 231 ++++++++++++++++++ crates/chain-state/src/lib.rs | 3 + crates/engine/tree/src/tree/metrics.rs | 4 - .../engine/tree/src/tree/payload_validator.rs | 165 ++----------- .../provider/src/providers/state/overlay.rs | 152 ++++++++---- 5 files changed, 366 insertions(+), 189 deletions(-) create mode 100644 crates/chain-state/src/lazy_overlay.rs diff --git a/crates/chain-state/src/lazy_overlay.rs b/crates/chain-state/src/lazy_overlay.rs new file mode 100644 index 00000000000..a0295c9a5b4 --- /dev/null +++ b/crates/chain-state/src/lazy_overlay.rs @@ -0,0 +1,231 @@ +//! Lazy overlay computation for trie input. +//! +//! This module provides [`LazyOverlay`], a type that computes the [`TrieInputSorted`] +//! lazily on first access. This allows execution to start before the trie overlay +//! is fully computed. 
+ +use crate::DeferredTrieData; +use alloy_primitives::B256; +use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, TrieInputSorted}; +use std::sync::{Arc, OnceLock}; +use tracing::{debug, trace}; + +/// Threshold for switching from `extend_ref` loop to `merge_batch`. +/// +/// Benchmarked crossover: `extend_ref` wins up to ~64 blocks, `merge_batch` wins beyond. +const MERGE_BATCH_THRESHOLD: usize = 64; + +/// Inputs captured for lazy overlay computation. +#[derive(Clone)] +struct LazyOverlayInputs { + /// The persisted ancestor hash (anchor) this overlay should be built on. + anchor_hash: B256, + /// Deferred trie data handles for all in-memory blocks (newest to oldest). + blocks: Vec, +} + +/// Lazily computed trie overlay. +/// +/// Captures the inputs needed to compute a [`TrieInputSorted`] and defers the actual +/// computation until first access. This is conceptually similar to [`DeferredTrieData`] +/// but for overlay computation. +/// +/// # Fast Path vs Slow Path +/// +/// - **Fast path**: If the tip block's cached `anchored_trie_input` is ready and its `anchor_hash` +/// matches our expected anchor, we can reuse it directly (O(1)). +/// - **Slow path**: Otherwise, we merge all ancestor blocks' trie data into a new overlay. +#[derive(Clone)] +pub struct LazyOverlay { + /// Computed result, cached after first access. + inner: Arc>, + /// Inputs for lazy computation. + inputs: LazyOverlayInputs, +} + +impl std::fmt::Debug for LazyOverlay { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LazyOverlay") + .field("anchor_hash", &self.inputs.anchor_hash) + .field("num_blocks", &self.inputs.blocks.len()) + .field("computed", &self.inner.get().is_some()) + .finish() + } +} + +impl LazyOverlay { + /// Create a new lazy overlay with the given anchor hash and block handles. 
+ /// + /// # Arguments + /// + /// * `anchor_hash` - The persisted ancestor hash this overlay is built on top of + /// * `blocks` - Deferred trie data handles for in-memory blocks (newest to oldest) + pub fn new(anchor_hash: B256, blocks: Vec) -> Self { + Self { inner: Arc::new(OnceLock::new()), inputs: LazyOverlayInputs { anchor_hash, blocks } } + } + + /// Returns the anchor hash this overlay is built on. + pub const fn anchor_hash(&self) -> B256 { + self.inputs.anchor_hash + } + + /// Returns the number of in-memory blocks this overlay covers. + pub const fn num_blocks(&self) -> usize { + self.inputs.blocks.len() + } + + /// Returns true if the overlay has already been computed. + pub fn is_computed(&self) -> bool { + self.inner.get().is_some() + } + + /// Returns the computed trie input, computing it if necessary. + /// + /// The first call triggers computation (which may block waiting for deferred data). + /// Subsequent calls return the cached result immediately. + pub fn get(&self) -> &TrieInputSorted { + self.inner.get_or_init(|| self.compute()) + } + + /// Returns the overlay as (nodes, state) tuple for use with `OverlayStateProviderFactory`. + pub fn as_overlay(&self) -> (Arc, Arc) { + let input = self.get(); + (Arc::clone(&input.nodes), Arc::clone(&input.state)) + } + + /// Compute the trie input overlay. + fn compute(&self) -> TrieInputSorted { + let anchor_hash = self.inputs.anchor_hash; + let blocks = &self.inputs.blocks; + + if blocks.is_empty() { + debug!(target: "chain_state::lazy_overlay", "No in-memory blocks, returning empty overlay"); + return TrieInputSorted::default(); + } + + // Fast path: Check if tip block's overlay is ready and anchor matches. + // The tip block (first in list) has the cumulative overlay from all ancestors. 
+ if let Some(tip) = blocks.first() { + let data = tip.wait_cloned(); + if let Some(anchored) = &data.anchored_trie_input { + if anchored.anchor_hash == anchor_hash { + trace!(target: "chain_state::lazy_overlay", %anchor_hash, "Reusing tip block's cached overlay (fast path)"); + return (*anchored.trie_input).clone(); + } + debug!( + target: "chain_state::lazy_overlay", + computed_anchor = %anchored.anchor_hash, + %anchor_hash, + "Anchor mismatch, falling back to merge" + ); + } + } + + // Slow path: Merge all blocks' trie data into a new overlay. + debug!(target: "chain_state::lazy_overlay", num_blocks = blocks.len(), "Merging blocks (slow path)"); + Self::merge_blocks(blocks) + } + + /// Merge all blocks' trie data into a single [`TrieInputSorted`]. + /// + /// Blocks are ordered newest to oldest. We iterate oldest to newest so that + /// newer values override older ones. + fn merge_blocks(blocks: &[DeferredTrieData]) -> TrieInputSorted { + if blocks.is_empty() { + return TrieInputSorted::default(); + } + + // Single block: use its data directly + if blocks.len() == 1 { + let data = blocks[0].wait_cloned(); + return TrieInputSorted { + state: Arc::clone(&data.hashed_state), + nodes: Arc::clone(&data.trie_updates), + prefix_sets: Default::default(), + }; + } + + if blocks.len() < MERGE_BATCH_THRESHOLD { + // Small k: extend_ref loop is faster + // Iterate oldest->newest so newer values override older ones + let mut blocks_iter = blocks.iter().rev(); + let first = blocks_iter.next().expect("blocks is non-empty"); + let data = first.wait_cloned(); + + let mut state = Arc::clone(&data.hashed_state); + let mut nodes = Arc::clone(&data.trie_updates); + let state_mut = Arc::make_mut(&mut state); + let nodes_mut = Arc::make_mut(&mut nodes); + + for block in blocks_iter { + let data = block.wait_cloned(); + state_mut.extend_ref(data.hashed_state.as_ref()); + nodes_mut.extend_ref(data.trie_updates.as_ref()); + } + + TrieInputSorted { state, nodes, prefix_sets: 
Default::default() } + } else { + // Large k: merge_batch is faster (O(n log k) via k-way merge) + let trie_data: Vec<_> = blocks.iter().map(|b| b.wait_cloned()).collect(); + + let merged_state = HashedPostStateSorted::merge_batch( + trie_data.iter().map(|d| d.hashed_state.as_ref()), + ); + let merged_nodes = + TrieUpdatesSorted::merge_batch(trie_data.iter().map(|d| d.trie_updates.as_ref())); + + TrieInputSorted { + state: Arc::new(merged_state), + nodes: Arc::new(merged_nodes), + prefix_sets: Default::default(), + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_trie::{updates::TrieUpdates, HashedPostState}; + + fn empty_deferred(anchor: B256) -> DeferredTrieData { + DeferredTrieData::pending( + Arc::new(HashedPostState::default()), + Arc::new(TrieUpdates::default()), + anchor, + Vec::new(), + ) + } + + #[test] + fn empty_blocks_returns_default() { + let overlay = LazyOverlay::new(B256::ZERO, vec![]); + let result = overlay.get(); + assert!(result.state.is_empty()); + assert!(result.nodes.is_empty()); + } + + #[test] + fn single_block_uses_data_directly() { + let anchor = B256::random(); + let deferred = empty_deferred(anchor); + let overlay = LazyOverlay::new(anchor, vec![deferred]); + + assert!(!overlay.is_computed()); + let _ = overlay.get(); + assert!(overlay.is_computed()); + } + + #[test] + fn cached_after_first_access() { + let overlay = LazyOverlay::new(B256::ZERO, vec![]); + + // First access computes + let _ = overlay.get(); + assert!(overlay.is_computed()); + + // Second access uses cache + let _ = overlay.get(); + assert!(overlay.is_computed()); + } +} diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index 7ba3e2316c6..f6abed91467 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -14,6 +14,9 @@ pub use in_memory::*; mod deferred_trie; pub use deferred_trie::*; +mod lazy_overlay; +pub use lazy_overlay::*; + mod noop; mod chain_info; diff --git 
a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 5acf81104ed..0e9685c0910 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -334,10 +334,6 @@ pub(crate) struct BlockValidationMetrics { pub(crate) state_root_histogram: Histogram, /// Histogram of deferred trie computation duration. pub(crate) deferred_trie_compute_duration: Histogram, - /// Histogram of time spent waiting for deferred trie data to become available. - pub(crate) deferred_trie_wait_duration: Histogram, - /// Trie input computation duration - pub(crate) trie_input_duration: Histogram, /// Payload conversion and validation latency pub(crate) payload_validation_duration: Gauge, /// Histogram of payload validation latency diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 2d2dbe1cb72..9160535df6c 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -1,11 +1,5 @@ //! Types and traits for validating blocks and payloads. -/// Threshold for switching from `extend_ref` loop to `merge_batch` in `merge_overlay_trie_input`. -/// -/// Benchmarked crossover: `extend_ref` wins up to ~64 blocks, `merge_batch` wins beyond. -/// Using 64 as threshold since they're roughly equal there. 
-const MERGE_BATCH_THRESHOLD: usize = 64; - use crate::tree::{ cached_state::CachedStateProvider, error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError}, @@ -22,7 +16,7 @@ use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; use alloy_primitives::B256; use rayon::prelude::*; -use reth_chain_state::{CanonicalInMemoryState, DeferredTrieData, ExecutedBlock}; +use reth_chain_state::{CanonicalInMemoryState, DeferredTrieData, ExecutedBlock, LazyOverlay}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{ ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, @@ -46,10 +40,7 @@ use reth_provider::{ StateProviderFactory, StateReader, }; use reth_revm::db::State; -use reth_trie::{ - updates::{TrieUpdates, TrieUpdatesSorted}, - HashedPostState, HashedPostStateSorted, StateRoot, TrieInputSorted, -}; +use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot}; use reth_trie_db::ChangesetCache; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use revm_primitives::Address; @@ -430,26 +421,16 @@ where .map_err(Box::::from)) .map(Arc::new); - // Compute trie input from ancestors once, before spawning payload processor. - // This will be extended with the current block's hashed state after execution. - let trie_input_start = Instant::now(); - let (trie_input, block_hash_for_overlay) = - ensure_ok!(self.compute_trie_input(parent_hash, ctx.state())); - - self.metrics - .block_validation - .trie_input_duration - .record(trie_input_start.elapsed().as_secs_f64()); + // Create lazy overlay from ancestors - this doesn't block, allowing execution to start + // before the trie data is ready. The overlay will be computed on first access. 
+ let (lazy_overlay, anchor_hash) = Self::get_parent_lazy_overlay(parent_hash, ctx.state()); // Create overlay factory for payload processor (StateRootTask path needs it for // multiproofs) - let overlay_factory = { - let TrieInputSorted { nodes, state, .. } = &trie_input; + let overlay_factory = OverlayStateProviderFactory::new(self.provider.clone(), self.changeset_cache.clone()) - .with_block_hash(Some(block_hash_for_overlay)) - .with_trie_overlay(Some(Arc::clone(nodes))) - .with_hashed_state_overlay(Some(Arc::clone(state))) - }; + .with_block_hash(Some(anchor_hash)) + .with_lazy_overlay(lazy_overlay); // Spawn the appropriate processor based on strategy let mut handle = ensure_ok!(self.spawn_payload_processor( @@ -953,128 +934,36 @@ where self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates); } - /// Computes [`TrieInputSorted`] for the provided parent hash by combining database state - /// with in-memory overlays. + /// Creates a [`LazyOverlay`] for the parent block without blocking. /// - /// The goal of this function is to take in-memory blocks and generate a [`TrieInputSorted`] - /// that extends from the highest persisted ancestor up through the parent. This enables state - /// root computation and proof generation without requiring all blocks to be persisted - /// first. + /// Returns a lazy overlay that will compute the trie input on first access, and the anchor + /// block hash (the highest persisted ancestor). This allows execution to start immediately + /// while the trie input computation is deferred until the overlay is actually needed. /// - /// It works as follows: - /// 1. Collect in-memory overlay blocks using [`crate::tree::TreeState::blocks_by_hash`]. This - /// returns the highest persisted ancestor hash (`block_hash`) and the list of in-memory - /// blocks building on top of it. - /// 2. 
Fast path: If the tip in-memory block's trie input is already anchored to `block_hash` - /// (its `anchor_hash` matches `block_hash`), reuse it directly. - /// 3. Slow path: Build a new [`TrieInputSorted`] by aggregating the overlay blocks (from oldest - /// to newest) on top of the database state at `block_hash`. - #[instrument( - level = "debug", - target = "engine::tree::payload_validator", - skip_all, - fields(parent_hash) - )] - fn compute_trie_input( - &self, + /// If parent is on disk (no in-memory blocks), returns `None` for the lazy overlay. + fn get_parent_lazy_overlay( parent_hash: B256, state: &EngineApiTreeState, - ) -> ProviderResult<(TrieInputSorted, B256)> { - let wait_start = Instant::now(); - let (block_hash, blocks) = + ) -> (Option, B256) { + let (anchor_hash, blocks) = state.tree_state.blocks_by_hash(parent_hash).unwrap_or_else(|| (parent_hash, vec![])); - // Fast path: if the tip block's anchor matches the persisted ancestor hash, reuse its - // TrieInput. This means the TrieInputSorted already aggregates all in-memory overlays - // from that ancestor, so we can avoid re-aggregation. - if let Some(tip_block) = blocks.first() { - let data = tip_block.trie_data(); - if let (Some(anchor_hash), Some(trie_input)) = - (data.anchor_hash(), data.trie_input().cloned()) && - anchor_hash == block_hash - { - trace!(target: "engine::tree::payload_validator", %block_hash,"Reusing trie input with matching anchor hash"); - self.metrics - .block_validation - .deferred_trie_wait_duration - .record(wait_start.elapsed().as_secs_f64()); - return Ok(((*trie_input).clone(), block_hash)); - } - } - - if blocks.is_empty() { - debug!(target: "engine::tree::payload_validator", "Parent found on disk"); - } else { - debug!(target: "engine::tree::payload_validator", historical = ?block_hash, blocks = blocks.len(), "Parent found in memory"); - } - - // Extend with contents of parent in-memory blocks directly in sorted form. 
- let input = Self::merge_overlay_trie_input(&blocks); - - self.metrics - .block_validation - .deferred_trie_wait_duration - .record(wait_start.elapsed().as_secs_f64()); - Ok((input, block_hash)) - } - - /// Aggregates in-memory blocks into a single [`TrieInputSorted`] by combining their - /// state changes. - /// - /// The input `blocks` vector is ordered newest -> oldest (see `TreeState::blocks_by_hash`). - /// - /// Uses `extend_ref` loop for small k, k-way `merge_batch` for large k. - /// See [`MERGE_BATCH_THRESHOLD`] for crossover point. - fn merge_overlay_trie_input(blocks: &[ExecutedBlock]) -> TrieInputSorted { if blocks.is_empty() { - return TrieInputSorted::default(); + debug!(target: "engine::tree::payload_validator", "Parent found on disk, no lazy overlay needed"); + return (None, anchor_hash); } - // Single block: return Arc directly without cloning - if blocks.len() == 1 { - let data = blocks[0].trie_data(); - return TrieInputSorted { - state: Arc::clone(&data.hashed_state), - nodes: Arc::clone(&data.trie_updates), - prefix_sets: Default::default(), - }; - } - - if blocks.len() < MERGE_BATCH_THRESHOLD { - // Small k: extend_ref loop is faster - // Iterate oldest->newest so newer values override older ones - let mut blocks_iter = blocks.iter().rev(); - let first = blocks_iter.next().expect("blocks is non-empty"); - let data = first.trie_data(); - - let mut state = Arc::clone(&data.hashed_state); - let mut nodes = Arc::clone(&data.trie_updates); - let state_mut = Arc::make_mut(&mut state); - let nodes_mut = Arc::make_mut(&mut nodes); - - for block in blocks_iter { - let data = block.trie_data(); - state_mut.extend_ref(data.hashed_state.as_ref()); - nodes_mut.extend_ref(data.trie_updates.as_ref()); - } - - TrieInputSorted { state, nodes, prefix_sets: Default::default() } - } else { - // Large k: merge_batch is faster (O(n log k) via k-way merge) - let trie_data: Vec<_> = blocks.iter().map(|b| b.trie_data()).collect(); + debug!( + target: 
"engine::tree::payload_validator", + %anchor_hash, + num_blocks = blocks.len(), + "Creating lazy overlay for in-memory blocks" + ); - let merged_state = HashedPostStateSorted::merge_batch( - trie_data.iter().map(|d| d.hashed_state.as_ref()), - ); - let merged_nodes = - TrieUpdatesSorted::merge_batch(trie_data.iter().map(|d| d.trie_updates.as_ref())); + // Extract deferred trie data handles (non-blocking) + let handles: Vec = blocks.iter().map(|b| b.trie_data_handle()).collect(); - TrieInputSorted { - state: Arc::new(merged_state), - nodes: Arc::new(merged_nodes), - prefix_sets: Default::default(), - } - } + (Some(LazyOverlay::new(anchor_hash, handles)), anchor_hash) } /// Spawns a background task to compute and sort trie data for the executed block. diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 5c7877f7b14..23e972938c9 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -1,6 +1,7 @@ use alloy_primitives::{BlockNumber, B256}; use metrics::{Counter, Histogram}; use parking_lot::RwLock; +use reth_chain_state::LazyOverlay; use reth_db_api::DatabaseError; use reth_errors::{ProviderError, ProviderResult}; use reth_metrics::Metrics; @@ -53,6 +54,35 @@ struct Overlay { hashed_post_state: Arc, } +/// Source of overlay data for [`OverlayStateProviderFactory`]. +/// +/// Either provides immediate pre-computed overlay data, or a lazy overlay that computes +/// on first access. +#[derive(Debug, Clone)] +pub enum OverlaySource { + /// Immediate overlay with already-computed data. + Immediate { + /// Trie updates overlay. + trie: Arc, + /// Hashed state overlay. + state: Arc, + }, + /// Lazy overlay computed on first access. + Lazy(LazyOverlay), +} + +impl OverlaySource { + /// Resolve the overlay source into (trie, state) tuple. + /// + /// For lazy overlays, this may block waiting for deferred data. 
+ fn resolve(&self) -> (Arc, Arc) { + match self { + Self::Immediate { trie, state } => (Arc::clone(trie), Arc::clone(state)), + Self::Lazy(lazy) => lazy.as_overlay(), + } + } +} + /// Factory for creating overlay state providers with optional reverts and overlays. /// /// This factory allows building an `OverlayStateProvider` whose DB state has been reverted to a @@ -63,10 +93,8 @@ pub struct OverlayStateProviderFactory { factory: F, /// Optional block hash for collecting reverts block_hash: Option, - /// Optional trie overlay - trie_overlay: Option>, - /// Optional hashed state overlay - hashed_state_overlay: Option>, + /// Optional overlay source (lazy or immediate). + overlay_source: Option, /// Changeset cache handle for retrieving trie changesets changeset_cache: ChangesetCache, /// Metrics for tracking provider operations @@ -82,8 +110,7 @@ impl OverlayStateProviderFactory { Self { factory, block_hash: None, - trie_overlay: None, - hashed_state_overlay: None, + overlay_source: None, changeset_cache, metrics: OverlayStateProviderMetrics::default(), overlay_cache: Default::default(), @@ -97,31 +124,59 @@ impl OverlayStateProviderFactory { self } - /// Set the trie overlay. + /// Set the overlay source (lazy or immediate). /// /// This overlay will be applied on top of any reverts applied via `with_block_hash`. - pub fn with_trie_overlay(mut self, trie_overlay: Option>) -> Self { - self.trie_overlay = trie_overlay; + pub fn with_overlay_source(mut self, source: Option) -> Self { + self.overlay_source = source; + self + } + + /// Set a lazy overlay that will be computed on first access. + /// + /// Convenience method that wraps the lazy overlay in `OverlaySource::Lazy`. + pub fn with_lazy_overlay(mut self, lazy_overlay: Option) -> Self { + self.overlay_source = lazy_overlay.map(OverlaySource::Lazy); self } - /// Set the hashed state overlay + /// Set the hashed state overlay. 
/// /// This overlay will be applied on top of any reverts applied via `with_block_hash`. pub fn with_hashed_state_overlay( mut self, hashed_state_overlay: Option>, ) -> Self { - self.hashed_state_overlay = hashed_state_overlay; + if let Some(state) = hashed_state_overlay { + self.overlay_source = Some(OverlaySource::Immediate { + trie: Arc::new(TrieUpdatesSorted::default()), + state, + }); + } self } /// Extends the existing hashed state overlay with the given [`HashedPostStateSorted`]. + /// + /// If no overlay exists, creates a new immediate overlay with the given state. + /// If a lazy overlay exists, it is resolved first then extended. pub fn with_extended_hashed_state_overlay(mut self, other: HashedPostStateSorted) -> Self { - if let Some(overlay) = self.hashed_state_overlay.as_mut() { - Arc::make_mut(overlay).extend_ref(&other); - } else { - self.hashed_state_overlay = Some(Arc::new(other)) + match &mut self.overlay_source { + Some(OverlaySource::Immediate { state, .. }) => { + Arc::make_mut(state).extend_ref(&other); + } + Some(OverlaySource::Lazy(lazy)) => { + // Resolve lazy overlay and convert to immediate with extension + let (trie, mut state) = lazy.as_overlay(); + Arc::make_mut(&mut state).extend_ref(&other); + self.overlay_source = Some(OverlaySource::Immediate { trie, state }); + } + None => { + self.overlay_source = Some(OverlaySource::Immediate { + trie: Arc::new(TrieUpdatesSorted::default()), + state: Arc::new(other), + }); + } } self } @@ -136,6 +191,19 @@ where + DBProvider + BlockNumReader, { + /// Resolves the effective overlay (trie updates, hashed state). + /// + /// If an overlay source is set, it is resolved (blocking if lazy). + /// Otherwise, returns empty defaults. 
+ fn resolve_overlays(&self) -> (Arc, Arc) { + match &self.overlay_source { + Some(source) => source.resolve(), + None => { + (Arc::new(TrieUpdatesSorted::default()), Arc::new(HashedPostStateSorted::default())) + } + } + } + /// Returns the block number for [`Self`]'s `block_hash` field, if any. fn get_requested_block_number( &self, @@ -267,26 +335,26 @@ where res }; - // Extend with overlays if provided. If the reverts are empty we should just use the - // overlays directly, because `extend_ref` will actually clone the overlay. - let trie_updates = match self.trie_overlay.as_ref() { - Some(trie_overlay) if trie_reverts.is_empty() => Arc::clone(trie_overlay), - Some(trie_overlay) => { - trie_reverts.extend_ref(trie_overlay); - Arc::new(trie_reverts) - } - None => Arc::new(trie_reverts), + // Resolve overlays (lazy or immediate) and extend reverts with them. + // If reverts are empty, use overlays directly to avoid cloning. + let (overlay_trie, overlay_state) = self.resolve_overlays(); + + let trie_updates = if trie_reverts.is_empty() { + overlay_trie + } else if !overlay_trie.is_empty() { + trie_reverts.extend_ref(&overlay_trie); + Arc::new(trie_reverts) + } else { + Arc::new(trie_reverts) }; - let hashed_state_updates = match self.hashed_state_overlay.as_ref() { - Some(hashed_state_overlay) if hashed_state_reverts.is_empty() => { - Arc::clone(hashed_state_overlay) - } - Some(hashed_state_overlay) => { - hashed_state_reverts.extend_ref(hashed_state_overlay); - Arc::new(hashed_state_reverts) - } - None => Arc::new(hashed_state_reverts), + let hashed_state_updates = if hashed_state_reverts.is_empty() { + overlay_state + } else if !overlay_state.is_empty() { + hashed_state_reverts.extend_ref(&overlay_state); + Arc::new(hashed_state_reverts) + } else { + Arc::new(hashed_state_reverts) }; trie_updates_total_len = trie_updates.total_len(); @@ -303,13 +371,8 @@ where (trie_updates, hashed_state_updates) } else { - // If no block_hash, use overlays directly or defaults - 
let trie_updates = - self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default())); - let hashed_state = self - .hashed_state_overlay - .clone() - .unwrap_or_else(|| Arc::new(HashedPostStateSorted::default())); + // If no block_hash, use overlays directly (resolving lazy if set) + let (trie_updates, hashed_state) = self.resolve_overlays(); retrieve_trie_reverts_duration = Duration::ZERO; retrieve_hashed_state_reverts_duration = Duration::ZERO; @@ -337,14 +400,9 @@ where #[instrument(level = "debug", target = "providers::state::overlay", skip_all)] fn get_overlay(&self, provider: &F::Provider) -> ProviderResult { // If we have no anchor block configured then we will never need to get trie reverts, just - // return the in-memory overlay. + // return the in-memory overlay (resolving lazy overlay if set). if self.block_hash.is_none() { - let trie_updates = - self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default())); - let hashed_post_state = self - .hashed_state_overlay - .clone() - .unwrap_or_else(|| Arc::new(HashedPostStateSorted::default())); + let (trie_updates, hashed_post_state) = self.resolve_overlays(); return Ok(Overlay { trie_updates, hashed_post_state }) } From a1646541459fb9b39e352a0e176369f592a29e50 Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Sat, 17 Jan 2026 00:12:23 +0530 Subject: [PATCH 047/267] fix(exex): prevent ExExManager deadlock when buffer clears after being full (#21135) --- crates/exex/exex/src/manager.rs | 86 +++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index b57beb8d5cf..b28aef51246 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -503,6 +503,7 @@ where } break } + let buffer_full = this.buffer.len() >= this.max_capacity; // Update capacity this.update_capacity(); @@ -536,6 +537,12 @@ where // Update capacity this.update_capacity(); + // If the buffer was 
full and we made space, we need to wake up to accept new notifications + if buffer_full && this.buffer.len() < this.max_capacity { + debug!(target: "exex::manager", "Buffer has space again, waking up senders"); + cx.waker().wake_by_ref(); + } + // Update watch channel block number let finished_height = this.exex_handles.iter_mut().try_fold(u64::MAX, |curr, exex| { exex.finished_height.map_or(Err(()), |height| Ok(height.number.min(curr))) @@ -1443,4 +1450,83 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_deadlock_manager_wakes_after_buffer_clears() { + // This test simulates the scenario where the buffer fills up, ingestion pauses, + // and then space clears. We verify the manager wakes up to process pending items. + + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let provider_factory = create_test_provider_factory(); + init_genesis(&provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory.clone()).unwrap(); + + // 1. 
Setup Manager with a small capacity (max_capacity = 2) + let (exex_handle, _, mut notifications) = ExExHandle::new( + "test_exex".to_string(), + Default::default(), + provider, + EthEvmConfig::mainnet(), + wal.handle(), + ); + + let max_capacity = 2; + let exex_manager = ExExManager::new( + provider_factory, + vec![exex_handle], + max_capacity, + wal, + empty_finalized_header_stream(), + ); + + let manager_handle = exex_manager.handle(); + + // Spawn manager in background so it runs continuously + tokio::spawn(async move { + exex_manager.await.ok(); + }); + + // Helper to create notifications + let mut rng = generators::rng(); + let mut make_notif = |id: u64| { + let block = random_block(&mut rng, id, BlockParams::default()).try_recover().unwrap(); + ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![block], + Default::default(), + Default::default(), + Default::default(), + )), + } + }; + + manager_handle.send(ExExNotificationSource::Pipeline, make_notif(1)).unwrap(); + + // Send the "Stuck" Item (Notification #100). + // At this point, the Manager loop has skipped the ingestion logic because buffer is full + // (buffer_full=true). This item sits in the unbounded 'handle_rx' channel waiting. + manager_handle.send(ExExNotificationSource::Pipeline, make_notif(100)).unwrap(); + + // 3. Relieve Pressure + // We consume items from the ExEx. + // As we pull items out, the ExEx frees space -> Manager sends buffered item -> Manager + // frees space. Once Manager frees space, the FIX (wake_by_ref) should trigger, + // causing it to read Notif #100. + + // Consume the jam + let _ = notifications.next().await.unwrap(); + + // 4. Assert No Deadlock + // We expect Notification #100 next. + // If the wake_by_ref fix is missing, this will Time Out because the manager is sleeping + // despite having empty buffer. + let result = + tokio::time::timeout(std::time::Duration::from_secs(1), notifications.next()).await; + + assert!( + result.is_ok(), + "Deadlock detected! 
Manager failed to wake up and process Pending Item #100." + ); + } } From b81e373d782e50cbfcc2734af6207fd9fc1b5981 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 16 Jan 2026 20:00:43 +0100 Subject: [PATCH 048/267] chore(deps): bump vergen and vergen-git2 to 9.1.0 (#21141) --- Cargo.lock | 42 +++++++++++++++++++++++++++++++++--------- Cargo.toml | 4 ++-- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0cf63c85e4d..cf087d74270 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2051,6 +2051,16 @@ dependencies = [ "serde", ] +[[package]] +name = "cargo-platform" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87a0c0e6148f11f01f32650a2ea02d532b2ad4e81d8bd41e6e565b5adc5e6082" +dependencies = [ + "serde", + "serde_core", +] + [[package]] name = "cargo_metadata" version = "0.14.2" @@ -2058,7 +2068,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", - "cargo-platform", + "cargo-platform 0.1.9", "semver 1.0.27", "serde", "serde_json", @@ -2071,7 +2081,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", - "cargo-platform", + "cargo-platform 0.1.9", + "semver 1.0.27", + "serde", + "serde_json", + "thiserror 2.0.17", +] + +[[package]] +name = "cargo_metadata" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef987d17b0a113becdd19d3d0022d04d7ef41f9efe4f3fb63ac44ba61df3ade9" +dependencies = [ + "camino", + "cargo-platform 0.3.2", "semver 1.0.27", "serde", "serde_json", @@ -13618,12 +13642,12 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" -version = "9.0.6" +version = "9.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" +checksum = "b849a1f6d8639e8de261e81ee0fc881e3e3620db1af9f2e0da015d4382ceaf75" dependencies = [ "anyhow", - "cargo_metadata 0.19.2", + "cargo_metadata 0.23.1", "derive_builder", "regex", "rustversion", @@ -13633,9 +13657,9 @@ dependencies = [ [[package]] name = "vergen-git2" -version = "1.0.7" +version = "9.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1" +checksum = "d51ab55ddf1188c8d679f349775362b0fa9e90bd7a4ac69838b2a087623f0d57" dependencies = [ "anyhow", "derive_builder", @@ -13648,9 +13672,9 @@ dependencies = [ [[package]] name = "vergen-lib" -version = "0.1.6" +version = "9.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166" +checksum = "b34a29ba7e9c59e62f229ae1932fb1b8fb8a6fdcc99215a641913f5f5a59a569" dependencies = [ "anyhow", "derive_builder", diff --git a/Cargo.toml b/Cargo.toml index fa6ae2f84ff..c9a3ba0d93c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -739,10 +739,10 @@ tracing-subscriber = { version = "0.3", default-features = false } tracing-tracy = "0.11" triehash = "0.8" typenum = "1.15.0" -vergen = "9.0.4" +vergen = "9.1.0" visibility = "0.1.1" walkdir = "2.3.3" -vergen-git2 = "1.0.5" +vergen-git2 = "9.1.0" # networking ipnet = "2.11" From 6e6415690c6af1ea89cc2dc55c2eaa62139fe0c9 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 16 Jan 2026 18:55:18 +0000 Subject: [PATCH 049/267] perf: start saving cache sooner (#21130) --- .../tree/src/tree/payload_processor/mod.rs | 19 ++++++++++--- .../src/tree/payload_processor/prewarm.rs | 28 +++++++++++++------ .../engine/tree/src/tree/payload_validator.rs | 20 ++++++++----- 3 files changed, 48 insertions(+), 19 deletions(-) diff --git 
a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 73924117059..f2147ce93ba 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -668,10 +668,12 @@ impl PayloadHandle { /// If the [`BlockExecutionOutput`] is provided it will update the shared cache using its /// bundle state. Using `Arc` allows sharing with the main execution /// path without cloning the expensive `BundleState`. + /// + /// Returns a sender for the channel that should be notified on block validation success. pub(super) fn terminate_caching( &mut self, execution_outcome: Option>>, - ) { + ) -> Option> { self.prewarm_handle.terminate_caching(execution_outcome) } @@ -709,13 +711,19 @@ impl CacheTaskHandle { /// /// If the [`BlockExecutionOutput`] is provided it will update the shared cache using its /// bundle state. Using `Arc` avoids cloning the expensive `BundleState`. + #[must_use = "sender must be used and notified on block validation success"] pub(super) fn terminate_caching( &mut self, execution_outcome: Option>>, - ) { + ) -> Option> { if let Some(tx) = self.to_prewarm_task.take() { - let event = PrewarmTaskEvent::Terminate { execution_outcome }; + let (valid_block_tx, valid_block_rx) = mpsc::channel(); + let event = PrewarmTaskEvent::Terminate { execution_outcome, valid_block_rx }; let _ = tx.send(event); + + Some(valid_block_tx) + } else { + None } } } @@ -724,7 +732,10 @@ impl Drop for CacheTaskHandle { fn drop(&mut self) { // Ensure we always terminate on drop - send None without needing Send + Sync bounds if let Some(tx) = self.to_prewarm_task.take() { - let _ = tx.send(PrewarmTaskEvent::Terminate { execution_outcome: None }); + let _ = tx.send(PrewarmTaskEvent::Terminate { + execution_outcome: None, + valid_block_rx: mpsc::channel().1, + }); } } } diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs 
b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 99c689dd196..aecc2249657 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -261,7 +261,11 @@ where /// /// This method is called from `run()` only after all execution tasks are complete. #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] - fn save_cache(self, execution_outcome: Arc>) { + fn save_cache( + self, + execution_outcome: Arc>, + valid_block_rx: mpsc::Receiver<()>, + ) { let start = Instant::now(); let Self { execution_cache, ctx: PrewarmContext { env, metrics, saved_cache, .. }, .. } = @@ -288,9 +292,11 @@ where new_cache.update_metrics(); - // Replace the shared cache with the new one; the previous cache (if any) is - // dropped. - *cached = Some(new_cache); + if valid_block_rx.recv().is_ok() { + // Replace the shared cache with the new one; the previous cache (if any) is + // dropped. 
+ *cached = Some(new_cache); + } }); let elapsed = start.elapsed(); @@ -421,9 +427,10 @@ where // completed executing a set of transactions self.send_multi_proof_targets(proof_targets); } - PrewarmTaskEvent::Terminate { execution_outcome } => { + PrewarmTaskEvent::Terminate { execution_outcome, valid_block_rx } => { trace!(target: "engine::tree::payload_processor::prewarm", "Received termination signal"); - final_execution_outcome = Some(execution_outcome); + final_execution_outcome = + Some(execution_outcome.map(|outcome| (outcome, valid_block_rx))); if finished_execution { // all tasks are done, we can exit, which will save caches and exit @@ -448,8 +455,8 @@ where debug!(target: "engine::tree::payload_processor::prewarm", "Completed prewarm execution"); // save caches and finish using the shared ExecutionOutcome - if let Some(Some(execution_outcome)) = final_execution_outcome { - self.save_cache(execution_outcome); + if let Some(Some((execution_outcome, valid_block_rx))) = final_execution_outcome { + self.save_cache(execution_outcome, valid_block_rx); } } } @@ -813,6 +820,11 @@ pub(super) enum PrewarmTaskEvent { /// The final execution outcome. Using `Arc` allows sharing with the main execution /// path without cloning the expensive `BundleState`. execution_outcome: Option>>, + /// Receiver for the block validation result. + /// + /// Cache saving is racing the state root validation. We optimistically construct the + /// updated cache but only save it once we know the block is valid. 
+ valid_block_rx: mpsc::Receiver<()>, }, /// The outcome of a pre-warm task Outcome { diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 9160535df6c..6575463f0c0 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -462,6 +462,15 @@ where // After executing the block we can stop prewarming transactions handle.stop_prewarming_execution(); + // Create ExecutionOutcome early so we can terminate caching before validation and state + // root computation. Using Arc allows sharing with both the caching task and the deferred + // trie task without cloning the expensive BundleState. + let output = Arc::new(output); + + // Terminate caching task early since execution is complete and caching is no longer + // needed. This frees up resources while state root computation continues. + let valid_block_tx = handle.terminate_caching(Some(output.clone())); + let block = self.convert_to_block(input)?.with_senders(senders); let hashed_state = ensure_ok_post_block!( @@ -564,16 +573,13 @@ where .into()) } - // Create ExecutionOutcome and wrap in Arc for sharing with both the caching task - // and the deferred trie task. This avoids cloning the expensive BundleState. 
- let execution_outcome = Arc::new(output); - - // Terminate prewarming task with the shared execution outcome - handle.terminate_caching(Some(Arc::clone(&execution_outcome))); + if let Some(valid_block_tx) = valid_block_tx { + let _ = valid_block_tx.send(()); + } Ok(self.spawn_deferred_trie_task( block, - execution_outcome, + output, &ctx, hashed_state, trie_output, From 13707faf1ad0ef2bd24c28997c9a3a2692c5a190 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 16 Jan 2026 20:53:59 +0100 Subject: [PATCH 050/267] feat(consensus): incremental receipt root computation in background task (#21131) --- Cargo.lock | 1 + crates/cli/commands/src/re_execute.rs | 2 +- crates/consensus/consensus/src/lib.rs | 10 + crates/consensus/consensus/src/noop.rs | 3 +- crates/consensus/consensus/src/test_utils.rs | 3 +- crates/engine/tree/Cargo.toml | 2 + crates/engine/tree/src/tree/metrics.rs | 17 +- .../tree/src/tree/payload_processor/mod.rs | 1 + .../payload_processor/receipt_root_task.rs | 250 +++++++++++++ .../engine/tree/src/tree/payload_validator.rs | 71 +++- crates/ethereum/consensus/src/lib.rs | 11 +- crates/ethereum/consensus/src/validation.rs | 37 +- crates/optimism/consensus/src/lib.rs | 11 +- .../optimism/consensus/src/validation/mod.rs | 49 ++- crates/rpc/rpc/src/validation.rs | 2 +- crates/stages/stages/src/stages/execution.rs | 2 +- crates/stateless/src/validation.rs | 10 +- crates/trie/common/src/lib.rs | 3 + crates/trie/common/src/ordered_root.rs | 354 ++++++++++++++++++ testing/ef-tests/src/cases/blockchain_test.rs | 2 +- 20 files changed, 784 insertions(+), 57 deletions(-) create mode 100644 crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs create mode 100644 crates/trie/common/src/ordered_root.rs diff --git a/Cargo.lock b/Cargo.lock index cf087d74270..17e53c81377 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8506,6 +8506,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-common", "reth-trie-db", 
"reth-trie-parallel", "reth-trie-sparse", diff --git a/crates/cli/commands/src/re_execute.rs b/crates/cli/commands/src/re_execute.rs index 41b6afdbc30..742b6ce76d9 100644 --- a/crates/cli/commands/src/re_execute.rs +++ b/crates/cli/commands/src/re_execute.rs @@ -152,7 +152,7 @@ impl }; if let Err(err) = consensus - .validate_block_post_execution(&block, &result) + .validate_block_post_execution(&block, &result, None) .wrap_err_with(|| { format!("Failed to validate block {} {}", block.number(), block.hash()) }) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 1911b095c38..319a7a0ffb6 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -15,6 +15,12 @@ use alloc::{boxed::Box, fmt::Debug, string::String, sync::Arc, vec::Vec}; use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256}; use core::error::Error; + +/// Pre-computed receipt root and logs bloom. +/// +/// When provided to [`FullConsensus::validate_block_post_execution`], this allows skipping +/// the receipt root computation and using the pre-computed values instead. +pub type ReceiptRootBloom = (B256, Bloom); use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{ constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, @@ -39,11 +45,15 @@ pub trait FullConsensus: Consensus { /// /// See the Yellow Paper sections 4.3.2 "Holistic Validity". /// + /// If `receipt_root_bloom` is provided, the implementation should use the pre-computed + /// receipt root and logs bloom instead of computing them from the receipts. 
+ /// /// Note: validating blocks does not include other validations of the Consensus fn validate_block_post_execution( &self, block: &RecoveredBlock, result: &BlockExecutionResult, + receipt_root_bloom: Option, ) -> Result<(), ConsensusError>; } diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 3e3341769df..08fe08e96e3 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -18,7 +18,7 @@ //! //! **Not for production use** - provides no security guarantees or consensus validation. -use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, ReceiptRootBloom}; use alloc::sync::Arc; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; @@ -76,6 +76,7 @@ impl FullConsensus for NoopConsensus { &self, _block: &RecoveredBlock, _result: &BlockExecutionResult, + _receipt_root_bloom: Option, ) -> Result<(), ConsensusError> { Ok(()) } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 94a178abde7..b2a1fc71f0e 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,4 +1,4 @@ -use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, ReceiptRootBloom}; use core::sync::atomic::{AtomicBool, Ordering}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; @@ -51,6 +51,7 @@ impl FullConsensus for TestConsensus { &self, _block: &RecoveredBlock, _result: &BlockExecutionResult, + _receipt_root_bloom: Option, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git 
a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 006233c1908..50122c10ff0 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -34,6 +34,7 @@ reth-trie-parallel.workspace = true reth-trie-sparse = { workspace = true, features = ["std", "metrics"] } reth-trie-sparse-parallel = { workspace = true, features = ["std"] } reth-trie.workspace = true +reth-trie-common.workspace = true reth-trie-db.workspace = true # alloy @@ -134,6 +135,7 @@ test-utils = [ "reth-static-file", "reth-tracing", "reth-trie/test-utils", + "reth-trie-common/test-utils", "reth-trie-db/test-utils", "reth-trie-sparse/test-utils", "reth-prune-types?/test-utils", diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 0e9685c0910..a11064ebd54 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -60,16 +60,22 @@ impl EngineApiMetrics { /// /// This method updates metrics for execution time, gas usage, and the number /// of accounts, storage slots and bytecodes loaded and updated. - pub(crate) fn execute_metered( + /// + /// The optional `on_receipt` callback is invoked after each transaction with the receipt + /// index and a reference to all receipts collected so far. This allows callers to stream + /// receipts to a background task for incremental receipt root computation. + pub(crate) fn execute_metered( &self, executor: E, mut transactions: impl Iterator, BlockExecutionError>>, transaction_count: usize, state_hook: Box, + mut on_receipt: F, ) -> Result<(BlockExecutionOutput, Vec

), BlockExecutionError> where DB: alloy_evm::Database, E: BlockExecutor>>, Transaction: SignedTransaction>, + F: FnMut(&[E::Receipt]), { // clone here is cheap, all the metrics are Option>. additionally // they are globally registered so that the data recorded in the hook will @@ -103,6 +109,9 @@ impl EngineApiMetrics { let gas_used = executor.execute_transaction(tx)?; self.executor.transaction_execution_histogram.record(start.elapsed()); + // Invoke callback with the latest receipt + on_receipt(executor.receipts()); + // record the tx gas used enter.record("gas_used", gas_used); } @@ -536,11 +545,12 @@ mod tests { let executor = MockExecutor::new(state); // This will fail to create the EVM but should still call the hook - let _result = metrics.execute_metered::<_, EmptyDB>( + let _result = metrics.execute_metered::<_, EmptyDB, _>( executor, input.clone_transactions_recovered().map(Ok::<_, BlockExecutionError>), input.transaction_count(), state_hook, + |_| {}, ); // Check if hook was called (it might not be if finish() fails early) @@ -595,11 +605,12 @@ mod tests { let executor = MockExecutor::new(state); // Execute (will fail but should still update some metrics) - let _result = metrics.execute_metered::<_, EmptyDB>( + let _result = metrics.execute_metered::<_, EmptyDB, _>( executor, input.clone_transactions_recovered().map(Ok::<_, BlockExecutionError>), input.transaction_count(), state_hook, + |_| {}, ); let snapshot = snapshotter.snapshot().into_vec(); diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index f2147ce93ba..ed179afa8b2 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -61,6 +61,7 @@ mod configured_sparse_trie; pub mod executor; pub mod multiproof; pub mod prewarm; +pub mod receipt_root_task; pub mod sparse_trie; use configured_sparse_trie::ConfiguredSparseTrie; diff --git 
a/crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs b/crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs new file mode 100644 index 00000000000..c9e53a11b0c --- /dev/null +++ b/crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs @@ -0,0 +1,250 @@ +//! Receipt root computation in a background task. +//! +//! This module provides a streaming receipt root builder that computes the receipt trie root +//! in a background thread. Receipts are sent via a channel with their index, and for each +//! receipt received, the builder incrementally flushes leaves to the underlying +//! [`OrderedTrieRootEncodedBuilder`] when possible. When the channel closes, the task returns the +//! computed root. + +use alloy_eips::Encodable2718; +use alloy_primitives::{Bloom, B256}; +use crossbeam_channel::Receiver; +use reth_primitives_traits::Receipt; +use reth_trie_common::ordered_root::OrderedTrieRootEncodedBuilder; +use tokio::sync::oneshot; + +/// Receipt with index, ready to be sent to the background task for encoding and trie building. +#[derive(Debug, Clone)] +pub struct IndexedReceipt { + /// The transaction index within the block. + pub index: usize, + /// The receipt. + pub receipt: R, +} + +impl IndexedReceipt { + /// Creates a new indexed receipt. + #[inline] + pub const fn new(index: usize, receipt: R) -> Self { + Self { index, receipt } + } +} + +/// Handle for running the receipt root computation in a background task. +/// +/// This struct holds the channels needed to receive receipts and send the result. +/// Use [`Self::run`] to execute the computation (typically in a spawned blocking task). +#[derive(Debug)] +pub struct ReceiptRootTaskHandle { + /// Receiver for indexed receipts. + receipt_rx: Receiver>, + /// Sender for the computed result. + result_tx: oneshot::Sender<(B256, Bloom)>, +} + +impl ReceiptRootTaskHandle { + /// Creates a new handle from the receipt receiver and result sender channels. 
+ pub const fn new( + receipt_rx: Receiver>, + result_tx: oneshot::Sender<(B256, Bloom)>, + ) -> Self { + Self { receipt_rx, result_tx } + } + + /// Runs the receipt root computation, consuming the handle. + /// + /// This method receives indexed receipts from the channel, encodes them, + /// and builds the trie incrementally. When all receipts have been received + /// (channel closed), it sends the result through the oneshot channel. + /// + /// This is designed to be called inside a blocking task (e.g., via + /// `executor.spawn_blocking(move || handle.run(receipts_len))`). + /// + /// # Arguments + /// + /// * `receipts_len` - The total number of receipts expected. This is needed to correctly order + /// the trie keys according to RLP encoding rules. + /// + /// # Panics + /// + /// Panics if the number of receipts received doesn't match `receipts_len`. + pub fn run(self, receipts_len: usize) { + let mut builder = OrderedTrieRootEncodedBuilder::new(receipts_len); + let mut aggregated_bloom = Bloom::ZERO; + let mut encode_buf = Vec::new(); + + for indexed_receipt in self.receipt_rx { + let receipt_with_bloom = indexed_receipt.receipt.with_bloom_ref(); + + encode_buf.clear(); + receipt_with_bloom.encode_2718(&mut encode_buf); + + aggregated_bloom |= *receipt_with_bloom.bloom_ref(); + builder.push_unchecked(indexed_receipt.index, &encode_buf); + } + + let root = builder.finalize().expect("receipt root builder incomplete"); + let _ = self.result_tx.send((root, aggregated_bloom)); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{proofs::calculate_receipt_root, TxReceipt}; + use alloy_primitives::{b256, hex, Address, Bytes, Log}; + use crossbeam_channel::bounded; + use reth_ethereum_primitives::{Receipt, TxType}; + + #[tokio::test] + async fn test_receipt_root_task_empty() { + let (_tx, rx) = bounded::>(1); + let (result_tx, result_rx) = oneshot::channel(); + drop(_tx); + + let handle = ReceiptRootTaskHandle::new(rx, result_tx); + 
tokio::task::spawn_blocking(move || handle.run(0)).await.unwrap(); + + let (root, bloom) = result_rx.await.unwrap(); + + // Empty trie root + assert_eq!(root, reth_trie_common::EMPTY_ROOT_HASH); + assert_eq!(bloom, Bloom::ZERO); + } + + #[tokio::test] + async fn test_receipt_root_task_single_receipt() { + let receipts: Vec = vec![Receipt::default()]; + + let (tx, rx) = bounded(1); + let (result_tx, result_rx) = oneshot::channel(); + let receipts_len = receipts.len(); + + let handle = ReceiptRootTaskHandle::new(rx, result_tx); + let join_handle = tokio::task::spawn_blocking(move || handle.run(receipts_len)); + + for (i, receipt) in receipts.clone().into_iter().enumerate() { + tx.send(IndexedReceipt::new(i, receipt)).unwrap(); + } + drop(tx); + + join_handle.await.unwrap(); + let (root, _bloom) = result_rx.await.unwrap(); + + // Verify against the standard calculation + let receipts_with_bloom: Vec<_> = receipts.iter().map(|r| r.with_bloom_ref()).collect(); + let expected_root = calculate_receipt_root(&receipts_with_bloom); + + assert_eq!(root, expected_root); + } + + #[tokio::test] + async fn test_receipt_root_task_multiple_receipts() { + let receipts: Vec = vec![Receipt::default(); 5]; + + let (tx, rx) = bounded(4); + let (result_tx, result_rx) = oneshot::channel(); + let receipts_len = receipts.len(); + + let handle = ReceiptRootTaskHandle::new(rx, result_tx); + let join_handle = tokio::task::spawn_blocking(move || handle.run(receipts_len)); + + for (i, receipt) in receipts.into_iter().enumerate() { + tx.send(IndexedReceipt::new(i, receipt)).unwrap(); + } + drop(tx); + + join_handle.await.unwrap(); + let (root, bloom) = result_rx.await.unwrap(); + + // Verify against expected values from existing test + assert_eq!( + root, + b256!("0x61353b4fb714dc1fccacbf7eafc4273e62f3d1eed716fe41b2a0cd2e12c63ebc") + ); + assert_eq!( + bloom, + 
Bloom::from(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")) + ); + } + + #[tokio::test] + async fn test_receipt_root_matches_standard_calculation() { + // Create some receipts with actual data + let receipts = vec![ + Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 21000, + success: true, + logs: vec![], + }, + Receipt { + tx_type: TxType::Eip1559, + cumulative_gas_used: 42000, + success: true, + logs: vec![Log { + address: Address::ZERO, + data: alloy_primitives::LogData::new_unchecked(vec![B256::ZERO], Bytes::new()), + }], + }, + Receipt { + tx_type: TxType::Eip2930, + cumulative_gas_used: 63000, + success: false, + logs: vec![], + }, + ]; + + // Calculate expected values first (before we move receipts) + let receipts_with_bloom: Vec<_> = receipts.iter().map(|r| r.with_bloom_ref()).collect(); + let expected_root = calculate_receipt_root(&receipts_with_bloom); + let expected_bloom = + receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom_ref()); + + // Calculate using the task + let (tx, rx) = bounded(4); + let (result_tx, result_rx) = oneshot::channel(); + let receipts_len = receipts.len(); + + let handle = ReceiptRootTaskHandle::new(rx, result_tx); + let join_handle = tokio::task::spawn_blocking(move || handle.run(receipts_len)); + + for (i, receipt) in receipts.into_iter().enumerate() { + tx.send(IndexedReceipt::new(i, receipt)).unwrap(); + } + drop(tx); + + join_handle.await.unwrap(); + let (task_root, task_bloom) = result_rx.await.unwrap(); + + 
assert_eq!(task_root, expected_root); + assert_eq!(task_bloom, expected_bloom); + } + + #[tokio::test] + async fn test_receipt_root_task_out_of_order() { + let receipts: Vec = vec![Receipt::default(); 5]; + + // Calculate expected values first (before we move receipts) + let receipts_with_bloom: Vec<_> = receipts.iter().map(|r| r.with_bloom_ref()).collect(); + let expected_root = calculate_receipt_root(&receipts_with_bloom); + + let (tx, rx) = bounded(4); + let (result_tx, result_rx) = oneshot::channel(); + let receipts_len = receipts.len(); + + let handle = ReceiptRootTaskHandle::new(rx, result_tx); + let join_handle = tokio::task::spawn_blocking(move || handle.run(receipts_len)); + + // Send in reverse order to test out-of-order handling + for (i, receipt) in receipts.into_iter().enumerate().rev() { + tx.send(IndexedReceipt::new(i, receipt)).unwrap(); + } + drop(tx); + + join_handle.await.unwrap(); + let (root, _bloom) = result_rx.await.unwrap(); + + assert_eq!(root, expected_root); + } +} diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 6575463f0c0..573962a8e5c 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -15,9 +15,11 @@ use alloy_eip7928::BlockAccessList; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; use alloy_primitives::B256; + +use crate::tree::payload_processor::receipt_root_task::{IndexedReceipt, ReceiptRootTaskHandle}; use rayon::prelude::*; use reth_chain_state::{CanonicalInMemoryState, DeferredTrieData, ExecutedBlock, LazyOverlay}; -use reth_consensus::{ConsensusError, FullConsensus}; +use reth_consensus::{ConsensusError, FullConsensus, ReceiptRootBloom}; use reth_engine_primitives::{ ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, }; @@ -453,11 +455,14 @@ where state_provider = Box::new(InstrumentedStateProvider::new(state_provider, 
"engine")); } - // Execute the block and handle any execution errors - let (output, senders) = match self.execute_block(state_provider, env, &input, &mut handle) { - Ok(output) => output, - Err(err) => return self.handle_execution_error(input, err, &parent_block), - }; + // Execute the block and handle any execution errors. + // The receipt root task is spawned before execution and receives receipts incrementally + // as transactions complete, allowing parallel computation during execution. + let (output, senders, receipt_root_rx) = + match self.execute_block(state_provider, env, &input, &mut handle) { + Ok(output) => output, + Err(err) => return self.handle_execution_error(input, err, &parent_block), + }; // After executing the block we can stop prewarming transactions handle.stop_prewarming_execution(); @@ -473,8 +478,21 @@ where let block = self.convert_to_block(input)?.with_senders(senders); + // Wait for the receipt root computation to complete. + let receipt_root_bloom = Some( + receipt_root_rx + .blocking_recv() + .expect("receipt root task dropped sender without result"), + ); + let hashed_state = ensure_ok_post_block!( - self.validate_post_execution(&block, &parent_block, &output, &mut ctx), + self.validate_post_execution( + &block, + &parent_block, + &output, + &mut ctx, + receipt_root_bloom + ), block ); @@ -622,13 +640,21 @@ where /// Executes a block with the given state provider #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] + #[expect(clippy::type_complexity)] fn execute_block( &mut self, state_provider: S, env: ExecutionEnv, input: &BlockOrPayload, handle: &mut PayloadHandle, Err, N::Receipt>, - ) -> Result<(BlockExecutionOutput, Vec
), InsertBlockErrorKind> + ) -> Result< + ( + BlockExecutionOutput, + Vec
, + tokio::sync::oneshot::Receiver<(B256, alloy_primitives::Bloom)>, + ), + InsertBlockErrorKind, + > where S: StateProvider + Send, Err: core::error::Error + Send + Sync + 'static, @@ -667,6 +693,14 @@ where }); } + // Spawn background task to compute receipt root and logs bloom incrementally. + // Unbounded channel is used since tx count bounds capacity anyway (max ~30k txs per block). + let receipts_len = input.transaction_count(); + let (receipt_tx, receipt_rx) = crossbeam_channel::unbounded(); + let (result_tx, result_rx) = tokio::sync::oneshot::channel(); + let task_handle = ReceiptRootTaskHandle::new(receipt_rx, result_tx); + self.payload_processor.executor().spawn_blocking(move || task_handle.run(receipts_len)); + let execution_start = Instant::now(); let state_hook = Box::new(handle.state_hook()); let (output, senders) = self.metrics.execute_metered( @@ -674,11 +708,22 @@ where handle.iter_transactions().map(|res| res.map_err(BlockExecutionError::other)), input.transaction_count(), state_hook, + |receipts| { + // Send the latest receipt to the background task for incremental root computation. + // The receipt is cloned here; encoding happens in the background thread. + if let Some(receipt) = receipts.last() { + // Infer tx_index from the number of receipts collected so far + let tx_index = receipts.len() - 1; + let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); + } + }, )?; + drop(receipt_tx); + let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); - Ok((output, senders)) + Ok((output, senders, result_rx)) } /// Compute state root for the given hashed post state in parallel. 
@@ -736,6 +781,9 @@ where /// - parent header validation /// - post-execution consensus validation /// - state-root based post-execution validation + /// + /// If `receipt_root_bloom` is provided, it will be used instead of computing the receipt root + /// and logs bloom from the receipts. #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn validate_post_execution>>( &self, @@ -743,6 +791,7 @@ where parent_block: &SealedHeader, output: &BlockExecutionOutput, ctx: &mut TreeCtx<'_, N>, + receipt_root_bloom: Option, ) -> Result where V: PayloadValidator, @@ -769,7 +818,9 @@ where let _enter = debug_span!(target: "engine::tree::payload_validator", "validate_block_post_execution") .entered(); - if let Err(err) = self.consensus.validate_block_post_execution(block, output) { + if let Err(err) = + self.consensus.validate_block_post_execution(block, output, receipt_root_bloom) + { // call post-block hook self.on_invalid_block(parent_block, block, output, None, ctx.state_mut()); return Err(err.into()) diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index ccbbb036ff0..fec4f21b9f9 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -15,7 +15,7 @@ use alloc::{fmt::Debug, sync::Arc}; use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::eip7840::BlobParams; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; +use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator, ReceiptRootBloom}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_gas_limit, @@ -74,8 +74,15 @@ where &self, block: &RecoveredBlock, result: &BlockExecutionResult, + receipt_root_bloom: Option, ) -> Result<(), ConsensusError> { - 
validate_block_post_execution(block, &self.chain_spec, &result.receipts, &result.requests) + validate_block_post_execution( + block, + &self.chain_spec, + &result.receipts, + &result.requests, + receipt_root_bloom, + ) } } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 055977b5175..693d6ce0020 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -12,11 +12,15 @@ use reth_primitives_traits::{ /// /// - Compares the receipts root in the block header to the block body /// - Compares the gas used in the block header to the actual gas usage after execution +/// +/// If `receipt_root_bloom` is provided, the pre-computed receipt root and logs bloom are used +/// instead of computing them from the receipts. pub fn validate_block_post_execution( block: &RecoveredBlock, chain_spec: &ChainSpec, receipts: &[R], requests: &Requests, + receipt_root_bloom: Option<(B256, Bloom)>, ) -> Result<(), ConsensusError> where B: Block, @@ -37,19 +41,26 @@ where // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. 
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if chain_spec.is_byzantium_active_at_block(block.header().number()) && - let Err(error) = verify_receipts( - block.header().receipts_root(), - block.header().logs_bloom(), - receipts, - ) - { - let receipts = receipts - .iter() - .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) - .collect::>(); - tracing::debug!(%error, ?receipts, "receipts verification failed"); - return Err(error) + if chain_spec.is_byzantium_active_at_block(block.header().number()) { + let result = if let Some((receipts_root, logs_bloom)) = receipt_root_bloom { + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + block.header().receipts_root(), + block.header().logs_bloom(), + ) + } else { + verify_receipts(block.header().receipts_root(), block.header().logs_bloom(), receipts) + }; + + if let Err(error) = result { + let receipts = receipts + .iter() + .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) + .collect::>(); + tracing::debug!(%error, ?receipts, "receipts verification failed"); + return Err(error) + } } // Validate that the header requests hash matches the calculated requests hash diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 8be804db451..1d3cb421c45 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -18,7 +18,7 @@ use alloy_consensus::{ use alloy_primitives::B64; use core::fmt::Debug; use reth_chainspec::EthChainSpec; -use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; +use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator, ReceiptRootBloom}; use reth_consensus_common::validation::{ validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, validate_header_base_fee, @@ -79,8 +79,9 @@ where &self, block: &RecoveredBlock, result: &BlockExecutionResult, + 
receipt_root_bloom: Option, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block.header(), &self.chain_spec, result) + validate_block_post_execution(block.header(), &self.chain_spec, result, receipt_root_bloom) } } @@ -410,7 +411,8 @@ mod tests { let post_execution = as FullConsensus>::validate_block_post_execution( &beacon_consensus, &block, - &result + &result, + None, ); // validate blob, it should pass blob gas used validation @@ -479,7 +481,8 @@ mod tests { let post_execution = as FullConsensus>::validate_block_post_execution( &beacon_consensus, &block, - &result + &result, + None, ); // validate blob, it should fail blob gas used validation post execution. diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 50c45f7172c..21685486088 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -85,10 +85,14 @@ where /// /// - Compares the receipts root in the block header to the block body /// - Compares the gas used in the block header to the actual gas usage after execution +/// +/// If `receipt_root_bloom` is provided, the pre-computed receipt root and logs bloom are used +/// instead of computing them from the receipts. pub fn validate_block_post_execution( header: impl BlockHeader, chain_spec: impl OpHardforks, result: &BlockExecutionResult, + receipt_root_bloom: Option<(B256, Bloom)>, ) -> Result<(), ConsensusError> { // Validate that the blob gas used is present and correctly computed if Jovian is active. if chain_spec.is_jovian_active_at_timestamp(header.timestamp()) { @@ -110,21 +114,32 @@ pub fn validate_block_post_execution( // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. 
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if chain_spec.is_byzantium_active_at_block(header.number()) && - let Err(error) = verify_receipts_optimism( - header.receipts_root(), - header.logs_bloom(), - receipts, - chain_spec, - header.timestamp(), - ) - { - let receipts = receipts - .iter() - .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) - .collect::>(); - tracing::debug!(%error, ?receipts, "receipts verification failed"); - return Err(error) + if chain_spec.is_byzantium_active_at_block(header.number()) { + let result = if let Some((receipts_root, logs_bloom)) = receipt_root_bloom { + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + header.receipts_root(), + header.logs_bloom(), + ) + } else { + verify_receipts_optimism( + header.receipts_root(), + header.logs_bloom(), + receipts, + chain_spec, + header.timestamp(), + ) + }; + + if let Err(error) = result { + let receipts = receipts + .iter() + .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) + .collect::>(); + tracing::debug!(%error, ?receipts, "receipts verification failed"); + return Err(error) + } } // Check if gas used matches the value set in header. 
@@ -543,7 +558,7 @@ mod tests { requests: Requests::default(), gas_used: GAS_USED, }; - validate_block_post_execution(&header, &chainspec, &result).unwrap(); + validate_block_post_execution(&header, &chainspec, &result, None).unwrap(); } #[test] @@ -565,7 +580,7 @@ mod tests { gas_used: GAS_USED, }; assert!(matches!( - validate_block_post_execution(&header, &chainspec, &result).unwrap_err(), + validate_block_post_execution(&header, &chainspec, &result, None).unwrap_err(), ConsensusError::BlobGasUsedDiff(diff) if diff.got == BLOB_GAS_USED && diff.expected == BLOB_GAS_USED + 1 )); diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 78185f92ffb..73b2c68e2b9 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -201,7 +201,7 @@ where // update the cached reads self.update_cached_reads(parent_header_hash, request_cache).await; - self.consensus.validate_block_post_execution(&block, &output)?; + self.consensus.validate_block_post_execution(&block, &output, None)?; self.ensure_payment(&block, &output, &message)?; diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 593180926dd..29adf3b2d3f 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -351,7 +351,7 @@ where }) })?; - if let Err(err) = self.consensus.validate_block_post_execution(&block, &result) { + if let Err(err) = self.consensus.validate_block_post_execution(&block, &result, None) { return Err(StageError::Block { block: Box::new(block.block_with_parent()), error: BlockErrorKind::Validation(err), diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index 08d84f84668..a3a8ba7b2dd 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -231,8 +231,14 @@ where .map_err(|e| StatelessValidationError::StatelessExecutionFailed(e.to_string()))?; // Post validation checks - 
validate_block_post_execution(¤t_block, &chain_spec, &output.receipts, &output.requests) - .map_err(StatelessValidationError::ConsensusValidationFailed)?; + validate_block_post_execution( + ¤t_block, + &chain_spec, + &output.receipts, + &output.requests, + None, + ) + .map_err(StatelessValidationError::ConsensusValidationFailed)?; // Compute and check the post state root let hashed_state = HashedPostState::from_bundle_state::(&output.state.state); diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index b478a126a52..8faa44622fa 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -55,6 +55,9 @@ pub use proofs::*; pub mod root; +/// Incremental ordered trie root computation. +pub mod ordered_root; + /// Buffer for trie updates. pub mod updates; diff --git a/crates/trie/common/src/ordered_root.rs b/crates/trie/common/src/ordered_root.rs new file mode 100644 index 00000000000..ed02a369f73 --- /dev/null +++ b/crates/trie/common/src/ordered_root.rs @@ -0,0 +1,354 @@ +//! Incremental ordered trie root computation. +//! +//! This module provides builders for computing ordered trie roots incrementally as items +//! arrive, rather than requiring all items upfront. This is useful for receipt root +//! calculation during block execution, where we know the total count but receive receipts +//! one by one as transactions are executed. + +use crate::{HashBuilder, Nibbles, EMPTY_ROOT_HASH}; +use alloc::vec::Vec; +use alloy_primitives::B256; +use alloy_trie::root::adjust_index_for_rlp; +use core::fmt; + +/// Error returned when using [`OrderedTrieRootEncodedBuilder`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum OrderedRootError { + /// Called `finalize()` before all items were pushed. + Incomplete { + /// The expected number of items. + expected: usize, + /// The number of items received. + received: usize, + }, + /// Index is out of bounds. + IndexOutOfBounds { + /// The index that was provided. 
+ index: usize, + /// The expected length. + len: usize, + }, + /// Item at this index was already pushed. + DuplicateIndex { + /// The duplicate index. + index: usize, + }, +} + +impl OrderedRootError { + /// Returns `true` if the error is [`OrderedRootError::Incomplete`]. + #[inline] + pub const fn is_incomplete(&self) -> bool { + matches!(self, Self::Incomplete { .. }) + } + + /// Returns `true` if the error is [`OrderedRootError::IndexOutOfBounds`]. + #[inline] + pub const fn is_index_out_of_bounds(&self) -> bool { + matches!(self, Self::IndexOutOfBounds { .. }) + } + + /// Returns `true` if the error is [`OrderedRootError::DuplicateIndex`]. + #[inline] + pub const fn is_duplicate_index(&self) -> bool { + matches!(self, Self::DuplicateIndex { .. }) + } + + /// Returns the index associated with the error, if any. + #[inline] + pub const fn index(&self) -> Option { + match self { + Self::Incomplete { .. } => None, + Self::IndexOutOfBounds { index, .. } | Self::DuplicateIndex { index } => Some(*index), + } + } +} + +impl fmt::Display for OrderedRootError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Incomplete { expected, received } => { + write!(f, "incomplete: expected {expected} items, received {received}") + } + Self::IndexOutOfBounds { index, len } => { + write!(f, "index {index} out of bounds for length {len}") + } + Self::DuplicateIndex { index } => { + write!(f, "duplicate item at index {index}") + } + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for OrderedRootError {} + +/// A builder for computing ordered trie roots incrementally from pre-encoded items. +/// +/// This builder allows pushing items one by one as they become available +/// (e.g., receipts after each transaction execution), rather than requiring +/// all items upfront. +/// +/// # Use Case +/// +/// When executing a block, the receipt root must be computed from all transaction +/// receipts. 
With the standard `ordered_trie_root`, you must wait until all +/// transactions are executed before computing the root. This builder enables +/// **incremental computation** - you can start building the trie as soon as +/// receipts become available, potentially in parallel with continued execution. +/// +/// The builder requires knowing the total item count upfront (the number of +/// transactions in the block), but items can be pushed in any order by index. +/// +/// # How It Works +/// +/// Items can be pushed in any order by specifying their index. The builder +/// internally buffers items and flushes them to the underlying [`HashBuilder`] +/// in the correct order for RLP key encoding (as determined by [`adjust_index_for_rlp`]). +/// +/// # Memory +/// +/// Each pushed item is stored in an internal buffer until it can be flushed. +/// In the worst case (e.g., pushing index 0 last), all items except one will +/// be buffered. For receipt roots, index 0 is typically flushed late due to +/// RLP key ordering, so expect to buffer most items until near the end. +/// +/// # Example +/// +/// ``` +/// use reth_trie_common::ordered_root::OrderedTrieRootEncodedBuilder; +/// +/// // Create a builder for 2 pre-encoded items +/// let mut builder = OrderedTrieRootEncodedBuilder::new(2); +/// +/// // Push pre-encoded items as they arrive (can be out of order) +/// builder.push(1, b"encoded_item_1").unwrap(); +/// builder.push(0, b"encoded_item_0").unwrap(); +/// +/// // Finalize to get the root hash +/// let root = builder.finalize().unwrap(); +/// ``` +#[derive(Debug)] +pub struct OrderedTrieRootEncodedBuilder { + /// Total expected number of items. + len: usize, + /// Number of items received so far. + received: usize, + /// Next insertion loop counter (determines which adjusted index to flush next). + next_insert_i: usize, + /// Buffer for pending items, indexed by execution index. + pending: Vec>>, + /// The underlying hash builder. 
+ hb: HashBuilder, +} + +impl OrderedTrieRootEncodedBuilder { + /// Creates a new builder for `len` pre-encoded items. + pub fn new(len: usize) -> Self { + Self { + len, + received: 0, + next_insert_i: 0, + pending: alloc::vec![None; len], + hb: HashBuilder::default(), + } + } + + /// Pushes a pre-encoded item at the given index to the builder. + /// + /// Items can be pushed in any order. The builder will automatically + /// flush items to the underlying [`HashBuilder`] when they become + /// available in the correct order. + /// + /// # Errors + /// + /// - [`OrderedRootError::IndexOutOfBounds`] if `index >= len` + /// - [`OrderedRootError::DuplicateIndex`] if an item was already pushed at this index + #[inline] + pub fn push(&mut self, index: usize, bytes: &[u8]) -> Result<(), OrderedRootError> { + if index >= self.len { + return Err(OrderedRootError::IndexOutOfBounds { index, len: self.len }); + } + + if self.pending[index].is_some() { + return Err(OrderedRootError::DuplicateIndex { index }); + } + + self.push_unchecked(index, bytes); + Ok(()) + } + + /// Pushes a pre-encoded item at the given index without bounds or duplicate checking. + /// + /// This is a performance-critical method for callers that can guarantee: + /// - `index < len` + /// - No item has been pushed at this index before + /// + /// # Panics + /// + /// Panics in debug mode if `index >= len`. + #[inline] + pub fn push_unchecked(&mut self, index: usize, bytes: &[u8]) { + debug_assert!(index < self.len, "index {index} out of bounds for length {}", self.len); + debug_assert!(self.pending[index].is_none(), "duplicate item at index {index}"); + + self.pending[index] = Some(bytes.to_vec()); + self.received += 1; + + self.flush(); + } + + /// Attempts to flush pending items to the hash builder. 
+ fn flush(&mut self) { + while self.next_insert_i < self.len { + let exec_index_needed = adjust_index_for_rlp(self.next_insert_i, self.len); + + let Some(value) = self.pending[exec_index_needed].take() else { + break; + }; + + let index_buffer = alloy_rlp::encode_fixed_size(&exec_index_needed); + self.hb.add_leaf(Nibbles::unpack(&index_buffer), &value); + + self.next_insert_i += 1; + } + } + + /// Returns `true` if all items have been pushed. + #[inline] + pub const fn is_complete(&self) -> bool { + self.received == self.len + } + + /// Returns the number of items pushed so far. + #[inline] + pub const fn pushed_count(&self) -> usize { + self.received + } + + /// Returns the expected total number of items. + #[inline] + pub const fn expected_count(&self) -> usize { + self.len + } + + /// Finalizes the builder and returns the trie root. + /// + /// # Errors + /// + /// Returns [`OrderedRootError::Incomplete`] if not all items have been pushed. + pub fn finalize(mut self) -> Result { + if self.len == 0 { + return Ok(EMPTY_ROOT_HASH); + } + + if self.received != self.len { + return Err(OrderedRootError::Incomplete { + expected: self.len, + received: self.received, + }); + } + + debug_assert_eq!(self.next_insert_i, self.len, "not all items were flushed"); + + Ok(self.hb.root()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_trie::root::ordered_trie_root_encoded; + + #[test] + fn test_ordered_encoded_builder_equivalence() { + for len in [0, 1, 2, 3, 10, 127, 128, 129, 130, 200] { + let items: Vec> = + (0..len).map(|i| format!("item_{i}_data").into_bytes()).collect(); + + let expected = ordered_trie_root_encoded(&items); + + let mut builder = OrderedTrieRootEncodedBuilder::new(len); + + for (i, item) in items.iter().enumerate() { + builder.push(i, item).unwrap(); + } + + let actual = builder.finalize().unwrap(); + assert_eq!( + expected, actual, + "mismatch for len={len}: expected {expected:?}, got {actual:?}" + ); + } + } + + #[test] + fn 
test_ordered_builder_out_of_order() { + for len in [2, 3, 5, 10, 50] { + let items: Vec> = + (0..len).map(|i| format!("item_{i}_data").into_bytes()).collect(); + + let expected = ordered_trie_root_encoded(&items); + + // Push in reverse order + let mut builder = OrderedTrieRootEncodedBuilder::new(len); + for i in (0..len).rev() { + builder.push(i, &items[i]).unwrap(); + } + let actual = builder.finalize().unwrap(); + assert_eq!(expected, actual, "mismatch for reverse order len={len}"); + + // Push odds first, then evens + let mut builder = OrderedTrieRootEncodedBuilder::new(len); + for i in (1..len).step_by(2) { + builder.push(i, &items[i]).unwrap(); + } + for i in (0..len).step_by(2) { + builder.push(i, &items[i]).unwrap(); + } + let actual = builder.finalize().unwrap(); + assert_eq!(expected, actual, "mismatch for odd/even order len={len}"); + } + } + + #[test] + fn test_ordered_builder_empty() { + let builder = OrderedTrieRootEncodedBuilder::new(0); + assert!(builder.is_complete()); + assert_eq!(builder.finalize().unwrap(), EMPTY_ROOT_HASH); + } + + #[test] + fn test_ordered_builder_incomplete_error() { + let mut builder = OrderedTrieRootEncodedBuilder::new(3); + + builder.push(0, b"item_0").unwrap(); + builder.push(1, b"item_1").unwrap(); + + assert!(!builder.is_complete()); + assert_eq!( + builder.finalize(), + Err(OrderedRootError::Incomplete { expected: 3, received: 2 }) + ); + } + + #[test] + fn test_ordered_builder_index_errors() { + let mut builder = OrderedTrieRootEncodedBuilder::new(2); + + assert_eq!( + builder.push(5, b"item"), + Err(OrderedRootError::IndexOutOfBounds { index: 5, len: 2 }) + ); + + builder.push(0, b"item_0").unwrap(); + + assert_eq!( + builder.push(0, b"item_0_dup"), + Err(OrderedRootError::DuplicateIndex { index: 0 }) + ); + + builder.push(1, b"item_1").unwrap(); + assert!(builder.is_complete()); + } +} diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 
6d8dbc6827c..8f26ac8dd04 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -277,7 +277,7 @@ fn run_case( .map_err(|err| Error::block_failed(block_number, program_inputs.clone(), err))?; // Consensus checks after block execution - validate_block_post_execution(block, &chain_spec, &output.receipts, &output.requests) + validate_block_post_execution(block, &chain_spec, &output.receipts, &output.requests, None) .map_err(|err| Error::block_failed(block_number, program_inputs.clone(), err))?; // Generate the stateless witness From f6dbf2d82da906417442030dbfbea4a17225a21b Mon Sep 17 00:00:00 2001 From: Julian Meyer Date: Fri, 16 Jan 2026 13:31:52 -0800 Subject: [PATCH 051/267] feat(db): implement extra dup methods (#20964) Co-authored-by: Matthias Seitz --- crates/storage/db-api/src/cursor.rs | 6 ++++++ crates/storage/db-api/src/mock.rs | 12 ++++++++++++ .../storage/db/src/implementation/mdbx/cursor.rs | 14 ++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index fac85af5b88..73bc369ee58 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -62,9 +62,15 @@ pub trait DbCursorRO { /// A read-only cursor over the dup table `T`. pub trait DbDupCursorRO { + /// Positions the cursor at the prev KV pair of the table, returning it. + fn prev_dup(&mut self) -> PairResult; + /// Positions the cursor at the next KV pair of the table, returning it. fn next_dup(&mut self) -> PairResult; + /// Positions the cursor at the last duplicate value of the current key. + fn last_dup(&mut self) -> ValueOnlyResult; + /// Positions the cursor at the next KV pair of the table, skipping duplicates. 
fn next_no_dup(&mut self) -> PairResult; diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index 9928a66c0d4..78a2aec1e14 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -296,6 +296,18 @@ impl DbDupCursorRO for CursorMock { Ok(None) } + /// Moves to the previous duplicate entry. + /// **Mock behavior**: Always returns `None`. + fn prev_dup(&mut self) -> PairResult { + Ok(None) + } + + /// Moves to the last duplicate entry. + /// **Mock behavior**: Always returns `None`. + fn last_dup(&mut self) -> ValueOnlyResult { + Ok(None) + } + /// Moves to the next entry with a different key. /// **Mock behavior**: Always returns `None`. fn next_no_dup(&mut self) -> PairResult { diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 5ca6eacb6c7..f432e76642d 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -158,11 +158,25 @@ impl DbCursorRO for Cursor { } impl DbDupCursorRO for Cursor { + /// Returns the previous `(key, value)` pair of a DUPSORT table. + fn prev_dup(&mut self) -> PairResult { + decode::(self.inner.prev_dup()) + } + /// Returns the next `(key, value)` pair of a DUPSORT table. fn next_dup(&mut self) -> PairResult { decode::(self.inner.next_dup()) } + /// Returns the last `value` of the current duplicate `key`. + fn last_dup(&mut self) -> ValueOnlyResult { + self.inner + .last_dup() + .map_err(|e| DatabaseError::Read(e.into()))? + .map(decode_one::) + .transpose() + } + /// Returns the next `(key, value)` pair skipping the duplicates. 
fn next_no_dup(&mut self) -> PairResult { decode::(self.inner.next_nodup()) From 3a39251f79d8731df77403321e50da67952adcf8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 16 Jan 2026 23:32:23 +0100 Subject: [PATCH 052/267] fix: release mutex before dropping ancestors in wait_cloned (#21146) --- crates/chain-state/src/deferred_trie.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs index c2013f9ed6f..efe23a2ded3 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -287,6 +287,11 @@ impl DeferredTrieData { &inputs.ancestors, ); *state = DeferredState::Ready(computed.clone()); + + // Release lock before inputs (and its ancestors) drop to avoid holding it + // while their potential last Arc refs drop (which could trigger recursive locking) + drop(state); + computed } } From d7a5d1f87269ef8bc5277a57075a5dbc134d6cce Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 16 Jan 2026 23:25:54 +0000 Subject: [PATCH 053/267] fix: properly record span fields (#21148) --- crates/engine/tree/src/tree/metrics.rs | 8 ++++++-- .../engine/tree/src/tree/payload_processor/prewarm.rs | 11 ++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index a11064ebd54..da787520257 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -101,8 +101,12 @@ impl EngineApiMetrics { let tx = tx?; senders.push(*tx.signer()); - let span = - debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash()); + let span = debug_span!( + target: "engine::tree", + "execute tx", + tx_hash = ?tx.tx().tx_hash(), + gas_used = tracing::field::Empty, + ); let enter = span.entered(); trace!(target: "engine::tree", "Executing transaction"); let start = Instant::now(); diff --git 
a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index aecc2249657..494e2d0f261 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -576,9 +576,14 @@ where .entered(); txs.recv() } { - let enter = - debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) - .entered(); + let enter = debug_span!( + target: "engine::tree::payload_processor::prewarm", + "prewarm tx", + index, + tx_hash = %tx.tx().tx_hash(), + is_success = tracing::field::Empty, + ) + .entered(); // create the tx env let start = Instant::now(); From 012fbf51104b8a5d5892ccc05677a941bc093cb2 Mon Sep 17 00:00:00 2001 From: Mablr <59505383+mablr@users.noreply.github.com> Date: Sat, 17 Jan 2026 00:35:26 +0100 Subject: [PATCH 054/267] fix(`docs/cli`): update `help.rs` to use nightly toolchain (#21149) --- docs/cli/help.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cli/help.rs b/docs/cli/help.rs index c96eaf63a1d..0e43adbe3df 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -1,4 +1,4 @@ -#!/usr/bin/env -S cargo -Zscript +#!/usr/bin/env -S cargo +nightly -Zscript --- [package] edition = "2021" From b96a30821f5d345a5db0c34692500544d0fc3ff4 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Sat, 17 Jan 2026 00:33:27 +0000 Subject: [PATCH 055/267] fix(engine): request head block download when not buffered after backfill (#21150) --- crates/engine/tree/src/tree/mod.rs | 12 ++++++++++++ crates/engine/tree/src/tree/tests.rs | 9 +++++++++ 2 files changed, 21 insertions(+) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 6796e098d1a..275a2c47f6b 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1676,6 +1676,18 @@ where ))); return Ok(()); } + } else { + // We don't have the head block 
or any of its ancestors buffered. Request + // a download for the head block which will then trigger further sync. + debug!( + target: "engine::tree", + head_hash = %sync_target_state.head_block_hash, + "Backfill complete but head block not buffered, requesting download" + ); + self.emit_event(EngineApiEvent::Download(DownloadRequest::single_block( + sync_target_state.head_block_hash, + ))); + return Ok(()); } // try to close the gap by executing buffered blocks that are child blocks of the new head diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index adfc62ef4bc..dd576ed37f8 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -1008,6 +1008,15 @@ async fn test_engine_tree_live_sync_transition_required_blocks_requested() { _ => panic!("Unexpected event: {event:#?}"), } + // After backfill completes with head not buffered, we also request head download + let event = test_harness.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { + assert_eq!(hash_set, HashSet::from_iter([main_chain_last_hash])); + } + _ => panic!("Unexpected event: {event:#?}"), + } + let _ = test_harness .tree .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain From c617d25c36f639a859941805c42a4a4b26f94125 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 17 Jan 2026 01:05:35 +0000 Subject: [PATCH 056/267] perf: make Chain use DeferredTrieData (#21137) Co-authored-by: Matthias Seitz --- Cargo.lock | 39 ++- Cargo.toml | 2 + crates/chain-state/Cargo.toml | 2 + crates/chain-state/src/in_memory.rs | 107 +++++-- crates/chain-state/src/lib.rs | 4 +- crates/chain-state/src/notifications.rs | 36 +-- crates/chain-state/src/test_utils.rs | 3 +- crates/evm/chain/Cargo.toml | 69 ++++ .../{execution-types => chain}/src/chain.rs | 295 +++++++++++------- .../chain}/src/deferred_trie.rs | 1 + 
crates/evm/chain/src/lib.rs | 30 ++ crates/evm/execution-types/Cargo.toml | 4 - .../execution-types/src/execution_outcome.rs | 2 +- crates/evm/execution-types/src/lib.rs | 5 +- crates/exex/exex/Cargo.toml | 1 + crates/exex/exex/src/backfill/job.rs | 2 +- crates/exex/exex/src/manager.rs | 130 ++++---- crates/exex/exex/src/notifications.rs | 138 ++++---- crates/exex/exex/src/wal/mod.rs | 74 +++-- crates/exex/exex/src/wal/storage.rs | 61 ++-- crates/exex/test-utils/Cargo.toml | 2 +- crates/exex/test-utils/src/lib.rs | 2 +- crates/exex/types/Cargo.toml | 6 +- crates/exex/types/src/notification.rs | 105 ++++--- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/lib.rs | 13 +- crates/rpc/rpc-eth-types/Cargo.toml | 2 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 2 +- crates/stages/stages/Cargo.toml | 2 +- crates/stages/stages/src/stages/execution.rs | 4 +- crates/storage/provider/Cargo.toml | 10 + crates/storage/provider/src/lib.rs | 11 +- .../src/providers/blockchain_provider.rs | 29 +- .../src/providers/database/provider.rs | 5 +- crates/storage/storage-api/Cargo.toml | 3 + .../storage/storage-api/src/block_writer.rs | 3 +- crates/transaction-pool/Cargo.toml | 2 + .../transaction-pool/src/blobstore/tracker.rs | 7 +- 38 files changed, 751 insertions(+), 463 deletions(-) create mode 100644 crates/evm/chain/Cargo.toml rename crates/evm/{execution-types => chain}/src/chain.rs (74%) rename crates/{chain-state => evm/chain}/src/deferred_trie.rs (99%) create mode 100644 crates/evm/chain/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 17e53c81377..450ab2bc263 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7774,6 +7774,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-chain" +version = "1.10.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "arbitrary", + "bincode 1.3.3", + "metrics", + "parking_lot", + "rand 0.9.2", + "reth-ethereum-primitives", + "reth-execution-types", + "reth-metrics", + "reth-primitives-traits", + 
"reth-trie", + "reth-trie-common", + "revm", + "serde", + "serde_with", + "tracing", +] + [[package]] name = "reth-chain-state" version = "1.10.0" @@ -7789,6 +7813,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.9.2", + "reth-chain", "reth-chainspec", "reth-errors", "reth-ethereum-primitives", @@ -8927,7 +8952,6 @@ dependencies = [ name = "reth-execution-types" version = "1.10.0" dependencies = [ - "alloy-consensus", "alloy-eips", "alloy-evm", "alloy-primitives", @@ -8981,6 +9005,7 @@ dependencies = [ "reth-trie-common", "rmp-serde", "secp256k1 0.30.0", + "serde_with", "tempfile", "thiserror 2.0.17", "tokio", @@ -8995,6 +9020,7 @@ dependencies = [ "alloy-eips", "eyre", "futures-util", + "reth-chain", "reth-chainspec", "reth-config", "reth-consensus", @@ -9002,7 +9028,6 @@ dependencies = [ "reth-db-common", "reth-ethereum-primitives", "reth-evm-ethereum", - "reth-execution-types", "reth-exex", "reth-network", "reth-node-api", @@ -9028,9 +9053,9 @@ dependencies = [ "arbitrary", "bincode 1.3.3", "rand 0.9.2", + "reth-chain", "reth-chain-state", "reth-ethereum-primitives", - "reth-execution-types", "reth-primitives-traits", "serde", "serde_with", @@ -9773,6 +9798,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "op-revm", + "reth-chain", "reth-chainspec", "reth-evm", "reth-execution-errors", @@ -10223,6 +10249,7 @@ dependencies = [ "parking_lot", "rand 0.9.2", "rayon", + "reth-chain", "reth-chain-state", "reth-chainspec", "reth-codecs", @@ -10712,12 +10739,12 @@ dependencies = [ "metrics", "rand 0.9.2", "reqwest", + "reth-chain", "reth-chain-state", "reth-chainspec", "reth-errors", "reth-ethereum-primitives", "reth-evm", - "reth-execution-types", "reth-metrics", "reth-primitives-traits", "reth-revm", @@ -10790,6 +10817,7 @@ dependencies = [ "rand 0.9.2", "rayon", "reqwest", + "reth-chain", "reth-chainspec", "reth-codecs", "reth-config", @@ -10805,7 +10833,6 @@ dependencies = [ "reth-etl", "reth-evm", "reth-evm-ethereum", - 
"reth-execution-types", "reth-exex", "reth-fs-util", "reth-network-p2p", @@ -10952,6 +10979,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "auto_impl", + "reth-chain", "reth-chainspec", "reth-db-api", "reth-db-models", @@ -11110,6 +11138,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.9.2", + "reth-chain", "reth-chain-state", "reth-chainspec", "reth-eth-wire-types", diff --git a/Cargo.toml b/Cargo.toml index c9a3ba0d93c..1efe7985da1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,6 +44,7 @@ members = [ "crates/ethereum/primitives/", "crates/ethereum/reth/", "crates/etl/", + "crates/evm/chain", "crates/evm/evm", "crates/evm/execution-errors", "crates/evm/execution-types", @@ -387,6 +388,7 @@ reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm/evm", default-features = false } reth-evm-ethereum = { path = "crates/ethereum/evm", default-features = false } reth-optimism-evm = { path = "crates/optimism/evm", default-features = false } +reth-chain = { path = "crates/evm/chain" } reth-execution-errors = { path = "crates/evm/execution-errors", default-features = false } reth-execution-types = { path = "crates/evm/execution-types", default-features = false } reth-exex = { path = "crates/exex/exex" } diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index d21c83ae7c4..b3fbe487311 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chain.workspace = true reth-chainspec.workspace = true reth-errors.workspace = true reth-execution-types.workspace = true @@ -65,6 +66,7 @@ serde = [ "alloy-primitives/serde", "parking_lot/serde", "rand?/serde", + "reth-chain/serde", "reth-ethereum-primitives/serde", "reth-execution-types/serde", "reth-primitives-traits/serde", diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 7f2f328b191..1cf281395e7 100644 --- 
a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -8,9 +8,10 @@ use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, BlockNumber, TxHash, B256}; use parking_lot::RwLock; +use reth_chain::Chain; use reth_chainspec::ChainInfo; use reth_ethereum_primitives::EthPrimitives; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives_traits::{ BlockBody as _, IndexedTx, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, @@ -944,6 +945,9 @@ impl> NewCanonicalChain { } /// Converts a slice of executed blocks into a [`Chain`]. + /// + /// Uses [`ExecutedBlock::trie_data_handle`] to avoid blocking on deferred trie computations. + /// The trie data will be computed lazily when actually needed by consumers. 
fn blocks_to_chain(blocks: &[ExecutedBlock]) -> Chain { match blocks { [] => Chain::default(), @@ -954,8 +958,7 @@ impl> NewCanonicalChain { first.execution_outcome().clone(), first.block_number(), )), - first.trie_updates(), - first.hashed_state(), + first.trie_data_handle(), ); for exec in rest { chain.append_block( @@ -964,8 +967,7 @@ impl> NewCanonicalChain { exec.execution_outcome().clone(), exec.block_number(), )), - exec.trie_updates(), - exec.hashed_state(), + exec.trie_data_handle(), ); } chain @@ -1560,17 +1562,30 @@ mod tests { ..Default::default() }; - assert_eq!( - chain_commit.to_chain_notification(), - CanonStateNotification::Commit { - new: Arc::new(Chain::new( - vec![block0.recovered_block().clone(), block1.recovered_block().clone()], - commit_execution_outcome, - expected_trie_updates, - expected_hashed_state - )) - } - ); + // Get the notification and verify + let notification = chain_commit.to_chain_notification(); + let CanonStateNotification::Commit { new } = notification else { + panic!("Expected Commit notification"); + }; + + // Compare blocks + let expected_blocks: Vec<_> = + vec![block0.recovered_block().clone(), block1.recovered_block().clone()]; + let actual_blocks: Vec<_> = new.blocks().values().cloned().collect(); + assert_eq!(actual_blocks, expected_blocks); + + // Compare execution outcome + assert_eq!(*new.execution_outcome(), commit_execution_outcome); + + // Compare trie data by waiting on deferred data + for (block_num, expected_updates) in &expected_trie_updates { + let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); + assert_eq!(actual.trie_updates, *expected_updates); + } + for (block_num, expected_state) in &expected_hashed_state { + let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); + assert_eq!(actual.hashed_state, *expected_state); + } // Test reorg notification let chain_reorg = NewCanonicalChain::Reorg { @@ -1607,22 +1622,48 @@ mod tests { ..Default::default() }; - assert_eq!( - 
chain_reorg.to_chain_notification(), - CanonStateNotification::Reorg { - old: Arc::new(Chain::new( - vec![block1.recovered_block().clone(), block2.recovered_block().clone()], - reorg_execution_outcome.clone(), - old_trie_updates, - old_hashed_state - )), - new: Arc::new(Chain::new( - vec![block1a.recovered_block().clone(), block2a.recovered_block().clone()], - reorg_execution_outcome, - new_trie_updates, - new_hashed_state - )) - } - ); + // Get the notification and verify + let notification = chain_reorg.to_chain_notification(); + let CanonStateNotification::Reorg { old, new } = notification else { + panic!("Expected Reorg notification"); + }; + + // Compare old chain blocks + let expected_old_blocks: Vec<_> = + vec![block1.recovered_block().clone(), block2.recovered_block().clone()]; + let actual_old_blocks: Vec<_> = old.blocks().values().cloned().collect(); + assert_eq!(actual_old_blocks, expected_old_blocks); + + // Compare old chain execution outcome + assert_eq!(*old.execution_outcome(), reorg_execution_outcome); + + // Compare old chain trie data + for (block_num, expected_updates) in &old_trie_updates { + let actual = old.trie_data_at(*block_num).unwrap().wait_cloned(); + assert_eq!(actual.trie_updates, *expected_updates); + } + for (block_num, expected_state) in &old_hashed_state { + let actual = old.trie_data_at(*block_num).unwrap().wait_cloned(); + assert_eq!(actual.hashed_state, *expected_state); + } + + // Compare new chain blocks + let expected_new_blocks: Vec<_> = + vec![block1a.recovered_block().clone(), block2a.recovered_block().clone()]; + let actual_new_blocks: Vec<_> = new.blocks().values().cloned().collect(); + assert_eq!(actual_new_blocks, expected_new_blocks); + + // Compare new chain execution outcome + assert_eq!(*new.execution_outcome(), reorg_execution_outcome); + + // Compare new chain trie data + for (block_num, expected_updates) in &new_trie_updates { + let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); + 
assert_eq!(actual.trie_updates, *expected_updates); + } + for (block_num, expected_state) in &new_hashed_state { + let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); + assert_eq!(actual.hashed_state, *expected_state); + } } } diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index f6abed91467..d32b131e0ee 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -11,8 +11,8 @@ mod in_memory; pub use in_memory::*; -mod deferred_trie; -pub use deferred_trie::*; +// Re-export deferred_trie types from reth_chain +pub use reth_chain::{AnchoredTrieInput, ComputedTrieData, DeferredTrieData}; mod lazy_overlay; pub use lazy_overlay::*; diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 18676ce2005..88152edc6ef 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -2,7 +2,7 @@ use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; use derive_more::{Deref, DerefMut}; -use reth_execution_types::{BlockReceipts, Chain}; +use reth_chain::{BlockReceipts, Chain}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; use reth_storage_api::NodePrimitivesProvider; use std::{ @@ -80,7 +80,7 @@ impl Stream for CanonStateNotificationStream { /// /// The notification contains at least one [`Chain`] with the imported segment. If some blocks were /// reverted (e.g. during a reorg), the old chain is also returned. 
-#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "serde", serde(bound = ""))] pub enum CanonStateNotification { @@ -280,14 +280,13 @@ mod tests { vec![block1.clone(), block2.clone()], ExecutionOutcome::default(), BTreeMap::new(), - BTreeMap::new(), )); // Create a commit notification let notification = CanonStateNotification::Commit { new: chain.clone() }; - // Test that `committed` returns the correct chain - assert_eq!(notification.committed(), chain); + // Test that `committed` returns the correct chain (compare Arc pointers) + assert!(Arc::ptr_eq(¬ification.committed(), &chain)); // Test that `reverted` returns None for `Commit` assert!(notification.reverted().is_none()); @@ -319,24 +318,22 @@ mod tests { vec![block1.clone()], ExecutionOutcome::default(), BTreeMap::new(), - BTreeMap::new(), )); let new_chain = Arc::new(Chain::new( vec![block2.clone(), block3.clone()], ExecutionOutcome::default(), BTreeMap::new(), - BTreeMap::new(), )); // Create a reorg notification let notification = CanonStateNotification::Reorg { old: old_chain.clone(), new: new_chain.clone() }; - // Test that `reverted` returns the old chain - assert_eq!(notification.reverted(), Some(old_chain)); + // Test that `reverted` returns the old chain (compare Arc pointers) + assert!(Arc::ptr_eq(¬ification.reverted().unwrap(), &old_chain)); - // Test that `committed` returns the new chain - assert_eq!(notification.committed(), new_chain); + // Test that `committed` returns the new chain (compare Arc pointers) + assert!(Arc::ptr_eq(¬ification.committed(), &new_chain)); // Test that `tip` returns the tip of the new chain (last block in the new chain) assert_eq!(*notification.tip(), block3); @@ -391,7 +388,6 @@ mod tests { vec![block1.clone(), block2.clone()], execution_outcome, BTreeMap::new(), - BTreeMap::new(), )); // Create a commit notification containing the new chain segment. 
@@ -449,12 +445,8 @@ mod tests { ExecutionOutcome { receipts: old_receipts, ..Default::default() }; // Create an old chain segment to be reverted, containing `old_block1`. - let old_chain: Arc = Arc::new(Chain::new( - vec![old_block1.clone()], - old_execution_outcome, - BTreeMap::new(), - BTreeMap::new(), - )); + let old_chain: Arc = + Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, BTreeMap::new())); // Define block2 for the new chain segment, which will be committed. let mut body = BlockBody::::default(); @@ -482,12 +474,8 @@ mod tests { ExecutionOutcome { receipts: new_receipts, ..Default::default() }; // Create a new chain segment to be committed, containing `new_block1`. - let new_chain = Arc::new(Chain::new( - vec![new_block1.clone()], - new_execution_outcome, - BTreeMap::new(), - BTreeMap::new(), - )); + let new_chain = + Arc::new(Chain::new(vec![new_block1.clone()], new_execution_outcome, BTreeMap::new())); // Create a reorg notification with both reverted (old) and committed (new) chain segments. 
let notification = CanonStateNotification::Reorg { old: old_chain, new: new_chain }; diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 73bad27d79f..81baf10be65 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -9,11 +9,12 @@ use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use core::marker::PhantomData; use rand::Rng; +use reth_chain::Chain; use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_ethereum_primitives::{ Block, BlockBody, EthPrimitives, Receipt, Transaction, TransactionSigned, }; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; use reth_primitives_traits::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, Account, NodePrimitives, Recovered, RecoveredBlock, SealedBlock, SealedHeader, diff --git a/crates/evm/chain/Cargo.toml b/crates/evm/chain/Cargo.toml new file mode 100644 index 00000000000..67e2a73d0e9 --- /dev/null +++ b/crates/evm/chain/Cargo.toml @@ -0,0 +1,69 @@ +[package] +name = "reth-chain" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Chain and deferred trie data types for reth." 
+ +[lints] +workspace = true + +[dependencies] +reth-ethereum-primitives.workspace = true +reth-execution-types = { workspace = true, features = ["std"] } +reth-metrics.workspace = true +reth-primitives-traits.workspace = true +reth-trie.workspace = true +reth-trie-common.workspace = true + +# alloy +alloy-consensus.workspace = true +alloy-primitives.workspace = true +alloy-eips.workspace = true + +serde = { workspace = true, optional = true } +serde_with = { workspace = true, optional = true } + +metrics.workspace = true +parking_lot.workspace = true +tracing.workspace = true + +[dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-ethereum-primitives = { workspace = true, features = ["arbitrary"] } +alloy-primitives = { workspace = true, features = ["rand", "arbitrary"] } +alloy-consensus = { workspace = true, features = ["arbitrary"] } +arbitrary.workspace = true +bincode.workspace = true +rand.workspace = true +revm.workspace = true + +[features] +default = [] +serde = [ + "dep:serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "reth-primitives-traits/serde", + "alloy-consensus/serde", + "reth-trie/serde", + "reth-trie-common/serde", + "reth-ethereum-primitives/serde", + "reth-execution-types/serde", + "rand/serde", + "revm/serde", + "parking_lot/serde", +] +serde-bincode-compat = [ + "serde", + "reth-trie-common/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "serde_with", + "alloy-eips/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat", + "reth-execution-types/serde-bincode-compat", +] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/chain/src/chain.rs similarity index 74% rename from crates/evm/execution-types/src/chain.rs rename to crates/evm/chain/src/chain.rs index 1592cf78e05..7cd3c4a88cd 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/chain/src/chain.rs @@ 
-1,16 +1,16 @@ //! Contains [Chain], a chain of blocks and their final state. -use crate::ExecutionOutcome; -use alloc::{borrow::Cow, collections::BTreeMap, sync::Arc, vec::Vec}; +use crate::DeferredTrieData; use alloy_consensus::{transaction::Recovered, BlockHeader}; use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; -use core::{fmt, ops::RangeInclusive}; +use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::{ transaction::signed::SignedTransaction, Block, BlockBody, IndexedTx, NodePrimitives, RecoveredBlock, SealedHeader, }; use reth_trie_common::{updates::TrieUpdatesSorted, HashedPostStateSorted}; +use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive, sync::Arc, vec::Vec}; /// A chain of blocks and their final state. /// @@ -22,8 +22,7 @@ use reth_trie_common::{updates::TrieUpdatesSorted, HashedPostStateSorted}; /// # Warning /// /// A chain of blocks should not be empty. -#[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Clone, Debug)] pub struct Chain { /// All blocks in this chain. blocks: BTreeMap>, @@ -34,10 +33,12 @@ pub struct Chain { /// /// Additionally, it includes the individual state changes that led to the current state. execution_outcome: ExecutionOutcome, - /// State trie updates for each block in the chain, keyed by block number. - trie_updates: BTreeMap>, - /// Hashed post state for each block in the chain, keyed by block number. - hashed_state: BTreeMap>, + /// Deferred trie data for each block in the chain, keyed by block number. + /// + /// Contains handles to lazily-computed sorted trie updates and hashed state. + /// This allows Chain to be constructed without blocking on expensive trie + /// computations - the data is only materialized when actually needed. 
+ trie_data: BTreeMap, } type ChainTxReceiptMeta<'a, N> = ( @@ -52,8 +53,7 @@ impl Default for Chain { Self { blocks: Default::default(), execution_outcome: Default::default(), - trie_updates: Default::default(), - hashed_state: Default::default(), + trie_data: Default::default(), } } } @@ -67,27 +67,24 @@ impl Chain { pub fn new( blocks: impl IntoIterator>, execution_outcome: ExecutionOutcome, - trie_updates: BTreeMap>, - hashed_state: BTreeMap>, + trie_data: BTreeMap, ) -> Self { let blocks = blocks.into_iter().map(|b| (b.header().number(), b)).collect::>(); debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); - Self { blocks, execution_outcome, trie_updates, hashed_state } + Self { blocks, execution_outcome, trie_data } } /// Create new Chain from a single block and its state. pub fn from_block( block: RecoveredBlock, execution_outcome: ExecutionOutcome, - trie_updates: Arc, - hashed_state: Arc, + trie_data: DeferredTrieData, ) -> Self { let block_number = block.header().number(); - let trie_updates_map = BTreeMap::from([(block_number, trie_updates)]); - let hashed_state_map = BTreeMap::from([(block_number, hashed_state)]); - Self::new([block], execution_outcome, trie_updates_map, hashed_state_map) + let trie_data_map = BTreeMap::from([(block_number, trie_data)]); + Self::new([block], execution_outcome, trie_data_map) } /// Get the blocks in this chain. @@ -105,37 +102,62 @@ impl Chain { self.blocks.values().map(|block| block.clone_sealed_header()) } + /// Get all deferred trie data for this chain. + /// + /// Returns handles to lazily-computed sorted trie updates and hashed state. + /// [`DeferredTrieData`] allows `Chain` to be constructed without blocking on + /// expensive trie computations - the data is only materialized when actually needed + /// via [`DeferredTrieData::wait_cloned`] or similar methods. + /// + /// This method does **not** block. 
To access the computed trie data, call + /// [`DeferredTrieData::wait_cloned`] on individual entries, which will block + /// if the background computation has not yet completed. + pub const fn trie_data(&self) -> &BTreeMap { + &self.trie_data + } + + /// Get deferred trie data for a specific block number. + /// + /// Returns a handle to the lazily-computed trie data. This method does **not** block. + /// Call [`DeferredTrieData::wait_cloned`] on the result to wait for and retrieve + /// the computed data, which will block if computation is still in progress. + pub fn trie_data_at(&self, block_number: BlockNumber) -> Option<&DeferredTrieData> { + self.trie_data.get(&block_number) + } + /// Get all trie updates for this chain. - pub const fn trie_updates(&self) -> &BTreeMap> { - &self.trie_updates + /// + /// Note: This blocks on deferred trie data for all blocks in the chain. + /// Prefer using [`trie_data`](Self::trie_data) when possible to avoid blocking. + pub fn trie_updates(&self) -> BTreeMap> { + self.trie_data.iter().map(|(num, data)| (*num, data.wait_cloned().trie_updates)).collect() } /// Get trie updates for a specific block number. - pub fn trie_updates_at(&self, block_number: BlockNumber) -> Option<&Arc> { - self.trie_updates.get(&block_number) + /// + /// Note: This waits for deferred trie data if not already computed. + pub fn trie_updates_at(&self, block_number: BlockNumber) -> Option> { + self.trie_data.get(&block_number).map(|data| data.wait_cloned().trie_updates) } - /// Remove all trie updates for this chain. - pub fn clear_trie_updates(&mut self) { - self.trie_updates.clear(); + /// Remove all trie data for this chain. + pub fn clear_trie_data(&mut self) { + self.trie_data.clear(); } /// Get all hashed states for this chain. - pub const fn hashed_state(&self) -> &BTreeMap> { - &self.hashed_state + /// + /// Note: This blocks on deferred trie data for all blocks in the chain. 
+ /// Prefer using [`trie_data`](Self::trie_data) when possible to avoid blocking. + pub fn hashed_state(&self) -> BTreeMap> { + self.trie_data.iter().map(|(num, data)| (*num, data.wait_cloned().hashed_state)).collect() } /// Get hashed state for a specific block number. - pub fn hashed_state_at( - &self, - block_number: BlockNumber, - ) -> Option<&Arc> { - self.hashed_state.get(&block_number) - } - - /// Remove all hashed states for this chain. - pub fn clear_hashed_state(&mut self) { - self.hashed_state.clear(); + /// + /// Note: This waits for deferred trie data if not already computed. + pub fn hashed_state_at(&self, block_number: BlockNumber) -> Option> { + self.trie_data.get(&block_number).map(|data| data.wait_cloned().hashed_state) } /// Get execution outcome of this chain @@ -183,23 +205,16 @@ impl Chain { /// Destructure the chain into its inner components: /// 1. The blocks contained in the chain. /// 2. The execution outcome representing the final state. - /// 3. The trie updates map. - /// 4. The hashed state map. + /// 3. The deferred trie data map. 
#[allow(clippy::type_complexity)] pub fn into_inner( self, ) -> ( ChainBlocks<'static, N::Block>, ExecutionOutcome, - BTreeMap>, - BTreeMap>, + BTreeMap, ) { - ( - ChainBlocks { blocks: Cow::Owned(self.blocks) }, - self.execution_outcome, - self.trie_updates, - self.hashed_state, - ) + (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_data) } /// Destructure the chain into its inner components: @@ -329,14 +344,12 @@ impl Chain { &mut self, block: RecoveredBlock, execution_outcome: ExecutionOutcome, - trie_updates: Arc, - hashed_state: Arc, + trie_data: DeferredTrieData, ) { let block_number = block.header().number(); self.blocks.insert(block_number, block); self.execution_outcome.extend(execution_outcome); - self.trie_updates.insert(block_number, trie_updates); - self.hashed_state.insert(block_number, hashed_state); + self.trie_data.insert(block_number, trie_data); } /// Merge two chains by appending the given chain into the current one. @@ -355,8 +368,7 @@ impl Chain { // Insert blocks from other chain self.blocks.extend(other.blocks); self.execution_outcome.extend(other.execution_outcome); - self.trie_updates.extend(other.trie_updates); - self.hashed_state.extend(other.hashed_state); + self.trie_data.extend(other.trie_data); Ok(()) } @@ -459,7 +471,7 @@ impl>> ChainBlocks<'_, impl IntoIterator for ChainBlocks<'_, B> { type Item = (BlockNumber, RecoveredBlock); - type IntoIter = alloc::collections::btree_map::IntoIter>; + type IntoIter = std::collections::btree_map::IntoIter>; fn into_iter(self) -> Self::IntoIter { self.blocks.into_owned().into_iter() @@ -477,25 +489,95 @@ pub struct BlockReceipts { pub timestamp: u64, } +#[cfg(feature = "serde")] +mod chain_serde { + use super::*; + use crate::ComputedTrieData; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + /// Serializable representation of Chain that waits for deferred trie data. 
+ #[derive(Serialize, Deserialize)] + #[serde(bound = "")] + struct ChainRepr { + blocks: BTreeMap>, + execution_outcome: ExecutionOutcome, + #[serde(default)] + trie_updates: BTreeMap>, + #[serde(default)] + hashed_state: BTreeMap>, + } + + impl Serialize for Chain { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Wait for deferred trie data for serialization + let trie_updates: BTreeMap<_, _> = self + .trie_data + .iter() + .map(|(num, data)| (*num, data.wait_cloned().trie_updates)) + .collect(); + let hashed_state: BTreeMap<_, _> = self + .trie_data + .iter() + .map(|(num, data)| (*num, data.wait_cloned().hashed_state)) + .collect(); + + let repr = ChainRepr:: { + blocks: self.blocks.clone(), + execution_outcome: self.execution_outcome.clone(), + trie_updates, + hashed_state, + }; + repr.serialize(serializer) + } + } + + impl<'de, N: NodePrimitives> Deserialize<'de> for Chain { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let repr = ChainRepr::::deserialize(deserializer)?; + + // Convert to ready DeferredTrieData handles + let trie_data = repr + .trie_updates + .into_iter() + .map(|(num, trie_updates)| { + let hashed_state = repr.hashed_state.get(&num).cloned().unwrap_or_default(); + let computed = ComputedTrieData::without_trie_input(hashed_state, trie_updates); + (num, DeferredTrieData::ready(computed)) + }) + .collect(); + + Ok(Self { blocks: repr.blocks, execution_outcome: repr.execution_outcome, trie_data }) + } + } +} + /// Bincode-compatible [`Chain`] serde implementation. 
#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use crate::{serde_bincode_compat, ExecutionOutcome}; - use alloc::{borrow::Cow, collections::BTreeMap, sync::Arc}; use alloy_primitives::BlockNumber; use reth_ethereum_primitives::EthPrimitives; + use reth_execution_types::{ + serde_bincode_compat as exec_serde_bincode_compat, ExecutionOutcome, + }; use reth_primitives_traits::{ serde_bincode_compat::{RecoveredBlock, SerdeBincodeCompat}, Block, NodePrimitives, }; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use std::{borrow::Cow, collections::BTreeMap, sync::Arc}; /// Bincode-compatible [`super::Chain`] serde implementation. /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust - /// use reth_execution_types::{serde_bincode_compat, Chain}; + /// use reth_chain::{serde_bincode_compat, Chain}; /// use serde::{Deserialize, Serialize}; /// use serde_with::serde_as; /// @@ -515,7 +597,7 @@ pub(super) mod serde_bincode_compat { >, { blocks: RecoveredBlocks<'a, N::Block>, - execution_outcome: serde_bincode_compat::ExecutionOutcome<'a, N::Receipt>, + execution_outcome: exec_serde_bincode_compat::ExecutionOutcome<'a, N::Receipt>, #[serde(default, rename = "trie_updates_legacy")] _trie_updates_legacy: Option>, @@ -571,31 +653,6 @@ pub(super) mod serde_bincode_compat { } } - impl<'a, N> From<&'a super::Chain> for Chain<'a, N> - where - N: NodePrimitives< - Block: Block + 'static, - >, - { - fn from(value: &'a super::Chain) -> Self { - Self { - blocks: RecoveredBlocks(Cow::Borrowed(&value.blocks)), - execution_outcome: value.execution_outcome.as_repr(), - _trie_updates_legacy: None, - trie_updates: value - .trie_updates - .iter() - .map(|(k, v)| (*k, v.as_ref().into())) - .collect(), - hashed_state: value - .hashed_state - .iter() - .map(|(k, v)| (*k, v.as_ref().into())) - .collect(), - } - } - } - impl<'a, N> From> for 
super::Chain where N: NodePrimitives< @@ -603,19 +660,26 @@ pub(super) mod serde_bincode_compat { >, { fn from(value: Chain<'a, N>) -> Self { + use crate::{ComputedTrieData, DeferredTrieData}; + + let trie_updates: BTreeMap<_, _> = + value.trie_updates.into_iter().map(|(k, v)| (k, Arc::new(v.into()))).collect(); + let hashed_state: BTreeMap<_, _> = + value.hashed_state.into_iter().map(|(k, v)| (k, Arc::new(v.into()))).collect(); + + let trie_data = trie_updates + .into_iter() + .map(|(num, trie_updates)| { + let hashed_state = hashed_state.get(&num).cloned().unwrap_or_default(); + let computed = ComputedTrieData::without_trie_input(hashed_state, trie_updates); + (num, DeferredTrieData::ready(computed)) + }) + .collect(); + Self { blocks: value.blocks.0.into_owned(), execution_outcome: ExecutionOutcome::from_repr(value.execution_outcome), - trie_updates: value - .trie_updates - .into_iter() - .map(|(k, v)| (k, Arc::new(v.into()))) - .collect(), - hashed_state: value - .hashed_state - .into_iter() - .map(|(k, v)| (k, Arc::new(v.into()))) - .collect(), + trie_data, } } } @@ -630,7 +694,31 @@ pub(super) mod serde_bincode_compat { where S: Serializer, { - Chain::from(source).serialize(serializer) + use reth_trie_common::serde_bincode_compat as trie_serde; + + // Wait for deferred trie data and collect into maps we can borrow from + let trie_updates_data: BTreeMap = + source.trie_data.iter().map(|(k, v)| (*k, v.wait_cloned().trie_updates)).collect(); + let hashed_state_data: BTreeMap = + source.trie_data.iter().map(|(k, v)| (*k, v.wait_cloned().hashed_state)).collect(); + + // Now create the serde-compatible struct borrowing from the collected data + let chain: Chain<'_, N> = Chain { + blocks: RecoveredBlocks(Cow::Borrowed(&source.blocks)), + execution_outcome: source.execution_outcome.as_repr(), + _trie_updates_legacy: None, + trie_updates: trie_updates_data + .iter() + .map(|(k, v)| (*k, trie_serde::updates::TrieUpdatesSorted::from(v.as_ref()))) + .collect(), + 
hashed_state: hashed_state_data + .iter() + .map(|(k, v)| { + (*k, trie_serde::hashed_state::HashedPostStateSorted::from(v.as_ref())) + }) + .collect(), + }; + chain.serialize(serializer) } } @@ -659,10 +747,10 @@ pub(super) mod serde_bincode_compat { #[test] fn test_chain_bincode_roundtrip() { - use alloc::collections::BTreeMap; + use std::collections::BTreeMap; #[serde_as] - #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + #[derive(Debug, Serialize, Deserialize)] struct Data { #[serde_as(as = "serde_bincode_compat::Chain")] chain: Chain, @@ -676,13 +764,14 @@ pub(super) mod serde_bincode_compat { .unwrap()], Default::default(), BTreeMap::new(), - BTreeMap::new(), ), }; let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); - assert_eq!(decoded, data); + // Note: Can't compare directly because DeferredTrieData doesn't implement PartialEq + assert_eq!(decoded.chain.blocks, data.chain.blocks); + assert_eq!(decoded.chain.execution_outcome, data.chain.execution_outcome); } } } @@ -776,12 +865,8 @@ mod tests { let mut block_state_extended = execution_outcome1; block_state_extended.extend(execution_outcome2); - let chain: Chain = Chain::new( - vec![block1.clone(), block2.clone()], - block_state_extended, - BTreeMap::new(), - BTreeMap::new(), - ); + let chain: Chain = + Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, BTreeMap::new()); // return tip state assert_eq!( diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/evm/chain/src/deferred_trie.rs similarity index 99% rename from crates/chain-state/src/deferred_trie.rs rename to crates/evm/chain/src/deferred_trie.rs index efe23a2ded3..9c870d02a40 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/evm/chain/src/deferred_trie.rs @@ -8,6 +8,7 @@ use reth_trie::{ use std::{ fmt, sync::{Arc, LazyLock}, + vec::Vec, }; use tracing::instrument; diff --git a/crates/evm/chain/src/lib.rs b/crates/evm/chain/src/lib.rs new 
file mode 100644 index 00000000000..38e7485de10 --- /dev/null +++ b/crates/evm/chain/src/lib.rs @@ -0,0 +1,30 @@ +//! Chain and deferred trie data types for reth. +//! +//! This crate contains the [`Chain`] type representing a chain of blocks and their final state, +//! as well as [`DeferredTrieData`] for handling asynchronously computed trie data. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +mod chain; +pub use chain::*; + +mod deferred_trie; +pub use deferred_trie::*; + +/// Bincode-compatible serde implementations for chain types. +/// +/// `bincode` crate doesn't work with optionally serializable serde fields, but some of the +/// chain types require optional serialization for RPC compatibility. This module makes it so that +/// all fields are serialized. 
+/// +/// Read more: +#[cfg(feature = "serde-bincode-compat")] +pub mod serde_bincode_compat { + pub use super::chain::serde_bincode_compat::*; +} diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 6c53e315b32..982408226f3 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -19,7 +19,6 @@ revm.workspace = true # alloy alloy-evm.workspace = true -alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -45,7 +44,6 @@ serde = [ "alloy-eips/serde", "alloy-primitives/serde", "reth-primitives-traits/serde", - "alloy-consensus/serde", "reth-trie-common/serde", "reth-ethereum-primitives/serde", ] @@ -55,7 +53,6 @@ serde-bincode-compat = [ "reth-primitives-traits/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat", "reth-ethereum-primitives/serde-bincode-compat", ] std = [ @@ -64,7 +61,6 @@ std = [ "revm/std", "serde?/std", "reth-primitives-traits/std", - "alloy-consensus/std", "serde_with?/std", "derive_more/std", "reth-ethereum-primitives/std", diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 6df354219ea..9c2842899e6 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -564,8 +564,8 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxType; use alloy_primitives::{bytes, Address, LogData, B256}; + use reth_ethereum_primitives::TxType; #[test] fn test_initialization() { diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index 8b795981fb5..f3dcc166eb3 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -11,9 +11,6 @@ extern crate alloc; -mod chain; -pub use chain::*; - mod execute; pub use execute::*; 
@@ -29,5 +26,5 @@ pub use execution_outcome::*; /// Read more: #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { - pub use super::{chain::serde_bincode_compat::*, execution_outcome::serde_bincode_compat::*}; + pub use super::execution_outcome::serde_bincode_compat::*; } diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 189cd509655..8a550db8a73 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -48,6 +48,7 @@ itertools = { workspace = true, features = ["use_std"] } metrics.workspace = true parking_lot.workspace = true rmp-serde.workspace = true +serde_with.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 57b180eb30b..2d8d699d737 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -149,7 +149,7 @@ where executor.into_state().take_bundle(), results, ); - let chain = Chain::new(blocks, outcome, BTreeMap::new(), BTreeMap::new()); + let chain = Chain::new(blocks, outcome, BTreeMap::new()); Ok(chain) } } diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index b28aef51246..eadb7b81979 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -796,21 +796,20 @@ mod tests { block1.set_block_number(10); let notification1 = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![block1.clone()], - Default::default(), - Default::default(), - Default::default(), - )), + new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), }; // Push the first notification - exex_manager.push_notification(notification1.clone()); + exex_manager.push_notification(notification1); // Verify the buffer contains the notification with the correct ID assert_eq!(exex_manager.buffer.len(), 1); assert_eq!(exex_manager.buffer.front().unwrap().0, 0); - 
assert_eq!(exex_manager.buffer.front().unwrap().1, notification1); + // Compare by tip block since ExExNotification doesn't implement PartialEq + assert_eq!( + *exex_manager.buffer.front().unwrap().1.committed_chain().unwrap().tip(), + block1 + ); assert_eq!(exex_manager.next_id, 1); // Push another notification @@ -819,22 +818,20 @@ mod tests { block2.set_block_number(20); let notification2 = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![block2.clone()], - Default::default(), - Default::default(), - Default::default(), - )), + new: Arc::new(Chain::new(vec![block2.clone()], Default::default(), Default::default())), }; - exex_manager.push_notification(notification2.clone()); + exex_manager.push_notification(notification2); // Verify the buffer contains both notifications with correct IDs assert_eq!(exex_manager.buffer.len(), 2); assert_eq!(exex_manager.buffer.front().unwrap().0, 0); - assert_eq!(exex_manager.buffer.front().unwrap().1, notification1); + assert_eq!( + *exex_manager.buffer.front().unwrap().1.committed_chain().unwrap().tip(), + block1 + ); assert_eq!(exex_manager.buffer.get(1).unwrap().0, 1); - assert_eq!(exex_manager.buffer.get(1).unwrap().1, notification2); + assert_eq!(*exex_manager.buffer.get(1).unwrap().1.committed_chain().unwrap().tip(), block2); assert_eq!(exex_manager.next_id, 2); } @@ -867,12 +864,7 @@ mod tests { block1.set_block_number(10); let notification1 = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![block1.clone()], - Default::default(), - Default::default(), - Default::default(), - )), + new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), }; exex_manager.push_notification(notification1.clone()); @@ -1100,7 +1092,6 @@ mod tests { vec![Default::default()], Default::default(), Default::default(), - Default::default(), )), }; @@ -1166,10 +1157,10 @@ mod tests { block2.set_block_number(11); // Setup a notification + let expected_block: RecoveredBlock = 
Default::default(); let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![Default::default()], - Default::default(), + vec![expected_block.clone()], Default::default(), Default::default(), )), @@ -1181,7 +1172,8 @@ mod tests { match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { let received_notification = notifications.next().await.unwrap().unwrap(); - assert_eq!(received_notification, notification); + // Compare by tip block since ExExNotification doesn't implement PartialEq + assert_eq!(*received_notification.committed_chain().unwrap().tip(), expected_block); } Poll::Pending => panic!("Notification send is pending"), Poll::Ready(Err(e)) => panic!("Failed to send notification: {e:?}"), @@ -1216,12 +1208,7 @@ mod tests { block1.set_block_number(10); let notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![block1.clone()], - Default::default(), - Default::default(), - Default::default(), - )), + new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), }; let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -1278,7 +1265,9 @@ mod tests { match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { let received_notification = notifications.next().await.unwrap().unwrap(); - assert_eq!(received_notification, notification); + // Compare by checking that both are reorgs with empty chains + assert!(received_notification.committed_chain().is_some()); + assert!(received_notification.reverted_chain().is_some()); } Poll::Pending | Poll::Ready(Err(_)) => { panic!("Notification should not be pending or fail") @@ -1318,7 +1307,9 @@ mod tests { match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { let received_notification = notifications.next().await.unwrap().unwrap(); - assert_eq!(received_notification, notification); + // Compare by checking that it's a revert with empty 
chain + assert!(received_notification.reverted_chain().is_some()); + assert!(received_notification.committed_chain().is_none()); } Poll::Pending | Poll::Ready(Err(_)) => { panic!("Notification should not be pending or fail") @@ -1371,16 +1362,10 @@ mod tests { vec![genesis_block.clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; let notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![block.clone()], - Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), + new: Arc::new(Chain::new(vec![block.clone()], Default::default(), BTreeMap::new())), }; let (finalized_headers_tx, rx) = watch::channel(None); @@ -1397,34 +1382,38 @@ mod tests { let mut cx = Context::from_waker(futures::task::noop_waker_ref()); - exex_manager - .handle() - .send(ExExNotificationSource::Pipeline, genesis_notification.clone())?; - exex_manager.handle().send(ExExNotificationSource::BlockchainTree, notification.clone())?; + exex_manager.handle().send(ExExNotificationSource::Pipeline, genesis_notification)?; + exex_manager.handle().send(ExExNotificationSource::BlockchainTree, notification)?; assert!(exex_manager.as_mut().poll(&mut cx)?.is_pending()); - assert_eq!( - notifications.try_poll_next_unpin(&mut cx)?, - Poll::Ready(Some(genesis_notification)) - ); + // Check genesis notification received + let poll_result = notifications.try_poll_next_unpin(&mut cx)?; + if let Poll::Ready(Some(n)) = poll_result { + assert_eq!(*n.committed_chain().unwrap().tip(), genesis_block); + } else { + panic!("Expected genesis notification"); + } assert!(exex_manager.as_mut().poll(&mut cx)?.is_pending()); - assert_eq!( - notifications.try_poll_next_unpin(&mut cx)?, - Poll::Ready(Some(notification.clone())) - ); + // Check block notification received + let poll_result = notifications.try_poll_next_unpin(&mut cx)?; + if let Poll::Ready(Some(n)) = poll_result { + assert_eq!(*n.committed_chain().unwrap().tip(), block); + } else { + panic!("Expected block 
notification"); + } // WAL shouldn't contain the genesis notification, because it's finalized - assert_eq!( - exex_manager.wal.iter_notifications()?.collect::>>()?, - std::slice::from_ref(¬ification) - ); + let wal_notifications = + exex_manager.wal.iter_notifications()?.collect::>>()?; + assert_eq!(wal_notifications.len(), 1); + assert_eq!(*wal_notifications[0].committed_chain().unwrap().tip(), block); finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx didn't emit the `FinishedHeight` event - assert_eq!( - exex_manager.wal.iter_notifications()?.collect::>>()?, - std::slice::from_ref(¬ification) - ); + let wal_notifications = + exex_manager.wal.iter_notifications()?.collect::>>()?; + assert_eq!(wal_notifications.len(), 1); + assert_eq!(*wal_notifications[0].committed_chain().unwrap().tip(), block); // Send a `FinishedHeight` event with a non-canonical block events_tx @@ -1435,10 +1424,10 @@ mod tests { assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx emitted a `FinishedHeight` event with a // non-canonical block - assert_eq!( - exex_manager.wal.iter_notifications()?.collect::>>()?, - std::slice::from_ref(¬ification) - ); + let wal_notifications = + exex_manager.wal.iter_notifications()?.collect::>>()?; + assert_eq!(wal_notifications.len(), 1); + assert_eq!(*wal_notifications[0].committed_chain().unwrap().tip(), block); // Send a `FinishedHeight` event with a canonical block events_tx.send(ExExEvent::FinishedHeight(block.num_hash())).unwrap(); @@ -1446,7 +1435,7 @@ mod tests { finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL is finalized - assert_eq!(exex_manager.wal.iter_notifications()?.next().transpose()?, None); + assert!(exex_manager.wal.iter_notifications()?.next().is_none()); Ok(()) } @@ -1492,12 +1481,7 @@ mod tests { let 
mut make_notif = |id: u64| { let block = random_block(&mut rng, id, BlockParams::default()).try_recover().unwrap(); ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![block], - Default::default(), - Default::default(), - Default::default(), - )), + new: Arc::new(Chain::new(vec![block], Default::default(), Default::default())), } }; diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index e32f065aba0..2b5d6d93d18 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -449,7 +449,7 @@ mod tests { use crate::Wal; use alloy_consensus::Header; use alloy_eips::BlockNumHash; - use eyre::OptionExt; + use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_ethereum_primitives::Block; @@ -491,17 +491,17 @@ mod tests { let exex_head = ExExHead { block: BlockNumHash { number: genesis_block.number, hash: genesis_hash } }; + let expected_block = random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .try_recover()?; let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![random_block( - &mut rng, - node_head.number + 1, - BlockParams { parent: Some(node_head.hash), ..Default::default() }, - ) - .try_recover()?], + vec![expected_block.clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; @@ -519,23 +519,16 @@ mod tests { .with_head(exex_head); // First notification is the backfill of missing blocks from the canonical chain - assert_eq!( - notifications.next().await.transpose()?, - Some(ExExNotification::ChainCommitted { - new: Arc::new( - BackfillJobFactory::new( - notifications.evm_config.clone(), - notifications.provider.clone() - ) - .backfill(1..=1) - .next() - .ok_or_eyre("failed to backfill")?? 
- ) - }) - ); + let backfill_notification = notifications.next().await.transpose()?; + assert!(backfill_notification.is_some()); + // Verify it's a commit notification with the expected block range + let backfill_chain = backfill_notification.unwrap().committed_chain().unwrap(); + assert_eq!(backfill_chain.first().header().number(), 1); // Second notification is the actual notification that we sent before - assert_eq!(notifications.next().await.transpose()?, Some(notification)); + let received = notifications.next().await.transpose()?; + assert!(received.is_some()); + assert_eq!(*received.unwrap().committed_chain().unwrap().tip(), expected_block); Ok(()) } @@ -556,21 +549,21 @@ mod tests { let node_head = BlockNumHash { number: genesis_block.number, hash: genesis_hash }; let exex_head = ExExHead { block: node_head }; + let expected_block = Block { + header: Header { + parent_hash: node_head.hash, + number: node_head.number + 1, + ..Default::default() + }, + ..Default::default() + } + .seal_slow() + .try_recover()?; let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![Block { - header: Header { - parent_hash: node_head.hash, - number: node_head.number + 1, - ..Default::default() - }, - ..Default::default() - } - .seal_slow() - .try_recover()?], + vec![expected_block.clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; @@ -588,7 +581,8 @@ mod tests { .with_head(exex_head); let new_notification = notifications.next().await.transpose()?; - assert_eq!(new_notification, Some(notification)); + assert!(new_notification.is_some()); + assert_eq!(*new_notification.unwrap().committed_chain().unwrap().tip(), expected_block); Ok(()) } @@ -618,7 +612,7 @@ mod tests { let provider_rw = provider.database_provider_rw()?; provider_rw.insert_block(&node_head_block)?; provider_rw.commit()?; - let node_head_notification = ExExNotification::ChainCommitted { + let _node_head_notification = ExExNotification::ChainCommitted { new: 
Arc::new( BackfillJobFactory::new(EthEvmConfig::mainnet(), provider.clone()) .backfill(node_head.number..=node_head.number) @@ -633,28 +627,24 @@ mod tests { BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ); let exex_head = ExExHead { block: exex_head_block.num_hash() }; + let exex_head_recovered = exex_head_block.clone().try_recover()?; let exex_head_notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![exex_head_block.clone().try_recover()?], + vec![exex_head_recovered.clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; wal.commit(&exex_head_notification)?; + let new_block = random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .try_recover()?; let new_notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![random_block( - &mut rng, - node_head.number + 1, - BlockParams { parent: Some(node_head.hash), ..Default::default() }, - ) - .try_recover()?], - Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), + new: Arc::new(Chain::new(vec![new_block.clone()], Default::default(), BTreeMap::new())), }; let (notifications_tx, notifications_rx) = mpsc::channel(1); @@ -672,15 +662,25 @@ mod tests { // First notification is the revert of the ExEx head block to get back to the canonical // chain + let revert_notification = notifications.next().await.transpose()?; + assert!(revert_notification.is_some()); + // Verify it's a revert with the exex_head block assert_eq!( - notifications.next().await.transpose()?, - Some(exex_head_notification.into_inverted()) + *revert_notification.unwrap().reverted_chain().unwrap().tip(), + exex_head_recovered ); // Second notification is the backfilled block from the canonical chain to get back to the // canonical tip - assert_eq!(notifications.next().await.transpose()?, Some(node_head_notification)); + let backfill_notification = 
notifications.next().await.transpose()?; + assert!(backfill_notification.is_some()); + assert_eq!( + backfill_notification.unwrap().committed_chain().unwrap().tip().header().number(), + node_head.number + ); // Third notification is the actual notification that we sent before - assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); + let received = notifications.next().await.transpose()?; + assert!(received.is_some()); + assert_eq!(*received.unwrap().committed_chain().unwrap().tip(), new_block); Ok(()) } @@ -706,12 +706,12 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ); + let exex_head_recovered = exex_head_block.clone().try_recover()?; let exex_head_notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![exex_head_block.clone().try_recover()?], + vec![exex_head_recovered.clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; wal.commit(&exex_head_notification)?; @@ -721,18 +721,14 @@ mod tests { block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() }, }; + let new_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), ..Default::default() }, + ) + .try_recover()?; let new_notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![random_block( - &mut rng, - genesis_block.number + 1, - BlockParams { parent: Some(genesis_hash), ..Default::default() }, - ) - .try_recover()?], - Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), + new: Arc::new(Chain::new(vec![new_block.clone()], Default::default(), BTreeMap::new())), }; let (notifications_tx, notifications_rx) = mpsc::channel(1); @@ -750,13 +746,17 @@ mod tests { // First notification is the revert of the ExEx head block to get back to the canonical // chain + let revert_notification = notifications.next().await.transpose()?; + 
assert!(revert_notification.is_some()); assert_eq!( - notifications.next().await.transpose()?, - Some(exex_head_notification.into_inverted()) + *revert_notification.unwrap().reverted_chain().unwrap().tip(), + exex_head_recovered ); // Second notification is the actual notification that we sent before - assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); + let received = notifications.next().await.transpose()?; + assert!(received.is_some()); + assert_eq!(*received.unwrap().committed_chain().unwrap().tip(), new_block); Ok(()) } diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 7ab49b6e0de..a59c7202b14 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -255,6 +255,36 @@ mod tests { }) } + fn notifications_equal(a: &[ExExNotification], b: &[ExExNotification]) -> bool { + if a.len() != b.len() { + return false; + } + a.iter().zip(b.iter()).all(|(n1, n2)| { + let committed_eq = match (n1.committed_chain(), n2.committed_chain()) { + (Some(c1), Some(c2)) => { + c1.tip().hash() == c2.tip().hash() && c1.blocks() == c2.blocks() + } + (None, None) => true, + _ => false, + }; + let reverted_eq = match (n1.reverted_chain(), n2.reverted_chain()) { + (Some(c1), Some(c2)) => { + c1.tip().hash() == c2.tip().hash() && c1.blocks() == c2.blocks() + } + (None, None) => true, + _ => false, + }; + committed_eq && reverted_eq + }) + } + + fn assert_notifications_eq(actual: Vec, expected: Vec) { + assert!( + notifications_equal(&actual, &expected), + "notifications mismatch:\nactual: {actual:?}\nexpected: {expected:?}" + ); + } + fn sort_committed_blocks( committed_blocks: Vec<(B256, u32, CachedBlock)>, ) -> Vec<(B256, u32, CachedBlock)> { @@ -304,37 +334,24 @@ mod tests { vec![blocks[0].clone(), blocks[1].clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; let reverted_notification = ExExNotification::ChainReverted { - old: Arc::new(Chain::new( - vec![blocks[1].clone()], - 
Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), + old: Arc::new(Chain::new(vec![blocks[1].clone()], Default::default(), BTreeMap::new())), }; let committed_notification_2 = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( vec![block_1_reorged.clone(), blocks[2].clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; let reorged_notification = ExExNotification::ChainReorged { - old: Arc::new(Chain::new( - vec![blocks[2].clone()], - Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), + old: Arc::new(Chain::new(vec![blocks[2].clone()], Default::default(), BTreeMap::new())), new: Arc::new(Chain::new( vec![block_2_reorged.clone(), blocks[3].clone()], Default::default(), BTreeMap::new(), - BTreeMap::new(), )), }; @@ -371,7 +388,7 @@ mod tests { wal.inner.block_cache().committed_blocks_sorted(), committed_notification_1_cache_committed_blocks ); - assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]); + assert_notifications_eq(read_notifications(&wal)?, vec![committed_notification_1.clone()]); // Second notification (revert block 1) wal.commit(&reverted_notification)?; @@ -385,9 +402,9 @@ mod tests { wal.inner.block_cache().committed_blocks_sorted(), committed_notification_1_cache_committed_blocks ); - assert_eq!( + assert_notifications_eq( read_notifications(&wal)?, - vec![committed_notification_1.clone(), reverted_notification.clone()] + vec![committed_notification_1.clone(), reverted_notification.clone()], ); // Third notification (commit block 1, 2) @@ -430,13 +447,13 @@ mod tests { .concat() ) ); - assert_eq!( + assert_notifications_eq( read_notifications(&wal)?, vec![ committed_notification_1.clone(), reverted_notification.clone(), - committed_notification_2.clone() - ] + committed_notification_2.clone(), + ], ); // Fourth notification (revert block 2, commit block 2, 3) @@ -481,14 +498,14 @@ mod tests { .concat() ) ); - assert_eq!( + assert_notifications_eq( read_notifications(&wal)?, 
vec![ committed_notification_1, reverted_notification, committed_notification_2.clone(), - reorged_notification.clone() - ] + reorged_notification.clone(), + ], ); // Now, finalize the WAL up to the block 1. Block 1 was in the third notification that also @@ -510,9 +527,9 @@ mod tests { .concat() ) ); - assert_eq!( + assert_notifications_eq( read_notifications(&wal)?, - vec![committed_notification_2.clone(), reorged_notification.clone()] + vec![committed_notification_2.clone(), reorged_notification.clone()], ); // Re-open the WAL and verify that the cache population works correctly @@ -531,7 +548,10 @@ mod tests { .concat() ) ); - assert_eq!(read_notifications(&wal)?, vec![committed_notification_2, reorged_notification]); + assert_notifications_eq( + read_notifications(&wal)?, + vec![committed_notification_2, reorged_notification], + ); Ok(()) } diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index af58eba7e0b..bb118c8a98a 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -163,12 +163,16 @@ where let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Writing notification to WAL"); - // Serialize using the bincode- and msgpack-compatible serde wrapper - let notification = - reth_exex_types::serde_bincode_compat::ExExNotification::::from(notification); - + // Serialize using the bincode- and msgpack-compatible serde wrapper via SerializeAs reth_fs_util::atomic_write_file(&file_path, |file| { - rmp_serde::encode::write(file, ¬ification) + use serde_with::SerializeAs; + let mut buf = Vec::new(); + reth_exex_types::serde_bincode_compat::ExExNotification::<'_, N>::serialize_as( + notification, + &mut rmp_serde::Serializer::new(&mut buf), + ) + .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?; + std::io::Write::write_all(file, &buf) })?; Ok(file_path.metadata().map_err(|err| WalError::FileMetadata(file_id, err))?.len()) @@ -224,8 
+228,10 @@ mod tests { // Get expected data let expected_notification = get_test_notification_data().unwrap(); + // Compare by tip block since ExExNotification doesn't implement PartialEq assert_eq!( - ¬ification, &expected_notification, + *notification.committed_chain().unwrap().tip(), + *expected_notification.committed_chain().unwrap().tip(), "Decoded notification should match expected static data" ); } @@ -241,28 +247,18 @@ mod tests { let new_block = random_block(&mut rng, 0, Default::default()).try_recover()?; let notification = ExExNotification::ChainReorged { - new: Arc::new(Chain::new( - vec![new_block], - Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), - old: Arc::new(Chain::new( - vec![old_block], - Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), + new: Arc::new(Chain::new(vec![new_block.clone()], Default::default(), BTreeMap::new())), + old: Arc::new(Chain::new(vec![old_block.clone()], Default::default(), BTreeMap::new())), }; // Do a round trip serialization and deserialization let file_id = 0; storage.write_notification(file_id, ¬ification)?; let deserialized_notification = storage.read_notification(file_id)?; - assert_eq!( - deserialized_notification.map(|(notification, _)| notification), - Some(notification) - ); + // Compare by chain tips since ExExNotification doesn't implement PartialEq + let deserialized = deserialized_notification.map(|(n, _)| n).unwrap(); + assert_eq!(*deserialized.committed_chain().unwrap().tip(), new_block); + assert_eq!(*deserialized.reverted_chain().unwrap().tip(), old_block); Ok(()) } @@ -280,10 +276,14 @@ mod tests { let notification = get_test_notification_data()?; - // Serialize the notification - let notification_compat = - reth_exex_types::serde_bincode_compat::ExExNotification::from(¬ification); - let encoded = rmp_serde::encode::to_vec(¬ification_compat)?; + // Create a temp storage and write the notification using the existing serialization path + let temp_dir = tempfile::tempdir()?; 
+ let storage = Storage::new(&temp_dir)?; + storage.write_notification(0, ¬ification)?; + + // Read it back as raw bytes + let temp_path = temp_dir.path().join("0.wal"); + let encoded = std::fs::read(&temp_path)?; // Write to test-data directory let test_data_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data"); @@ -346,13 +346,18 @@ mod tests { )]), }; + let trie_data = + reth_chain_state::DeferredTrieData::ready(reth_chain_state::ComputedTrieData { + hashed_state: Arc::new(hashed_state.into_sorted()), + trie_updates: Arc::new(trie_updates.into_sorted()), + anchored_trie_input: None, + }); let notification: ExExNotification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( vec![block], Default::default(), - BTreeMap::from([(block_number, Arc::new(trie_updates.into_sorted()))]), - BTreeMap::from([(block_number, Arc::new(hashed_state.into_sorted()))]), + BTreeMap::from([(block_number, trie_data)]), )), }; Ok(notification) diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 80ce4167e46..39b116e6786 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -12,13 +12,13 @@ workspace = true [dependencies] ## reth +reth-chain.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-consensus = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true reth-evm-ethereum = { workspace = true, features = ["test-utils"] } -reth-execution-types.workspace = true reth-exex.workspace = true reth-payload-builder.workspace = true reth-network.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 8430ea5d91f..d6d112bf88f 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -17,6 +17,7 @@ use std::{ use alloy_eips::BlockNumHash; use futures_util::FutureExt; +use reth_chain::Chain; use 
reth_chainspec::{ChainSpec, MAINNET}; use reth_consensus::test_utils::TestConsensus; use reth_db::{ @@ -28,7 +29,6 @@ use reth_db::{ use reth_db_common::init::init_genesis; use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm_ethereum::MockEvmConfig; -use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; use reth_network::{config::rng_secret_key, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{ diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 11dec0246fe..fadbf9c2abd 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth +reth-chain.workspace = true reth-chain-state.workspace = true -reth-execution-types.workspace = true reth-primitives-traits.workspace = true # reth @@ -36,7 +36,7 @@ rand.workspace = true default = [] serde = [ "dep:serde", - "reth-execution-types/serde", + "reth-chain/serde", "alloy-eips/serde", "alloy-primitives/serde", "rand/serde", @@ -45,7 +45,7 @@ serde = [ "reth-chain-state/serde", ] serde-bincode-compat = [ - "reth-execution-types/serde-bincode-compat", + "reth-chain/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 4813450a010..e076540aec7 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -1,12 +1,13 @@ use std::sync::Arc; +use reth_chain::Chain; use reth_chain_state::CanonStateNotification; -use reth_execution_types::Chain; use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. 
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(bound = ""))] pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { @@ -73,7 +74,7 @@ impl From> for ExExNotification

/// Bincode-compatible [`ExExNotification`] serde implementation. #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { - use reth_execution_types::serde_bincode_compat::Chain; + use reth_chain::serde_bincode_compat::Chain; use reth_primitives_traits::NodePrimitives; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -124,28 +125,6 @@ pub(super) mod serde_bincode_compat { }, } - impl<'a, N> From<&'a super::ExExNotification> for ExExNotification<'a, N> - where - N: NodePrimitives, - { - fn from(value: &'a super::ExExNotification) -> Self { - match value { - super::ExExNotification::ChainCommitted { new } => { - ExExNotification::ChainCommitted { new: Chain::from(new.as_ref()) } - } - super::ExExNotification::ChainReorged { old, new } => { - ExExNotification::ChainReorged { - old: Chain::from(old.as_ref()), - new: Chain::from(new.as_ref()), - } - } - super::ExExNotification::ChainReverted { old } => { - ExExNotification::ChainReverted { old: Chain::from(old.as_ref()) } - } - } - } - } - impl<'a, N> From> for super::ExExNotification where N: NodePrimitives, @@ -176,7 +155,41 @@ pub(super) mod serde_bincode_compat { where S: Serializer, { - ExExNotification::from(source).serialize(serializer) + // Helper that uses Chain's SerializeAs for bincode-compatible serialization + struct ChainWrapper<'a, N: NodePrimitives>(&'a reth_chain::Chain); + + impl Serialize for ChainWrapper<'_, N> { + fn serialize(&self, serializer: S2) -> Result + where + S2: Serializer, + { + Chain::<'_, N>::serialize_as(self.0, serializer) + } + } + + // Create an enum that matches the ExExNotification structure but uses ChainWrapper + #[derive(Serialize)] + #[serde(bound = "")] + #[allow(clippy::enum_variant_names)] + enum Repr<'a, N: NodePrimitives> { + ChainCommitted { new: ChainWrapper<'a, N> }, + ChainReorged { old: ChainWrapper<'a, N>, new: ChainWrapper<'a, N> }, + ChainReverted { old: 
ChainWrapper<'a, N> }, + } + + match source { + super::ExExNotification::ChainCommitted { new } => { + Repr::ChainCommitted { new: ChainWrapper(new.as_ref()) }.serialize(serializer) + } + super::ExExNotification::ChainReorged { old, new } => Repr::ChainReorged { + old: ChainWrapper(old.as_ref()), + new: ChainWrapper(new.as_ref()), + } + .serialize(serializer), + super::ExExNotification::ChainReverted { old } => { + Repr::ChainReverted { old: ChainWrapper(old.as_ref()) }.serialize(serializer) + } + } } } @@ -197,7 +210,7 @@ pub(super) mod serde_bincode_compat { use super::super::{serde_bincode_compat, ExExNotification}; use arbitrary::Arbitrary; use rand::Rng; - use reth_execution_types::Chain; + use reth_chain::Chain; use reth_primitives_traits::RecoveredBlock; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -206,7 +219,7 @@ pub(super) mod serde_bincode_compat { #[test] fn test_exex_notification_bincode_roundtrip() { #[serde_as] - #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + #[derive(Debug, Serialize, Deserialize)] struct Data { #[serde_as( as = "serde_bincode_compat::ExExNotification<'_, reth_ethereum_primitives::EthPrimitives>" @@ -216,28 +229,34 @@ pub(super) mod serde_bincode_compat { let mut bytes = [0u8; 1024]; rand::rng().fill(bytes.as_mut_slice()); + let old_block: reth_primitives_traits::RecoveredBlock = + RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let new_block: reth_primitives_traits::RecoveredBlock = + RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let data = Data { notification: ExExNotification::ChainReorged { - old: Arc::new(Chain::new( - vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) - .unwrap()], - Default::default(), - BTreeMap::new(), - BTreeMap::new(), - )), - new: Arc::new(Chain::new( - vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) - .unwrap()], - Default::default(), - BTreeMap::new(), - 
BTreeMap::new(), - )), + old: Arc::new(Chain::new(vec![old_block], Default::default(), BTreeMap::new())), + new: Arc::new(Chain::new(vec![new_block], Default::default(), BTreeMap::new())), }, }; let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); - assert_eq!(decoded, data); + + // Compare fields individually since Chain doesn't implement PartialEq + match (&decoded.notification, &data.notification) { + ( + ExExNotification::ChainReorged { old: decoded_old, new: decoded_new }, + ExExNotification::ChainReorged { old: expected_old, new: expected_new }, + ) => { + assert_eq!(decoded_old.blocks(), expected_old.blocks()); + assert_eq!(decoded_old.execution_outcome(), expected_old.execution_outcome()); + assert_eq!(decoded_new.blocks(), expected_new.blocks()); + assert_eq!(decoded_new.execution_outcome(), expected_new.execution_outcome()); + } + _ => panic!("Expected ChainReorged variant"), + } } } } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 724f8555e09..4bbd87c6dfb 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -44,6 +44,7 @@ op-revm.workspace = true thiserror.workspace = true [dev-dependencies] +reth-chain.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } alloy-genesis.workspace = true diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 1dbd8c7e385..f8cde24b5ed 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -295,11 +295,10 @@ mod tests { use alloy_genesis::Genesis; use alloy_primitives::{bytes, map::HashMap, Address, LogData, B256}; use op_revm::OpSpecId; + use reth_chain::Chain; use reth_chainspec::ChainSpec; use reth_evm::execute::ProviderError; - use reth_execution_types::{ - AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, - }; + use 
reth_execution_types::{AccountRevertInit, BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_optimism_chainspec::{OpChainSpec, BASE_MAINNET}; use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt}; use reth_primitives_traits::{Account, RecoveredBlock}; @@ -529,12 +528,8 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain: Chain = Chain::new( - [block1, block2], - execution_outcome.clone(), - BTreeMap::new(), - BTreeMap::new(), - ); + let chain: Chain = + Chain::new([block1, block2], execution_outcome.clone(), BTreeMap::new()); // Assert that the proper receipt vector is returned for block1_hash assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index ab0855bf4f6..222440539c4 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -12,11 +12,11 @@ description = "Types supporting implementation of 'eth' namespace RPC server API workspace = true [dependencies] +reth-chain.workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-errors.workspace = true reth-evm.workspace = true -reth-execution-types.workspace = true reth-metrics.workspace = true reth-ethereum-primitives = { workspace = true, features = ["rpc"] } reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 73d8072e6d8..16d35028295 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -5,9 +5,9 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{stream::FuturesOrdered, Stream, StreamExt}; +use reth_chain::Chain; use reth_chain_state::CanonStateNotification; 
use reth_errors::{ProviderError, ProviderResult}; -use reth_execution_types::Chain; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock}; use reth_storage_api::{BlockReader, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 470a84a825b..45e1e5ff5fc 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chain.workspace = true reth-chainspec = { workspace = true, optional = true } reth-codecs.workspace = true reth-config.workspace = true @@ -29,7 +30,6 @@ reth-fs-util.workspace = true reth-network-p2p.workspace = true reth-primitives-traits = { workspace = true, features = ["serde-bincode-compat"] } reth-provider.workspace = true -reth-execution-types.workspace = true reth-ethereum-primitives = { workspace = true, optional = true } reth-prune.workspace = true reth-prune-types.workspace = true diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 29adf3b2d3f..a00b0780f74 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -2,11 +2,11 @@ use crate::stages::MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD; use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use num_traits::Zero; +use reth_chain::Chain; use reth_config::config::ExecutionConfig; use reth_consensus::FullConsensus; use reth_db::{static_file::HeaderMask, tables}; use reth_evm::{execute::Executor, metrics::ExecutorMetrics, ConfigureEvm}; -use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives_traits::{format_gas_throughput, BlockBody, NodePrimitives}; use reth_provider::{ @@ -423,7 +423,6 @@ where blocks, state.clone(), BTreeMap::new(), - BTreeMap::new(), )); if previous_input.is_some() { @@ -525,7 
+524,6 @@ where blocks, bundle_state_with_receipts, BTreeMap::new(), - BTreeMap::new(), )); debug_assert!( diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 0199b6d2fc4..7227d618cf9 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chain.workspace = true reth-chainspec.workspace = true reth-execution-types.workspace = true reth-ethereum-primitives = { workspace = true, features = ["reth-codec"] } @@ -86,6 +87,15 @@ tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } [features] rocksdb = ["dep:rocksdb"] +serde-bincode-compat = [ + "reth-chain/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat", + "reth-execution-types/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "reth-storage-api/serde-bincode-compat", +] test-utils = [ "reth-db/test-utils", "reth-nippy-jar/test-utils", diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index bfab44cb2ac..6c587cc2bed 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -35,11 +35,20 @@ pub mod test_utils; pub mod either_writer; pub use either_writer::*; +#[cfg(feature = "serde-bincode-compat")] +pub use reth_chain::serde_bincode_compat; +pub use reth_chain::{ + AnchoredTrieInput, BlockReceipts, Chain, ChainBlocks, ComputedTrieData, DeferredTrieData, + DisplayBlocksChain, +}; pub use reth_chain_state::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, CanonStateNotifications, CanonStateSubscriptions, }; -pub use reth_execution_types::*; +pub use reth_execution_types::{ + AccountRevertInit, BlockExecutionOutput, BlockExecutionResult, BundleStateInit, ChangedAccount, + ExecutionOutcome, RevertsInit, +}; /// Re-export 
`OriginalValuesKnown` pub use revm_database::states::OriginalValuesKnown; // reexport traits to avoid breaking changes diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 58ec1e25571..dcd95f3de23 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -782,6 +782,7 @@ mod tests { use alloy_primitives::{BlockNumber, TxNumber, B256}; use itertools::Itertools; use rand::Rng; + use reth_chain::Chain; use reth_chain_state::{ test_utils::TestBlockBuilder, CanonStateNotification, CanonStateSubscriptions, CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain, @@ -790,9 +791,7 @@ mod tests { use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_ethereum_primitives::{Block, Receipt}; - use reth_execution_types::{ - BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome, - }; + use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; use reth_primitives_traits::{RecoveredBlock, SealedBlock, SignerRecoverable}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, @@ -1348,33 +1347,33 @@ mod tests { // Send and receive commit notifications. 
let block_2 = test_block_builder.generate_random_block(1, block_hash_1).try_recover()?; - let chain = Chain::new( - vec![block_2], - ExecutionOutcome::default(), - BTreeMap::new(), - BTreeMap::new(), - ); + let chain = Chain::new(vec![block_2.clone()], ExecutionOutcome::default(), BTreeMap::new()); let commit = CanonStateNotification::Commit { new: Arc::new(chain.clone()) }; in_memory_state.notify_canon_state(commit.clone()); let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); - assert_eq!(notification_1, Ok(commit.clone())); - assert_eq!(notification_2, Ok(commit.clone())); + // Verify both subscribers received commit notifications with matching tip + let n1 = notification_1.unwrap(); + let n2 = notification_2.unwrap(); + assert_eq!(*n1.tip(), block_2); + assert_eq!(*n2.tip(), block_2); // Send and receive re-org notifications. let block_3 = test_block_builder.generate_random_block(1, block_hash_1).try_recover()?; let block_4 = test_block_builder.generate_random_block(2, block_3.hash()).try_recover()?; let new_chain = Chain::new( - vec![block_3, block_4], + vec![block_3, block_4.clone()], ExecutionOutcome::default(), BTreeMap::new(), - BTreeMap::new(), ); let re_org = CanonStateNotification::Reorg { old: Arc::new(chain), new: Arc::new(new_chain) }; in_memory_state.notify_canon_state(re_org.clone()); let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); - assert_eq!(notification_1, Ok(re_org.clone())); - assert_eq!(notification_2, Ok(re_org.clone())); + // Verify both subscribers received reorg notifications with matching tip + let n1 = notification_1.unwrap(); + let n2 = notification_2.unwrap(); + assert_eq!(*n1.tip(), block_4); + assert_eq!(*n2.tip(), block_4); Ok(()) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index af644a47a9b..b2d911bc5a4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ 
b/crates/storage/provider/src/providers/database/provider.rs @@ -33,6 +33,7 @@ use alloy_primitives::{ use itertools::Itertools; use parking_lot::RwLock; use rayon::slice::ParallelSliceMut; +use reth_chain::Chain; use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; use reth_db_api::{ @@ -47,7 +48,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, BlockNumberList, PlainAccountState, PlainStorageState, }; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives_traits::{ Account, Block as _, BlockBody as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry, @@ -3076,7 +3077,7 @@ impl BlockExecutionWriter // Update pipeline progress self.update_pipeline_stages(block, true)?; - Ok(Chain::new(blocks, execution_state, BTreeMap::new(), BTreeMap::new())) + Ok(Chain::new(blocks, execution_state, BTreeMap::new())) } fn remove_block_and_execution_above(&self, block: BlockNumber) -> ProviderResult<()> { diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 83cbbbd714e..0786e55aa45 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chain.workspace = true reth-db-models.workspace = true reth-chainspec.workspace = true reth-db-api = { workspace = true, optional = true } @@ -60,6 +61,7 @@ db-api = [ ] serde = [ + "reth-chain/serde", "reth-ethereum-primitives/serde", "reth-db-models/serde", "reth-execution-types/serde", @@ -78,6 +80,7 @@ serde-bincode-compat = [ "reth-execution-types/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", "reth-trie-common/serde-bincode-compat", + "reth-chain/serde-bincode-compat", 
"reth-ethereum-primitives/serde-bincode-compat", "alloy-eips/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", diff --git a/crates/storage/storage-api/src/block_writer.rs b/crates/storage/storage-api/src/block_writer.rs index 233e9898d11..5124ff1676a 100644 --- a/crates/storage/storage-api/src/block_writer.rs +++ b/crates/storage/storage-api/src/block_writer.rs @@ -1,8 +1,9 @@ use crate::NodePrimitivesProvider; use alloc::vec::Vec; use alloy_primitives::BlockNumber; +use reth_chain::Chain; use reth_db_models::StoredBlockBodyIndices; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::HashedPostStateSorted; diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 02030719840..bb4f9ba310b 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chain.workspace = true reth-chain-state.workspace = true reth-ethereum-primitives.workspace = true reth-chainspec.workspace = true @@ -90,6 +91,7 @@ serde = [ "revm-primitives/serde", "reth-primitives-traits/serde", "reth-ethereum-primitives/serde", + "reth-chain/serde", "reth-chain-state/serde", "reth-storage-api/serde", ] diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 44bd772cf1d..6edb41a6a23 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -3,7 +3,7 @@ use alloy_consensus::Typed2718; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{BlockNumber, B256}; -use reth_execution_types::ChainBlocks; +use reth_chain::ChainBlocks; use reth_primitives_traits::{Block, BlockBody, SignedTransaction}; use std::collections::BTreeMap; @@ -91,8 +91,8 @@ mod tests 
{ use super::*; use alloy_consensus::{Header, Signed}; use alloy_primitives::Signature; + use reth_chain::Chain; use reth_ethereum_primitives::Transaction; - use reth_execution_types::Chain; use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; #[test] @@ -175,8 +175,7 @@ mod tests { ); // Extract blocks from the chain - let chain: Chain = - Chain::new(vec![block1, block2], Default::default(), BTreeMap::new(), BTreeMap::new()); + let chain: Chain = Chain::new(vec![block1, block2], Default::default(), BTreeMap::new()); let blocks = chain.into_inner().0; // Add new chain blocks to the tracker From 79b8ffb8288f8d25d4eb31769cabfc57205ac63f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 17 Jan 2026 02:24:53 +0100 Subject: [PATCH 057/267] feat(primitives-traits): add try_recover_signers for parallel batch recovery (#21103) --- .../src/transaction/recover.rs | 183 ++++++++++++------ crates/rpc/rpc/src/testing.rs | 27 ++- 2 files changed, 144 insertions(+), 66 deletions(-) diff --git a/crates/primitives-traits/src/transaction/recover.rs b/crates/primitives-traits/src/transaction/recover.rs index 59e6e8a6943..e23b962fd19 100644 --- a/crates/primitives-traits/src/transaction/recover.rs +++ b/crates/primitives-traits/src/transaction/recover.rs @@ -1,68 +1,137 @@ //! Helpers for recovering signers from a set of transactions +use crate::{transaction::signed::RecoveryError, Recovered, SignedTransaction}; +use alloc::vec::Vec; +use alloy_consensus::transaction::SignerRecoverable; +use alloy_primitives::Address; + +#[cfg(feature = "rayon")] +use rayon::prelude::{IntoParallelIterator, ParallelIterator}; + +/// Recovers a list of signers from a transaction list iterator. +/// +/// Returns `Err(RecoveryError)`, if some transaction's signature is invalid. +/// +/// When the `rayon` feature is enabled, recovery is performed in parallel. 
+#[cfg(feature = "rayon")] +pub fn recover_signers<'a, I, T>(txes: I) -> Result, RecoveryError> +where + T: SignedTransaction, + I: IntoParallelIterator, +{ + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() +} + +/// Recovers a list of signers from a transaction list iterator. +/// +/// Returns `Err(RecoveryError)`, if some transaction's signature is invalid. +#[cfg(not(feature = "rayon"))] +pub fn recover_signers<'a, I, T>(txes: I) -> Result, RecoveryError> +where + T: SignedTransaction, + I: IntoIterator, +{ + txes.into_iter().map(|tx| tx.recover_signer()).collect() +} + +/// Recovers a list of signers from a transaction list iterator _without ensuring that the +/// signature has a low `s` value_. +/// +/// Returns `Err(RecoveryError)`, if some transaction's signature is invalid. +/// +/// When the `rayon` feature is enabled, recovery is performed in parallel. +#[cfg(feature = "rayon")] +pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Result, RecoveryError> +where + T: SignedTransaction, + I: IntoParallelIterator, +{ + txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() +} + +/// Recovers a list of signers from a transaction list iterator _without ensuring that the +/// signature has a low `s` value_. +/// +/// Returns `Err(RecoveryError)`, if some transaction's signature is invalid. +#[cfg(not(feature = "rayon"))] +pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Result, RecoveryError> +where + T: SignedTransaction, + I: IntoIterator, +{ + txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() +} + +/// Trait for items that can be used with [`try_recover_signers`]. +#[cfg(feature = "rayon")] +pub trait TryRecoverItems: IntoParallelIterator {} + +/// Trait for items that can be used with [`try_recover_signers`]. 
+#[cfg(not(feature = "rayon"))] +pub trait TryRecoverItems: IntoIterator {} + +#[cfg(feature = "rayon")] +impl TryRecoverItems for I {} + +#[cfg(not(feature = "rayon"))] +impl TryRecoverItems for I {} + +/// Trait for decode functions that can be used with [`try_recover_signers`]. +#[cfg(feature = "rayon")] +pub trait TryRecoverFn: Fn(Item) -> Result + Sync {} + +/// Trait for decode functions that can be used with [`try_recover_signers`]. +#[cfg(not(feature = "rayon"))] +pub trait TryRecoverFn: Fn(Item) -> Result {} + #[cfg(feature = "rayon")] -pub use rayon::*; +impl Result + Sync> TryRecoverFn for F {} #[cfg(not(feature = "rayon"))] -pub use iter::*; +impl Result> TryRecoverFn for F {} +/// Decodes and recovers a list of [`Recovered`] transactions from an iterator. +/// +/// The `decode` closure transforms each item into a [`SignedTransaction`], which is then +/// recovered. +/// +/// Returns an error if decoding or signature recovery fails for any transaction. +/// +/// When the `rayon` feature is enabled, recovery is performed in parallel. #[cfg(feature = "rayon")] -mod rayon { - use crate::{transaction::signed::RecoveryError, SignedTransaction}; - use alloc::vec::Vec; - use alloy_primitives::Address; - use rayon::prelude::{IntoParallelIterator, ParallelIterator}; - - /// Recovers a list of signers from a transaction list iterator. - /// - /// Returns `Err(RecoveryError)`, if some transaction's signature is invalid - pub fn recover_signers<'a, I, T>(txes: I) -> Result, RecoveryError> - where - T: SignedTransaction, - I: IntoParallelIterator, - { - txes.into_par_iter().map(|tx| tx.recover_signer()).collect() - } - - /// Recovers a list of signers from a transaction list iterator _without ensuring that the - /// signature has a low `s` value_. - /// - /// Returns `Err(RecoveryError)`, if some transaction's signature is invalid. 
- pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Result, RecoveryError> - where - T: SignedTransaction, - I: IntoParallelIterator, - { - txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } +pub fn try_recover_signers(items: I, decode: F) -> Result>, RecoveryError> +where + I: IntoParallelIterator, + F: Fn(I::Item) -> Result + Sync, + T: SignedTransaction, +{ + items + .into_par_iter() + .map(|item| { + let tx = decode(item)?; + SignerRecoverable::try_into_recovered(tx) + }) + .collect() } +/// Decodes and recovers a list of [`Recovered`] transactions from an iterator. +/// +/// The `decode` closure transforms each item into a [`SignedTransaction`], which is then +/// recovered. +/// +/// Returns an error if decoding or signature recovery fails for any transaction. #[cfg(not(feature = "rayon"))] -mod iter { - use crate::{transaction::signed::RecoveryError, SignedTransaction}; - use alloc::vec::Vec; - use alloy_primitives::Address; - - /// Recovers a list of signers from a transaction list iterator. - /// - /// Returns `Err(RecoveryError)`, if some transaction's signature is invalid - pub fn recover_signers<'a, I, T>(txes: I) -> Result, RecoveryError> - where - T: SignedTransaction, - I: IntoIterator, - { - txes.into_iter().map(|tx| tx.recover_signer()).collect() - } - - /// Recovers a list of signers from a transaction list iterator _without ensuring that the - /// signature has a low `s` value_. - /// - /// Returns `Err(RecoveryError)`, if some transaction's signature is invalid. 
- pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Result, RecoveryError> - where - T: SignedTransaction, - I: IntoIterator, - { - txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } +pub fn try_recover_signers(items: I, decode: F) -> Result>, RecoveryError> +where + I: IntoIterator, + F: Fn(I::Item) -> Result, + T: SignedTransaction, +{ + items + .into_iter() + .map(|item| { + let tx = decode(item)?; + SignerRecoverable::try_into_recovered(tx) + }) + .collect() } diff --git a/crates/rpc/rpc/src/testing.rs b/crates/rpc/rpc/src/testing.rs index dfaea2bb545..94e95edae0b 100644 --- a/crates/rpc/rpc/src/testing.rs +++ b/crates/rpc/rpc/src/testing.rs @@ -15,6 +15,7 @@ //! on public-facing RPC endpoints without proper authentication. use alloy_consensus::{Header, Transaction}; +use alloy_eips::eip2718::Decodable2718; use alloy_evm::Evm; use alloy_primitives::{map::HashSet, Address, U256}; use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV5; @@ -24,11 +25,14 @@ use reth_errors::RethError; use reth_ethereum_engine_primitives::EthBuiltPayload; use reth_ethereum_primitives::EthPrimitives; use reth_evm::{execute::BlockBuilder, ConfigureEvm, NextBlockEnvAttributes}; -use reth_primitives_traits::{AlloyBlockHeader as BlockTrait, Recovered, TxTy}; +use reth_primitives_traits::{ + transaction::{recover::try_recover_signers, signed::RecoveryError}, + AlloyBlockHeader as BlockTrait, TxTy, +}; use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_api::{TestingApiServer, TestingBuildBlockRequestV1}; use reth_rpc_eth_api::{helpers::Call, FromEthApiError}; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; +use reth_rpc_eth_types::EthApiError; use reth_storage_api::{BlockReader, HeaderProvider}; use revm::context::Block; use revm_primitives::map::DefaultHashBuilder; @@ -106,11 +110,16 @@ where let mut invalid_senders: HashSet = HashSet::default(); - for (idx, tx) in request.transactions.iter().enumerate() { - let tx: 
Recovered> = recover_raw_transaction(tx)?; - let sender = tx.signer(); + // Decode and recover all transactions in parallel + let recovered_txs = try_recover_signers(&request.transactions, |tx| { + TxTy::::decode_2718_exact(tx.as_ref()) + .map_err(RecoveryError::from_source) + }) + .or(Err(EthApiError::InvalidTransactionSignature))?; - if skip_invalid_transactions && invalid_senders.contains(&sender) { + for (idx, tx) in recovered_txs.into_iter().enumerate() { + let signer = tx.signer(); + if skip_invalid_transactions && invalid_senders.contains(&signer) { continue; } @@ -122,17 +131,17 @@ where debug!( target: "rpc::testing", tx_idx = idx, - ?sender, + ?signer, error = ?err, "Skipping invalid transaction" ); - invalid_senders.insert(sender); + invalid_senders.insert(signer); continue; } debug!( target: "rpc::testing", tx_idx = idx, - ?sender, + ?signer, error = ?err, "Transaction execution failed" ); From 574bde0d6f3c5d9b8763c9bd406033409ef19c7b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 17 Jan 2026 02:39:29 +0100 Subject: [PATCH 058/267] chore(chain-state): reorganize deferred_trie.rs impl blocks (#21151) --- crates/evm/chain/src/deferred_trie.rs | 132 +++++++++++++------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/crates/evm/chain/src/deferred_trie.rs b/crates/evm/chain/src/deferred_trie.rs index 9c870d02a40..03d7184fd72 100644 --- a/crates/evm/chain/src/deferred_trie.rs +++ b/crates/evm/chain/src/deferred_trie.rs @@ -23,72 +23,6 @@ pub struct DeferredTrieData { state: Arc>, } -/// Sorted trie data computed for an executed block. -/// These represent the complete set of sorted trie data required to persist -/// block state for, and generate proofs on top of, a block. -#[derive(Clone, Debug, Default)] -pub struct ComputedTrieData { - /// Sorted hashed post-state produced by execution. - pub hashed_state: Arc, - /// Sorted trie updates produced by state root computation. 
- pub trie_updates: Arc, - /// Trie input bundled with its anchor hash, if available. - pub anchored_trie_input: Option, -} - -/// Trie input bundled with its anchor hash. -/// -/// The `trie_input` contains the **cumulative** overlay of all in-memory ancestor blocks, -/// not just this block's changes. Child blocks reuse the parent's overlay in O(1) by -/// cloning the Arc-wrapped data. -/// -/// The `anchor_hash` is metadata indicating which persisted base state this overlay -/// sits on top of. It is CRITICAL for overlay reuse decisions: an overlay built on top -/// of Anchor A cannot be reused for a block anchored to Anchor B, as it would result -/// in an incorrect state. -#[derive(Clone, Debug)] -pub struct AnchoredTrieInput { - /// The persisted ancestor hash this trie input is anchored to. - pub anchor_hash: B256, - /// Cumulative trie input overlay from all in-memory ancestors. - pub trie_input: Arc, -} - -/// Metrics for deferred trie computation. -#[derive(Metrics)] -#[metrics(scope = "sync.block_validation")] -struct DeferredTrieMetrics { - /// Number of times deferred trie data was ready (async task completed first). - deferred_trie_async_ready: Counter, - /// Number of times deferred trie data required synchronous computation (fallback path). - deferred_trie_sync_fallback: Counter, -} - -static DEFERRED_TRIE_METRICS: LazyLock = - LazyLock::new(DeferredTrieMetrics::default); - -/// Internal state for deferred trie data. -enum DeferredState { - /// Data is not yet available; raw inputs stored for fallback computation. - /// Wrapped in `Option` to allow taking ownership during computation. - Pending(Option), - /// Data has been computed and is ready. - Ready(ComputedTrieData), -} - -/// Inputs kept while a deferred trie computation is pending. -#[derive(Clone, Debug)] -struct PendingInputs { - /// Unsorted hashed post-state from execution. - hashed_state: Arc, - /// Unsorted trie updates from state root computation. 
- trie_updates: Arc, - /// The persisted ancestor hash this trie input is anchored to. - anchor_hash: B256, - /// Deferred trie data from ancestor blocks for merging. - ancestors: Vec, -} - impl fmt::Debug for DeferredTrieData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = self.state.lock(); @@ -299,6 +233,19 @@ impl DeferredTrieData { } } +/// Sorted trie data computed for an executed block. +/// These represent the complete set of sorted trie data required to persist +/// block state for, and generate proofs on top of, a block. +#[derive(Clone, Debug, Default)] +pub struct ComputedTrieData { + /// Sorted hashed post-state produced by execution. + pub hashed_state: Arc, + /// Sorted trie updates produced by state root computation. + pub trie_updates: Arc, + /// Trie input bundled with its anchor hash, if available. + pub anchored_trie_input: Option, +} + impl ComputedTrieData { /// Construct a bundle that includes trie input anchored to a persisted ancestor. pub const fn with_trie_input( @@ -340,6 +287,59 @@ impl ComputedTrieData { } } +/// Trie input bundled with its anchor hash. +/// +/// The `trie_input` contains the **cumulative** overlay of all in-memory ancestor blocks, +/// not just this block's changes. Child blocks reuse the parent's overlay in O(1) by +/// cloning the Arc-wrapped data. +/// +/// The `anchor_hash` is metadata indicating which persisted base state this overlay +/// sits on top of. It is CRITICAL for overlay reuse decisions: an overlay built on top +/// of Anchor A cannot be reused for a block anchored to Anchor B, as it would result +/// in an incorrect state. +#[derive(Clone, Debug)] +pub struct AnchoredTrieInput { + /// The persisted ancestor hash this trie input is anchored to. + pub anchor_hash: B256, + /// Cumulative trie input overlay from all in-memory ancestors. + pub trie_input: Arc, +} + +/// Metrics for deferred trie computation. 
+#[derive(Metrics)] +#[metrics(scope = "sync.block_validation")] +struct DeferredTrieMetrics { + /// Number of times deferred trie data was ready (async task completed first). + deferred_trie_async_ready: Counter, + /// Number of times deferred trie data required synchronous computation (fallback path). + deferred_trie_sync_fallback: Counter, +} + +static DEFERRED_TRIE_METRICS: LazyLock = + LazyLock::new(DeferredTrieMetrics::default); + +/// Internal state for deferred trie data. +enum DeferredState { + /// Data is not yet available; raw inputs stored for fallback computation. + /// Wrapped in `Option` to allow taking ownership during computation. + Pending(Option), + /// Data has been computed and is ready. + Ready(ComputedTrieData), +} + +/// Inputs kept while a deferred trie computation is pending. +#[derive(Clone, Debug)] +struct PendingInputs { + /// Unsorted hashed post-state from execution. + hashed_state: Arc, + /// Unsorted trie updates from state root computation. + trie_updates: Arc, + /// The persisted ancestor hash this trie input is anchored to. + anchor_hash: B256, + /// Deferred trie data from ancestor blocks for merging. + ancestors: Vec, +} + #[cfg(test)] mod tests { use super::*; From 6bf43ab24a75589cd7b3a6c700e77a80fb4a1fd1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 17 Jan 2026 02:51:26 +0100 Subject: [PATCH 059/267] refactor: use ExecutionOutcome::single instead of tuple From (#21152) --- crates/chain-state/src/in_memory.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 1cf281395e7..44a5fb8be66 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -954,19 +954,19 @@ impl> NewCanonicalChain { [first, rest @ ..] 
=> { let mut chain = Chain::from_block( first.recovered_block().clone(), - ExecutionOutcome::from(( - first.execution_outcome().clone(), + ExecutionOutcome::single( first.block_number(), - )), + first.execution_outcome().clone(), + ), first.trie_data_handle(), ); for exec in rest { chain.append_block( exec.recovered_block().clone(), - ExecutionOutcome::from(( - exec.execution_outcome().clone(), + ExecutionOutcome::single( exec.block_number(), - )), + exec.execution_outcome().clone(), + ), exec.trie_data_handle(), ); } From c11c13000fdca9b5c4baa1cf28786f0c0abffe42 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Sat, 17 Jan 2026 07:15:40 +0000 Subject: [PATCH 060/267] perf(storage): batch trie updates across blocks in save_blocks (#21142) Co-authored-by: Amp Co-authored-by: YK --- crates/chain-state/src/lazy_overlay.rs | 37 +++++++-------- .../src/providers/database/provider.rs | 47 +++++++++++++++++-- 2 files changed, 61 insertions(+), 23 deletions(-) diff --git a/crates/chain-state/src/lazy_overlay.rs b/crates/chain-state/src/lazy_overlay.rs index a0295c9a5b4..712d85d1989 100644 --- a/crates/chain-state/src/lazy_overlay.rs +++ b/crates/chain-state/src/lazy_overlay.rs @@ -10,11 +10,6 @@ use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, TrieInputSort use std::sync::{Arc, OnceLock}; use tracing::{debug, trace}; -/// Threshold for switching from `extend_ref` loop to `merge_batch`. -/// -/// Benchmarked crossover: `extend_ref` wins up to ~64 blocks, `merge_batch` wins beyond. -const MERGE_BATCH_THRESHOLD: usize = 64; - /// Inputs captured for lazy overlay computation. #[derive(Clone)] struct LazyOverlayInputs { @@ -128,44 +123,46 @@ impl LazyOverlay { /// Merge all blocks' trie data into a single [`TrieInputSorted`]. /// - /// Blocks are ordered newest to oldest. We iterate oldest to newest so that - /// newer values override older ones. + /// Blocks are ordered newest to oldest. 
Uses hybrid merge algorithm that + /// switches between `extend_ref` (small batches) and k-way merge (large batches). fn merge_blocks(blocks: &[DeferredTrieData]) -> TrieInputSorted { + const MERGE_BATCH_THRESHOLD: usize = 64; + if blocks.is_empty() { return TrieInputSorted::default(); } - // Single block: use its data directly + // Single block: use its data directly (no allocation) if blocks.len() == 1 { let data = blocks[0].wait_cloned(); return TrieInputSorted { - state: Arc::clone(&data.hashed_state), - nodes: Arc::clone(&data.trie_updates), + state: data.hashed_state, + nodes: data.trie_updates, prefix_sets: Default::default(), }; } if blocks.len() < MERGE_BATCH_THRESHOLD { - // Small k: extend_ref loop is faster - // Iterate oldest->newest so newer values override older ones + // Small k: extend_ref loop with Arc::make_mut is faster. + // Uses copy-on-write - only clones inner data if Arc has multiple refs. + // Iterate oldest->newest so newer values override older ones. let mut blocks_iter = blocks.iter().rev(); let first = blocks_iter.next().expect("blocks is non-empty"); let data = first.wait_cloned(); - let mut state = Arc::clone(&data.hashed_state); - let mut nodes = Arc::clone(&data.trie_updates); - let state_mut = Arc::make_mut(&mut state); - let nodes_mut = Arc::make_mut(&mut nodes); + let mut state = data.hashed_state; + let mut nodes = data.trie_updates; for block in blocks_iter { - let data = block.wait_cloned(); - state_mut.extend_ref(data.hashed_state.as_ref()); - nodes_mut.extend_ref(data.trie_updates.as_ref()); + let block_data = block.wait_cloned(); + Arc::make_mut(&mut state).extend_ref(block_data.hashed_state.as_ref()); + Arc::make_mut(&mut nodes).extend_ref(block_data.trie_updates.as_ref()); } TrieInputSorted { state, nodes, prefix_sets: Default::default() } } else { - // Large k: merge_batch is faster (O(n log k) via k-way merge) + // Large k: k-way merge is faster (O(n log k)). 
+ // Collect is unavoidable here - we need all data materialized for k-way merge. let trie_data: Vec<_> = blocks.iter().map(|b| b.wait_cloned()).collect(); let merged_state = HashedPostStateSorted::merge_batch( diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b2d911bc5a4..46ca89ba32b 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -556,11 +556,52 @@ impl DatabaseProvider owned, + Err(arc) => (*arc).clone(), + } + } else if num_blocks < MERGE_BATCH_THRESHOLD { + // Small k: extend_ref with Arc::make_mut (copy-on-write). + // Blocks are oldest-to-newest, iterate forward so newest overrides. + let mut blocks_iter = blocks.iter(); + let mut result = blocks_iter.next().expect("non-empty").trie_updates(); + + for block in blocks_iter { + Arc::make_mut(&mut result).extend_ref(block.trie_updates().as_ref()); + } + + match Arc::try_unwrap(result) { + Ok(owned) => owned, + Err(arc) => (*arc).clone(), + } + } else { + // Large k: k-way merge is faster (O(n log k)). + // Collect Arcs first to extend lifetime, then pass refs. + // Blocks are oldest-to-newest, merge_batch expects newest-to-oldest. + let arcs: Vec<_> = blocks.iter().rev().map(|b| b.trie_updates()).collect(); + TrieUpdatesSorted::merge_batch(arcs.iter().map(|arc| arc.as_ref())) + }; + + if !merged.is_empty() { + self.write_trie_updates_sorted(&merged)?; } + timings.write_trie_updates += start.elapsed(); } // Full mode: update history indices From d5dc0b27ebf2e4f7c8fd73cc7023a1a4fea2c0cc Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Sat, 17 Jan 2026 08:32:10 +0000 Subject: [PATCH 061/267] fix(storage-api): gate reth-chain dependency behind std feature The reth-chain crate is inherently std-only (uses BTreeMap, Arc, etc.) 
and was breaking the riscv32imac no_std builds by pulling in serde_core which doesn't support no_std properly. This makes reth-chain optional and only enables it when std feature is active, gating the block_writer module that uses Chain behind std. --- crates/storage/storage-api/Cargo.toml | 7 ++++--- crates/storage/storage-api/src/lib.rs | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 0786e55aa45..70723cda284 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-chain.workspace = true +reth-chain = { workspace = true, optional = true } reth-db-models.workspace = true reth-chainspec.workspace = true reth-db-api = { workspace = true, optional = true } @@ -38,6 +38,7 @@ serde_json = { workspace = true, optional = true } [features] default = ["std"] std = [ + "dep:reth-chain", "reth-chainspec/std", "alloy-consensus/std", "alloy-eips/std", @@ -61,7 +62,7 @@ db-api = [ ] serde = [ - "reth-chain/serde", + "reth-chain?/serde", "reth-ethereum-primitives/serde", "reth-db-models/serde", "reth-execution-types/serde", @@ -80,7 +81,7 @@ serde-bincode-compat = [ "reth-execution-types/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", "reth-trie-common/serde-bincode-compat", - "reth-chain/serde-bincode-compat", + "reth-chain?/serde-bincode-compat", "reth-ethereum-primitives/serde-bincode-compat", "alloy-eips/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 0daf2805190..8c69e2090fc 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -85,7 +85,9 @@ pub use primitives::*; mod block_indices; pub use block_indices::*; +#[cfg(feature = "std")] mod block_writer; +#[cfg(feature = "std")] pub use 
block_writer::*; mod state_writer; From 27e055f7906e48c8bca3e126484f4256c2a3c2c1 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Sat, 17 Jan 2026 10:20:22 +0000 Subject: [PATCH 062/267] feat(engine): add time_between_new_payloads metric (#21158) --- crates/engine/tree/src/tree/metrics.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index da787520257..40b57c54b46 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -298,6 +298,8 @@ pub(crate) struct NewPayloadStatusMetrics { pub(crate) new_payload_latency: Histogram, /// Latency for the last new payload call. pub(crate) new_payload_last: Gauge, + /// Time between consecutive new payload calls (payload-to-payload interval). + pub(crate) time_between_new_payloads: Histogram, } impl NewPayloadStatusMetrics { @@ -311,6 +313,9 @@ impl NewPayloadStatusMetrics { let finish = Instant::now(); let elapsed = finish - start; + if let Some(prev) = self.latest_at { + self.time_between_new_payloads.record(start - prev); + } self.latest_at = Some(finish); match result { Ok(outcome) => match outcome.outcome.status { From 1ea574417f0e8172d772ff4f5acecc1214b5e0fe Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Sat, 17 Jan 2026 12:15:45 +0000 Subject: [PATCH 063/267] feat(engine): add new_payload_interval metric (start-to-start) (#21159) --- crates/engine/tree/src/tree/metrics.rs | 19 ++++++++++++++----- crates/engine/tree/src/tree/mod.rs | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 40b57c54b46..303f3c62985 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -270,7 +270,10 @@ impl ForkchoiceUpdatedMetrics { pub(crate) struct NewPayloadStatusMetrics { /// Finish time of the latest new payload call. 
#[metric(skip)] - pub(crate) latest_at: Option, + pub(crate) latest_finish_at: Option, + /// Start time of the latest new payload call. + #[metric(skip)] + pub(crate) latest_start_at: Option, /// The total count of new payload messages received. pub(crate) new_payload_messages: Counter, /// The total count of new payload messages that we responded to with @@ -298,8 +301,10 @@ pub(crate) struct NewPayloadStatusMetrics { pub(crate) new_payload_latency: Histogram, /// Latency for the last new payload call. pub(crate) new_payload_last: Gauge, - /// Time between consecutive new payload calls (payload-to-payload interval). + /// Time from previous payload finish to current payload start (idle time). pub(crate) time_between_new_payloads: Histogram, + /// Time from previous payload start to current payload start (total interval). + pub(crate) new_payload_interval: Histogram, } impl NewPayloadStatusMetrics { @@ -313,10 +318,14 @@ impl NewPayloadStatusMetrics { let finish = Instant::now(); let elapsed = finish - start; - if let Some(prev) = self.latest_at { - self.time_between_new_payloads.record(start - prev); + if let Some(prev_finish) = self.latest_finish_at { + self.time_between_new_payloads.record(start - prev_finish); + } + if let Some(prev_start) = self.latest_start_at { + self.new_payload_interval.record(start - prev_start); } - self.latest_at = Some(finish); + self.latest_finish_at = Some(finish); + self.latest_start_at = Some(start); match result { Ok(outcome) => match outcome.outcome.status { PayloadStatusEnum::Valid => { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 275a2c47f6b..3d4913780c2 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1478,7 +1478,7 @@ where self.metrics.engine.forkchoice_updated.update_response_metrics( start, - &mut self.metrics.engine.new_payload.latest_at, + &mut self.metrics.engine.new_payload.latest_finish_at, has_attrs, &output, ); From 
40bc9d3860094c4a1627969a2787577460bcc74a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 17 Jan 2026 16:57:09 +0100 Subject: [PATCH 064/267] revert: undo Chain crate, add LazyTrieData to trie-common (#21155) --- Cargo.lock | 39 +-- Cargo.toml | 2 - crates/chain-state/Cargo.toml | 2 - .../src/deferred_trie.rs | 133 +++++----- crates/chain-state/src/in_memory.rs | 159 ++++-------- crates/chain-state/src/lib.rs | 4 +- crates/chain-state/src/notifications.rs | 16 +- crates/chain-state/src/test_utils.rs | 3 +- crates/evm/chain/Cargo.toml | 69 ----- crates/evm/chain/src/lib.rs | 30 --- crates/evm/execution-types/Cargo.toml | 4 + .../{chain => execution-types}/src/chain.rs | 238 +++++------------- .../execution-types/src/execution_outcome.rs | 2 +- crates/evm/execution-types/src/lib.rs | 5 +- crates/exex/exex/Cargo.toml | 1 - crates/exex/exex/src/manager.rs | 90 +++---- crates/exex/exex/src/notifications.rs | 132 +++++----- crates/exex/exex/src/wal/mod.rs | 57 +---- crates/exex/exex/src/wal/storage.rs | 55 ++-- crates/exex/test-utils/Cargo.toml | 2 +- crates/exex/test-utils/src/lib.rs | 2 +- crates/exex/types/Cargo.toml | 6 +- crates/exex/types/src/notification.rs | 103 +++----- crates/optimism/evm/Cargo.toml | 1 - crates/optimism/evm/src/lib.rs | 5 +- crates/rpc/rpc-eth-types/Cargo.toml | 2 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 2 +- crates/stages/stages/Cargo.toml | 2 +- crates/stages/stages/src/stages/execution.rs | 2 +- crates/storage/provider/Cargo.toml | 10 - crates/storage/provider/src/lib.rs | 11 +- .../src/providers/blockchain_provider.rs | 28 +-- .../src/providers/database/provider.rs | 3 +- crates/storage/storage-api/Cargo.toml | 4 - .../storage/storage-api/src/block_writer.rs | 3 +- crates/transaction-pool/Cargo.toml | 2 - .../transaction-pool/src/blobstore/tracker.rs | 4 +- crates/trie/common/src/lazy.rs | 194 ++++++++++++++ crates/trie/common/src/lib.rs | 4 + 39 files changed, 601 insertions(+), 830 deletions(-) rename crates/{evm/chain => 
chain-state}/src/deferred_trie.rs (99%) delete mode 100644 crates/evm/chain/Cargo.toml delete mode 100644 crates/evm/chain/src/lib.rs rename crates/evm/{chain => execution-types}/src/chain.rs (76%) create mode 100644 crates/trie/common/src/lazy.rs diff --git a/Cargo.lock b/Cargo.lock index 450ab2bc263..17e53c81377 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7774,30 +7774,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-chain" -version = "1.10.0" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "arbitrary", - "bincode 1.3.3", - "metrics", - "parking_lot", - "rand 0.9.2", - "reth-ethereum-primitives", - "reth-execution-types", - "reth-metrics", - "reth-primitives-traits", - "reth-trie", - "reth-trie-common", - "revm", - "serde", - "serde_with", - "tracing", -] - [[package]] name = "reth-chain-state" version = "1.10.0" @@ -7813,7 +7789,6 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.9.2", - "reth-chain", "reth-chainspec", "reth-errors", "reth-ethereum-primitives", @@ -8952,6 +8927,7 @@ dependencies = [ name = "reth-execution-types" version = "1.10.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-evm", "alloy-primitives", @@ -9005,7 +8981,6 @@ dependencies = [ "reth-trie-common", "rmp-serde", "secp256k1 0.30.0", - "serde_with", "tempfile", "thiserror 2.0.17", "tokio", @@ -9020,7 +8995,6 @@ dependencies = [ "alloy-eips", "eyre", "futures-util", - "reth-chain", "reth-chainspec", "reth-config", "reth-consensus", @@ -9028,6 +9002,7 @@ dependencies = [ "reth-db-common", "reth-ethereum-primitives", "reth-evm-ethereum", + "reth-execution-types", "reth-exex", "reth-network", "reth-node-api", @@ -9053,9 +9028,9 @@ dependencies = [ "arbitrary", "bincode 1.3.3", "rand 0.9.2", - "reth-chain", "reth-chain-state", "reth-ethereum-primitives", + "reth-execution-types", "reth-primitives-traits", "serde", "serde_with", @@ -9798,7 +9773,6 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "op-revm", 
- "reth-chain", "reth-chainspec", "reth-evm", "reth-execution-errors", @@ -10249,7 +10223,6 @@ dependencies = [ "parking_lot", "rand 0.9.2", "rayon", - "reth-chain", "reth-chain-state", "reth-chainspec", "reth-codecs", @@ -10739,12 +10712,12 @@ dependencies = [ "metrics", "rand 0.9.2", "reqwest", - "reth-chain", "reth-chain-state", "reth-chainspec", "reth-errors", "reth-ethereum-primitives", "reth-evm", + "reth-execution-types", "reth-metrics", "reth-primitives-traits", "reth-revm", @@ -10817,7 +10790,6 @@ dependencies = [ "rand 0.9.2", "rayon", "reqwest", - "reth-chain", "reth-chainspec", "reth-codecs", "reth-config", @@ -10833,6 +10805,7 @@ dependencies = [ "reth-etl", "reth-evm", "reth-evm-ethereum", + "reth-execution-types", "reth-exex", "reth-fs-util", "reth-network-p2p", @@ -10979,7 +10952,6 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "auto_impl", - "reth-chain", "reth-chainspec", "reth-db-api", "reth-db-models", @@ -11138,7 +11110,6 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.9.2", - "reth-chain", "reth-chain-state", "reth-chainspec", "reth-eth-wire-types", diff --git a/Cargo.toml b/Cargo.toml index 1efe7985da1..c9a3ba0d93c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,7 +44,6 @@ members = [ "crates/ethereum/primitives/", "crates/ethereum/reth/", "crates/etl/", - "crates/evm/chain", "crates/evm/evm", "crates/evm/execution-errors", "crates/evm/execution-types", @@ -388,7 +387,6 @@ reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm/evm", default-features = false } reth-evm-ethereum = { path = "crates/ethereum/evm", default-features = false } reth-optimism-evm = { path = "crates/optimism/evm", default-features = false } -reth-chain = { path = "crates/evm/chain" } reth-execution-errors = { path = "crates/evm/execution-errors", default-features = false } reth-execution-types = { path = "crates/evm/execution-types", default-features = false } reth-exex = { path = "crates/exex/exex" } diff --git 
a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index b3fbe487311..d21c83ae7c4 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-chain.workspace = true reth-chainspec.workspace = true reth-errors.workspace = true reth-execution-types.workspace = true @@ -66,7 +65,6 @@ serde = [ "alloy-primitives/serde", "parking_lot/serde", "rand?/serde", - "reth-chain/serde", "reth-ethereum-primitives/serde", "reth-execution-types/serde", "reth-primitives-traits/serde", diff --git a/crates/evm/chain/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs similarity index 99% rename from crates/evm/chain/src/deferred_trie.rs rename to crates/chain-state/src/deferred_trie.rs index 03d7184fd72..efe23a2ded3 100644 --- a/crates/evm/chain/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -8,7 +8,6 @@ use reth_trie::{ use std::{ fmt, sync::{Arc, LazyLock}, - vec::Vec, }; use tracing::instrument; @@ -23,6 +22,72 @@ pub struct DeferredTrieData { state: Arc>, } +/// Sorted trie data computed for an executed block. +/// These represent the complete set of sorted trie data required to persist +/// block state for, and generate proofs on top of, a block. +#[derive(Clone, Debug, Default)] +pub struct ComputedTrieData { + /// Sorted hashed post-state produced by execution. + pub hashed_state: Arc, + /// Sorted trie updates produced by state root computation. + pub trie_updates: Arc, + /// Trie input bundled with its anchor hash, if available. + pub anchored_trie_input: Option, +} + +/// Trie input bundled with its anchor hash. +/// +/// The `trie_input` contains the **cumulative** overlay of all in-memory ancestor blocks, +/// not just this block's changes. Child blocks reuse the parent's overlay in O(1) by +/// cloning the Arc-wrapped data. +/// +/// The `anchor_hash` is metadata indicating which persisted base state this overlay +/// sits on top of. 
It is CRITICAL for overlay reuse decisions: an overlay built on top +/// of Anchor A cannot be reused for a block anchored to Anchor B, as it would result +/// in an incorrect state. +#[derive(Clone, Debug)] +pub struct AnchoredTrieInput { + /// The persisted ancestor hash this trie input is anchored to. + pub anchor_hash: B256, + /// Cumulative trie input overlay from all in-memory ancestors. + pub trie_input: Arc, +} + +/// Metrics for deferred trie computation. +#[derive(Metrics)] +#[metrics(scope = "sync.block_validation")] +struct DeferredTrieMetrics { + /// Number of times deferred trie data was ready (async task completed first). + deferred_trie_async_ready: Counter, + /// Number of times deferred trie data required synchronous computation (fallback path). + deferred_trie_sync_fallback: Counter, +} + +static DEFERRED_TRIE_METRICS: LazyLock = + LazyLock::new(DeferredTrieMetrics::default); + +/// Internal state for deferred trie data. +enum DeferredState { + /// Data is not yet available; raw inputs stored for fallback computation. + /// Wrapped in `Option` to allow taking ownership during computation. + Pending(Option), + /// Data has been computed and is ready. + Ready(ComputedTrieData), +} + +/// Inputs kept while a deferred trie computation is pending. +#[derive(Clone, Debug)] +struct PendingInputs { + /// Unsorted hashed post-state from execution. + hashed_state: Arc, + /// Unsorted trie updates from state root computation. + trie_updates: Arc, + /// The persisted ancestor hash this trie input is anchored to. + anchor_hash: B256, + /// Deferred trie data from ancestor blocks for merging. + ancestors: Vec, +} + impl fmt::Debug for DeferredTrieData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = self.state.lock(); @@ -233,19 +298,6 @@ impl DeferredTrieData { } } -/// Sorted trie data computed for an executed block. 
-/// These represent the complete set of sorted trie data required to persist -/// block state for, and generate proofs on top of, a block. -#[derive(Clone, Debug, Default)] -pub struct ComputedTrieData { - /// Sorted hashed post-state produced by execution. - pub hashed_state: Arc, - /// Sorted trie updates produced by state root computation. - pub trie_updates: Arc, - /// Trie input bundled with its anchor hash, if available. - pub anchored_trie_input: Option, -} - impl ComputedTrieData { /// Construct a bundle that includes trie input anchored to a persisted ancestor. pub const fn with_trie_input( @@ -287,59 +339,6 @@ impl ComputedTrieData { } } -/// Trie input bundled with its anchor hash. -/// -/// The `trie_input` contains the **cumulative** overlay of all in-memory ancestor blocks, -/// not just this block's changes. Child blocks reuse the parent's overlay in O(1) by -/// cloning the Arc-wrapped data. -/// -/// The `anchor_hash` is metadata indicating which persisted base state this overlay -/// sits on top of. It is CRITICAL for overlay reuse decisions: an overlay built on top -/// of Anchor A cannot be reused for a block anchored to Anchor B, as it would result -/// in an incorrect state. -#[derive(Clone, Debug)] -pub struct AnchoredTrieInput { - /// The persisted ancestor hash this trie input is anchored to. - pub anchor_hash: B256, - /// Cumulative trie input overlay from all in-memory ancestors. - pub trie_input: Arc, -} - -/// Metrics for deferred trie computation. -#[derive(Metrics)] -#[metrics(scope = "sync.block_validation")] -struct DeferredTrieMetrics { - /// Number of times deferred trie data was ready (async task completed first). - deferred_trie_async_ready: Counter, - /// Number of times deferred trie data required synchronous computation (fallback path). - deferred_trie_sync_fallback: Counter, -} - -static DEFERRED_TRIE_METRICS: LazyLock = - LazyLock::new(DeferredTrieMetrics::default); - -/// Internal state for deferred trie data. 
-enum DeferredState { - /// Data is not yet available; raw inputs stored for fallback computation. - /// Wrapped in `Option` to allow taking ownership during computation. - Pending(Option), - /// Data has been computed and is ready. - Ready(ComputedTrieData), -} - -/// Inputs kept while a deferred trie computation is pending. -#[derive(Clone, Debug)] -struct PendingInputs { - /// Unsorted hashed post-state from execution. - hashed_state: Arc, - /// Unsorted trie updates from state root computation. - trie_updates: Arc, - /// The persisted ancestor hash this trie input is anchored to. - anchor_hash: B256, - /// Deferred trie data from ancestor blocks for merging. - ancestors: Vec, -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 44a5fb8be66..311830dbc69 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -8,17 +8,16 @@ use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, BlockNumber, TxHash, B256}; use parking_lot::RwLock; -use reth_chain::Chain; use reth_chainspec::ChainInfo; use reth_ethereum_primitives::EthPrimitives; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives_traits::{ BlockBody as _, IndexedTx, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, SignedTransaction, }; use reth_storage_api::StateProviderBox; -use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, TrieInputSorted}; +use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, LazyTrieData, TrieInputSorted}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; use tokio::sync::{broadcast, watch}; @@ -945,29 +944,26 @@ impl> 
NewCanonicalChain { } /// Converts a slice of executed blocks into a [`Chain`]. - /// - /// Uses [`ExecutedBlock::trie_data_handle`] to avoid blocking on deferred trie computations. - /// The trie data will be computed lazily when actually needed by consumers. fn blocks_to_chain(blocks: &[ExecutedBlock]) -> Chain { match blocks { [] => Chain::default(), [first, rest @ ..] => { let mut chain = Chain::from_block( first.recovered_block().clone(), - ExecutionOutcome::single( - first.block_number(), + ExecutionOutcome::from(( first.execution_outcome().clone(), - ), - first.trie_data_handle(), + first.block_number(), + )), + LazyTrieData::ready(first.hashed_state(), first.trie_updates()), ); for exec in rest { chain.append_block( exec.recovered_block().clone(), - ExecutionOutcome::single( - exec.block_number(), + ExecutionOutcome::from(( exec.execution_outcome().clone(), - ), - exec.trie_data_handle(), + exec.block_number(), + )), + LazyTrieData::ready(exec.hashed_state(), exec.trie_updates()), ); } chain @@ -1544,15 +1540,12 @@ mod tests { // Test commit notification let chain_commit = NewCanonicalChain::Commit { new: vec![block0.clone(), block1.clone()] }; - // Build expected trie updates map - let mut expected_trie_updates = BTreeMap::new(); - expected_trie_updates.insert(0, block0.trie_updates()); - expected_trie_updates.insert(1, block1.trie_updates()); - - // Build expected hashed state map - let mut expected_hashed_state = BTreeMap::new(); - expected_hashed_state.insert(0, block0.hashed_state()); - expected_hashed_state.insert(1, block1.hashed_state()); + // Build expected trie data map + let mut expected_trie_data = BTreeMap::new(); + expected_trie_data + .insert(0, LazyTrieData::ready(block0.hashed_state(), block0.trie_updates())); + expected_trie_data + .insert(1, LazyTrieData::ready(block1.hashed_state(), block1.trie_updates())); // Build expected execution outcome (first_block matches first block number) let commit_execution_outcome = ExecutionOutcome { @@ 
-1562,30 +1555,16 @@ mod tests { ..Default::default() }; - // Get the notification and verify - let notification = chain_commit.to_chain_notification(); - let CanonStateNotification::Commit { new } = notification else { - panic!("Expected Commit notification"); - }; - - // Compare blocks - let expected_blocks: Vec<_> = - vec![block0.recovered_block().clone(), block1.recovered_block().clone()]; - let actual_blocks: Vec<_> = new.blocks().values().cloned().collect(); - assert_eq!(actual_blocks, expected_blocks); - - // Compare execution outcome - assert_eq!(*new.execution_outcome(), commit_execution_outcome); - - // Compare trie data by waiting on deferred data - for (block_num, expected_updates) in &expected_trie_updates { - let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); - assert_eq!(actual.trie_updates, *expected_updates); - } - for (block_num, expected_state) in &expected_hashed_state { - let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); - assert_eq!(actual.hashed_state, *expected_state); - } + assert_eq!( + chain_commit.to_chain_notification(), + CanonStateNotification::Commit { + new: Arc::new(Chain::new( + vec![block0.recovered_block().clone(), block1.recovered_block().clone()], + commit_execution_outcome, + expected_trie_data, + )) + } + ); // Test reorg notification let chain_reorg = NewCanonicalChain::Reorg { @@ -1593,25 +1572,17 @@ mod tests { old: vec![block1.clone(), block2.clone()], }; - // Build expected trie updates for old chain - let mut old_trie_updates = BTreeMap::new(); - old_trie_updates.insert(1, block1.trie_updates()); - old_trie_updates.insert(2, block2.trie_updates()); - - // Build expected trie updates for new chain - let mut new_trie_updates = BTreeMap::new(); - new_trie_updates.insert(1, block1a.trie_updates()); - new_trie_updates.insert(2, block2a.trie_updates()); + // Build expected trie data for old chain + let mut old_trie_data = BTreeMap::new(); + old_trie_data.insert(1, 
LazyTrieData::ready(block1.hashed_state(), block1.trie_updates())); + old_trie_data.insert(2, LazyTrieData::ready(block2.hashed_state(), block2.trie_updates())); - // Build expected hashed state for old chain - let mut old_hashed_state = BTreeMap::new(); - old_hashed_state.insert(1, block1.hashed_state()); - old_hashed_state.insert(2, block2.hashed_state()); - - // Build expected hashed state for new chain - let mut new_hashed_state = BTreeMap::new(); - new_hashed_state.insert(1, block1a.hashed_state()); - new_hashed_state.insert(2, block2a.hashed_state()); + // Build expected trie data for new chain + let mut new_trie_data = BTreeMap::new(); + new_trie_data + .insert(1, LazyTrieData::ready(block1a.hashed_state(), block1a.trie_updates())); + new_trie_data + .insert(2, LazyTrieData::ready(block2a.hashed_state(), block2a.trie_updates())); // Build expected execution outcome for reorg chains (first_block matches first block // number) @@ -1622,48 +1593,20 @@ mod tests { ..Default::default() }; - // Get the notification and verify - let notification = chain_reorg.to_chain_notification(); - let CanonStateNotification::Reorg { old, new } = notification else { - panic!("Expected Reorg notification"); - }; - - // Compare old chain blocks - let expected_old_blocks: Vec<_> = - vec![block1.recovered_block().clone(), block2.recovered_block().clone()]; - let actual_old_blocks: Vec<_> = old.blocks().values().cloned().collect(); - assert_eq!(actual_old_blocks, expected_old_blocks); - - // Compare old chain execution outcome - assert_eq!(*old.execution_outcome(), reorg_execution_outcome); - - // Compare old chain trie data - for (block_num, expected_updates) in &old_trie_updates { - let actual = old.trie_data_at(*block_num).unwrap().wait_cloned(); - assert_eq!(actual.trie_updates, *expected_updates); - } - for (block_num, expected_state) in &old_hashed_state { - let actual = old.trie_data_at(*block_num).unwrap().wait_cloned(); - assert_eq!(actual.hashed_state, *expected_state); - 
} - - // Compare new chain blocks - let expected_new_blocks: Vec<_> = - vec![block1a.recovered_block().clone(), block2a.recovered_block().clone()]; - let actual_new_blocks: Vec<_> = new.blocks().values().cloned().collect(); - assert_eq!(actual_new_blocks, expected_new_blocks); - - // Compare new chain execution outcome - assert_eq!(*new.execution_outcome(), reorg_execution_outcome); - - // Compare new chain trie data - for (block_num, expected_updates) in &new_trie_updates { - let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); - assert_eq!(actual.trie_updates, *expected_updates); - } - for (block_num, expected_state) in &new_hashed_state { - let actual = new.trie_data_at(*block_num).unwrap().wait_cloned(); - assert_eq!(actual.hashed_state, *expected_state); - } + assert_eq!( + chain_reorg.to_chain_notification(), + CanonStateNotification::Reorg { + old: Arc::new(Chain::new( + vec![block1.recovered_block().clone(), block2.recovered_block().clone()], + reorg_execution_outcome.clone(), + old_trie_data, + )), + new: Arc::new(Chain::new( + vec![block1a.recovered_block().clone(), block2a.recovered_block().clone()], + reorg_execution_outcome, + new_trie_data, + )) + } + ); } } diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index d32b131e0ee..f6abed91467 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -11,8 +11,8 @@ mod in_memory; pub use in_memory::*; -// Re-export deferred_trie types from reth_chain -pub use reth_chain::{AnchoredTrieInput, ComputedTrieData, DeferredTrieData}; +mod deferred_trie; +pub use deferred_trie::*; mod lazy_overlay; pub use lazy_overlay::*; diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 88152edc6ef..3fd9d9a1c76 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -2,7 +2,7 @@ use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; use derive_more::{Deref, DerefMut}; -use 
reth_chain::{BlockReceipts, Chain}; +use reth_execution_types::{BlockReceipts, Chain}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; use reth_storage_api::NodePrimitivesProvider; use std::{ @@ -80,7 +80,7 @@ impl Stream for CanonStateNotificationStream { /// /// The notification contains at least one [`Chain`] with the imported segment. If some blocks were /// reverted (e.g. during a reorg), the old chain is also returned. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "serde", serde(bound = ""))] pub enum CanonStateNotification { @@ -285,8 +285,8 @@ mod tests { // Create a commit notification let notification = CanonStateNotification::Commit { new: chain.clone() }; - // Test that `committed` returns the correct chain (compare Arc pointers) - assert!(Arc::ptr_eq(¬ification.committed(), &chain)); + // Test that `committed` returns the correct chain + assert_eq!(notification.committed(), chain); // Test that `reverted` returns None for `Commit` assert!(notification.reverted().is_none()); @@ -329,11 +329,11 @@ mod tests { let notification = CanonStateNotification::Reorg { old: old_chain.clone(), new: new_chain.clone() }; - // Test that `reverted` returns the old chain (compare Arc pointers) - assert!(Arc::ptr_eq(¬ification.reverted().unwrap(), &old_chain)); + // Test that `reverted` returns the old chain + assert_eq!(notification.reverted(), Some(old_chain)); - // Test that `committed` returns the new chain (compare Arc pointers) - assert!(Arc::ptr_eq(¬ification.committed(), &new_chain)); + // Test that `committed` returns the new chain + assert_eq!(notification.committed(), new_chain); // Test that `tip` returns the tip of the new chain (last block in the new chain) assert_eq!(*notification.tip(), block3); diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 81baf10be65..73bad27d79f 
100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -9,12 +9,11 @@ use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use core::marker::PhantomData; use rand::Rng; -use reth_chain::Chain; use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_ethereum_primitives::{ Block, BlockBody, EthPrimitives, Receipt, Transaction, TransactionSigned, }; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; use reth_primitives_traits::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, Account, NodePrimitives, Recovered, RecoveredBlock, SealedBlock, SealedHeader, diff --git a/crates/evm/chain/Cargo.toml b/crates/evm/chain/Cargo.toml deleted file mode 100644 index 67e2a73d0e9..00000000000 --- a/crates/evm/chain/Cargo.toml +++ /dev/null @@ -1,69 +0,0 @@ -[package] -name = "reth-chain" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "Chain and deferred trie data types for reth." 
- -[lints] -workspace = true - -[dependencies] -reth-ethereum-primitives.workspace = true -reth-execution-types = { workspace = true, features = ["std"] } -reth-metrics.workspace = true -reth-primitives-traits.workspace = true -reth-trie.workspace = true -reth-trie-common.workspace = true - -# alloy -alloy-consensus.workspace = true -alloy-primitives.workspace = true -alloy-eips.workspace = true - -serde = { workspace = true, optional = true } -serde_with = { workspace = true, optional = true } - -metrics.workspace = true -parking_lot.workspace = true -tracing.workspace = true - -[dev-dependencies] -reth-primitives-traits = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-ethereum-primitives = { workspace = true, features = ["arbitrary"] } -alloy-primitives = { workspace = true, features = ["rand", "arbitrary"] } -alloy-consensus = { workspace = true, features = ["arbitrary"] } -arbitrary.workspace = true -bincode.workspace = true -rand.workspace = true -revm.workspace = true - -[features] -default = [] -serde = [ - "dep:serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "reth-primitives-traits/serde", - "alloy-consensus/serde", - "reth-trie/serde", - "reth-trie-common/serde", - "reth-ethereum-primitives/serde", - "reth-execution-types/serde", - "rand/serde", - "revm/serde", - "parking_lot/serde", -] -serde-bincode-compat = [ - "serde", - "reth-trie-common/serde-bincode-compat", - "reth-primitives-traits/serde-bincode-compat", - "serde_with", - "alloy-eips/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat", - "reth-ethereum-primitives/serde-bincode-compat", - "reth-execution-types/serde-bincode-compat", -] diff --git a/crates/evm/chain/src/lib.rs b/crates/evm/chain/src/lib.rs deleted file mode 100644 index 38e7485de10..00000000000 --- a/crates/evm/chain/src/lib.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! Chain and deferred trie data types for reth. -//! -//! 
This crate contains the [`Chain`] type representing a chain of blocks and their final state, -//! as well as [`DeferredTrieData`] for handling asynchronously computed trie data. - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -mod chain; -pub use chain::*; - -mod deferred_trie; -pub use deferred_trie::*; - -/// Bincode-compatible serde implementations for chain types. -/// -/// `bincode` crate doesn't work with optionally serializable serde fields, but some of the -/// chain types require optional serialization for RPC compatibility. This module makes so that -/// all fields are serialized. -/// -/// Read more: -#[cfg(feature = "serde-bincode-compat")] -pub mod serde_bincode_compat { - pub use super::chain::serde_bincode_compat::*; -} diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 982408226f3..6c53e315b32 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -19,6 +19,7 @@ revm.workspace = true # alloy alloy-evm.workspace = true +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -44,6 +45,7 @@ serde = [ "alloy-eips/serde", "alloy-primitives/serde", "reth-primitives-traits/serde", + "alloy-consensus/serde", "reth-trie-common/serde", "reth-ethereum-primitives/serde", ] @@ -53,6 +55,7 @@ serde-bincode-compat = [ "reth-primitives-traits/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", "reth-ethereum-primitives/serde-bincode-compat", ] std = [ @@ -61,6 +64,7 @@ std = [ "revm/std", "serde?/std", "reth-primitives-traits/std", + "alloy-consensus/std", 
"serde_with?/std", "derive_more/std", "reth-ethereum-primitives/std", diff --git a/crates/evm/chain/src/chain.rs b/crates/evm/execution-types/src/chain.rs similarity index 76% rename from crates/evm/chain/src/chain.rs rename to crates/evm/execution-types/src/chain.rs index 7cd3c4a88cd..3cde0eaa796 100644 --- a/crates/evm/chain/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -1,16 +1,16 @@ //! Contains [Chain], a chain of blocks and their final state. -use crate::DeferredTrieData; +use crate::ExecutionOutcome; +use alloc::{borrow::Cow, collections::BTreeMap, vec::Vec}; use alloy_consensus::{transaction::Recovered, BlockHeader}; use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; -use reth_execution_types::ExecutionOutcome; +use core::{fmt, ops::RangeInclusive}; use reth_primitives_traits::{ transaction::signed::SignedTransaction, Block, BlockBody, IndexedTx, NodePrimitives, RecoveredBlock, SealedHeader, }; -use reth_trie_common::{updates::TrieUpdatesSorted, HashedPostStateSorted}; -use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive, sync::Arc, vec::Vec}; +use reth_trie_common::LazyTrieData; /// A chain of blocks and their final state. /// @@ -22,7 +22,8 @@ use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive, sync::Ar /// # Warning /// /// A chain of blocks should not be empty. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Chain { /// All blocks in this chain. blocks: BTreeMap>, @@ -33,12 +34,10 @@ pub struct Chain { /// /// Additionally, it includes the individual state changes that led to the current state. execution_outcome: ExecutionOutcome, - /// Deferred trie data for each block in the chain, keyed by block number. + /// Lazy trie data for each block in the chain, keyed by block number. 
/// - /// Contains handles to lazily-computed sorted trie updates and hashed state. - /// This allows Chain to be constructed without blocking on expensive trie - /// computations - the data is only materialized when actually needed. - trie_data: BTreeMap, + /// Contains handles to lazily-initialized sorted trie updates and hashed state. + trie_data: BTreeMap, } type ChainTxReceiptMeta<'a, N> = ( @@ -67,7 +66,7 @@ impl Chain { pub fn new( blocks: impl IntoIterator>, execution_outcome: ExecutionOutcome, - trie_data: BTreeMap, + trie_data: BTreeMap, ) -> Self { let blocks = blocks.into_iter().map(|b| (b.header().number(), b)).collect::>(); @@ -80,11 +79,10 @@ impl Chain { pub fn from_block( block: RecoveredBlock, execution_outcome: ExecutionOutcome, - trie_data: DeferredTrieData, + trie_data: LazyTrieData, ) -> Self { let block_number = block.header().number(); - let trie_data_map = BTreeMap::from([(block_number, trie_data)]); - Self::new([block], execution_outcome, trie_data_map) + Self::new([block], execution_outcome, BTreeMap::from([(block_number, trie_data)])) } /// Get the blocks in this chain. @@ -102,64 +100,21 @@ impl Chain { self.blocks.values().map(|block| block.clone_sealed_header()) } - /// Get all deferred trie data for this chain. - /// - /// Returns handles to lazily-computed sorted trie updates and hashed state. - /// [`DeferredTrieData`] allows `Chain` to be constructed without blocking on - /// expensive trie computations - the data is only materialized when actually needed - /// via [`DeferredTrieData::wait_cloned`] or similar methods. - /// - /// This method does **not** block. To access the computed trie data, call - /// [`DeferredTrieData::wait_cloned`] on individual entries, which will block - /// if the background computation has not yet completed. - pub const fn trie_data(&self) -> &BTreeMap { + /// Get all trie data for this chain. 
+ pub const fn trie_data(&self) -> &BTreeMap { &self.trie_data } - /// Get deferred trie data for a specific block number. - /// - /// Returns a handle to the lazily-computed trie data. This method does **not** block. - /// Call [`DeferredTrieData::wait_cloned`] on the result to wait for and retrieve - /// the computed data, which will block if computation is still in progress. - pub fn trie_data_at(&self, block_number: BlockNumber) -> Option<&DeferredTrieData> { + /// Get trie data for a specific block number. + pub fn trie_data_at(&self, block_number: BlockNumber) -> Option<&LazyTrieData> { self.trie_data.get(&block_number) } - /// Get all trie updates for this chain. - /// - /// Note: This blocks on deferred trie data for all blocks in the chain. - /// Prefer using [`trie_data`](Self::trie_data) when possible to avoid blocking. - pub fn trie_updates(&self) -> BTreeMap> { - self.trie_data.iter().map(|(num, data)| (*num, data.wait_cloned().trie_updates)).collect() - } - - /// Get trie updates for a specific block number. - /// - /// Note: This waits for deferred trie data if not already computed. - pub fn trie_updates_at(&self, block_number: BlockNumber) -> Option> { - self.trie_data.get(&block_number).map(|data| data.wait_cloned().trie_updates) - } - /// Remove all trie data for this chain. pub fn clear_trie_data(&mut self) { self.trie_data.clear(); } - /// Get all hashed states for this chain. - /// - /// Note: This blocks on deferred trie data for all blocks in the chain. - /// Prefer using [`trie_data`](Self::trie_data) when possible to avoid blocking. - pub fn hashed_state(&self) -> BTreeMap> { - self.trie_data.iter().map(|(num, data)| (*num, data.wait_cloned().hashed_state)).collect() - } - - /// Get hashed state for a specific block number. - /// - /// Note: This waits for deferred trie data if not already computed. 
- pub fn hashed_state_at(&self, block_number: BlockNumber) -> Option> { - self.trie_data.get(&block_number).map(|data| data.wait_cloned().hashed_state) - } - /// Get execution outcome of this chain pub const fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_outcome @@ -205,14 +160,14 @@ impl Chain { /// Destructure the chain into its inner components: /// 1. The blocks contained in the chain. /// 2. The execution outcome representing the final state. - /// 3. The deferred trie data map. + /// 3. The trie data map. #[allow(clippy::type_complexity)] pub fn into_inner( self, ) -> ( ChainBlocks<'static, N::Block>, ExecutionOutcome, - BTreeMap, + BTreeMap, ) { (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_data) } @@ -344,7 +299,7 @@ impl Chain { &mut self, block: RecoveredBlock, execution_outcome: ExecutionOutcome, - trie_data: DeferredTrieData, + trie_data: LazyTrieData, ) { let block_number = block.header().number(); self.blocks.insert(block_number, block); @@ -471,7 +426,7 @@ impl>> ChainBlocks<'_, impl IntoIterator for ChainBlocks<'_, B> { type Item = (BlockNumber, RecoveredBlock); - type IntoIter = std::collections::btree_map::IntoIter>; + type IntoIter = alloc::collections::btree_map::IntoIter>; fn into_iter(self) -> Self::IntoIter { self.blocks.into_owned().into_iter() @@ -489,95 +444,25 @@ pub struct BlockReceipts { pub timestamp: u64, } -#[cfg(feature = "serde")] -mod chain_serde { - use super::*; - use crate::ComputedTrieData; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - - /// Serializable representation of Chain that waits for deferred trie data. 
- #[derive(Serialize, Deserialize)] - #[serde(bound = "")] - struct ChainRepr { - blocks: BTreeMap>, - execution_outcome: ExecutionOutcome, - #[serde(default)] - trie_updates: BTreeMap>, - #[serde(default)] - hashed_state: BTreeMap>, - } - - impl Serialize for Chain { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - // Wait for deferred trie data for serialization - let trie_updates: BTreeMap<_, _> = self - .trie_data - .iter() - .map(|(num, data)| (*num, data.wait_cloned().trie_updates)) - .collect(); - let hashed_state: BTreeMap<_, _> = self - .trie_data - .iter() - .map(|(num, data)| (*num, data.wait_cloned().hashed_state)) - .collect(); - - let repr = ChainRepr:: { - blocks: self.blocks.clone(), - execution_outcome: self.execution_outcome.clone(), - trie_updates, - hashed_state, - }; - repr.serialize(serializer) - } - } - - impl<'de, N: NodePrimitives> Deserialize<'de> for Chain { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let repr = ChainRepr::::deserialize(deserializer)?; - - // Convert to ready DeferredTrieData handles - let trie_data = repr - .trie_updates - .into_iter() - .map(|(num, trie_updates)| { - let hashed_state = repr.hashed_state.get(&num).cloned().unwrap_or_default(); - let computed = ComputedTrieData::without_trie_input(hashed_state, trie_updates); - (num, DeferredTrieData::ready(computed)) - }) - .collect(); - - Ok(Self { blocks: repr.blocks, execution_outcome: repr.execution_outcome, trie_data }) - } - } -} - /// Bincode-compatible [`Chain`] serde implementation. 
#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { + use crate::{serde_bincode_compat, ExecutionOutcome}; + use alloc::{borrow::Cow, collections::BTreeMap, sync::Arc}; use alloy_primitives::BlockNumber; use reth_ethereum_primitives::EthPrimitives; - use reth_execution_types::{ - serde_bincode_compat as exec_serde_bincode_compat, ExecutionOutcome, - }; use reth_primitives_traits::{ serde_bincode_compat::{RecoveredBlock, SerdeBincodeCompat}, Block, NodePrimitives, }; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; - use std::{borrow::Cow, collections::BTreeMap, sync::Arc}; /// Bincode-compatible [`super::Chain`] serde implementation. /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust - /// use reth_chain::{serde_bincode_compat, Chain}; + /// use reth_execution_types::{serde_bincode_compat, Chain}; /// use serde::{Deserialize, Serialize}; /// use serde_with::serde_as; /// @@ -597,7 +482,7 @@ pub(super) mod serde_bincode_compat { >, { blocks: RecoveredBlocks<'a, N::Block>, - execution_outcome: exec_serde_bincode_compat::ExecutionOutcome<'a, N::Receipt>, + execution_outcome: serde_bincode_compat::ExecutionOutcome<'a, N::Receipt>, #[serde(default, rename = "trie_updates_legacy")] _trie_updates_legacy: Option>, @@ -653,6 +538,31 @@ pub(super) mod serde_bincode_compat { } } + impl<'a, N> From<&'a super::Chain> for Chain<'a, N> + where + N: NodePrimitives< + Block: Block + 'static, + >, + { + fn from(value: &'a super::Chain) -> Self { + Self { + blocks: RecoveredBlocks(Cow::Borrowed(&value.blocks)), + execution_outcome: value.execution_outcome.as_repr(), + _trie_updates_legacy: None, + trie_updates: value + .trie_data + .iter() + .map(|(k, v)| (*k, v.get().trie_updates.as_ref().into())) + .collect(), + hashed_state: value + .trie_data + .iter() + .map(|(k, v)| (*k, v.get().hashed_state.as_ref().into())) + .collect(), + } 
+ } + } + impl<'a, N> From> for super::Chain where N: NodePrimitives< @@ -660,19 +570,17 @@ pub(super) mod serde_bincode_compat { >, { fn from(value: Chain<'a, N>) -> Self { - use crate::{ComputedTrieData, DeferredTrieData}; + use reth_trie_common::LazyTrieData; - let trie_updates: BTreeMap<_, _> = - value.trie_updates.into_iter().map(|(k, v)| (k, Arc::new(v.into()))).collect(); - let hashed_state: BTreeMap<_, _> = + let hashed_state_map: BTreeMap<_, _> = value.hashed_state.into_iter().map(|(k, v)| (k, Arc::new(v.into()))).collect(); - let trie_data = trie_updates + let trie_data: BTreeMap = value + .trie_updates .into_iter() - .map(|(num, trie_updates)| { - let hashed_state = hashed_state.get(&num).cloned().unwrap_or_default(); - let computed = ComputedTrieData::without_trie_input(hashed_state, trie_updates); - (num, DeferredTrieData::ready(computed)) + .map(|(k, v)| { + let hashed_state = hashed_state_map.get(&k).cloned().unwrap_or_default(); + (k, LazyTrieData::ready(hashed_state, Arc::new(v.into()))) }) .collect(); @@ -694,31 +602,7 @@ pub(super) mod serde_bincode_compat { where S: Serializer, { - use reth_trie_common::serde_bincode_compat as trie_serde; - - // Wait for deferred trie data and collect into maps we can borrow from - let trie_updates_data: BTreeMap = - source.trie_data.iter().map(|(k, v)| (*k, v.wait_cloned().trie_updates)).collect(); - let hashed_state_data: BTreeMap = - source.trie_data.iter().map(|(k, v)| (*k, v.wait_cloned().hashed_state)).collect(); - - // Now create the serde-compatible struct borrowing from the collected data - let chain: Chain<'_, N> = Chain { - blocks: RecoveredBlocks(Cow::Borrowed(&source.blocks)), - execution_outcome: source.execution_outcome.as_repr(), - _trie_updates_legacy: None, - trie_updates: trie_updates_data - .iter() - .map(|(k, v)| (*k, trie_serde::updates::TrieUpdatesSorted::from(v.as_ref()))) - .collect(), - hashed_state: hashed_state_data - .iter() - .map(|(k, v)| { - (*k, 
trie_serde::hashed_state::HashedPostStateSorted::from(v.as_ref())) - }) - .collect(), - }; - chain.serialize(serializer) + Chain::from(source).serialize(serializer) } } @@ -747,10 +631,10 @@ pub(super) mod serde_bincode_compat { #[test] fn test_chain_bincode_roundtrip() { - use std::collections::BTreeMap; + use alloc::collections::BTreeMap; #[serde_as] - #[derive(Debug, Serialize, Deserialize)] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct Data { #[serde_as(as = "serde_bincode_compat::Chain")] chain: Chain, @@ -769,9 +653,7 @@ pub(super) mod serde_bincode_compat { let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); - // Note: Can't compare directly because DeferredTrieData doesn't implement PartialEq - assert_eq!(decoded.chain.blocks, data.chain.blocks); - assert_eq!(decoded.chain.execution_outcome, data.chain.execution_outcome); + assert_eq!(decoded, data); } } } diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 9c2842899e6..6df354219ea 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -564,8 +564,8 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::TxType; use alloy_primitives::{bytes, Address, LogData, B256}; - use reth_ethereum_primitives::TxType; #[test] fn test_initialization() { diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index f3dcc166eb3..8b795981fb5 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -11,6 +11,9 @@ extern crate alloc; +mod chain; +pub use chain::*; + mod execute; pub use execute::*; @@ -26,5 +29,5 @@ pub use execution_outcome::*; /// Read more: #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { - pub use 
super::execution_outcome::serde_bincode_compat::*; + pub use super::{chain::serde_bincode_compat::*, execution_outcome::serde_bincode_compat::*}; } diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 8a550db8a73..189cd509655 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -48,7 +48,6 @@ itertools = { workspace = true, features = ["use_std"] } metrics.workspace = true parking_lot.workspace = true rmp-serde.workspace = true -serde_with.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index eadb7b81979..663a81485ac 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -694,7 +694,6 @@ mod tests { BlockWriter, Chain, DBProvider, DatabaseProviderFactory, TransactionVariant, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; - use std::collections::BTreeMap; fn empty_finalized_header_stream() -> ForkChoiceStream { let (tx, rx) = watch::channel(None); @@ -800,16 +799,12 @@ mod tests { }; // Push the first notification - exex_manager.push_notification(notification1); + exex_manager.push_notification(notification1.clone()); // Verify the buffer contains the notification with the correct ID assert_eq!(exex_manager.buffer.len(), 1); assert_eq!(exex_manager.buffer.front().unwrap().0, 0); - // Compare by tip block since ExExNotification doesn't implement PartialEq - assert_eq!( - *exex_manager.buffer.front().unwrap().1.committed_chain().unwrap().tip(), - block1 - ); + assert_eq!(exex_manager.buffer.front().unwrap().1, notification1); assert_eq!(exex_manager.next_id, 1); // Push another notification @@ -821,17 +816,14 @@ mod tests { new: Arc::new(Chain::new(vec![block2.clone()], Default::default(), Default::default())), }; - exex_manager.push_notification(notification2); + exex_manager.push_notification(notification2.clone()); // Verify the buffer contains both 
notifications with correct IDs assert_eq!(exex_manager.buffer.len(), 2); assert_eq!(exex_manager.buffer.front().unwrap().0, 0); - assert_eq!( - *exex_manager.buffer.front().unwrap().1.committed_chain().unwrap().tip(), - block1 - ); + assert_eq!(exex_manager.buffer.front().unwrap().1, notification1); assert_eq!(exex_manager.buffer.get(1).unwrap().0, 1); - assert_eq!(*exex_manager.buffer.get(1).unwrap().1.committed_chain().unwrap().tip(), block2); + assert_eq!(exex_manager.buffer.get(1).unwrap().1, notification2); assert_eq!(exex_manager.next_id, 2); } @@ -1157,10 +1149,9 @@ mod tests { block2.set_block_number(11); // Setup a notification - let expected_block: RecoveredBlock = Default::default(); let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![expected_block.clone()], + vec![Default::default()], Default::default(), Default::default(), )), @@ -1172,8 +1163,7 @@ mod tests { match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { let received_notification = notifications.next().await.unwrap().unwrap(); - // Compare by tip block since ExExNotification doesn't implement PartialEq - assert_eq!(*received_notification.committed_chain().unwrap().tip(), expected_block); + assert_eq!(received_notification, notification); } Poll::Pending => panic!("Notification send is pending"), Poll::Ready(Err(e)) => panic!("Failed to send notification: {e:?}"), @@ -1265,9 +1255,7 @@ mod tests { match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { let received_notification = notifications.next().await.unwrap().unwrap(); - // Compare by checking that both are reorgs with empty chains - assert!(received_notification.committed_chain().is_some()); - assert!(received_notification.reverted_chain().is_some()); + assert_eq!(received_notification, notification); } Poll::Pending | Poll::Ready(Err(_)) => { panic!("Notification should not be pending or fail") @@ -1307,9 +1295,7 @@ mod tests { match 
exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { let received_notification = notifications.next().await.unwrap().unwrap(); - // Compare by checking that it's a revert with empty chain - assert!(received_notification.reverted_chain().is_some()); - assert!(received_notification.committed_chain().is_none()); + assert_eq!(received_notification, notification); } Poll::Pending | Poll::Ready(Err(_)) => { panic!("Notification should not be pending or fail") @@ -1361,11 +1347,11 @@ mod tests { new: Arc::new(Chain::new( vec![genesis_block.clone()], Default::default(), - BTreeMap::new(), + Default::default(), )), }; let notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new(vec![block.clone()], Default::default(), BTreeMap::new())), + new: Arc::new(Chain::new(vec![block.clone()], Default::default(), Default::default())), }; let (finalized_headers_tx, rx) = watch::channel(None); @@ -1382,38 +1368,34 @@ mod tests { let mut cx = Context::from_waker(futures::task::noop_waker_ref()); - exex_manager.handle().send(ExExNotificationSource::Pipeline, genesis_notification)?; - exex_manager.handle().send(ExExNotificationSource::BlockchainTree, notification)?; + exex_manager + .handle() + .send(ExExNotificationSource::Pipeline, genesis_notification.clone())?; + exex_manager.handle().send(ExExNotificationSource::BlockchainTree, notification.clone())?; assert!(exex_manager.as_mut().poll(&mut cx)?.is_pending()); - // Check genesis notification received - let poll_result = notifications.try_poll_next_unpin(&mut cx)?; - if let Poll::Ready(Some(n)) = poll_result { - assert_eq!(*n.committed_chain().unwrap().tip(), genesis_block); - } else { - panic!("Expected genesis notification"); - } + assert_eq!( + notifications.try_poll_next_unpin(&mut cx)?, + Poll::Ready(Some(genesis_notification)) + ); assert!(exex_manager.as_mut().poll(&mut cx)?.is_pending()); - // Check block notification received - let poll_result = 
notifications.try_poll_next_unpin(&mut cx)?; - if let Poll::Ready(Some(n)) = poll_result { - assert_eq!(*n.committed_chain().unwrap().tip(), block); - } else { - panic!("Expected block notification"); - } + assert_eq!( + notifications.try_poll_next_unpin(&mut cx)?, + Poll::Ready(Some(notification.clone())) + ); // WAL shouldn't contain the genesis notification, because it's finalized - let wal_notifications = - exex_manager.wal.iter_notifications()?.collect::>>()?; - assert_eq!(wal_notifications.len(), 1); - assert_eq!(*wal_notifications[0].committed_chain().unwrap().tip(), block); + assert_eq!( + exex_manager.wal.iter_notifications()?.collect::>>()?, + std::slice::from_ref(¬ification) + ); finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx didn't emit the `FinishedHeight` event - let wal_notifications = - exex_manager.wal.iter_notifications()?.collect::>>()?; - assert_eq!(wal_notifications.len(), 1); - assert_eq!(*wal_notifications[0].committed_chain().unwrap().tip(), block); + assert_eq!( + exex_manager.wal.iter_notifications()?.collect::>>()?, + std::slice::from_ref(¬ification) + ); // Send a `FinishedHeight` event with a non-canonical block events_tx @@ -1424,10 +1406,10 @@ mod tests { assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx emitted a `FinishedHeight` event with a // non-canonical block - let wal_notifications = - exex_manager.wal.iter_notifications()?.collect::>>()?; - assert_eq!(wal_notifications.len(), 1); - assert_eq!(*wal_notifications[0].committed_chain().unwrap().tip(), block); + assert_eq!( + exex_manager.wal.iter_notifications()?.collect::>>()?, + std::slice::from_ref(¬ification) + ); // Send a `FinishedHeight` event with a canonical block events_tx.send(ExExEvent::FinishedHeight(block.num_hash())).unwrap(); @@ -1435,7 +1417,7 @@ mod tests { 
finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL is finalized - assert!(exex_manager.wal.iter_notifications()?.next().is_none()); + assert_eq!(exex_manager.wal.iter_notifications()?.next().transpose()?, None); Ok(()) } diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 2b5d6d93d18..e6880951dd5 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -449,7 +449,7 @@ mod tests { use crate::Wal; use alloy_consensus::Header; use alloy_eips::BlockNumHash; - + use eyre::OptionExt; use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_ethereum_primitives::Block; @@ -491,15 +491,14 @@ mod tests { let exex_head = ExExHead { block: BlockNumHash { number: genesis_block.number, hash: genesis_hash } }; - let expected_block = random_block( - &mut rng, - node_head.number + 1, - BlockParams { parent: Some(node_head.hash), ..Default::default() }, - ) - .try_recover()?; let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![expected_block.clone()], + vec![random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .try_recover()?], Default::default(), BTreeMap::new(), )), @@ -519,16 +518,23 @@ mod tests { .with_head(exex_head); // First notification is the backfill of missing blocks from the canonical chain - let backfill_notification = notifications.next().await.transpose()?; - assert!(backfill_notification.is_some()); - // Verify it's a commit notification with the expected block range - let backfill_chain = backfill_notification.unwrap().committed_chain().unwrap(); - assert_eq!(backfill_chain.first().header().number(), 1); + assert_eq!( + notifications.next().await.transpose()?, + Some(ExExNotification::ChainCommitted { + new: Arc::new( + BackfillJobFactory::new( + notifications.evm_config.clone(), + 
notifications.provider.clone() + ) + .backfill(1..=1) + .next() + .ok_or_eyre("failed to backfill")?? + ) + }) + ); // Second notification is the actual notification that we sent before - let received = notifications.next().await.transpose()?; - assert!(received.is_some()); - assert_eq!(*received.unwrap().committed_chain().unwrap().tip(), expected_block); + assert_eq!(notifications.next().await.transpose()?, Some(notification)); Ok(()) } @@ -549,19 +555,18 @@ mod tests { let node_head = BlockNumHash { number: genesis_block.number, hash: genesis_hash }; let exex_head = ExExHead { block: node_head }; - let expected_block = Block { - header: Header { - parent_hash: node_head.hash, - number: node_head.number + 1, - ..Default::default() - }, - ..Default::default() - } - .seal_slow() - .try_recover()?; let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![expected_block.clone()], + vec![Block { + header: Header { + parent_hash: node_head.hash, + number: node_head.number + 1, + ..Default::default() + }, + ..Default::default() + } + .seal_slow() + .try_recover()?], Default::default(), BTreeMap::new(), )), @@ -581,8 +586,7 @@ mod tests { .with_head(exex_head); let new_notification = notifications.next().await.transpose()?; - assert!(new_notification.is_some()); - assert_eq!(*new_notification.unwrap().committed_chain().unwrap().tip(), expected_block); + assert_eq!(new_notification, Some(notification)); Ok(()) } @@ -612,7 +616,7 @@ mod tests { let provider_rw = provider.database_provider_rw()?; provider_rw.insert_block(&node_head_block)?; provider_rw.commit()?; - let _node_head_notification = ExExNotification::ChainCommitted { + let node_head_notification = ExExNotification::ChainCommitted { new: Arc::new( BackfillJobFactory::new(EthEvmConfig::mainnet(), provider.clone()) .backfill(node_head.number..=node_head.number) @@ -627,24 +631,26 @@ mod tests { BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ); let 
exex_head = ExExHead { block: exex_head_block.num_hash() }; - let exex_head_recovered = exex_head_block.clone().try_recover()?; let exex_head_notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![exex_head_recovered.clone()], + vec![exex_head_block.clone().try_recover()?], Default::default(), BTreeMap::new(), )), }; wal.commit(&exex_head_notification)?; - let new_block = random_block( - &mut rng, - node_head.number + 1, - BlockParams { parent: Some(node_head.hash), ..Default::default() }, - ) - .try_recover()?; let new_notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new(vec![new_block.clone()], Default::default(), BTreeMap::new())), + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .try_recover()?], + Default::default(), + BTreeMap::new(), + )), }; let (notifications_tx, notifications_rx) = mpsc::channel(1); @@ -662,25 +668,15 @@ mod tests { // First notification is the revert of the ExEx head block to get back to the canonical // chain - let revert_notification = notifications.next().await.transpose()?; - assert!(revert_notification.is_some()); - // Verify it's a revert with the exex_head block assert_eq!( - *revert_notification.unwrap().reverted_chain().unwrap().tip(), - exex_head_recovered + notifications.next().await.transpose()?, + Some(exex_head_notification.into_inverted()) ); // Second notification is the backfilled block from the canonical chain to get back to the // canonical tip - let backfill_notification = notifications.next().await.transpose()?; - assert!(backfill_notification.is_some()); - assert_eq!( - backfill_notification.unwrap().committed_chain().unwrap().tip().header().number(), - node_head.number - ); + assert_eq!(notifications.next().await.transpose()?, Some(node_head_notification)); // Third notification is the actual notification that we sent before - let received = 
notifications.next().await.transpose()?; - assert!(received.is_some()); - assert_eq!(*received.unwrap().committed_chain().unwrap().tip(), new_block); + assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); Ok(()) } @@ -706,10 +702,9 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ); - let exex_head_recovered = exex_head_block.clone().try_recover()?; let exex_head_notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( - vec![exex_head_recovered.clone()], + vec![exex_head_block.clone().try_recover()?], Default::default(), BTreeMap::new(), )), @@ -721,14 +716,17 @@ mod tests { block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() }, }; - let new_block = random_block( - &mut rng, - genesis_block.number + 1, - BlockParams { parent: Some(genesis_hash), ..Default::default() }, - ) - .try_recover()?; let new_notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new(vec![new_block.clone()], Default::default(), BTreeMap::new())), + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), ..Default::default() }, + ) + .try_recover()?], + Default::default(), + BTreeMap::new(), + )), }; let (notifications_tx, notifications_rx) = mpsc::channel(1); @@ -746,17 +744,13 @@ mod tests { // First notification is the revert of the ExEx head block to get back to the canonical // chain - let revert_notification = notifications.next().await.transpose()?; - assert!(revert_notification.is_some()); assert_eq!( - *revert_notification.unwrap().reverted_chain().unwrap().tip(), - exex_head_recovered + notifications.next().await.transpose()?, + Some(exex_head_notification.into_inverted()) ); // Second notification is the actual notification that we sent before - let received = notifications.next().await.transpose()?; - assert!(received.is_some()); - 
assert_eq!(*received.unwrap().committed_chain().unwrap().tip(), new_block); + assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); Ok(()) } diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index a59c7202b14..0836e15b55a 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -255,36 +255,6 @@ mod tests { }) } - fn notifications_equal(a: &[ExExNotification], b: &[ExExNotification]) -> bool { - if a.len() != b.len() { - return false; - } - a.iter().zip(b.iter()).all(|(n1, n2)| { - let committed_eq = match (n1.committed_chain(), n2.committed_chain()) { - (Some(c1), Some(c2)) => { - c1.tip().hash() == c2.tip().hash() && c1.blocks() == c2.blocks() - } - (None, None) => true, - _ => false, - }; - let reverted_eq = match (n1.reverted_chain(), n2.reverted_chain()) { - (Some(c1), Some(c2)) => { - c1.tip().hash() == c2.tip().hash() && c1.blocks() == c2.blocks() - } - (None, None) => true, - _ => false, - }; - committed_eq && reverted_eq - }) - } - - fn assert_notifications_eq(actual: Vec, expected: Vec) { - assert!( - notifications_equal(&actual, &expected), - "notifications mismatch:\nactual: {actual:?}\nexpected: {expected:?}" - ); - } - fn sort_committed_blocks( committed_blocks: Vec<(B256, u32, CachedBlock)>, ) -> Vec<(B256, u32, CachedBlock)> { @@ -388,7 +358,7 @@ mod tests { wal.inner.block_cache().committed_blocks_sorted(), committed_notification_1_cache_committed_blocks ); - assert_notifications_eq(read_notifications(&wal)?, vec![committed_notification_1.clone()]); + assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]); // Second notification (revert block 1) wal.commit(&reverted_notification)?; @@ -402,9 +372,9 @@ mod tests { wal.inner.block_cache().committed_blocks_sorted(), committed_notification_1_cache_committed_blocks ); - assert_notifications_eq( + assert_eq!( read_notifications(&wal)?, - vec![committed_notification_1.clone(), 
reverted_notification.clone()], + vec![committed_notification_1.clone(), reverted_notification.clone()] ); // Third notification (commit block 1, 2) @@ -447,13 +417,13 @@ mod tests { .concat() ) ); - assert_notifications_eq( + assert_eq!( read_notifications(&wal)?, vec![ committed_notification_1.clone(), reverted_notification.clone(), - committed_notification_2.clone(), - ], + committed_notification_2.clone() + ] ); // Fourth notification (revert block 2, commit block 2, 3) @@ -498,14 +468,14 @@ mod tests { .concat() ) ); - assert_notifications_eq( + assert_eq!( read_notifications(&wal)?, vec![ committed_notification_1, reverted_notification, committed_notification_2.clone(), - reorged_notification.clone(), - ], + reorged_notification.clone() + ] ); // Now, finalize the WAL up to the block 1. Block 1 was in the third notification that also @@ -527,9 +497,9 @@ mod tests { .concat() ) ); - assert_notifications_eq( + assert_eq!( read_notifications(&wal)?, - vec![committed_notification_2.clone(), reorged_notification.clone()], + vec![committed_notification_2.clone(), reorged_notification.clone()] ); // Re-open the WAL and verify that the cache population works correctly @@ -548,10 +518,7 @@ mod tests { .concat() ) ); - assert_notifications_eq( - read_notifications(&wal)?, - vec![committed_notification_2, reorged_notification], - ); + assert_eq!(read_notifications(&wal)?, vec![committed_notification_2, reorged_notification]); Ok(()) } diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index bb118c8a98a..2deffcd68f3 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -163,16 +163,12 @@ where let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Writing notification to WAL"); - // Serialize using the bincode- and msgpack-compatible serde wrapper via SerializeAs + // Serialize using the bincode- and msgpack-compatible serde wrapper + let notification = + 
reth_exex_types::serde_bincode_compat::ExExNotification::::from(notification); + reth_fs_util::atomic_write_file(&file_path, |file| { - use serde_with::SerializeAs; - let mut buf = Vec::new(); - reth_exex_types::serde_bincode_compat::ExExNotification::<'_, N>::serialize_as( - notification, - &mut rmp_serde::Serializer::new(&mut buf), - ) - .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?; - std::io::Write::write_all(file, &buf) + rmp_serde::encode::write(file, ¬ification) })?; Ok(file_path.metadata().map_err(|err| WalError::FileMetadata(file_id, err))?.len()) @@ -193,7 +189,7 @@ mod tests { use reth_testing_utils::generators::{self, random_block}; use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, - BranchNodeCompact, HashedPostState, HashedStorage, Nibbles, + BranchNodeCompact, HashedPostState, HashedStorage, LazyTrieData, Nibbles, }; use std::{collections::BTreeMap, fs::File, sync::Arc}; @@ -228,10 +224,8 @@ mod tests { // Get expected data let expected_notification = get_test_notification_data().unwrap(); - // Compare by tip block since ExExNotification doesn't implement PartialEq assert_eq!( - *notification.committed_chain().unwrap().tip(), - *expected_notification.committed_chain().unwrap().tip(), + ¬ification, &expected_notification, "Decoded notification should match expected static data" ); } @@ -247,18 +241,18 @@ mod tests { let new_block = random_block(&mut rng, 0, Default::default()).try_recover()?; let notification = ExExNotification::ChainReorged { - new: Arc::new(Chain::new(vec![new_block.clone()], Default::default(), BTreeMap::new())), - old: Arc::new(Chain::new(vec![old_block.clone()], Default::default(), BTreeMap::new())), + new: Arc::new(Chain::new(vec![new_block], Default::default(), BTreeMap::new())), + old: Arc::new(Chain::new(vec![old_block], Default::default(), BTreeMap::new())), }; // Do a round trip serialization and deserialization let file_id = 0; storage.write_notification(file_id, ¬ification)?; 
let deserialized_notification = storage.read_notification(file_id)?; - // Compare by chain tips since ExExNotification doesn't implement PartialEq - let deserialized = deserialized_notification.map(|(n, _)| n).unwrap(); - assert_eq!(*deserialized.committed_chain().unwrap().tip(), new_block); - assert_eq!(*deserialized.reverted_chain().unwrap().tip(), old_block); + assert_eq!( + deserialized_notification.map(|(notification, _)| notification), + Some(notification) + ); Ok(()) } @@ -276,14 +270,10 @@ mod tests { let notification = get_test_notification_data()?; - // Create a temp storage and write the notification using the existing serialization path - let temp_dir = tempfile::tempdir()?; - let storage = Storage::new(&temp_dir)?; - storage.write_notification(0, ¬ification)?; - - // Read it back as raw bytes - let temp_path = temp_dir.path().join("0.wal"); - let encoded = std::fs::read(&temp_path)?; + // Serialize the notification + let notification_compat = + reth_exex_types::serde_bincode_compat::ExExNotification::from(¬ification); + let encoded = rmp_serde::encode::to_vec(¬ification_compat)?; // Write to test-data directory let test_data_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data"); @@ -346,12 +336,11 @@ mod tests { )]), }; - let trie_data = - reth_chain_state::DeferredTrieData::ready(reth_chain_state::ComputedTrieData { - hashed_state: Arc::new(hashed_state.into_sorted()), - trie_updates: Arc::new(trie_updates.into_sorted()), - anchored_trie_input: None, - }); + let trie_data = LazyTrieData::ready( + Arc::new(hashed_state.into_sorted()), + Arc::new(trie_updates.into_sorted()), + ); + let notification: ExExNotification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 39b116e6786..80ce4167e46 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -12,13 +12,13 @@ workspace = true [dependencies] ## reth 
-reth-chain.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-consensus = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true reth-evm-ethereum = { workspace = true, features = ["test-utils"] } +reth-execution-types.workspace = true reth-exex.workspace = true reth-payload-builder.workspace = true reth-network.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index d6d112bf88f..8430ea5d91f 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -17,7 +17,6 @@ use std::{ use alloy_eips::BlockNumHash; use futures_util::FutureExt; -use reth_chain::Chain; use reth_chainspec::{ChainSpec, MAINNET}; use reth_consensus::test_utils::TestConsensus; use reth_db::{ @@ -29,6 +28,7 @@ use reth_db::{ use reth_db_common::init::init_genesis; use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm_ethereum::MockEvmConfig; +use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; use reth_network::{config::rng_secret_key, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{ diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index fadbf9c2abd..11dec0246fe 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth -reth-chain.workspace = true reth-chain-state.workspace = true +reth-execution-types.workspace = true reth-primitives-traits.workspace = true # reth @@ -36,7 +36,7 @@ rand.workspace = true default = [] serde = [ "dep:serde", - "reth-chain/serde", + "reth-execution-types/serde", "alloy-eips/serde", "alloy-primitives/serde", "rand/serde", @@ -45,7 +45,7 @@ serde = [ "reth-chain-state/serde", ] serde-bincode-compat = [ - "reth-chain/serde-bincode-compat", + 
"reth-execution-types/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index e076540aec7..a11fd2a46b6 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -1,13 +1,12 @@ use std::sync::Arc; -use reth_chain::Chain; use reth_chain_state::CanonStateNotification; +use reth_execution_types::Chain; use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "serde", serde(bound = ""))] pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { @@ -74,7 +73,7 @@ impl From> for ExExNotification

/// Bincode-compatible [`ExExNotification`] serde implementation. #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { - use reth_chain::serde_bincode_compat::Chain; + use reth_execution_types::serde_bincode_compat::Chain; use reth_primitives_traits::NodePrimitives; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -125,6 +124,28 @@ pub(super) mod serde_bincode_compat { }, } + impl<'a, N> From<&'a super::ExExNotification> for ExExNotification<'a, N> + where + N: NodePrimitives, + { + fn from(value: &'a super::ExExNotification) -> Self { + match value { + super::ExExNotification::ChainCommitted { new } => { + ExExNotification::ChainCommitted { new: Chain::from(new.as_ref()) } + } + super::ExExNotification::ChainReorged { old, new } => { + ExExNotification::ChainReorged { + old: Chain::from(old.as_ref()), + new: Chain::from(new.as_ref()), + } + } + super::ExExNotification::ChainReverted { old } => { + ExExNotification::ChainReverted { old: Chain::from(old.as_ref()) } + } + } + } + } + impl<'a, N> From> for super::ExExNotification where N: NodePrimitives, @@ -155,41 +176,7 @@ pub(super) mod serde_bincode_compat { where S: Serializer, { - // Helper that uses Chain's SerializeAs for bincode-compatible serialization - struct ChainWrapper<'a, N: NodePrimitives>(&'a reth_chain::Chain); - - impl Serialize for ChainWrapper<'_, N> { - fn serialize(&self, serializer: S2) -> Result - where - S2: Serializer, - { - Chain::<'_, N>::serialize_as(self.0, serializer) - } - } - - // Create an enum that matches the ExExNotification structure but uses ChainWrapper - #[derive(Serialize)] - #[serde(bound = "")] - #[allow(clippy::enum_variant_names)] - enum Repr<'a, N: NodePrimitives> { - ChainCommitted { new: ChainWrapper<'a, N> }, - ChainReorged { old: ChainWrapper<'a, N>, new: ChainWrapper<'a, N> }, - ChainReverted { old: ChainWrapper<'a, N> }, - } - - match source { - 
super::ExExNotification::ChainCommitted { new } => { - Repr::ChainCommitted { new: ChainWrapper(new.as_ref()) }.serialize(serializer) - } - super::ExExNotification::ChainReorged { old, new } => Repr::ChainReorged { - old: ChainWrapper(old.as_ref()), - new: ChainWrapper(new.as_ref()), - } - .serialize(serializer), - super::ExExNotification::ChainReverted { old } => { - Repr::ChainReverted { old: ChainWrapper(old.as_ref()) }.serialize(serializer) - } - } + ExExNotification::from(source).serialize(serializer) } } @@ -210,7 +197,7 @@ pub(super) mod serde_bincode_compat { use super::super::{serde_bincode_compat, ExExNotification}; use arbitrary::Arbitrary; use rand::Rng; - use reth_chain::Chain; + use reth_execution_types::Chain; use reth_primitives_traits::RecoveredBlock; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -219,7 +206,7 @@ pub(super) mod serde_bincode_compat { #[test] fn test_exex_notification_bincode_roundtrip() { #[serde_as] - #[derive(Debug, Serialize, Deserialize)] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct Data { #[serde_as( as = "serde_bincode_compat::ExExNotification<'_, reth_ethereum_primitives::EthPrimitives>" @@ -229,34 +216,26 @@ pub(super) mod serde_bincode_compat { let mut bytes = [0u8; 1024]; rand::rng().fill(bytes.as_mut_slice()); - let old_block: reth_primitives_traits::RecoveredBlock = - RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); - let new_block: reth_primitives_traits::RecoveredBlock = - RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); - let data = Data { notification: ExExNotification::ChainReorged { - old: Arc::new(Chain::new(vec![old_block], Default::default(), BTreeMap::new())), - new: Arc::new(Chain::new(vec![new_block], Default::default(), BTreeMap::new())), + old: Arc::new(Chain::new( + vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) + .unwrap()], + Default::default(), + BTreeMap::new(), + )), + new: 
Arc::new(Chain::new( + vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) + .unwrap()], + Default::default(), + BTreeMap::new(), + )), }, }; let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); - - // Compare fields individually since Chain doesn't implement PartialEq - match (&decoded.notification, &data.notification) { - ( - ExExNotification::ChainReorged { old: decoded_old, new: decoded_new }, - ExExNotification::ChainReorged { old: expected_old, new: expected_new }, - ) => { - assert_eq!(decoded_old.blocks(), expected_old.blocks()); - assert_eq!(decoded_old.execution_outcome(), expected_old.execution_outcome()); - assert_eq!(decoded_new.blocks(), expected_new.blocks()); - assert_eq!(decoded_new.execution_outcome(), expected_new.execution_outcome()); - } - _ => panic!("Expected ChainReorged variant"), - } + assert_eq!(decoded, data); } } } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 4bbd87c6dfb..724f8555e09 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -44,7 +44,6 @@ op-revm.workspace = true thiserror.workspace = true [dev-dependencies] -reth-chain.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } alloy-genesis.workspace = true diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index f8cde24b5ed..d7985b8b1c5 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -295,10 +295,11 @@ mod tests { use alloy_genesis::Genesis; use alloy_primitives::{bytes, map::HashMap, Address, LogData, B256}; use op_revm::OpSpecId; - use reth_chain::Chain; use reth_chainspec::ChainSpec; use reth_evm::execute::ProviderError; - use reth_execution_types::{AccountRevertInit, BundleStateInit, ExecutionOutcome, RevertsInit}; + use reth_execution_types::{ + AccountRevertInit, BundleStateInit, 
Chain, ExecutionOutcome, RevertsInit, + }; use reth_optimism_chainspec::{OpChainSpec, BASE_MAINNET}; use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt}; use reth_primitives_traits::{Account, RecoveredBlock}; diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 222440539c4..ab0855bf4f6 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -12,11 +12,11 @@ description = "Types supporting implementation of 'eth' namespace RPC server API workspace = true [dependencies] -reth-chain.workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-errors.workspace = true reth-evm.workspace = true +reth-execution-types.workspace = true reth-metrics.workspace = true reth-ethereum-primitives = { workspace = true, features = ["rpc"] } reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 16d35028295..73d8072e6d8 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -5,9 +5,9 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{stream::FuturesOrdered, Stream, StreamExt}; -use reth_chain::Chain; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; +use reth_execution_types::Chain; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock}; use reth_storage_api::{BlockReader, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 45e1e5ff5fc..470a84a825b 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-chain.workspace = true reth-chainspec = { workspace = true, optional = true } 
reth-codecs.workspace = true reth-config.workspace = true @@ -30,6 +29,7 @@ reth-fs-util.workspace = true reth-network-p2p.workspace = true reth-primitives-traits = { workspace = true, features = ["serde-bincode-compat"] } reth-provider.workspace = true +reth-execution-types.workspace = true reth-ethereum-primitives = { workspace = true, optional = true } reth-prune.workspace = true reth-prune-types.workspace = true diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index a00b0780f74..13a6dade63e 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -2,11 +2,11 @@ use crate::stages::MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD; use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use num_traits::Zero; -use reth_chain::Chain; use reth_config::config::ExecutionConfig; use reth_consensus::FullConsensus; use reth_db::{static_file::HeaderMask, tables}; use reth_evm::{execute::Executor, metrics::ExecutorMetrics, ConfigureEvm}; +use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives_traits::{format_gas_throughput, BlockBody, NodePrimitives}; use reth_provider::{ diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 7227d618cf9..0199b6d2fc4 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-chain.workspace = true reth-chainspec.workspace = true reth-execution-types.workspace = true reth-ethereum-primitives = { workspace = true, features = ["reth-codec"] } @@ -87,15 +86,6 @@ tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } [features] rocksdb = ["dep:rocksdb"] -serde-bincode-compat = [ - "reth-chain/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat", - "alloy-eips/serde-bincode-compat", - 
"reth-ethereum-primitives/serde-bincode-compat", - "reth-execution-types/serde-bincode-compat", - "reth-primitives-traits/serde-bincode-compat", - "reth-storage-api/serde-bincode-compat", -] test-utils = [ "reth-db/test-utils", "reth-nippy-jar/test-utils", diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 6c587cc2bed..bfab44cb2ac 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -35,20 +35,11 @@ pub mod test_utils; pub mod either_writer; pub use either_writer::*; -#[cfg(feature = "serde-bincode-compat")] -pub use reth_chain::serde_bincode_compat; -pub use reth_chain::{ - AnchoredTrieInput, BlockReceipts, Chain, ChainBlocks, ComputedTrieData, DeferredTrieData, - DisplayBlocksChain, -}; pub use reth_chain_state::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, CanonStateNotifications, CanonStateSubscriptions, }; -pub use reth_execution_types::{ - AccountRevertInit, BlockExecutionOutput, BlockExecutionResult, BundleStateInit, ChangedAccount, - ExecutionOutcome, RevertsInit, -}; +pub use reth_execution_types::*; /// Re-export `OriginalValuesKnown` pub use revm_database::states::OriginalValuesKnown; // reexport traits to avoid breaking changes diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index dcd95f3de23..fecd87a0e8e 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -782,7 +782,6 @@ mod tests { use alloy_primitives::{BlockNumber, TxNumber, B256}; use itertools::Itertools; use rand::Rng; - use reth_chain::Chain; use reth_chain_state::{ test_utils::TestBlockBuilder, CanonStateNotification, CanonStateSubscriptions, CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain, @@ -791,7 +790,9 @@ mod tests { use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; 
use reth_errors::ProviderError; use reth_ethereum_primitives::{Block, Receipt}; - use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; + use reth_execution_types::{ + BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome, + }; use reth_primitives_traits::{RecoveredBlock, SealedBlock, SignerRecoverable}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, @@ -1347,33 +1348,24 @@ mod tests { // Send and receive commit notifications. let block_2 = test_block_builder.generate_random_block(1, block_hash_1).try_recover()?; - let chain = Chain::new(vec![block_2.clone()], ExecutionOutcome::default(), BTreeMap::new()); + let chain = Chain::new(vec![block_2], ExecutionOutcome::default(), BTreeMap::new()); let commit = CanonStateNotification::Commit { new: Arc::new(chain.clone()) }; in_memory_state.notify_canon_state(commit.clone()); let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); - // Verify both subscribers received commit notifications with matching tip - let n1 = notification_1.unwrap(); - let n2 = notification_2.unwrap(); - assert_eq!(*n1.tip(), block_2); - assert_eq!(*n2.tip(), block_2); + assert_eq!(notification_1, Ok(commit.clone())); + assert_eq!(notification_2, Ok(commit.clone())); // Send and receive re-org notifications. 
let block_3 = test_block_builder.generate_random_block(1, block_hash_1).try_recover()?; let block_4 = test_block_builder.generate_random_block(2, block_3.hash()).try_recover()?; - let new_chain = Chain::new( - vec![block_3, block_4.clone()], - ExecutionOutcome::default(), - BTreeMap::new(), - ); + let new_chain = + Chain::new(vec![block_3, block_4], ExecutionOutcome::default(), BTreeMap::new()); let re_org = CanonStateNotification::Reorg { old: Arc::new(chain), new: Arc::new(new_chain) }; in_memory_state.notify_canon_state(re_org.clone()); let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); - // Verify both subscribers received reorg notifications with matching tip - let n1 = notification_1.unwrap(); - let n2 = notification_2.unwrap(); - assert_eq!(*n1.tip(), block_4); - assert_eq!(*n2.tip(), block_4); + assert_eq!(notification_1, Ok(re_org.clone())); + assert_eq!(notification_2, Ok(re_org.clone())); Ok(()) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 46ca89ba32b..4233dfd9a6e 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -33,7 +33,6 @@ use alloy_primitives::{ use itertools::Itertools; use parking_lot::RwLock; use rayon::slice::ParallelSliceMut; -use reth_chain::Chain; use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; use reth_db_api::{ @@ -48,7 +47,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, BlockNumberList, PlainAccountState, PlainStorageState, }; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, ExecutionOutcome}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, ExecutionOutcome}; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives_traits::{ Account, Block as _, BlockBody 
as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry, diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 70723cda284..83cbbbd714e 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-chain = { workspace = true, optional = true } reth-db-models.workspace = true reth-chainspec.workspace = true reth-db-api = { workspace = true, optional = true } @@ -38,7 +37,6 @@ serde_json = { workspace = true, optional = true } [features] default = ["std"] std = [ - "dep:reth-chain", "reth-chainspec/std", "alloy-consensus/std", "alloy-eips/std", @@ -62,7 +60,6 @@ db-api = [ ] serde = [ - "reth-chain?/serde", "reth-ethereum-primitives/serde", "reth-db-models/serde", "reth-execution-types/serde", @@ -81,7 +78,6 @@ serde-bincode-compat = [ "reth-execution-types/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", "reth-trie-common/serde-bincode-compat", - "reth-chain?/serde-bincode-compat", "reth-ethereum-primitives/serde-bincode-compat", "alloy-eips/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", diff --git a/crates/storage/storage-api/src/block_writer.rs b/crates/storage/storage-api/src/block_writer.rs index 5124ff1676a..233e9898d11 100644 --- a/crates/storage/storage-api/src/block_writer.rs +++ b/crates/storage/storage-api/src/block_writer.rs @@ -1,9 +1,8 @@ use crate::NodePrimitivesProvider; use alloc::vec::Vec; use alloy_primitives::BlockNumber; -use reth_chain::Chain; use reth_db_models::StoredBlockBodyIndices; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::HashedPostStateSorted; diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index bb4f9ba310b..02030719840 100644 --- 
a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-chain.workspace = true reth-chain-state.workspace = true reth-ethereum-primitives.workspace = true reth-chainspec.workspace = true @@ -91,7 +90,6 @@ serde = [ "revm-primitives/serde", "reth-primitives-traits/serde", "reth-ethereum-primitives/serde", - "reth-chain/serde", "reth-chain-state/serde", "reth-storage-api/serde", ] diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 6edb41a6a23..f2a93bd6453 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -3,7 +3,7 @@ use alloy_consensus::Typed2718; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{BlockNumber, B256}; -use reth_chain::ChainBlocks; +use reth_execution_types::ChainBlocks; use reth_primitives_traits::{Block, BlockBody, SignedTransaction}; use std::collections::BTreeMap; @@ -91,8 +91,8 @@ mod tests { use super::*; use alloy_consensus::{Header, Signed}; use alloy_primitives::Signature; - use reth_chain::Chain; use reth_ethereum_primitives::Transaction; + use reth_execution_types::Chain; use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; #[test] diff --git a/crates/trie/common/src/lazy.rs b/crates/trie/common/src/lazy.rs new file mode 100644 index 00000000000..b4d6a4afd18 --- /dev/null +++ b/crates/trie/common/src/lazy.rs @@ -0,0 +1,194 @@ +//! Lazy initialization wrapper for trie data. +//! +//! Provides a no-std compatible [`LazyTrieData`] type for lazily initialized +//! trie-related data containing sorted hashed state and trie updates. + +use crate::{updates::TrieUpdatesSorted, HashedPostStateSorted}; +use alloc::sync::Arc; +use core::fmt; +use reth_primitives_traits::sync::OnceLock; + +/// Container for sorted trie data: hashed state and trie updates. 
+/// +/// This bundles both [`HashedPostStateSorted`] and [`TrieUpdatesSorted`] together +/// for convenient passing and storage. +#[derive(Clone, Debug, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct SortedTrieData { + /// Sorted hashed post-state produced by execution. + pub hashed_state: Arc, + /// Sorted trie updates produced by state root computation. + pub trie_updates: Arc, +} + +impl SortedTrieData { + /// Creates a new [`SortedTrieData`] with the given values. + pub const fn new( + hashed_state: Arc, + trie_updates: Arc, + ) -> Self { + Self { hashed_state, trie_updates } + } +} + +/// Lazily initialized trie data containing sorted hashed state and trie updates. +/// +/// This is a no-std compatible wrapper that supports two modes: +/// 1. **Ready mode**: Data is available immediately (created via `ready()`) +/// 2. **Deferred mode**: Data is computed on first access (created via `deferred()`) +/// +/// In deferred mode, the computation runs on the first call to `get()`, `hashed_state()`, +/// or `trie_updates()`, and results are cached for subsequent calls. +/// +/// Cloning is cheap (Arc clone) and clones share the cached state. +pub struct LazyTrieData { + /// Cached sorted trie data, computed on first access. + data: Arc>, + /// Optional deferred computation function. 
+ compute: Option SortedTrieData + Send + Sync>>, +} + +impl Clone for LazyTrieData { + fn clone(&self) -> Self { + Self { data: Arc::clone(&self.data), compute: self.compute.clone() } + } +} + +impl fmt::Debug for LazyTrieData { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("LazyTrieData") + .field("data", &if self.data.get().is_some() { "initialized" } else { "pending" }) + .finish() + } +} + +impl PartialEq for LazyTrieData { + fn eq(&self, other: &Self) -> bool { + self.get() == other.get() + } +} + +impl Eq for LazyTrieData {} + +impl LazyTrieData { + /// Creates a new [`LazyTrieData`] that is already initialized with the given values. + pub fn ready( + hashed_state: Arc, + trie_updates: Arc, + ) -> Self { + let data = OnceLock::new(); + let _ = data.set(SortedTrieData::new(hashed_state, trie_updates)); + Self { data: Arc::new(data), compute: None } + } + + /// Creates a new [`LazyTrieData`] from pre-computed [`SortedTrieData`]. + pub fn from_sorted(sorted: SortedTrieData) -> Self { + let data = OnceLock::new(); + let _ = data.set(sorted); + Self { data: Arc::new(data), compute: None } + } + + /// Creates a new [`LazyTrieData`] with a deferred computation function. + /// + /// The computation will run on the first call to `get()`, `hashed_state()`, + /// or `trie_updates()`. Results are cached for subsequent calls. + pub fn deferred(compute: impl Fn() -> SortedTrieData + Send + Sync + 'static) -> Self { + Self { data: Arc::new(OnceLock::new()), compute: Some(Arc::new(compute)) } + } + + /// Returns a reference to the sorted trie data, computing if necessary. + /// + /// # Panics + /// + /// Panics if created via `deferred()` and the computation function was not provided. + pub fn get(&self) -> &SortedTrieData { + self.data.get_or_init(|| { + self.compute.as_ref().expect("LazyTrieData::get called before initialization")() + }) + } + + /// Returns a clone of the hashed state Arc. 
+ /// + /// If not initialized, computes from the deferred source or panics. + pub fn hashed_state(&self) -> Arc { + Arc::clone(&self.get().hashed_state) + } + + /// Returns a clone of the trie updates Arc. + /// + /// If not initialized, computes from the deferred source or panics. + pub fn trie_updates(&self) -> Arc { + Arc::clone(&self.get().trie_updates) + } + + /// Returns a clone of the [`SortedTrieData`]. + /// + /// If not initialized, computes from the deferred source or panics. + pub fn sorted_trie_data(&self) -> SortedTrieData { + self.get().clone() + } +} + +#[cfg(feature = "serde")] +impl serde::Serialize for LazyTrieData { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.get().serialize(serializer) + } +} + +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for LazyTrieData { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let data = SortedTrieData::deserialize(deserializer)?; + Ok(Self::from_sorted(data)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_lazy_ready_is_initialized() { + let lazy = LazyTrieData::ready( + Arc::new(HashedPostStateSorted::default()), + Arc::new(TrieUpdatesSorted::default()), + ); + let _ = lazy.hashed_state(); + let _ = lazy.trie_updates(); + } + + #[test] + fn test_lazy_clone_shares_state() { + let lazy1 = LazyTrieData::ready( + Arc::new(HashedPostStateSorted::default()), + Arc::new(TrieUpdatesSorted::default()), + ); + let lazy2 = lazy1.clone(); + + // Both point to the same data + assert!(Arc::ptr_eq(&lazy1.hashed_state(), &lazy2.hashed_state())); + assert!(Arc::ptr_eq(&lazy1.trie_updates(), &lazy2.trie_updates())); + } + + #[test] + fn test_lazy_deferred() { + let lazy = LazyTrieData::deferred(SortedTrieData::default); + assert!(lazy.hashed_state().is_empty()); + assert!(lazy.trie_updates().is_empty()); + } + + #[test] + fn test_lazy_from_sorted() { + let sorted = SortedTrieData::default(); + let lazy = 
LazyTrieData::from_sorted(sorted); + assert!(lazy.hashed_state().is_empty()); + assert!(lazy.trie_updates().is_empty()); + } +} diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 8faa44622fa..bc842768b8f 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -11,6 +11,10 @@ extern crate alloc; +/// Lazy initialization wrapper for trie data. +mod lazy; +pub use lazy::{LazyTrieData, SortedTrieData}; + /// In-memory hashed state. mod hashed_state; pub use hashed_state::*; From f624372334052b6da9a793cfd6b0a5f3d85c790e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 17 Jan 2026 19:20:28 +0100 Subject: [PATCH 065/267] feat(execution-types): add receipts_iter helper (#21162) Co-authored-by: Amp --- crates/evm/execution-types/src/execution_outcome.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 6df354219ea..7d1723f56e4 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -249,6 +249,14 @@ impl ExecutionOutcome { &self.receipts[index] } + /// Returns an iterator over receipt slices, one per block. + /// + /// This is a more ergonomic alternative to `receipts()` that yields slices + /// instead of requiring indexing into a nested `Vec>`. + pub fn receipts_iter(&self) -> impl Iterator + '_ { + self.receipts.iter().map(|v| v.as_slice()) + } + /// Is execution outcome empty. 
pub const fn is_empty(&self) -> bool { self.len() == 0 From be3234d848d230eba7cbaf6f41e2cd609b092389 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 18 Jan 2026 14:57:20 +0000 Subject: [PATCH 066/267] chore(deps): weekly `cargo update` (#21167) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 65 +++++++++++++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17e53c81377..77bee70d722 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2125,9 +2125,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.52" +version = "1.2.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" +checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" dependencies = [ "find-msvc-tools", "jobserver", @@ -4075,9 +4075,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixed-cache" @@ -5122,9 +5122,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.46.0" +version = "1.46.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b66886d14d18d420ab5052cbff544fc5d34d0b2cdd35eb5976aaa10a4a472e5" +checksum = "248b42847813a1550dafd15296fd9748c651d0c32194559dbc05d804d54b21e8" dependencies = [ "console", "once_cell", @@ -5307,9 +5307,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -11724,9 +11724,9 @@ checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" @@ -11825,9 +11825,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ "web-time", "zeroize", @@ -11862,9 +11862,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ "ring", "rustls-pki-types", @@ -13753,18 +13753,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.1+wasi-0.2.4" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -13775,11 +13775,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -13788,9 +13789,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -13798,9 +13799,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", @@ -13811,9 +13812,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] @@ -13847,9 +13848,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -14419,9 +14420,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.46.0" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" [[package]] name = "write16" @@ -14605,9 +14606,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" +checksum = "94f63c051f4fe3c1509da62131a678643c5b6fbdc9273b2b79d4378ebda003d2" [[package]] name = "zstd" From 915164078fd2dde7299cf94745c1a3654d72e4e4 Mon Sep 17 00:00:00 2001 From: MoNyAvA Date: Mon, 19 Jan 2026 10:27:45 +0100 Subject: [PATCH 067/267] docs: document minimal storage mode in pruning FAQ (#21025) Co-authored-by: Matthias Seitz --- docs/vocs/docs/pages/run/faq/pruning.mdx | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/vocs/docs/pages/run/faq/pruning.mdx b/docs/vocs/docs/pages/run/faq/pruning.mdx index e5812832012..4aac4ff542e 100644 --- a/docs/vocs/docs/pages/run/faq/pruning.mdx +++ b/docs/vocs/docs/pages/run/faq/pruning.mdx @@ -52,6 +52,23 @@ reth node \ --authrpc.port 8551 ``` +### Minimal Storage Mode + +To run Reth in minimal storage mode, follow the steps from the previous chapter on +[how to run on mainnet or official testnets](/run/ethereum), and add a `--minimal` flag. 
For example: + +```bash +reth node \ + --minimal \ + --authrpc.jwtsecret /path/to/secret \ + --authrpc.addr 127.0.0.1 \ + --authrpc.port 8551 +``` + +Minimal storage mode is a preconfigured pruned node profile that aims to minimize disk usage by fully +pruning sender recovery, transaction lookup, and receipts, and by keeping only the last 10,064 blocks +of account history, storage history, and block bodies with smaller static file segments. + ## Size All numbers are as of April 2024 at block number 19.6M for mainnet. From a901d80ee678dbd378ec8b488a9ddfd19d46adc3 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 19 Jan 2026 02:21:25 -0800 Subject: [PATCH 068/267] chore: apply spelling and typo fixes (#21182) --- CLAUDE.md | 2 +- crates/ethereum/node/tests/e2e/pool.rs | 4 ++-- crates/ethereum/primitives/src/transaction.rs | 2 +- crates/net/banlist/src/lib.rs | 4 ++-- crates/storage/codecs/derive/src/compact/generator.rs | 4 ++-- crates/storage/codecs/derive/src/compact/mod.rs | 2 +- crates/storage/libmdbx-rs/src/cursor.rs | 2 +- crates/storage/nippy-jar/src/consistency.rs | 4 ++-- docs/design/database.md | 4 ++-- docs/repo/layout.md | 3 ++- 10 files changed, 16 insertions(+), 15 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 7a194c01b09..ca276aa4d01 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -249,7 +249,7 @@ Write comments that remain valuable after the PR is merged. Future readers won't unsafe impl GlobalAlloc for LimitedAllocator { ... } // Binary search requires sorted input. Panics on unsorted slices. 
-fn find_index(items: &[Item], target: &Item) -> Option +fn find_index(items: &[Item], target: &Item) -> Option // Timeout set to 5s to match EVM block processing limits const TRACER_TIMEOUT: Duration = Duration::from_secs(5); diff --git a/crates/ethereum/node/tests/e2e/pool.rs b/crates/ethereum/node/tests/e2e/pool.rs index 9187cb61405..3777c4945dd 100644 --- a/crates/ethereum/node/tests/e2e/pool.rs +++ b/crates/ethereum/node/tests/e2e/pool.rs @@ -153,7 +153,7 @@ async fn maintain_txpool_reorg() -> eyre::Result<()> { w1.address(), ); let pooled_tx1 = EthPooledTransaction::new(tx1.clone(), 200); - let tx_hash1 = *pooled_tx1.clone().hash(); + let tx_hash1 = *pooled_tx1.hash(); // build tx2 from wallet2 let envelop2 = TransactionTestContext::transfer_tx(1, w2.clone()).await; @@ -162,7 +162,7 @@ async fn maintain_txpool_reorg() -> eyre::Result<()> { w2.address(), ); let pooled_tx2 = EthPooledTransaction::new(tx2.clone(), 200); - let tx_hash2 = *pooled_tx2.clone().hash(); + let tx_hash2 = *pooled_tx2.hash(); let block_info = BlockInfo { block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, diff --git a/crates/ethereum/primitives/src/transaction.rs b/crates/ethereum/primitives/src/transaction.rs index 28782c2ac66..f4de2994e38 100644 --- a/crates/ethereum/primitives/src/transaction.rs +++ b/crates/ethereum/primitives/src/transaction.rs @@ -236,7 +236,7 @@ impl reth_codecs::Compact for Transaction { // # Panics // // A panic will be triggered if an identifier larger than 3 is passed from the database. For - // optimism a identifier with value [`DEPOSIT_TX_TYPE_ID`] is allowed. + // optimism an identifier with value [`DEPOSIT_TX_TYPE_ID`] is allowed. 
fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { let (tx_type, buf) = TxType::from_compact(buf, identifier); diff --git a/crates/net/banlist/src/lib.rs b/crates/net/banlist/src/lib.rs index 402041ed2f8..2de02e13c30 100644 --- a/crates/net/banlist/src/lib.rs +++ b/crates/net/banlist/src/lib.rs @@ -106,7 +106,7 @@ impl BanList { self.banned_ips.contains_key(ip) } - /// checks the ban list to see if it contains the given ip + /// checks the ban list to see if it contains the given peer #[inline] pub fn is_banned_peer(&self, peer_id: &PeerId) -> bool { self.banned_peers.contains_key(peer_id) @@ -117,7 +117,7 @@ impl BanList { self.banned_ips.remove(ip); } - /// Unbans the ip address + /// Unbans the peer pub fn unban_peer(&mut self, peer_id: &PeerId) { self.banned_peers.remove(peer_id); } diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index d72fc4644e9..569cebce224 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -69,7 +69,7 @@ pub fn generate_from_to( } } -/// Generates code to implement the `Compact` trait method `to_compact`. +/// Generates code to implement the `Compact` trait method `from_compact`. fn generate_from_compact( fields: &FieldList, ident: &Ident, @@ -155,7 +155,7 @@ fn generate_from_compact( } } -/// Generates code to implement the `Compact` trait method `from_compact`. +/// Generates code to implement the `Compact` trait method `to_compact`. 
fn generate_to_compact( fields: &FieldList, ident: &Ident, diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index ed43286923b..216b017b9b7 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -175,7 +175,7 @@ fn should_use_alt_impl(ftype: &str, segment: &syn::PathSegment) -> bool { let syn::PathArguments::AngleBracketed(ref args) = segment.arguments && let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() && let (Some(path), 1) = (arg_path.path.segments.first(), arg_path.path.segments.len()) && - ["B256", "Address", "Address", "Bloom", "TxHash", "BlockHash", "CompactPlaceholder"] + ["B256", "Address", "Bloom", "TxHash", "BlockHash", "CompactPlaceholder"] .iter() .any(|&s| path.ident == s) { diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 8f7a4b5cd44..2227f7292d1 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -312,7 +312,7 @@ where } /// Position at first key-value pair greater than or equal to specified, return both key and - /// data, and the return code depends on a exact match. + /// data, and the return code depends on an exact match. /// /// For non DupSort-ed collections this works the same as [`Self::set_range()`], but returns /// [false] if key found exactly and [true] if greater key was found. diff --git a/crates/storage/nippy-jar/src/consistency.rs b/crates/storage/nippy-jar/src/consistency.rs index 0abe118a4be..070582c22f6 100644 --- a/crates/storage/nippy-jar/src/consistency.rs +++ b/crates/storage/nippy-jar/src/consistency.rs @@ -37,12 +37,12 @@ impl NippyJarChecker { Self { jar, data_file: None, offsets_file: None } } - /// It will throw an error if the [`NippyJar`] is in a inconsistent state. + /// It will throw an error if the [`NippyJar`] is in an inconsistent state. 
pub fn check_consistency(&mut self) -> Result<(), NippyJarError> { self.handle_consistency(ConsistencyFailStrategy::ThrowError) } - /// It will attempt to heal if the [`NippyJar`] is in a inconsistent state. + /// It will attempt to heal if the [`NippyJar`] is in an inconsistent state. /// /// **ATTENTION**: disk commit should be handled externally by consuming `Self` pub fn ensure_consistency(&mut self) -> Result<(), NippyJarError> { diff --git a/docs/design/database.md b/docs/design/database.md index 0d22bb3f9a6..c2dc12d2d9b 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -13,9 +13,9 @@ - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz). - We implemented that trait for the following encoding formats: - [Ethereum-specific Compact Encoding](https://github.com/paradigmxyz/reth/blob/main/crates/storage/codecs/derive/src/compact/mod.rs): A lot of Ethereum datatypes have unnecessary zeros when serialized, or optional (e.g. on empty hashes) which would be nice not to pay in storage costs. - - [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on Table "PlainState which adds a bitfield to Accounts. + - [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on Table "PlainState" which adds a bitfield to Accounts. - [Akula](https://github.com/akula-bft/akula/) expanded it for other tables and datatypes manually. It also saved some more space by storing the length of certain types (U256, u64) using the [`modular_bitfield`](https://docs.rs/modular-bitfield/latest/modular_bitfield/) crate, which compacts this information. 
- - We generalized it for all types, by writing a derive macro that autogenerates code for implementing the trait. It, also generates the interfaces required for fuzzing using ToB/test-fuzz: + - We generalized it for all types, by writing a derive macro that autogenerates code for implementing the trait. It also generates the interfaces required for fuzzing using ToB/test-fuzz: - [Scale Encoding](https://github.com/paritytech/parity-scale-codec) - [Postcard Encoding](https://github.com/jamesmunns/postcard) - Passthrough (called `no_codec` in the codebase) diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 93fbd28f3df..87312de920f 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -23,6 +23,7 @@ Generally, reth is composed of a few components, with supporting crates. The mai - [Payloads](#payloads) - [Primitives](#primitives) - [Optimism](#optimism) + - [Ethereum](#ethereum-specific-crates) - [Misc](#misc) The supporting crates are split into two categories: [primitives](#primitives) and [miscellaneous](#misc). @@ -181,7 +182,7 @@ These crates define primitive types or algorithms. Crates related to the Optimism rollup live in [optimism](../../crates/optimism/). -#### Ethereum-Specific Crates +### Ethereum-Specific Crates Ethereum mainnet-specific implementations and primitives live in `crates/ethereum/`. 
From 52ec8e9491c4d4c2719623843db43330627fd143 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 19 Jan 2026 11:21:37 +0100 Subject: [PATCH 069/267] ci: update to tempoxyz (#21176) --- .github/workflows/dependencies.yml | 2 +- .github/workflows/lint.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml index 49c13d38b8d..6e2efede632 100644 --- a/.github/workflows/dependencies.yml +++ b/.github/workflows/dependencies.yml @@ -15,6 +15,6 @@ permissions: jobs: update: - uses: ithacaxyz/ci/.github/workflows/cargo-update-pr.yml@main + uses: tempoxyz/ci/.github/workflows/cargo-update-pr.yml@main secrets: token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 839e7098372..bc2ab5bdded 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -285,7 +285,7 @@ jobs: - run: zepter run check deny: - uses: ithacaxyz/ci/.github/workflows/deny.yml@main + uses: tempoxyz/ci/.github/workflows/deny.yml@main lint-success: name: lint success From c2435ff6f8265088b9ded0014051c9a97d0d7b84 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 19 Jan 2026 02:26:24 -0800 Subject: [PATCH 070/267] feat(download): resumable snapshot downloads with auto-retry (#21161) --- crates/cli/commands/src/download.rs | 165 ++++++++++++++++++++++++++-- 1 file changed, 153 insertions(+), 12 deletions(-) diff --git a/crates/cli/commands/src/download.rs b/crates/cli/commands/src/download.rs index 3dd7fd33933..c0d8041e032 100644 --- a/crates/cli/commands/src/download.rs +++ b/crates/cli/commands/src/download.rs @@ -2,14 +2,15 @@ use crate::common::EnvironmentArgs; use clap::Parser; use eyre::Result; use lz4::Decoder; -use reqwest::Client; +use reqwest::{blocking::Client as BlockingClient, header::RANGE, Client, StatusCode}; use reth_chainspec::{EthChainSpec, 
EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_fs_util as fs; use std::{ borrow::Cow, - io::{self, Read, Write}, - path::Path, + fs::OpenOptions, + io::{self, BufWriter, Read, Write}, + path::{Path, PathBuf}, sync::{Arc, OnceLock}, time::{Duration, Instant}, }; @@ -327,18 +328,158 @@ fn extract_from_file(path: &Path, format: CompressionFormat, target_dir: &Path) extract_archive(file, total_size, format, target_dir) } -/// Fetches the snapshot from a remote URL, uncompressing it in a streaming fashion. +const MAX_DOWNLOAD_RETRIES: u32 = 10; +const RETRY_BACKOFF_SECS: u64 = 5; + +/// Wrapper that tracks download progress while writing data. +/// Used with [`io::copy`] to display progress during downloads. +struct ProgressWriter { + inner: W, + progress: DownloadProgress, +} + +impl Write for ProgressWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + let n = self.inner.write(buf)?; + let _ = self.progress.update(n as u64); + Ok(n) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +/// Downloads a file with resume support using HTTP Range requests. +/// Automatically retries on failure, resuming from where it left off. +/// Returns the path to the downloaded file and its total size. 
+fn resumable_download(url: &str, target_dir: &Path) -> Result<(PathBuf, u64)> { + let file_name = Url::parse(url) + .ok() + .and_then(|u| u.path_segments()?.next_back().map(|s| s.to_string())) + .unwrap_or_else(|| "snapshot.tar".to_string()); + + let final_path = target_dir.join(&file_name); + let part_path = target_dir.join(format!("{file_name}.part")); + + let client = BlockingClient::builder().timeout(Duration::from_secs(30)).build()?; + + let mut total_size: Option = None; + let mut last_error: Option = None; + + for attempt in 1..=MAX_DOWNLOAD_RETRIES { + let existing_size = fs::metadata(&part_path).map(|m| m.len()).unwrap_or(0); + + if let Some(total) = total_size && + existing_size >= total + { + fs::rename(&part_path, &final_path)?; + info!(target: "reth::cli", "Download complete: {}", final_path.display()); + return Ok((final_path, total)); + } + + if attempt > 1 { + info!(target: "reth::cli", + "Retry attempt {}/{} - resuming from {} bytes", + attempt, MAX_DOWNLOAD_RETRIES, existing_size + ); + } + + let mut request = client.get(url); + if existing_size > 0 { + request = request.header(RANGE, format!("bytes={existing_size}-")); + if attempt == 1 { + info!(target: "reth::cli", "Resuming download from {} bytes", existing_size); + } + } + + let response = match request.send().and_then(|r| r.error_for_status()) { + Ok(r) => r, + Err(e) => { + last_error = Some(e.into()); + if attempt < MAX_DOWNLOAD_RETRIES { + info!(target: "reth::cli", + "Download failed, retrying in {} seconds...", RETRY_BACKOFF_SECS + ); + std::thread::sleep(Duration::from_secs(RETRY_BACKOFF_SECS)); + } + continue; + } + }; + + let is_partial = response.status() == StatusCode::PARTIAL_CONTENT; + + let size = if is_partial { + response + .headers() + .get("Content-Range") + .and_then(|v| v.to_str().ok()) + .and_then(|v| v.split('/').next_back()) + .and_then(|v| v.parse().ok()) + } else { + response.content_length() + }; + + if total_size.is_none() { + total_size = size; + } + + let 
current_total = total_size.ok_or_else(|| { + eyre::eyre!("Server did not provide Content-Length or Content-Range header") + })?; + + let file = if is_partial && existing_size > 0 { + OpenOptions::new() + .append(true) + .open(&part_path) + .map_err(|e| fs::FsPathError::open(e, &part_path))? + } else { + fs::create_file(&part_path)? + }; + + let start_offset = if is_partial { existing_size } else { 0 }; + let mut progress = DownloadProgress::new(current_total); + progress.downloaded = start_offset; + + let mut writer = ProgressWriter { inner: BufWriter::new(file), progress }; + let mut reader = response; + + let copy_result = io::copy(&mut reader, &mut writer); + let flush_result = writer.inner.flush(); + println!(); + + if let Err(e) = copy_result.and(flush_result) { + last_error = Some(e.into()); + if attempt < MAX_DOWNLOAD_RETRIES { + info!(target: "reth::cli", + "Download interrupted, retrying in {} seconds...", RETRY_BACKOFF_SECS + ); + std::thread::sleep(Duration::from_secs(RETRY_BACKOFF_SECS)); + } + continue; + } + + fs::rename(&part_path, &final_path)?; + info!(target: "reth::cli", "Download complete: {}", final_path.display()); + return Ok((final_path, current_total)); + } + + Err(last_error + .unwrap_or_else(|| eyre::eyre!("Download failed after {} attempts", MAX_DOWNLOAD_RETRIES))) +} + +/// Fetches the snapshot from a remote URL with resume support, then extracts it. fn download_and_extract(url: &str, format: CompressionFormat, target_dir: &Path) -> Result<()> { - let client = reqwest::blocking::Client::builder().build()?; - let response = client.get(url).send()?.error_for_status()?; + let (downloaded_path, total_size) = resumable_download(url, target_dir)?; + + info!(target: "reth::cli", "Extracting snapshot..."); + let file = fs::open(&downloaded_path)?; + extract_archive(file, total_size, format, target_dir)?; - let total_size = response.content_length().ok_or_else(|| { - eyre::eyre!( - "Server did not provide Content-Length header. 
This is required for snapshot downloads" - ) - })?; + fs::remove_file(&downloaded_path)?; + info!(target: "reth::cli", "Removed downloaded archive"); - extract_archive(response, total_size, format, target_dir) + Ok(()) } /// Downloads and extracts a snapshot, blocking until finished. From 6a2010e59519ca2b933ff5de9790e952dee9cb6c Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Mon, 19 Jan 2026 13:39:52 +0200 Subject: [PATCH 071/267] refactor(stages): reuse history index cache buffers in `collect_history_indices` (#21017) --- crates/stages/stages/src/stages/utils.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 74e9b6b679a..82760a09e25 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -57,12 +57,12 @@ where let mut collector = Collector::new(etl_config.file_size, etl_config.dir.clone()); let mut cache: HashMap> = HashMap::default(); - let mut collect = |cache: &HashMap>| { - for (key, indices) in cache { - let last = indices.last().expect("qed"); + let mut collect = |cache: &mut HashMap>| { + for (key, indices) in cache.drain() { + let last = *indices.last().expect("qed"); collector.insert( - sharded_key_factory(*key, *last), - BlockNumberList::new_pre_sorted(indices.iter().copied()), + sharded_key_factory(key, last), + BlockNumberList::new_pre_sorted(indices.into_iter()), )?; } Ok::<(), StageError>(()) @@ -87,13 +87,12 @@ where current_block_number = block_number; flush_counter += 1; if flush_counter > DEFAULT_CACHE_THRESHOLD { - collect(&cache)?; - cache.clear(); + collect(&mut cache)?; flush_counter = 0; } } } - collect(&cache)?; + collect(&mut cache)?; Ok(collector) } From 0c66315f20d53f5a2ef4630e9ba9fde64e2ac41b Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 19 Jan 2026 03:45:56 -0800 Subject: [PATCH 072/267] chore(bench): add --disable-tx-gossip to benchmark node 
args (#21171) --- bin/reth-bench-compare/src/node.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/reth-bench-compare/src/node.rs b/bin/reth-bench-compare/src/node.rs index 66f86df9773..8888d5d8c0a 100644 --- a/bin/reth-bench-compare/src/node.rs +++ b/bin/reth-bench-compare/src/node.rs @@ -163,6 +163,7 @@ impl NodeManager { "eth,reth".to_string(), "--disable-discovery".to_string(), "--trusted-only".to_string(), + "--disable-tx-gossip".to_string(), ]); // Add tracing arguments if OTLP endpoint is configured From f7460e219ce903da410d2f571e5cd64e02afbbe9 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 19 Jan 2026 20:01:33 +0800 Subject: [PATCH 073/267] fix(flashblocks): Add flashblock ws connection retry period (#20510) --- crates/optimism/flashblocks/src/service.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs index 23a71688fea..ee0229d7f00 100644 --- a/crates/optimism/flashblocks/src/service.rs +++ b/crates/optimism/flashblocks/src/service.rs @@ -12,10 +12,18 @@ use reth_primitives_traits::{AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives use reth_revm::cached::CachedReads; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskExecutor; -use std::{sync::Arc, time::Instant}; -use tokio::sync::{oneshot, watch}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{ + sync::{oneshot, watch}, + time::sleep, +}; use tracing::*; +const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); + /// The `FlashBlockService` maintains an in-memory [`PendingFlashBlock`] built out of a sequence of /// [`FlashBlock`]s. 
#[derive(Debug)] @@ -167,7 +175,13 @@ where self.try_start_build_job(); } Some(Err(err)) => { - warn!(target: "flashblocks", %err, "Error receiving flashblock"); + warn!( + target: "flashblocks", + %err, + retry_period = CONNECTION_BACKOUT_PERIOD.as_secs(), + "Error receiving flashblock" + ); + sleep(CONNECTION_BACKOUT_PERIOD).await; } None => { warn!(target: "flashblocks", "Flashblock stream ended"); From 1d55abeef3a7c40787e97325ec16358b22ae7174 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 19 Jan 2026 13:04:57 +0000 Subject: [PATCH 074/267] chore: rename extend_ref methods on sorted data structures (#21043) --- crates/chain-state/src/deferred_trie.rs | 14 +++++++------- crates/chain-state/src/lazy_overlay.rs | 4 ++-- .../stages/stages/src/stages/merkle_changesets.rs | 4 ++-- .../provider/src/providers/database/provider.rs | 3 ++- .../provider/src/providers/state/historical.rs | 4 ++-- .../provider/src/providers/state/overlay.rs | 8 ++++---- crates/trie/common/src/hashed_state.rs | 6 ++++-- crates/trie/common/src/updates.rs | 11 +++++++---- crates/trie/db/src/changesets.rs | 4 ++-- 9 files changed, 32 insertions(+), 26 deletions(-) diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs index efe23a2ded3..6e758a12205 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -192,10 +192,10 @@ impl DeferredTrieData { ); // Only trigger COW clone if there's actually data to add. 
if !sorted_hashed_state.is_empty() { - Arc::make_mut(&mut overlay.state).extend_ref(&sorted_hashed_state); + Arc::make_mut(&mut overlay.state).extend_ref_and_sort(&sorted_hashed_state); } if !sorted_trie_updates.is_empty() { - Arc::make_mut(&mut overlay.nodes).extend_ref(&sorted_trie_updates); + Arc::make_mut(&mut overlay.nodes).extend_ref_and_sort(&sorted_trie_updates); } overlay } @@ -242,13 +242,13 @@ impl DeferredTrieData { for ancestor in ancestors { let ancestor_data = ancestor.wait_cloned(); - state_mut.extend_ref(ancestor_data.hashed_state.as_ref()); - nodes_mut.extend_ref(ancestor_data.trie_updates.as_ref()); + state_mut.extend_ref_and_sort(ancestor_data.hashed_state.as_ref()); + nodes_mut.extend_ref_and_sort(ancestor_data.trie_updates.as_ref()); } // Extend with current block's sorted data last (takes precedence) - state_mut.extend_ref(sorted_hashed_state); - nodes_mut.extend_ref(sorted_trie_updates); + state_mut.extend_ref_and_sort(sorted_hashed_state); + nodes_mut.extend_ref_and_sort(sorted_trie_updates); overlay } @@ -521,7 +521,7 @@ mod tests { let hashed_state = Arc::new(HashedPostStateSorted::new(accounts, B256Map::default())); let trie_updates = Arc::default(); let mut overlay = TrieInputSorted::default(); - Arc::make_mut(&mut overlay.state).extend_ref(hashed_state.as_ref()); + Arc::make_mut(&mut overlay.state).extend_ref_and_sort(hashed_state.as_ref()); DeferredTrieData::ready(ComputedTrieData { hashed_state, diff --git a/crates/chain-state/src/lazy_overlay.rs b/crates/chain-state/src/lazy_overlay.rs index 712d85d1989..b611f29241e 100644 --- a/crates/chain-state/src/lazy_overlay.rs +++ b/crates/chain-state/src/lazy_overlay.rs @@ -155,8 +155,8 @@ impl LazyOverlay { for block in blocks_iter { let block_data = block.wait_cloned(); - Arc::make_mut(&mut state).extend_ref(block_data.hashed_state.as_ref()); - Arc::make_mut(&mut nodes).extend_ref(block_data.trie_updates.as_ref()); + Arc::make_mut(&mut 
state).extend_ref_and_sort(block_data.hashed_state.as_ref()); + Arc::make_mut(&mut nodes).extend_ref_and_sort(block_data.trie_updates.as_ref()); } TrieInputSorted { state, nodes, prefix_sets: Default::default() } diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs index 248718de90b..e81f8f18564 100644 --- a/crates/stages/stages/src/stages/merkle_changesets.rs +++ b/crates/stages/stages/src/stages/merkle_changesets.rs @@ -217,7 +217,7 @@ impl MerkleChangeSets { let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostStateSorted { let mut cumulative_revert = HashedPostStateSorted::default(); for n in (block_number..target_end).rev() { - cumulative_revert.extend_ref(get_block_state_revert(n)) + cumulative_revert.extend_ref_and_sort(get_block_state_revert(n)) } cumulative_revert }; @@ -270,7 +270,7 @@ impl MerkleChangeSets { let trie_overlay = Arc::clone(&nodes); let mut nodes_mut = Arc::unwrap_or_clone(nodes); - nodes_mut.extend_ref(&this_trie_updates); + nodes_mut.extend_ref_and_sort(&this_trie_updates); nodes = Arc::new(nodes_mut); // Write the changesets to the DB using the trie updates produced by the block, and the diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 4233dfd9a6e..ca8314099c5 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -582,7 +582,8 @@ impl DatabaseProvider StateRootProvider fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; let hashed_state_sorted = hashed_state.into_sorted(); - revert_state.extend_ref(&hashed_state_sorted); + revert_state.extend_ref_and_sort(&hashed_state_sorted); Ok(StateRoot::overlay_root(self.tx(), &revert_state)?) 
} @@ -306,7 +306,7 @@ impl StateRootProvider ) -> ProviderResult<(B256, TrieUpdates)> { let mut revert_state = self.revert_state()?; let hashed_state_sorted = hashed_state.into_sorted(); - revert_state.extend_ref(&hashed_state_sorted); + revert_state.extend_ref_and_sort(&hashed_state_sorted); Ok(StateRoot::overlay_root_with_updates(self.tx(), &revert_state)?) } diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 23e972938c9..b233d621b7e 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -163,12 +163,12 @@ impl OverlayStateProviderFactory { pub fn with_extended_hashed_state_overlay(mut self, other: HashedPostStateSorted) -> Self { match &mut self.overlay_source { Some(OverlaySource::Immediate { state, .. }) => { - Arc::make_mut(state).extend_ref(&other); + Arc::make_mut(state).extend_ref_and_sort(&other); } Some(OverlaySource::Lazy(lazy)) => { // Resolve lazy overlay and convert to immediate with extension let (trie, mut state) = lazy.as_overlay(); - Arc::make_mut(&mut state).extend_ref(&other); + Arc::make_mut(&mut state).extend_ref_and_sort(&other); self.overlay_source = Some(OverlaySource::Immediate { trie, state }); } None => { @@ -342,7 +342,7 @@ where let trie_updates = if trie_reverts.is_empty() { overlay_trie } else if !overlay_trie.is_empty() { - trie_reverts.extend_ref(&overlay_trie); + trie_reverts.extend_ref_and_sort(&overlay_trie); Arc::new(trie_reverts) } else { Arc::new(trie_reverts) @@ -351,7 +351,7 @@ where let hashed_state_updates = if hashed_state_reverts.is_empty() { overlay_state } else if !overlay_state.is_empty() { - hashed_state_reverts.extend_ref(&overlay_state); + hashed_state_reverts.extend_ref_and_sort(&overlay_state); Arc::new(hashed_state_reverts) } else { Arc::new(hashed_state_reverts) diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs 
index 410fdf00998..3b232d43467 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -621,7 +621,9 @@ impl HashedPostStateSorted { /// Extends this state with contents of another sorted state. /// Entries in `other` take precedence for duplicate keys. - pub fn extend_ref(&mut self, other: &Self) { + /// + /// Sorts the accounts after extending. Sorts the storage after extending, for each account. + pub fn extend_ref_and_sort(&mut self, other: &Self) { // Extend accounts extend_sorted_vec(&mut self.accounts, &other.accounts); @@ -1416,7 +1418,7 @@ mod tests { storages: B256Map::default(), }; - state1.extend_ref(&state2); + state1.extend_ref_and_sort(&state2); // Check accounts are merged and sorted assert_eq!(state1.accounts.len(), 6); diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 6214d5ec084..17f0d02b5ef 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -605,7 +605,10 @@ impl TrieUpdatesSorted { /// This merges the account nodes and storage tries from `other` into `self`. /// Account nodes are merged and re-sorted, with `other`'s values taking precedence /// for duplicate keys. - pub fn extend_ref(&mut self, other: &Self) { + /// + /// Sorts the account nodes after extending. Sorts the storage tries after extending, for each + /// storage trie. 
+ pub fn extend_ref_and_sort(&mut self, other: &Self) { // Extend account nodes extend_sorted_vec(&mut self.account_nodes, &other.account_nodes); @@ -834,7 +837,7 @@ mod tests { // Test extending with empty updates let mut updates1 = TrieUpdatesSorted::default(); let updates2 = TrieUpdatesSorted::default(); - updates1.extend_ref(&updates2); + updates1.extend_ref_and_sort(&updates2); assert_eq!(updates1.account_nodes.len(), 0); assert_eq!(updates1.storage_tries.len(), 0); @@ -853,7 +856,7 @@ mod tests { ], storage_tries: B256Map::default(), }; - updates1.extend_ref(&updates2); + updates1.extend_ref_and_sort(&updates2); assert_eq!(updates1.account_nodes.len(), 3); // Should be sorted: 0x01, 0x02, 0x03 assert_eq!(updates1.account_nodes[0].0, Nibbles::from_nibbles_unchecked([0x01])); @@ -889,7 +892,7 @@ mod tests { (hashed_address2, storage_trie1), ]), }; - updates1.extend_ref(&updates2); + updates1.extend_ref_and_sort(&updates2); assert_eq!(updates1.storage_tries.len(), 2); assert!(updates1.storage_tries.contains_key(&hashed_address1)); assert!(updates1.storage_tries.contains_key(&hashed_address2)); diff --git a/crates/trie/db/src/changesets.rs b/crates/trie/db/src/changesets.rs index efc7fb62e87..fe9558e3bc5 100644 --- a/crates/trie/db/src/changesets.rs +++ b/crates/trie/db/src/changesets.rs @@ -87,7 +87,7 @@ where // This reverts all changes from db tip back to just after block-1 was processed let mut cumulative_state_revert_prev = cumulative_state_revert.clone(); - cumulative_state_revert_prev.extend_ref(&individual_state_revert); + cumulative_state_revert_prev.extend_ref_and_sort(&individual_state_revert); // Step 2: Calculate cumulative trie updates revert for block-1 // This gives us the trie state as it was after block-1 was processed @@ -469,7 +469,7 @@ impl ChangesetCache { // Since we iterate newest to oldest, older values are added last // and overwrite any conflicting newer values (oldest changeset values take // precedence). 
- accumulated_reverts.extend_ref(&changesets); + accumulated_reverts.extend_ref_and_sort(&changesets); } let elapsed = timer.elapsed(); From c9dad4765df6c96a427d513227e09767e8e56f14 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 19 Jan 2026 15:04:08 +0100 Subject: [PATCH 075/267] chore: bump version to 1.10.1 (#21188) --- Cargo.lock | 499 +++++++++++++++++++-------------------- Cargo.toml | 2 +- docs/vocs/vocs.config.ts | 2 +- 3 files changed, 251 insertions(+), 252 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 77bee70d722..e4fc4fc741f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -144,7 +144,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -181,7 +181,7 @@ dependencies = [ "futures", "futures-util", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -213,7 +213,7 @@ dependencies = [ "crc", "rand 0.8.5", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -244,7 +244,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -282,7 +282,7 @@ dependencies = [ "serde", "serde_with", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -304,7 +304,7 @@ dependencies = [ "op-alloy", "op-revm", "revm", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -359,7 +359,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] @@ -386,7 +386,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -417,7 +417,7 @@ dependencies = [ "op-alloy", "op-revm", "revm", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -503,7 +503,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", "url", @@ -643,7 +643,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 
2.0.17", + "thiserror 2.0.18", "tree_hash", "tree_hash_derive", ] @@ -700,7 +700,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -729,7 +729,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -768,7 +768,7 @@ dependencies = [ "either", "elliptic-curve", "k256", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -786,7 +786,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.17", + "thiserror 2.0.18", "zeroize", ] @@ -875,7 +875,7 @@ dependencies = [ "parking_lot", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tower", "tracing", @@ -1801,7 +1801,7 @@ dependencies = [ "tag_ptr", "tap", "thin-vec", - "thiserror 2.0.17", + "thiserror 2.0.18", "time", "xsum", ] @@ -2085,7 +2085,7 @@ dependencies = [ "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2099,7 +2099,7 @@ dependencies = [ "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3331,7 +3331,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.10.0" +version = "1.10.1" dependencies = [ "clap", "ef-tests", @@ -3339,7 +3339,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3366,7 +3366,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "walkdir", ] @@ -3574,7 +3574,7 @@ dependencies = [ "reth-ethereum", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3617,7 +3617,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tracing", @@ -3663,7 +3663,7 @@ dependencies = [ "reth-payload-builder", "reth-tracing", "serde", - "thiserror 2.0.17", + 
"thiserror 2.0.18", "tokio", ] @@ -3732,7 +3732,7 @@ dependencies = [ "revm", "revm-primitives", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3829,7 +3829,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.10.0" +version = "1.10.1" dependencies = [ "eyre", "reth-ethereum", @@ -3968,7 +3968,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "clap", @@ -4063,14 +4063,13 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" dependencies = [ "cfg-if", "libc", "libredox", - "windows-sys 0.60.2", ] [[package]] @@ -4216,9 +4215,9 @@ dependencies = [ [[package]] name = "futures-concurrency" -version = "7.7.0" +version = "7.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69a9561702beff46b705a8ac9c0803ec4c7fc5d01330a99b1feaf86e206e92ba" +checksum = "175cd8cca9e1d45b87f18ffa75088f2099e3c4fe5e2f83e42de112560bea8ea6" dependencies = [ "fixedbitset", "futures-core", @@ -4637,7 +4636,7 @@ dependencies = [ "rand 0.9.2", "ring", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tinyvec", "tokio", "tracing", @@ -4661,7 +4660,7 @@ dependencies = [ "resolv-conf", "serde", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] @@ -5350,7 +5349,7 @@ dependencies = [ "rustls-pki-types", "rustls-platform-verifier", "soketto", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-rustls", "tokio-util", @@ -5378,7 +5377,7 @@ dependencies = [ "rustc-hash", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tower", @@ -5403,7 
+5402,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tower", "url", @@ -5441,7 +5440,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -5458,7 +5457,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -5610,7 +5609,7 @@ dependencies = [ "multihash", "quick-protobuf", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "zeroize", ] @@ -5893,7 +5892,7 @@ dependencies = [ "metrics", "metrics-util", "quanta", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -5945,7 +5944,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] @@ -6384,7 +6383,7 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -6451,7 +6450,7 @@ dependencies = [ "op-alloy-consensus", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -6474,12 +6473,12 @@ dependencies = [ "serde", "sha2", "snap", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "op-reth" -version = "1.10.0" +version = "1.10.1" dependencies = [ "clap", "reth-cli-util", @@ -6528,7 +6527,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] @@ -6570,7 +6569,7 @@ dependencies = [ "opentelemetry_sdk", "prost 0.14.3", "reqwest", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", "tracing", @@ -6607,7 +6606,7 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -7226,7 +7225,7 @@ dependencies = [ "rustc-hash", "rustls", "socket2 0.6.1", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", "web-time", @@ -7247,7 +7246,7 @@ dependencies = [ 
"rustls", "rustls-pki-types", "slab", - "thiserror 2.0.17", + "thiserror 2.0.18", "tinyvec", "tracing", "web-time", @@ -7512,7 +7511,7 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.17", "libredox", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -7631,7 +7630,7 @@ checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7678,7 +7677,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7701,7 +7700,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7739,7 +7738,7 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tower", "tracing", @@ -7748,7 +7747,7 @@ dependencies = [ [[package]] name = "reth-bench-compare" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "alloy-provider", @@ -7776,7 +7775,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7808,7 +7807,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7828,7 +7827,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-genesis", "clap", @@ -7841,7 +7840,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7926,7 +7925,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.10.0" +version = "1.10.1" dependencies = [ 
"reth-tasks", "tokio", @@ -7935,7 +7934,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7949,14 +7948,14 @@ dependencies = [ "secp256k1 0.30.0", "serde", "snmalloc-rs", - "thiserror 2.0.17", + "thiserror 2.0.18", "tikv-jemallocator", "tracy-client", ] [[package]] name = "reth-codecs" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7980,7 +7979,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.10.0" +version = "1.10.1" dependencies = [ "proc-macro2", "quote", @@ -7990,7 +7989,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "eyre", @@ -8008,19 +8007,19 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", "auto_impl", "reth-execution-types", "reth-primitives-traits", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-consensus-common" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8034,7 +8033,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8059,7 +8058,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8088,12 +8087,12 @@ dependencies = [ "strum 0.27.2", "sysinfo", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-db-api" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8123,7 +8122,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8147,13 +8146,13 @@ 
dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] [[package]] name = "reth-db-models" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8169,7 +8168,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8187,7 +8186,7 @@ dependencies = [ "schnellru", "secp256k1 0.30.0", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tracing", @@ -8195,7 +8194,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8213,14 +8212,14 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1 0.30.0", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-dns-discovery" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8240,7 +8239,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tracing", @@ -8248,7 +8247,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8277,7 +8276,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -8286,7 +8285,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8343,7 +8342,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.10.0" +version = "1.10.1" dependencies = [ "aes", "alloy-primitives", @@ -8361,7 +8360,7 @@ dependencies = [ "reth-network-peers", "secp256k1 0.30.0", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", 
"tokio-stream", "tokio-util", @@ -8370,7 +8369,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8394,7 +8393,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8412,13 +8411,13 @@ dependencies = [ "reth-primitives-traits", "reth-trie-common", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", ] [[package]] name = "reth-engine-service" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "futures", @@ -8449,7 +8448,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eip7928", @@ -8517,14 +8516,14 @@ dependencies = [ "schnellru", "serde_json", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-engine-util" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8551,7 +8550,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8567,13 +8566,13 @@ dependencies = [ "snap", "tempfile", "test-case", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", ] [[package]] name = "reth-era-downloader" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "bytes", @@ -8591,7 +8590,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8617,17 +8616,17 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.10.0" +version = "1.10.1" dependencies = [ "reth-consensus", "reth-execution-errors", "reth-storage-errors", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-eth-wire" -version = "1.10.0" +version 
= "1.10.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8656,7 +8655,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -8665,7 +8664,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8685,12 +8684,12 @@ dependencies = [ "reth-ethereum-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-ethereum" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8730,7 +8729,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.10.0" +version = "1.10.1" dependencies = [ "clap", "eyre", @@ -8752,7 +8751,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8768,7 +8767,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8781,12 +8780,12 @@ dependencies = [ "serde", "serde_json", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-ethereum-forks" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8799,7 +8798,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8827,7 +8826,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8854,7 +8853,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "rayon", @@ -8864,7 +8863,7 @@ dependencies = [ [[package]] 
name = "reth-evm" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8889,7 +8888,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8913,19 +8912,19 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-evm", "alloy-primitives", "alloy-rlp", "nybbles", "reth-storage-errors", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-execution-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8945,7 +8944,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8982,7 +8981,7 @@ dependencies = [ "rmp-serde", "secp256k1 0.30.0", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "tracing", @@ -8990,7 +8989,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "eyre", @@ -9015,13 +9014,13 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", ] [[package]] name = "reth-exex-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9038,16 +9037,16 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.10.0" +version = "1.10.1" dependencies = [ "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-invalid-block-hooks" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9080,7 +9079,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.10.0" +version = "1.10.1" dependencies = [ "bytes", "futures", @@ -9092,7 +9091,7 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - 
"thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -9102,7 +9101,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.10.0" +version = "1.10.1" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -9114,13 +9113,13 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] [[package]] name = "reth-mdbx-sys" -version = "1.10.0" +version = "1.10.1" dependencies = [ "bindgen 0.71.1", "cc", @@ -9128,7 +9127,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.10.0" +version = "1.10.1" dependencies = [ "futures", "metrics", @@ -9139,7 +9138,7 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "ipnet", @@ -9147,21 +9146,21 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "1.10.0" +version = "1.10.1" dependencies = [ "futures-util", "if-addrs", "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-network" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9212,7 +9211,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -9222,7 +9221,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9239,14 +9238,14 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", ] [[package]] name = "reth-network-p2p" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9268,7 +9267,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "alloy-rlp", 
@@ -9278,14 +9277,14 @@ dependencies = [ "secp256k1 0.30.0", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "url", ] [[package]] name = "reth-network-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -9298,7 +9297,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.10.0" +version = "1.10.1" dependencies = [ "anyhow", "bincode 1.3.3", @@ -9309,14 +9308,14 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "zstd", ] [[package]] name = "reth-node-api" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -9339,7 +9338,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9411,7 +9410,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9457,7 +9456,7 @@ dependencies = [ "serde", "shellexpand", "strum 0.27.2", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "toml", "tracing", @@ -9468,7 +9467,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9528,7 +9527,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9541,7 +9540,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-tungstenite", @@ -9551,7 +9550,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9574,7 +9573,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.10.0" +version = 
"1.10.1" dependencies = [ "bytes", "eyre", @@ -9603,7 +9602,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9614,7 +9613,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.10.0" +version = "1.10.1" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9654,7 +9653,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9677,12 +9676,12 @@ dependencies = [ "serde", "serde_json", "tar-no-std", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-optimism-cli" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9731,7 +9730,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9756,13 +9755,13 @@ dependencies = [ "reth-trie", "reth-trie-common", "revm", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] [[package]] name = "reth-optimism-evm" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9786,12 +9785,12 @@ dependencies = [ "reth-rpc-eth-api", "reth-storage-errors", "revm", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-optimism-flashblocks" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9829,7 +9828,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9839,7 +9838,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9900,7 +9899,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.10.0" +version = "1.10.1" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -9933,13 +9932,13 @@ dependencies = [ "revm", "serde", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] [[package]] name = "reth-optimism-primitives" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9966,7 +9965,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10019,7 +10018,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tower", @@ -10028,7 +10027,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "reth-codecs", @@ -10040,7 +10039,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10070,14 +10069,14 @@ dependencies = [ "reth-storage-api", "reth-transaction-pool", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-payload-builder" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10097,7 +10096,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.10.0" +version = "1.10.1" dependencies = [ "pin-project", "reth-payload-primitives", @@ -10108,7 +10107,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10125,13 +10124,13 @@ dependencies = [ "reth-primitives-traits", "reth-trie-common", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", ] [[package]] name = "reth-payload-util" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10140,7 +10139,7 @@ dependencies = [ 
[[package]] name = "reth-payload-validator" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -10149,7 +10148,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10171,7 +10170,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10203,12 +10202,12 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-provider" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10257,7 +10256,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10282,18 +10281,18 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-prune-db" -version = "1.10.0" +version = "1.10.1" [[package]] name = "reth-prune-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -10306,13 +10305,13 @@ dependencies = [ "serde", "serde_json", "strum 0.27.2", - "thiserror 2.0.17", + "thiserror 2.0.18", "toml", ] [[package]] name = "reth-ress-protocol" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10338,7 +10337,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10364,7 +10363,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10378,7 +10377,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.10.0" +version = "1.10.1" 
dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10453,7 +10452,7 @@ dependencies = [ "serde", "serde_json", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tower", @@ -10463,7 +10462,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eip7928", "alloy-eips", @@ -10493,7 +10492,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10512,7 +10511,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-network", @@ -10558,7 +10557,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "tower", @@ -10568,7 +10567,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-evm", @@ -10589,12 +10588,12 @@ dependencies = [ "reth-primitives-traits", "reth-storage-api", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-rpc-e2e-tests" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10614,7 +10613,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10643,14 +10642,14 @@ dependencies = [ "reth-testing-utils", "reth-transaction-pool", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-rpc-eth-api" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10693,7 +10692,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10732,7 +10731,7 @@ 
dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tracing", @@ -10741,7 +10740,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10758,7 +10757,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10773,7 +10772,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10824,14 +10823,14 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-stages-api" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10855,7 +10854,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tracing", @@ -10863,7 +10862,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -10879,7 +10878,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10902,12 +10901,12 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-static-file" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -10930,7 +10929,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "clap", @@ -10945,7 +10944,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.10.0" +version = "1.10.1" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -10968,7 +10967,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10979,12 +10978,12 @@ dependencies = [ "reth-static-file-types", "revm-database-interface", "revm-state", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "reth-storage-rpc-provider" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11013,7 +11012,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.10.0" +version = "1.10.1" dependencies = [ "auto_impl", "dyn-clone", @@ -11022,7 +11021,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", "tracing-futures", @@ -11030,7 +11029,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11046,7 +11045,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.10.0" +version = "1.10.1" dependencies = [ "tokio", "tokio-stream", @@ -11055,7 +11054,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.10.0" +version = "1.10.1" dependencies = [ "clap", "eyre", @@ -11073,7 +11072,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.10.0" +version = "1.10.1" dependencies = [ "clap", "eyre", @@ -11090,7 +11089,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11130,7 +11129,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tracing", @@ -11138,7 +11137,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11172,7 +11171,7 @@ dependencies 
= [ [[package]] name = "reth-trie-common" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11205,7 +11204,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11236,7 +11235,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11259,14 +11258,14 @@ dependencies = [ "reth-trie-common", "reth-trie-db", "reth-trie-sparse", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", ] [[package]] name = "reth-trie-sparse" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11299,7 +11298,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.10.0" +version = "1.10.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11329,7 +11328,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.10.0" +version = "1.10.1" dependencies = [ "zstd", ] @@ -11423,7 +11422,7 @@ dependencies = [ "revm-primitives", "revm-state", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -11480,7 +11479,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -12368,7 +12367,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.17", + "thiserror 2.0.18", "time", ] @@ -12784,11 +12783,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.17", + "thiserror-impl 2.0.18", ] [[package]] @@ 
-12804,9 +12803,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", @@ -13213,7 +13212,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" dependencies = [ "crossbeam-channel", - "thiserror 2.0.17", + "thiserror 2.0.18", "time", "tracing-subscriber 0.3.22", ] @@ -13460,7 +13459,7 @@ dependencies = [ "rustls", "rustls-pki-types", "sha1", - "thiserror 2.0.17", + "thiserror 2.0.18", "utf-8", ] @@ -14449,7 +14448,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", - "thiserror 2.0.17", + "thiserror 2.0.18", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", diff --git a/Cargo.toml b/Cargo.toml index c9a3ba0d93c..316f5ee2b57 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.10.0" +version = "1.10.1" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index f29394c6928..c814040b736 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.10.0', + text: 'v1.10.1', items: [ { text: 'Releases', From 8f37cd08fca81a67efffa0d8ab2a4031cab30802 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 20 Jan 2026 12:33:27 +0100 Subject: [PATCH 076/267] feat(engine-api): add EIP-7928 BAL stub methods (#21204) --- crates/rpc/rpc-api/src/engine.rs | 12 +++++++++ crates/rpc/rpc-engine-api/src/engine_api.rs | 27 +++++++++++++++++++++ typos.toml | 1 + 3 files changed, 40 insertions(+) diff --git 
a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 175825fe921..aca0af4e76e 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -252,6 +252,18 @@ pub trait EngineApi { &self, versioned_hashes: Vec, ) -> RpcResult>>>; + + /// Returns the Block Access Lists for the given block hashes. + /// + /// See also + #[method(name = "getBALsByHashV1")] + async fn get_bals_by_hash_v1(&self, block_hashes: Vec) -> RpcResult>; + + /// Returns the Block Access Lists for the given block range. + /// + /// See also + #[method(name = "getBALsByRangeV1")] + async fn get_bals_by_range_v1(&self, start: U64, count: U64) -> RpcResult>; } /// A subset of the ETH rpc interface: diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 8db352f7b44..b1e9986c41c 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1161,6 +1161,33 @@ where trace!(target: "rpc::engine", "Serving engine_getBlobsV3"); Ok(self.get_blobs_v3_metered(versioned_hashes)?) } + + /// Handler for `engine_getBALsByHashV1` + /// + /// See also + async fn get_bals_by_hash_v1( + &self, + _block_hashes: Vec, + ) -> RpcResult> { + trace!(target: "rpc::engine", "Serving engine_getBALsByHashV1"); + Err(EngineApiError::EngineObjectValidationError( + reth_payload_primitives::EngineObjectValidationError::UnsupportedFork, + ))? + } + + /// Handler for `engine_getBALsByRangeV1` + /// + /// See also + async fn get_bals_by_range_v1( + &self, + _start: U64, + _count: U64, + ) -> RpcResult> { + trace!(target: "rpc::engine", "Serving engine_getBALsByRangeV1"); + Err(EngineApiError::EngineObjectValidationError( + reth_payload_primitives::EngineObjectValidationError::UnsupportedFork, + ))? 
+ } } impl IntoEngineApiRpcModule diff --git a/typos.toml b/typos.toml index 25f54392661..896c2c783e3 100644 --- a/typos.toml +++ b/typos.toml @@ -37,3 +37,4 @@ ONL = "ONL" # Part of base64 encoded ENR Iy = "Iy" # Part of base64 encoded ENR flate = "flate" # zlib-flate is a valid tool name Pn = "Pn" # Part of UPnP (Universal Plug and Play) +BA = "BA" # Part of BAL - Block Access List (EIP-7928) From c825c8c1872937c28c453863fe994f993b3f44ee Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Tue, 20 Jan 2026 13:38:46 +0100 Subject: [PATCH 077/267] chore(trie): Move hybrid check for trie input merges into common code (#21198) --- crates/chain-state/src/lazy_overlay.rs | 54 +++---------------- .../src/providers/database/provider.rs | 37 ++----------- crates/trie/common/src/hashed_state.rs | 43 +++++++++------ crates/trie/common/src/updates.rs | 50 +++++++++-------- 4 files changed, 66 insertions(+), 118 deletions(-) diff --git a/crates/chain-state/src/lazy_overlay.rs b/crates/chain-state/src/lazy_overlay.rs index b611f29241e..58ccee5f90f 100644 --- a/crates/chain-state/src/lazy_overlay.rs +++ b/crates/chain-state/src/lazy_overlay.rs @@ -123,60 +123,18 @@ impl LazyOverlay { /// Merge all blocks' trie data into a single [`TrieInputSorted`]. /// - /// Blocks are ordered newest to oldest. Uses hybrid merge algorithm that - /// switches between `extend_ref` (small batches) and k-way merge (large batches). + /// Blocks are ordered newest to oldest. fn merge_blocks(blocks: &[DeferredTrieData]) -> TrieInputSorted { - const MERGE_BATCH_THRESHOLD: usize = 64; - if blocks.is_empty() { return TrieInputSorted::default(); } - // Single block: use its data directly (no allocation) - if blocks.len() == 1 { - let data = blocks[0].wait_cloned(); - return TrieInputSorted { - state: data.hashed_state, - nodes: data.trie_updates, - prefix_sets: Default::default(), - }; - } - - if blocks.len() < MERGE_BATCH_THRESHOLD { - // Small k: extend_ref loop with Arc::make_mut is faster. 
- // Uses copy-on-write - only clones inner data if Arc has multiple refs. - // Iterate oldest->newest so newer values override older ones. - let mut blocks_iter = blocks.iter().rev(); - let first = blocks_iter.next().expect("blocks is non-empty"); - let data = first.wait_cloned(); - - let mut state = data.hashed_state; - let mut nodes = data.trie_updates; - - for block in blocks_iter { - let block_data = block.wait_cloned(); - Arc::make_mut(&mut state).extend_ref_and_sort(block_data.hashed_state.as_ref()); - Arc::make_mut(&mut nodes).extend_ref_and_sort(block_data.trie_updates.as_ref()); - } + let state = + HashedPostStateSorted::merge_batch(blocks.iter().map(|b| b.wait_cloned().hashed_state)); + let nodes = + TrieUpdatesSorted::merge_batch(blocks.iter().map(|b| b.wait_cloned().trie_updates)); - TrieInputSorted { state, nodes, prefix_sets: Default::default() } - } else { - // Large k: k-way merge is faster (O(n log k)). - // Collect is unavoidable here - we need all data materialized for k-way merge. 
- let trie_data: Vec<_> = blocks.iter().map(|b| b.wait_cloned()).collect(); - - let merged_state = HashedPostStateSorted::merge_batch( - trie_data.iter().map(|d| d.hashed_state.as_ref()), - ); - let merged_nodes = - TrieUpdatesSorted::merge_batch(trie_data.iter().map(|d| d.trie_updates.as_ref())); - - TrieInputSorted { - state: Arc::new(merged_state), - nodes: Arc::new(merged_nodes), - prefix_sets: Default::default(), - } - } + TrieInputSorted { state, nodes, prefix_sets: Default::default() } } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ca8314099c5..0fe7d854720 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -560,43 +560,12 @@ impl DatabaseProvider owned, - Err(arc) => (*arc).clone(), - } - } else if num_blocks < MERGE_BATCH_THRESHOLD { - // Small k: extend_ref with Arc::make_mut (copy-on-write). - // Blocks are oldest-to-newest, iterate forward so newest overrides. - let mut blocks_iter = blocks.iter(); - let mut result = blocks_iter.next().expect("non-empty").trie_updates(); - - for block in blocks_iter { - Arc::make_mut(&mut result) - .extend_ref_and_sort(block.trie_updates().as_ref()); - } - match Arc::try_unwrap(result) { - Ok(owned) => owned, - Err(arc) => (*arc).clone(), - } - } else { - // Large k: k-way merge is faster (O(n log k)). - // Collect Arcs first to extend lifetime, then pass refs. - // Blocks are oldest-to-newest, merge_batch expects newest-to-oldest. - let arcs: Vec<_> = blocks.iter().rev().map(|b| b.trie_updates()).collect(); - TrieUpdatesSorted::merge_batch(arcs.iter().map(|arc| arc.as_ref())) - }; + // Blocks are oldest-to-newest, merge_batch expects newest-to-oldest. 
+ let merged = + TrieUpdatesSorted::merge_batch(blocks.iter().rev().map(|b| b.trie_updates())); if !merged.is_empty() { self.write_trie_updates_sorted(&merged)?; diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 3b232d43467..315bda49a45 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -638,31 +638,44 @@ impl HashedPostStateSorted { /// Batch-merge sorted hashed post states. Iterator yields **newest to oldest**. /// - /// Uses k-way merge for O(n log k) complexity and one-pass accumulation for storages. - pub fn merge_batch<'a>(states: impl IntoIterator) -> Self { - let states: Vec<_> = states.into_iter().collect(); - if states.is_empty() { - return Self::default(); + /// For small batches, uses `extend_ref_and_sort` loop. + /// For large batches, uses k-way merge for O(n log k) complexity. + pub fn merge_batch + From>(iter: impl IntoIterator) -> T { + const THRESHOLD: usize = 30; + + let items: alloc::vec::Vec<_> = iter.into_iter().collect(); + let k = items.len(); + + if k == 0 { + return Self::default().into(); + } + if k == 1 { + return items.into_iter().next().expect("k == 1"); } - let accounts = kway_merge_sorted(states.iter().map(|s| s.accounts.as_slice())); + if k < THRESHOLD { + // Small k: extend loop, oldest-to-newest so newer overrides older. + let mut iter = items.iter().rev(); + let mut acc = iter.next().expect("k > 0").as_ref().clone(); + for next in iter { + acc.extend_ref_and_sort(next.as_ref()); + } + return acc.into(); + } + + // Large k: k-way merge. + let accounts = kway_merge_sorted(items.iter().map(|i| i.as_ref().accounts.as_slice())); struct StorageAcc<'a> { - /// Account storage was cleared (e.g., SELFDESTRUCT). wiped: bool, - /// Stop collecting older slices after seeing a wipe. sealed: bool, - /// Storage slot slices to merge, ordered newest to oldest. 
slices: Vec<&'a [(B256, U256)]>, } let mut acc: B256Map> = B256Map::default(); - // Accumulate storage slices per address from newest to oldest state. - // Once we see a `wiped` flag, the account was cleared at that point, - // so older storage slots are irrelevant - we "seal" and stop collecting. - for state in &states { - for (addr, storage) in &state.storages { + for item in &items { + for (addr, storage) in &item.as_ref().storages { let entry = acc.entry(*addr).or_insert_with(|| StorageAcc { wiped: false, sealed: false, @@ -689,7 +702,7 @@ impl HashedPostStateSorted { }) .collect(); - Self { accounts, storages } + Self { accounts, storages }.into() } /// Clears all accounts and storage data. diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 17f0d02b5ef..08c62cee3f7 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -629,48 +629,57 @@ impl TrieUpdatesSorted { /// Batch-merge sorted trie updates. Iterator yields **newest to oldest**. /// - /// This is more efficient than repeated `extend_ref` calls for large batches, - /// using k-way merge for O(n log k) complexity instead of O(n * k). - pub fn merge_batch<'a>(updates: impl IntoIterator) -> Self { - let updates: Vec<_> = updates.into_iter().collect(); - if updates.is_empty() { - return Self::default(); + /// For small batches, uses `extend_ref_and_sort` loop. + /// For large batches, uses k-way merge for O(n log k) complexity. + pub fn merge_batch + From>(iter: impl IntoIterator) -> T { + const THRESHOLD: usize = 30; + + let items: alloc::vec::Vec<_> = iter.into_iter().collect(); + let k = items.len(); + + if k == 0 { + return Self::default().into(); + } + if k == 1 { + return items.into_iter().next().expect("k == 1"); } - // Merge account nodes using k-way merge. Newest (index 0) takes precedence. 
- let account_nodes = kway_merge_sorted(updates.iter().map(|u| u.account_nodes.as_slice())); + if k < THRESHOLD { + // Small k: extend loop, oldest-to-newest so newer overrides older. + let mut iter = items.iter().rev(); + let mut acc = iter.next().expect("k > 0").as_ref().clone(); + for next in iter { + acc.extend_ref_and_sort(next.as_ref()); + } + return acc.into(); + } + + // Large k: k-way merge. + let account_nodes = + kway_merge_sorted(items.iter().map(|i| i.as_ref().account_nodes.as_slice())); - // Accumulator for collecting storage trie slices per address. - // We process updates newest-to-oldest and stop collecting for an address - // once we hit a "deleted" storage (sealed=true), since older data is irrelevant. struct StorageAcc<'a> { - /// Storage trie was deleted (account removed or cleared). is_deleted: bool, - /// Stop collecting older slices after seeing a deletion. sealed: bool, - /// Storage trie node slices to merge, ordered newest to oldest. slices: Vec<&'a [(Nibbles, Option)]>, } let mut acc: B256Map> = B256Map::default(); - // Collect storage slices per address, respecting deletion boundaries - for update in &updates { - for (addr, storage) in &update.storage_tries { + for item in &items { + for (addr, storage) in &item.as_ref().storage_tries { let entry = acc.entry(*addr).or_insert_with(|| StorageAcc { is_deleted: false, sealed: false, slices: Vec::new(), }); - // Skip if we already hit a deletion for this address (older data is irrelevant) if entry.sealed { continue; } entry.slices.push(storage.storage_nodes.as_slice()); - // If this storage was deleted, mark as deleted and seal to ignore older updates if storage.is_deleted { entry.is_deleted = true; entry.sealed = true; @@ -678,7 +687,6 @@ impl TrieUpdatesSorted { } } - // Merge each address's storage slices using k-way merge let storage_tries = acc .into_iter() .map(|(addr, entry)| { @@ -687,7 +695,7 @@ impl TrieUpdatesSorted { }) .collect(); - Self { account_nodes, storage_tries } + Self { 
account_nodes, storage_tries }.into() } } From 5a3887148995e8df0b76d038b9a46ae96f5c4e39 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 20 Jan 2026 12:39:36 +0000 Subject: [PATCH 078/267] fix: set `StaticFileArgs` defaults for `edge` (#21208) --- crates/node/core/Cargo.toml | 2 +- crates/node/core/src/args/static_files.rs | 39 ++++++++++++++++--- crates/storage/provider/Cargo.toml | 1 + crates/storage/storage-api/Cargo.toml | 1 + docs/vocs/docs/pages/cli/op-reth/db.mdx | 15 +++++-- .../vocs/docs/pages/cli/op-reth/import-op.mdx | 15 +++++-- .../pages/cli/op-reth/import-receipts-op.mdx | 15 +++++-- .../docs/pages/cli/op-reth/init-state.mdx | 15 +++++-- docs/vocs/docs/pages/cli/op-reth/init.mdx | 15 +++++-- docs/vocs/docs/pages/cli/op-reth/node.mdx | 15 +++++-- docs/vocs/docs/pages/cli/op-reth/prune.mdx | 15 +++++-- .../docs/pages/cli/op-reth/re-execute.mdx | 15 +++++-- .../docs/pages/cli/op-reth/stage/drop.mdx | 15 +++++-- .../docs/pages/cli/op-reth/stage/dump.mdx | 15 +++++-- .../vocs/docs/pages/cli/op-reth/stage/run.mdx | 15 +++++-- .../docs/pages/cli/op-reth/stage/unwind.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/db.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/download.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/export-era.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/import-era.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/import.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/init-state.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/init.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/node.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/prune.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 15 +++++-- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 15 +++++-- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 15 +++++-- 30 files changed, 348 insertions(+), 85 deletions(-) diff --git 
a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 3ed981297eb..2be74b17613 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -92,7 +92,7 @@ min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] # Marker feature for edge/unstable builds - captured by vergen in build.rs -edge = [] +edge = ["reth-provider/edge"] [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } diff --git a/crates/node/core/src/args/static_files.rs b/crates/node/core/src/args/static_files.rs index 44116dd84b5..aa52164f2f2 100644 --- a/crates/node/core/src/args/static_files.rs +++ b/crates/node/core/src/args/static_files.rs @@ -9,8 +9,16 @@ use reth_provider::StorageSettings; /// 10000 blocks per static file allows us to prune all history every 10k blocks. pub const MINIMAL_BLOCKS_PER_FILE: u64 = 10000; +/// Default value for static file storage flags. +/// +/// When the `edge` feature is enabled, defaults to `true` to enable edge storage features. +/// Otherwise defaults to `false` for legacy behavior. +const fn default_static_file_flag() -> bool { + cfg!(feature = "edge") +} + /// Parameters for static files configuration -#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)] +#[derive(Debug, Args, PartialEq, Eq, Clone, Copy)] #[command(next_help_heading = "Static Files")] pub struct StaticFilesArgs { /// Number of blocks per file for the headers segment. @@ -39,7 +47,7 @@ pub struct StaticFilesArgs { /// /// Note: This setting can only be configured at genesis initialization. Once /// the node has been initialized, changing this flag requires re-syncing from scratch. - #[arg(long = "static-files.receipts")] + #[arg(long = "static-files.receipts", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)] pub receipts: bool, /// Store transaction senders in static files instead of the database. 
@@ -49,7 +57,7 @@ pub struct StaticFilesArgs { /// /// Note: This setting can only be configured at genesis initialization. Once /// the node has been initialized, changing this flag requires re-syncing from scratch. - #[arg(long = "static-files.transaction-senders")] + #[arg(long = "static-files.transaction-senders", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)] pub transaction_senders: bool, /// Store account changesets in static files. @@ -59,7 +67,7 @@ pub struct StaticFilesArgs { /// /// Note: This setting can only be configured at genesis initialization. Once /// the node has been initialized, changing this flag requires re-syncing from scratch. - #[arg(long = "static-files.account-change-sets")] + #[arg(long = "static-files.account-change-sets", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)] pub account_changesets: bool, } @@ -97,9 +105,28 @@ impl StaticFilesArgs { /// Converts the static files arguments into [`StorageSettings`]. 
pub const fn to_settings(&self) -> StorageSettings { - StorageSettings::legacy() - .with_receipts_in_static_files(self.receipts) + #[cfg(feature = "edge")] + let base = StorageSettings::edge(); + #[cfg(not(feature = "edge"))] + let base = StorageSettings::legacy(); + + base.with_receipts_in_static_files(self.receipts) .with_transaction_senders_in_static_files(self.transaction_senders) .with_account_changesets_in_static_files(self.account_changesets) } } + +impl Default for StaticFilesArgs { + fn default() -> Self { + Self { + blocks_per_file_headers: None, + blocks_per_file_transactions: None, + blocks_per_file_receipts: None, + blocks_per_file_transaction_senders: None, + blocks_per_file_account_change_sets: None, + receipts: default_static_file_flag(), + transaction_senders: default_static_file_flag(), + account_changesets: default_static_file_flag(), + } + } +} diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 0199b6d2fc4..2aa30ab1b9c 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -85,6 +85,7 @@ rand.workspace = true tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } [features] +edge = ["reth-storage-api/edge"] rocksdb = ["dep:rocksdb"] test-utils = [ "reth-db/test-utils", diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 83cbbbd714e..9076dc64b5d 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -36,6 +36,7 @@ serde_json = { workspace = true, optional = true } [features] default = ["std"] +edge = ["reth-db-api/edge"] std = [ "reth-chainspec/std", "alloy-consensus/std", diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx index c2d7b89b031..d6c8ef5669e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -124,27 +124,36 @@ Static Files: 
--static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx index 42398a75159..891439b4f6d 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. 
When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --chunk-len Chunk byte length to read from file. diff --git a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx index 75b260f7e04..cabcf3b0401 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. 
Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --chunk-len Chunk byte length to read from file. diff --git a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx index 3f1b0bff2e2..429c4fe1f0d 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
- --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --without-evm Specifies whether to initialize the state without relying on EVM historical data. diff --git a/docs/vocs/docs/pages/cli/op-reth/init.mdx b/docs/vocs/docs/pages/cli/op-reth/init.mdx index 3f7c5ab5479..1094918f334 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. 
When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index f245315040d..25e248076c4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -1003,27 +1003,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
- --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + Rollup: --rollup.sequencer Endpoint for the sequencer mempool (can be both HTTP and WS) diff --git a/docs/vocs/docs/pages/cli/op-reth/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/prune.mdx index 2df4b66eb9b..953e77d6cac 100644 --- a/docs/vocs/docs/pages/cli/op-reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/prune.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. 
When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx index 247f8ead687..8e40a32b9ea 100644 --- a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. 
Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --from The height to start at diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx index 25df549b934..e176564435b 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Possible values: - headers: The headers stage within the pipeline diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx index 3107256bbe5..99d18f48ea7 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx @@ -115,27 +115,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx index b5c5af7729d..13a1599bd76 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --metrics Enable Prometheus metrics. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx index 426d481f6ed..3e380975e52 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx @@ -113,27 +113,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index d9e12bd7471..4fda4538d16 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -124,27 +124,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 2f7fd058425..8c8b047d94e 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + -u, --url Specify a snapshot URL or let the command propose a default one. 
diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 4eab7b84a07..4dcbbd18aa3 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --first-block-number Optional first block number to export from the db. It is by default 0. 
diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 97386ec8579..fb7a3d394c0 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --path The path to a directory for import. 
diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 10eed084909..c3482e2a46c 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --no-state Disables stages that require state. 
diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index eaab28160ee..16aa7f61482 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --without-evm Specifies whether to initialize the state without relying on EVM historical data. 
diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 586f0d4a44e..d2da76d31c6 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index c052076fc89..9eb5b2ddbf6 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -1003,27 +1003,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Ress: --ress.enable Enable support for `ress` subprotocol diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 07cde6cd02c..c2d1e830099 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 238f07c5655..c4a254ed511 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + --from The height to start at diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 8e8135c5858..26178aad354 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Possible values: - headers: The headers stage within the pipeline diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index b162958fd60..5750798c6fe 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -115,27 +115,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 4ad13bc3fce..1213a272647 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -108,27 +108,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + [default: false] + [possible values: true, false] + --metrics Enable Prometheus metrics. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index ecb0f3f82d6..ed16cfb48fc 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -113,27 +113,36 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment - --static-files.receipts + --static-files.receipts Store receipts in static files instead of the database. When enabled, receipts will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.transaction-senders + [default: false] + [possible values: true, false] + + --static-files.transaction-senders Store transaction senders in static files instead of the database. When enabled, transaction senders will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. - --static-files.account-change-sets + [default: false] + [possible values: true, false] + + --static-files.account-change-sets Store account changesets in static files. When enabled, account changesets will be written to static files on disk instead of the database. Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ [default: false] + [possible values: true, false] + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound From 7cfb19c98ef9c5564eea1242888569c54a4e4929 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Tue, 20 Jan 2026 14:25:54 +0100 Subject: [PATCH 079/267] feat(trie): Add V2 reveal method and target types (#21196) Co-authored-by: Amp --- crates/trie/common/src/proofs.rs | 29 +- crates/trie/parallel/src/lib.rs | 3 + crates/trie/parallel/src/targets_v2.rs | 148 ++++++++++ crates/trie/sparse/src/state.rs | 365 ++++++++++++++++++++++++ crates/trie/trie/src/proof_v2/target.rs | 5 + 5 files changed, 549 insertions(+), 1 deletion(-) create mode 100644 crates/trie/parallel/src/targets_v2.rs diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index f2d75b2d3fd..c3eab920112 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -1,6 +1,6 @@ //! Merkle trie proofs. -use crate::{BranchNodeMasksMap, Nibbles, TrieAccount}; +use crate::{BranchNodeMasksMap, Nibbles, ProofTrieNode, TrieAccount}; use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::constants::KECCAK_EMPTY; use alloy_primitives::{ @@ -431,6 +431,33 @@ impl TryFrom for DecodedMultiProof { } } +/// V2 decoded multiproof which contains the results of both account and storage V2 proof +/// calculations. +#[derive(Clone, Debug, PartialEq, Eq, Default)] +pub struct DecodedMultiProofV2 { + /// Account trie proof nodes + pub account_proofs: Vec, + /// Storage trie proof nodes indexed by account + pub storage_proofs: B256Map>, +} + +impl DecodedMultiProofV2 { + /// Returns true if there are no proofs + pub fn is_empty(&self) -> bool { + self.account_proofs.is_empty() && self.storage_proofs.is_empty() + } + + /// Appends the given multiproof's data to this one. + /// + /// This implementation does not deduplicate redundant proofs. 
+ pub fn extend(&mut self, other: Self) { + self.account_proofs.extend(other.account_proofs); + for (hashed_address, other_storage_proofs) in other.storage_proofs { + self.storage_proofs.entry(hashed_address).or_default().extend(other_storage_proofs); + } + } +} + /// The merkle multiproof of storage trie. #[derive(Clone, Debug, PartialEq, Eq)] pub struct StorageMultiProof { diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index d713ce1520e..ba88ab690db 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -22,6 +22,9 @@ pub mod proof; pub mod proof_task; +/// V2 multiproof targets and chunking. +pub mod targets_v2; + /// Parallel state root metrics. #[cfg(feature = "metrics")] pub mod metrics; diff --git a/crates/trie/parallel/src/targets_v2.rs b/crates/trie/parallel/src/targets_v2.rs new file mode 100644 index 00000000000..bda7f0c31f6 --- /dev/null +++ b/crates/trie/parallel/src/targets_v2.rs @@ -0,0 +1,148 @@ +//! V2 multiproof targets and chunking. + +use alloy_primitives::{map::B256Map, B256}; +use reth_trie::proof_v2; + +/// A set of account and storage V2 proof targets. The account and storage targets do not need to +/// necessarily overlap. +#[derive(Debug, Default)] +pub struct MultiProofTargetsV2 { + /// The set of account proof targets to generate proofs for. + pub account_targets: Vec, + /// The sets of storage proof targets to generate proofs for. + pub storage_targets: B256Map>, +} + +impl MultiProofTargetsV2 { + /// Returns true is there are no account or storage targets. + pub fn is_empty(&self) -> bool { + self.account_targets.is_empty() && self.storage_targets.is_empty() + } +} + +/// An iterator that yields chunks of V2 proof targets of at most `size` account and storage +/// targets. +/// +/// Unlike legacy chunking, V2 preserves account targets exactly as they were (with their `min_len` +/// metadata). Account targets must appear in a chunk. 
Storage targets for those accounts are +/// chunked together, but if they exceed the chunk size, subsequent chunks contain only the +/// remaining storage targets without repeating the account target. +#[derive(Debug)] +pub struct ChunkedMultiProofTargetsV2 { + /// Remaining account targets to process + account_targets: std::vec::IntoIter, + /// Storage targets by account address + storage_targets: B256Map>, + /// Current account being processed (if any storage slots remain) + current_account_storage: Option<(B256, std::vec::IntoIter)>, + /// Chunk size + size: usize, +} + +impl ChunkedMultiProofTargetsV2 { + /// Creates a new chunked iterator for the given targets. + pub fn new(targets: MultiProofTargetsV2, size: usize) -> Self { + Self { + account_targets: targets.account_targets.into_iter(), + storage_targets: targets.storage_targets, + current_account_storage: None, + size, + } + } +} + +impl Iterator for ChunkedMultiProofTargetsV2 { + type Item = MultiProofTargetsV2; + + fn next(&mut self) -> Option { + let mut chunk = MultiProofTargetsV2::default(); + let mut count = 0; + + // First, finish any remaining storage slots from previous account + if let Some((account_addr, ref mut storage_iter)) = self.current_account_storage { + let remaining_capacity = self.size - count; + let slots: Vec<_> = storage_iter.by_ref().take(remaining_capacity).collect(); + + count += slots.len(); + chunk.storage_targets.insert(account_addr, slots); + + // If iterator is exhausted, clear current_account_storage + if storage_iter.len() == 0 { + self.current_account_storage = None; + } + } + + // Process account targets and their storage + while count < self.size { + let Some(account_target) = self.account_targets.next() else { + break; + }; + + // Add the account target + chunk.account_targets.push(account_target); + count += 1; + + // Check if this account has storage targets + let account_addr = account_target.key(); + if let Some(storage_slots) = 
self.storage_targets.remove(&account_addr) { + let remaining_capacity = self.size - count; + + if storage_slots.len() <= remaining_capacity { + // Optimization: We can take all slots, just move the vec + count += storage_slots.len(); + chunk.storage_targets.insert(account_addr, storage_slots); + } else { + // We need to split the storage slots + let mut storage_iter = storage_slots.into_iter(); + let slots_in_chunk: Vec<_> = + storage_iter.by_ref().take(remaining_capacity).collect(); + count += slots_in_chunk.len(); + + chunk.storage_targets.insert(account_addr, slots_in_chunk); + + // Save remaining storage slots for next chunk + self.current_account_storage = Some((account_addr, storage_iter)); + break; + } + } + } + + // Process any remaining storage-only entries (accounts not in account_targets) + while let Some((account_addr, storage_slots)) = self.storage_targets.iter_mut().next() && + count < self.size + { + let account_addr = *account_addr; + let storage_slots = std::mem::take(storage_slots); + let remaining_capacity = self.size - count; + + // Always remove from the map - if there are remaining slots they go to + // current_account_storage + self.storage_targets.remove(&account_addr); + + if storage_slots.len() <= remaining_capacity { + // Optimization: We can take all slots, just move the vec + count += storage_slots.len(); + chunk.storage_targets.insert(account_addr, storage_slots); + } else { + // We need to split the storage slots + let mut storage_iter = storage_slots.into_iter(); + let slots_in_chunk: Vec<_> = + storage_iter.by_ref().take(remaining_capacity).collect(); + + chunk.storage_targets.insert(account_addr, slots_in_chunk); + + // Save remaining storage slots for next chunk + if storage_iter.len() > 0 { + self.current_account_storage = Some((account_addr, storage_iter)); + } + break; + } + } + + if chunk.account_targets.is_empty() && chunk.storage_targets.is_empty() { + None + } else { + Some(chunk) + } + } +} diff --git 
a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index a1c6ac4e9f6..415938915ad 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -316,6 +316,86 @@ where } } + /// Reveals a V2 decoded multiproof. + /// + /// V2 multiproofs use a simpler format where proof nodes are stored as vectors rather than + /// hashmaps, with masks already included in the `ProofTrieNode` structure. + #[instrument( + skip_all, + fields( + account_nodes = multiproof.account_proofs.len(), + storages = multiproof.storage_proofs.len() + ) + )] + pub fn reveal_decoded_multiproof_v2( + &mut self, + multiproof: reth_trie_common::DecodedMultiProofV2, + ) -> SparseStateTrieResult<()> { + // Reveal the account proof nodes + self.reveal_account_v2_proof_nodes(multiproof.account_proofs)?; + + #[cfg(not(feature = "std"))] + // If nostd then serially reveal storage proof nodes for each storage trie + { + for (account, storage_proofs) in multiproof.storage_proofs { + self.reveal_storage_v2_proof_nodes(account, storage_proofs)?; + } + + Ok(()) + } + + #[cfg(feature = "std")] + // If std then reveal storage proofs in parallel + { + use rayon::iter::{ParallelBridge, ParallelIterator}; + + let retain_updates = self.retain_updates; + + // Process all storage trie revealings in parallel, having first removed the + // `reveal_nodes` tracking and `SparseTrie`s for each account from their HashMaps. + // These will be returned after processing. 
+ let results: Vec<_> = multiproof + .storage_proofs + .into_iter() + .map(|(account, storage_proofs)| { + let revealed_nodes = self.storage.take_or_create_revealed_paths(&account); + let trie = self.storage.take_or_create_trie(&account); + (account, storage_proofs, revealed_nodes, trie) + }) + .par_bridge() + .map(|(account, storage_proofs, mut revealed_nodes, mut trie)| { + let result = Self::reveal_storage_v2_proof_nodes_inner( + account, + storage_proofs, + &mut revealed_nodes, + &mut trie, + retain_updates, + ); + (account, result, revealed_nodes, trie) + }) + .collect(); + + let mut any_err = Ok(()); + for (account, result, revealed_nodes, trie) in results { + self.storage.revealed_paths.insert(account, revealed_nodes); + self.storage.tries.insert(account, trie); + if let Ok(_metric_values) = result { + #[cfg(feature = "metrics")] + { + self.metrics + .increment_total_storage_nodes(_metric_values.total_nodes as u64); + self.metrics + .increment_skipped_storage_nodes(_metric_values.skipped_nodes as u64); + } + } else { + any_err = result.map(|_| ()); + } + } + + any_err + } + } + /// Reveals an account multiproof. pub fn reveal_account_multiproof( &mut self, @@ -362,6 +442,89 @@ where Ok(()) } + /// Reveals account proof nodes from a V2 proof. + /// + /// V2 proofs already include the masks in the `ProofTrieNode` structure, + /// so no separate masks map is needed. 
+ pub fn reveal_account_v2_proof_nodes( + &mut self, + nodes: Vec, + ) -> SparseStateTrieResult<()> { + let FilteredV2ProofNodes { root_node, nodes, new_nodes, metric_values: _metric_values } = + filter_revealed_v2_proof_nodes(nodes, &mut self.revealed_account_paths)?; + + #[cfg(feature = "metrics")] + { + self.metrics.increment_total_account_nodes(_metric_values.total_nodes as u64); + self.metrics.increment_skipped_account_nodes(_metric_values.skipped_nodes as u64); + } + + if let Some(root_node) = root_node { + trace!(target: "trie::sparse", ?root_node, "Revealing root account node from V2 proof"); + let trie = + self.state.reveal_root(root_node.node, root_node.masks, self.retain_updates)?; + + trie.reserve_nodes(new_nodes); + + trace!(target: "trie::sparse", total_nodes = ?nodes.len(), "Revealing account nodes from V2 proof"); + trie.reveal_nodes(nodes)?; + } + + Ok(()) + } + + /// Reveals storage proof nodes from a V2 proof for the given address. + /// + /// V2 proofs already include the masks in the `ProofTrieNode` structure, + /// so no separate masks map is needed. + pub fn reveal_storage_v2_proof_nodes( + &mut self, + account: B256, + nodes: Vec, + ) -> SparseStateTrieResult<()> { + let (trie, revealed_paths) = self.storage.get_trie_and_revealed_paths_mut(account); + let _metric_values = Self::reveal_storage_v2_proof_nodes_inner( + account, + nodes, + revealed_paths, + trie, + self.retain_updates, + )?; + + #[cfg(feature = "metrics")] + { + self.metrics.increment_total_storage_nodes(_metric_values.total_nodes as u64); + self.metrics.increment_skipped_storage_nodes(_metric_values.skipped_nodes as u64); + } + + Ok(()) + } + + /// Reveals storage V2 proof nodes for the given address. This is an internal static function + /// designed to handle a variety of associated public functions. 
+ fn reveal_storage_v2_proof_nodes_inner( + account: B256, + nodes: Vec, + revealed_nodes: &mut HashSet, + trie: &mut SparseTrie, + retain_updates: bool, + ) -> SparseStateTrieResult { + let FilteredV2ProofNodes { root_node, nodes, new_nodes, metric_values } = + filter_revealed_v2_proof_nodes(nodes, revealed_nodes)?; + + if let Some(root_node) = root_node { + trace!(target: "trie::sparse", ?account, ?root_node, "Revealing root storage node from V2 proof"); + let trie = trie.reveal_root(root_node.node, root_node.masks, retain_updates)?; + + trie.reserve_nodes(new_nodes); + + trace!(target: "trie::sparse", ?account, total_nodes = ?nodes.len(), "Revealing storage nodes from V2 proof"); + trie.reveal_nodes(nodes)?; + } + + Ok(metric_values) + } + /// Reveals a storage multiproof for the given address. pub fn reveal_storage_multiproof( &mut self, @@ -1000,6 +1163,87 @@ fn filter_map_revealed_nodes( Ok(result) } +/// Result of [`filter_revealed_v2_proof_nodes`]. +#[derive(Debug, PartialEq, Eq)] +struct FilteredV2ProofNodes { + /// Root node which was pulled out of the original node set to be handled specially. + root_node: Option, + /// Filtered proof nodes. Root node is removed. + nodes: Vec, + /// Number of new nodes that will be revealed. This includes all children of branch nodes, even + /// if they are not in the proof. + new_nodes: usize, + /// Values which are being returned so they can be incremented into metrics. + metric_values: ProofNodesMetricValues, +} + +/// Filters V2 proof nodes that are already revealed, separates the root node if present, and +/// returns additional information about the number of total, skipped, and new nodes. +/// +/// Unlike [`filter_map_revealed_nodes`], V2 proof nodes already have masks included in the +/// `ProofTrieNode` structure, so no separate masks map is needed. 
+fn filter_revealed_v2_proof_nodes( + proof_nodes: Vec, + revealed_nodes: &mut HashSet, +) -> SparseStateTrieResult { + let mut result = FilteredV2ProofNodes { + root_node: None, + nodes: Vec::with_capacity(proof_nodes.len()), + new_nodes: 0, + metric_values: Default::default(), + }; + + // Count non-EmptyRoot nodes for sanity check. When multiple proofs are extended together, + // duplicate EmptyRoot nodes may appear (e.g., storage proofs split across chunks for an + // account with empty storage). We only error if there's an EmptyRoot alongside real nodes. + let non_empty_root_count = + proof_nodes.iter().filter(|n| !matches!(n.node, TrieNode::EmptyRoot)).count(); + + for node in proof_nodes { + result.metric_values.total_nodes += 1; + + let is_root = node.path.is_empty(); + + // If the node is already revealed, skip it. We don't ever skip the root node, nor do we add + // it to `revealed_nodes`. + if !is_root && !revealed_nodes.insert(node.path) { + result.metric_values.skipped_nodes += 1; + continue + } + + result.new_nodes += 1; + + // Count children for capacity estimation + match &node.node { + TrieNode::Branch(branch) => { + result.new_nodes += branch.state_mask.count_ones() as usize; + } + TrieNode::Extension(_) => { + result.new_nodes += 1; + } + _ => {} + }; + + if is_root { + // Perform sanity check: EmptyRoot is only valid if there are no other real nodes. 
+ if matches!(node.node, TrieNode::EmptyRoot) && non_empty_root_count > 0 { + return Err(SparseStateTrieErrorKind::InvalidRootNode { + path: node.path, + node: alloy_rlp::encode(&node.node).into(), + } + .into()) + } + + result.root_node = Some(node); + continue + } + + result.nodes.push(node); + } + + Ok(result) +} + #[cfg(test)] mod tests { use super::*; @@ -1174,6 +1418,127 @@ mod tests { .is_none()); } + #[test] + fn reveal_v2_proof_nodes() { + let provider_factory = DefaultTrieNodeProviderFactory; + let mut sparse = SparseStateTrie::::default(); + + let leaf_value = alloy_rlp::encode(TrieAccount::default()); + let leaf_1_node = TrieNode::Leaf(LeafNode::new(Nibbles::default(), leaf_value.clone())); + let leaf_2_node = TrieNode::Leaf(LeafNode::new(Nibbles::default(), leaf_value.clone())); + + let branch_node = TrieNode::Branch(BranchNode { + stack: vec![ + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1_node)), + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2_node)), + ], + state_mask: TrieMask::new(0b11), + }); + + // Create V2 proof nodes with masks already included + let v2_proof_nodes = vec![ + ProofTrieNode { + path: Nibbles::default(), + node: branch_node, + masks: Some(BranchNodeMasks { + hash_mask: TrieMask::default(), + tree_mask: TrieMask::default(), + }), + }, + ProofTrieNode { path: Nibbles::from_nibbles([0x0]), node: leaf_1_node, masks: None }, + ProofTrieNode { path: Nibbles::from_nibbles([0x1]), node: leaf_2_node, masks: None }, + ]; + + // Reveal V2 proof nodes + sparse.reveal_account_v2_proof_nodes(v2_proof_nodes.clone()).unwrap(); + + // Check that the state trie contains the leaf node and value + assert!(sparse + .state_trie_ref() + .unwrap() + .nodes_ref() + .contains_key(&Nibbles::from_nibbles([0x0]))); + assert_eq!( + sparse.state_trie_ref().unwrap().get_leaf_value(&Nibbles::from_nibbles([0x0])), + Some(&leaf_value) + ); + + // Remove the leaf node + sparse.remove_account_leaf(&Nibbles::from_nibbles([0x0]), &provider_factory).unwrap(); + 
assert!(sparse + .state_trie_ref() + .unwrap() + .get_leaf_value(&Nibbles::from_nibbles([0x0])) + .is_none()); + + // Reveal again - should skip already revealed paths + sparse.reveal_account_v2_proof_nodes(v2_proof_nodes).unwrap(); + assert!(sparse + .state_trie_ref() + .unwrap() + .get_leaf_value(&Nibbles::from_nibbles([0x0])) + .is_none()); + } + + #[test] + fn reveal_storage_v2_proof_nodes() { + let provider_factory = DefaultTrieNodeProviderFactory; + let mut sparse = SparseStateTrie::::default(); + + let storage_value: Vec = alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec(); + let leaf_1_node = TrieNode::Leaf(LeafNode::new(Nibbles::default(), storage_value.clone())); + let leaf_2_node = TrieNode::Leaf(LeafNode::new(Nibbles::default(), storage_value.clone())); + + let branch_node = TrieNode::Branch(BranchNode { + stack: vec![ + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1_node)), + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2_node)), + ], + state_mask: TrieMask::new(0b11), + }); + + let v2_proof_nodes = vec![ + ProofTrieNode { path: Nibbles::default(), node: branch_node, masks: None }, + ProofTrieNode { path: Nibbles::from_nibbles([0x0]), node: leaf_1_node, masks: None }, + ProofTrieNode { path: Nibbles::from_nibbles([0x1]), node: leaf_2_node, masks: None }, + ]; + + // Reveal V2 storage proof nodes for account + sparse.reveal_storage_v2_proof_nodes(B256::ZERO, v2_proof_nodes.clone()).unwrap(); + + // Check that the storage trie contains the leaf node and value + assert!(sparse + .storage_trie_ref(&B256::ZERO) + .unwrap() + .nodes_ref() + .contains_key(&Nibbles::from_nibbles([0x0]))); + assert_eq!( + sparse + .storage_trie_ref(&B256::ZERO) + .unwrap() + .get_leaf_value(&Nibbles::from_nibbles([0x0])), + Some(&storage_value) + ); + + // Remove the leaf node + sparse + .remove_storage_leaf(B256::ZERO, &Nibbles::from_nibbles([0x0]), &provider_factory) + .unwrap(); + assert!(sparse + .storage_trie_ref(&B256::ZERO) + .unwrap() + 
.get_leaf_value(&Nibbles::from_nibbles([0x0])) + .is_none()); + + // Reveal again - should skip already revealed paths + sparse.reveal_storage_v2_proof_nodes(B256::ZERO, v2_proof_nodes).unwrap(); + assert!(sparse + .storage_trie_ref(&B256::ZERO) + .unwrap() + .get_leaf_value(&Nibbles::from_nibbles([0x0])) + .is_none()); + } + #[test] fn take_trie_updates() { reth_tracing::init_test_tracing(); diff --git a/crates/trie/trie/src/proof_v2/target.rs b/crates/trie/trie/src/proof_v2/target.rs index c96b202e85a..a969089f3ae 100644 --- a/crates/trie/trie/src/proof_v2/target.rs +++ b/crates/trie/trie/src/proof_v2/target.rs @@ -19,6 +19,11 @@ impl Target { Self { key, min_len: 0 } } + /// Returns the key the target was initialized with. + pub fn key(&self) -> B256 { + B256::from_slice(&self.key.pack()) + } + /// Only match trie nodes whose path is at least this long. /// /// # Panics From 3667d3b5aa2128a2e3c2e462f91df1cec5be1791 Mon Sep 17 00:00:00 2001 From: Hwangjae Lee Date: Tue, 20 Jan 2026 22:33:08 +0900 Subject: [PATCH 080/267] perf(trie): defer child RLP conversion in proof_v2 for async encoder support (#20873) Signed-off-by: Hwangjae Lee Co-authored-by: Brian Picciano --- crates/trie/trie/src/proof_v2/mod.rs | 51 ++++++++++++++++------------ 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/crates/trie/trie/src/proof_v2/mod.rs b/crates/trie/trie/src/proof_v2/mod.rs index 2abc55686fc..f421ba7bb8a 100644 --- a/crates/trie/trie/src/proof_v2/mod.rs +++ b/crates/trie/trie/src/proof_v2/mod.rs @@ -317,14 +317,15 @@ where } /// Returns the path of the child on top of the `child_stack`, or the root path if the stack is - /// empty. - fn last_child_path(&self) -> Nibbles { + /// empty. Returns None if the current branch has not yet pushed a child (empty `state_mask`). + fn last_child_path(&self) -> Option { // If there is no branch under construction then the top child must be the root child. 
let Some(branch) = self.branch_stack.last() else { - return Nibbles::new(); + return Some(Nibbles::new()); }; - self.child_path_at(Self::highest_set_nibble(branch.state_mask)) + (!branch.state_mask.is_empty()) + .then(|| self.child_path_at(Self::highest_set_nibble(branch.state_mask))) } /// Calls [`Self::commit_child`] on the last child of `child_stack`, replacing it with a @@ -340,7 +341,9 @@ where &mut self, targets: &mut TargetsCursor<'a>, ) -> Result<(), StateProofError> { - let Some(child) = self.child_stack.pop() else { return Ok(()) }; + let Some(child_path) = self.last_child_path() else { return Ok(()) }; + let child = + self.child_stack.pop().expect("child_stack can't be empty if there's a child path"); // If the child is already an `RlpNode` then there is nothing to do, push it back on with no // changes. @@ -349,14 +352,15 @@ where return Ok(()) } - let child_path = self.last_child_path(); - // TODO theoretically `commit_child` only needs to convert to an `RlpNode` if it's going to - // retain the proof, otherwise we could leave the child as-is on the stack and convert it - // when popping the branch, giving more time to the DeferredEncoder to do async work. - let child_rlp_node = self.commit_child(targets, child_path, child)?; + // Only commit immediately if retained for the proof. Otherwise, defer conversion + // to pop_branch() to give DeferredEncoder time for async work. + if self.should_retain(targets, &child_path, true) { + let child_rlp_node = self.commit_child(targets, child_path, child)?; + self.child_stack.push(ProofTrieBranchChild::RlpNode(child_rlp_node)); + } else { + self.child_stack.push(child); + } - // Replace the child on the stack - self.child_stack.push(ProofTrieBranchChild::RlpNode(child_rlp_node)); Ok(()) } @@ -499,15 +503,20 @@ where "Stack is missing necessary children ({num_children:?})" ); - // Collect children into an `RlpNode` Vec by committing and pushing each of them. 
- for (idx, child) in - self.child_stack.drain(self.child_stack.len() - num_children..).enumerate() - { - let ProofTrieBranchChild::RlpNode(child_rlp_node) = child else { - panic!( - "all branch children must have been committed, found {} at index {idx:?}", - std::any::type_name_of_val(&child) - ); + // Collect children into RlpNode Vec. Children are in lexicographic order. + for child in self.child_stack.drain(self.child_stack.len() - num_children..) { + let child_rlp_node = match child { + ProofTrieBranchChild::RlpNode(rlp_node) => rlp_node, + uncommitted_child => { + // Convert uncommitted child (not retained for proof) to RlpNode now. + self.rlp_encode_buf.clear(); + let (rlp_node, freed_buf) = + uncommitted_child.into_rlp(&mut self.rlp_encode_buf)?; + if let Some(buf) = freed_buf { + self.rlp_nodes_bufs.push(buf); + } + rlp_node + } }; rlp_nodes_buf.push(child_rlp_node); } From ea3d4663ae2550622040c1ff9b212bcfde04dfe3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 20 Jan 2026 14:34:41 +0100 Subject: [PATCH 081/267] perf(trie): use HashMap reserve heuristic in MultiProof::extend (#21199) --- crates/trie/common/src/proofs.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index c3eab920112..b65b3ec9665 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -267,6 +267,12 @@ impl MultiProof { self.account_subtree.extend_from(other.account_subtree); self.branch_node_masks.extend(other.branch_node_masks); + let reserve = if self.storages.is_empty() { + other.storages.len() + } else { + other.storages.len().div_ceil(2) + }; + self.storages.reserve(reserve); for (hashed_address, storage) in other.storages { match self.storages.entry(hashed_address) { hash_map::Entry::Occupied(mut entry) => { @@ -390,6 +396,12 @@ impl DecodedMultiProof { self.account_subtree.extend_from(other.account_subtree); 
self.branch_node_masks.extend(other.branch_node_masks); + let reserve = if self.storages.is_empty() { + other.storages.len() + } else { + other.storages.len().div_ceil(2) + }; + self.storages.reserve(reserve); for (hashed_address, storage) in other.storages { match self.storages.entry(hashed_address) { hash_map::Entry::Occupied(mut entry) => { From 346cc0da71a10393a6426aa77952528e44e8dafb Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Tue, 20 Jan 2026 14:50:29 +0100 Subject: [PATCH 082/267] feat(trie): add AsyncAccountValueEncoder for V2 proof computation (#21197) Co-authored-by: Amp --- crates/trie/parallel/Cargo.toml | 1 + crates/trie/parallel/src/lib.rs | 3 + crates/trie/parallel/src/proof_task.rs | 33 ++-- crates/trie/parallel/src/value_encoder.rs | 185 ++++++++++++++++++++++ crates/trie/trie/src/proof_v2/mod.rs | 26 ++- crates/trie/trie/src/proof_v2/value.rs | 6 +- 6 files changed, 217 insertions(+), 37 deletions(-) create mode 100644 crates/trie/parallel/src/value_encoder.rs diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 9fb882b44a5..d64f2dfb519 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-execution-errors.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index ba88ab690db..cba9d9440e6 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -22,6 +22,9 @@ pub mod proof; pub mod proof_task; +/// Async value encoder for V2 proofs. +pub(crate) mod value_encoder; + /// V2 multiproof targets and chunking. 
pub mod targets_v2; diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index c6c0d895559..1d492e27755 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -41,7 +41,7 @@ use alloy_primitives::{ use alloy_rlp::{BufMut, Encodable}; use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; -use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind, StateProofError}; use reth_provider::{DatabaseProviderROFactory, ProviderError, ProviderResult}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ @@ -305,17 +305,17 @@ impl ProofWorkerHandle { self.storage_work_tx .send(StorageWorkerJob::StorageProof { input, proof_result_sender }) .map_err(|err| { - let error = - ProviderError::other(std::io::Error::other("storage workers unavailable")); - if let StorageWorkerJob::StorageProof { proof_result_sender, .. } = err.0 { let _ = proof_result_sender.send(StorageProofResultMessage { hashed_address, - result: Err(ParallelStateRootError::Provider(error.clone())), + result: Err(DatabaseError::Other( + "storage workers unavailable".to_string(), + ) + .into()), }); } - error + ProviderError::other(std::io::Error::other("storage workers unavailable")) }) } @@ -432,7 +432,7 @@ where input: StorageProofInput, trie_cursor_metrics: &mut TrieCursorMetricsCache, hashed_cursor_metrics: &mut HashedCursorMetricsCache, - ) -> Result { + ) -> Result { // Consume the input so we can move large collections (e.g. target slots) without cloning. 
let StorageProofInput::Legacy { hashed_address, @@ -469,20 +469,13 @@ where .with_added_removed_keys(added_removed_keys) .with_trie_cursor_metrics(trie_cursor_metrics) .with_hashed_cursor_metrics(hashed_cursor_metrics) - .storage_multiproof(target_slots) - .map_err(|e| ParallelStateRootError::Other(e.to_string())); + .storage_multiproof(target_slots); trie_cursor_metrics.record_span("trie_cursor"); hashed_cursor_metrics.record_span("hashed_cursor"); // Decode proof into DecodedStorageMultiProof - let decoded_result = raw_proof_result.and_then(|raw_proof| { - raw_proof.try_into().map_err(|e: alloy_rlp::Error| { - ParallelStateRootError::Other(format!( - "Failed to decode storage proof for {}: {}", - hashed_address, e - )) - }) - })?; + let decoded_result = + raw_proof_result.and_then(|raw_proof| raw_proof.try_into().map_err(Into::into))?; trace!( target: "trie::proof_task", @@ -502,7 +495,7 @@ where ::StorageTrieCursor<'_>, ::StorageCursor<'_>, >, - ) -> Result { + ) -> Result { let StorageProofInput::V2 { hashed_address, mut targets } = input else { panic!("compute_v2_storage_proof only accepts StorageProofInput::V2") }; @@ -717,12 +710,12 @@ pub struct StorageProofResultMessage { /// The hashed address this storage proof belongs to pub(crate) hashed_address: B256, /// The storage proof calculation result - pub(crate) result: Result, + pub(crate) result: Result, } /// Internal message for storage workers. 
#[derive(Debug)] -enum StorageWorkerJob { +pub(crate) enum StorageWorkerJob { /// Storage proof computation request StorageProof { /// Storage proof input parameters diff --git a/crates/trie/parallel/src/value_encoder.rs b/crates/trie/parallel/src/value_encoder.rs new file mode 100644 index 00000000000..13c611922db --- /dev/null +++ b/crates/trie/parallel/src/value_encoder.rs @@ -0,0 +1,185 @@ +use crate::proof_task::{ + StorageProofInput, StorageProofResult, StorageProofResultMessage, StorageWorkerJob, +}; +use alloy_primitives::{map::B256Map, B256}; +use alloy_rlp::Encodable; +use core::cell::RefCell; +use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; +use dashmap::DashMap; +use reth_execution_errors::trie::StateProofError; +use reth_primitives_traits::Account; +use reth_storage_errors::db::DatabaseError; +use reth_trie::{ + proof_v2::{DeferredValueEncoder, LeafValueEncoder, Target}, + ProofTrieNode, +}; +use std::{rc::Rc, sync::Arc}; + +/// Returned from [`AsyncAccountValueEncoder`], used to track an async storage root calculation. +pub(crate) enum AsyncAccountDeferredValueEncoder { + Dispatched { + hashed_address: B256, + account: Account, + proof_result_rx: Result, DatabaseError>, + // None if results shouldn't be retained for this dispatched proof. + storage_proof_results: Option>>>>, + }, + FromCache { + account: Account, + root: B256, + }, +} + +impl DeferredValueEncoder for AsyncAccountDeferredValueEncoder { + fn encode(self, buf: &mut Vec) -> Result<(), StateProofError> { + let (account, root) = match self { + Self::Dispatched { + hashed_address, + account, + proof_result_rx, + storage_proof_results, + } => { + let result = proof_result_rx? + .recv() + .map_err(|_| { + StateProofError::Database(DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address:?}", + ))) + })? 
+ .result?; + + let StorageProofResult::V2 { root: Some(root), proof } = result else { + panic!("StorageProofResult is not V2 with root: {result:?}") + }; + + if let Some(storage_proof_results) = storage_proof_results.as_ref() { + storage_proof_results.borrow_mut().insert(hashed_address, proof); + } + + (account, root) + } + Self::FromCache { account, root } => (account, root), + }; + + let account = account.into_trie_account(root); + account.encode(buf); + Ok(()) + } +} + +/// Implements the [`LeafValueEncoder`] trait for accounts using a [`CrossbeamSender`] to dispatch +/// and compute storage roots asynchronously. Can also accept a set of already dispatched account +/// storage proofs, for cases where it's possible to determine some necessary accounts ahead of +/// time. +pub(crate) struct AsyncAccountValueEncoder { + storage_work_tx: CrossbeamSender, + /// Storage proof jobs which were dispatched ahead of time. + dispatched: B256Map>, + /// Storage roots which have already been computed. This can be used only if a storage proof + /// wasn't dispatched for an account, otherwise we must consume the proof result. + cached_storage_roots: Arc>, + /// Tracks storage proof results received from the storage workers. [`Rc`] + [`RefCell`] is + /// required because [`DeferredValueEncoder`] cannot have a lifetime. + storage_proof_results: Rc>>>, +} + +impl AsyncAccountValueEncoder { + /// Initializes a [`Self`] using a `ProofWorkerHandle` which will be used to calculate storage + /// roots asynchronously. + #[expect(dead_code)] + pub(crate) fn new( + storage_work_tx: CrossbeamSender, + dispatched: B256Map>, + cached_storage_roots: Arc>, + ) -> Self { + Self { + storage_work_tx, + dispatched, + cached_storage_roots, + storage_proof_results: Default::default(), + } + } + + /// Consume [`Self`] and return all collected storage proofs which had been dispatched. 
+ /// + /// # Panics + /// + /// This method panics if any deferred encoders produced by [`Self::deferred_encoder`] have not + /// been dropped. + #[expect(dead_code)] + pub(crate) fn into_storage_proofs( + self, + ) -> Result>, StateProofError> { + let mut storage_proof_results = Rc::into_inner(self.storage_proof_results) + .expect("no deferred encoders are still allocated") + .into_inner(); + + // Any remaining dispatched proofs need to have their results collected + for (hashed_address, rx) in &self.dispatched { + let result = rx + .recv() + .map_err(|_| { + StateProofError::Database(DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address:?}", + ))) + })? + .result?; + + let StorageProofResult::V2 { proof, .. } = result else { + panic!("StorageProofResult is not V2: {result:?}") + }; + + storage_proof_results.insert(*hashed_address, proof); + } + + Ok(storage_proof_results) + } +} + +impl LeafValueEncoder for AsyncAccountValueEncoder { + type Value = Account; + type DeferredEncoder = AsyncAccountDeferredValueEncoder; + + fn deferred_encoder( + &mut self, + hashed_address: B256, + account: Self::Value, + ) -> Self::DeferredEncoder { + // If the proof job has already been dispatched for this account then it's not necessary to + // dispatch another. + if let Some(rx) = self.dispatched.remove(&hashed_address) { + return AsyncAccountDeferredValueEncoder::Dispatched { + hashed_address, + account, + proof_result_rx: Ok(rx), + storage_proof_results: Some(self.storage_proof_results.clone()), + } + } + + // If the address didn't have a job dispatched for it then we can assume it has no targets, + // and we only need its root. 
+ + // If the root is already calculated then just use it directly + if let Some(root) = self.cached_storage_roots.get(&hashed_address) { + return AsyncAccountDeferredValueEncoder::FromCache { account, root: *root } + } + + // Create a proof input which targets a bogus key, so that we calculate the root as a + // side-effect. + let input = StorageProofInput::new(hashed_address, vec![Target::new(B256::ZERO)]); + let (tx, rx) = crossbeam_channel::bounded(1); + + let proof_result_rx = self + .storage_work_tx + .send(StorageWorkerJob::StorageProof { input, proof_result_sender: tx }) + .map_err(|_| DatabaseError::Other("storage workers unavailable".to_string())) + .map(|_| rx); + + AsyncAccountDeferredValueEncoder::Dispatched { + hashed_address, + account, + proof_result_rx, + storage_proof_results: None, + } + } +} diff --git a/crates/trie/trie/src/proof_v2/mod.rs b/crates/trie/trie/src/proof_v2/mod.rs index f421ba7bb8a..8861def8a5c 100644 --- a/crates/trie/trie/src/proof_v2/mod.rs +++ b/crates/trie/trie/src/proof_v2/mod.rs @@ -651,7 +651,7 @@ where )] fn calculate_key_range<'a>( &mut self, - value_encoder: &VE, + value_encoder: &mut VE, targets: &mut TargetsCursor<'a>, hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>, lower_bound: Nibbles, @@ -660,7 +660,7 @@ where // A helper closure for mapping entries returned from the `hashed_cursor`, converting the // key to Nibbles and immediately creating the DeferredValueEncoder so that encoding of the // leaf value can begin ASAP. - let map_hashed_cursor_entry = |(key_b256, val): (B256, _)| { + let mut map_hashed_cursor_entry = |(key_b256, val): (B256, _)| { debug_assert_eq!(key_b256.len(), 32); // SAFETY: key is a B256 and so is exactly 32-bytes. 
let key = unsafe { Nibbles::unpack_unchecked(key_b256.as_slice()) }; @@ -679,7 +679,7 @@ where let lower_key = B256::right_padding_from(&lower_bound.pack()); *hashed_cursor_current = - self.hashed_cursor.seek(lower_key)?.map(map_hashed_cursor_entry); + self.hashed_cursor.seek(lower_key)?.map(&mut map_hashed_cursor_entry); } // Loop over all keys in the range, calling `push_leaf` on each. @@ -689,7 +689,7 @@ where let (key, val) = core::mem::take(hashed_cursor_current).expect("while-let checks for Some"); self.push_leaf(targets, key, val)?; - *hashed_cursor_current = self.hashed_cursor.next()?.map(map_hashed_cursor_entry); + *hashed_cursor_current = self.hashed_cursor.next()?.map(&mut map_hashed_cursor_entry); } trace!(target: TRACE_TARGET, "No further keys within range"); @@ -1125,7 +1125,7 @@ where )] fn proof_subtrie<'a>( &mut self, - value_encoder: &VE, + value_encoder: &mut VE, trie_cursor_state: &mut TrieCursorState, hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>, sub_trie_targets: SubTrieTargets<'a>, @@ -1254,7 +1254,7 @@ where /// See docs on [`Self::proof`] for expected behavior. fn proof_inner( &mut self, - value_encoder: &VE, + value_encoder: &mut VE, targets: &mut [Target], ) -> Result, StateProofError> { // If there are no targets then nothing could be returned, return early. @@ -1305,7 +1305,7 @@ where #[instrument(target = TRACE_TARGET, level = "trace", skip_all)] pub fn proof( &mut self, - value_encoder: &VE, + value_encoder: &mut VE, targets: &mut [Target], ) -> Result, StateProofError> { self.trie_cursor.reset(); @@ -1341,9 +1341,6 @@ where hashed_address: B256, targets: &mut [Target], ) -> Result, StateProofError> { - /// Static storage value encoder instance used by all storage proofs. - static STORAGE_VALUE_ENCODER: StorageValueEncoder = StorageValueEncoder; - self.hashed_cursor.set_hashed_address(hashed_address); // Shortcut: check if storage is empty @@ -1360,8 +1357,9 @@ where // been checked. 
self.trie_cursor.set_hashed_address(hashed_address); - // Use the static StorageValueEncoder and pass it to proof_inner - self.proof_inner(&STORAGE_VALUE_ENCODER, targets) + // Create a mutable storage value encoder + let mut storage_value_encoder = StorageValueEncoder; + self.proof_inner(&mut storage_value_encoder, targets) } /// Computes the root hash from a set of proof nodes. @@ -1639,13 +1637,13 @@ mod tests { InstrumentedHashedCursor::new(hashed_cursor, &mut hashed_cursor_metrics); // Call ProofCalculator::proof with account targets - let value_encoder = SyncAccountValueEncoder::new( + let mut value_encoder = SyncAccountValueEncoder::new( self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), ); let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor); let proof_v2_result = - proof_calculator.proof(&value_encoder, &mut targets_vec.clone())?; + proof_calculator.proof(&mut value_encoder, &mut targets_vec.clone())?; // Output metrics trace!(target: TRACE_TARGET, ?trie_cursor_metrics, "V2 trie cursor metrics"); diff --git a/crates/trie/trie/src/proof_v2/value.rs b/crates/trie/trie/src/proof_v2/value.rs index dd330d9a879..2b7b0851192 100644 --- a/crates/trie/trie/src/proof_v2/value.rs +++ b/crates/trie/trie/src/proof_v2/value.rs @@ -44,7 +44,7 @@ pub trait LeafValueEncoder { /// /// The returned deferred encoder will be called as late as possible in the algorithm to /// maximize the time available for parallel computation (e.g., storage root calculation). - fn deferred_encoder(&self, key: B256, value: Self::Value) -> Self::DeferredEncoder; + fn deferred_encoder(&mut self, key: B256, value: Self::Value) -> Self::DeferredEncoder; } /// An encoder for storage slot values. 
@@ -68,7 +68,7 @@ impl LeafValueEncoder for StorageValueEncoder { type Value = U256; type DeferredEncoder = StorageDeferredValueEncoder; - fn deferred_encoder(&self, _key: B256, value: Self::Value) -> Self::DeferredEncoder { + fn deferred_encoder(&mut self, _key: B256, value: Self::Value) -> Self::DeferredEncoder { StorageDeferredValueEncoder(value) } } @@ -157,7 +157,7 @@ where type DeferredEncoder = SyncAccountDeferredValueEncoder; fn deferred_encoder( - &self, + &mut self, hashed_address: B256, account: Self::Value, ) -> Self::DeferredEncoder { From a0845bab18caa520a311383929cd7cccd299e186 Mon Sep 17 00:00:00 2001 From: tonis Date: Tue, 20 Jan 2026 21:19:31 +0700 Subject: [PATCH 083/267] feat: Check CL/Reth capability compatibility (#20348) Co-authored-by: Matthias Seitz Co-authored-by: Amp --- crates/rpc/rpc-engine-api/src/capabilities.rs | 150 +++++++++++++++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 9 +- 2 files changed, 138 insertions(+), 21 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/capabilities.rs b/crates/rpc/rpc-engine-api/src/capabilities.rs index 1e95d7ed1ca..75583c821e2 100644 --- a/crates/rpc/rpc-engine-api/src/capabilities.rs +++ b/crates/rpc/rpc-engine-api/src/capabilities.rs @@ -1,6 +1,11 @@ +//! Engine API capabilities. + use std::collections::HashSet; +use tracing::warn; -/// The list of all supported Engine capabilities available over the engine endpoint. +/// All Engine API capabilities supported by Reth (Ethereum mainnet). +/// +/// See for updates. pub const CAPABILITIES: &[&str] = &[ "engine_forkchoiceUpdatedV1", "engine_forkchoiceUpdatedV2", @@ -22,43 +27,150 @@ pub const CAPABILITIES: &[&str] = &[ "engine_getBlobsV3", ]; -// The list of all supported Engine capabilities available over the engine endpoint. -/// -/// Latest spec: Prague +/// Engine API capabilities set. 
#[derive(Debug, Clone)] pub struct EngineCapabilities { inner: HashSet, } impl EngineCapabilities { - /// Creates a new `EngineCapabilities` instance with the given capabilities. - pub fn new(capabilities: impl IntoIterator>) -> Self { + /// Creates from an iterator of capability strings. + pub fn new(capabilities: impl IntoIterator>) -> Self { Self { inner: capabilities.into_iter().map(Into::into).collect() } } - /// Returns the list of all supported Engine capabilities for Prague spec. - fn prague() -> Self { - Self { inner: CAPABILITIES.iter().copied().map(str::to_owned).collect() } - } - - /// Returns the list of all supported Engine capabilities. + /// Returns the capabilities as a list of strings. pub fn list(&self) -> Vec { self.inner.iter().cloned().collect() } - /// Inserts a new capability. - pub fn add_capability(&mut self, capability: impl Into) { - self.inner.insert(capability.into()); + /// Returns a reference to the inner set. + pub const fn as_set(&self) -> &HashSet { + &self.inner } - /// Removes a capability. - pub fn remove_capability(&mut self, capability: &str) -> Option { - self.inner.take(capability) + /// Compares CL capabilities with this EL's capabilities and returns any mismatches. + /// + /// Called during `engine_exchangeCapabilities` to detect version mismatches + /// between the consensus layer and execution layer. 
+ pub fn get_capability_mismatches(&self, cl_capabilities: &[String]) -> CapabilityMismatches { + let cl_set: HashSet<&str> = cl_capabilities.iter().map(String::as_str).collect(); + + // CL has methods EL doesn't support + let mut missing_in_el: Vec<_> = cl_capabilities + .iter() + .filter(|cap| !self.inner.contains(cap.as_str())) + .cloned() + .collect(); + missing_in_el.sort(); + + // EL has methods CL doesn't support + let mut missing_in_cl: Vec<_> = + self.inner.iter().filter(|cap| !cl_set.contains(cap.as_str())).cloned().collect(); + missing_in_cl.sort(); + + CapabilityMismatches { missing_in_el, missing_in_cl } + } + + /// Logs warnings if CL and EL capabilities don't match. + /// + /// Called during `engine_exchangeCapabilities` to warn operators about + /// version mismatches between the consensus layer and execution layer. + pub fn log_capability_mismatches(&self, cl_capabilities: &[String]) { + let mismatches = self.get_capability_mismatches(cl_capabilities); + + if !mismatches.missing_in_el.is_empty() { + warn!( + target: "rpc::engine", + missing = ?mismatches.missing_in_el, + "CL supports Engine API methods that Reth doesn't. Consider upgrading Reth." + ); + } + + if !mismatches.missing_in_cl.is_empty() { + warn!( + target: "rpc::engine", + missing = ?mismatches.missing_in_cl, + "Reth supports Engine API methods that CL doesn't. Consider upgrading your consensus client." + ); + } } } impl Default for EngineCapabilities { fn default() -> Self { - Self::prague() + Self::new(CAPABILITIES.iter().copied()) + } +} + +/// Result of comparing CL and EL capabilities. +#[derive(Debug, Default, PartialEq, Eq)] +pub struct CapabilityMismatches { + /// Methods supported by CL but not by EL (Reth). + /// Operators should consider upgrading Reth. + pub missing_in_el: Vec, + /// Methods supported by EL (Reth) but not by CL. + /// Operators should consider upgrading their consensus client. 
+ pub missing_in_cl: Vec, +} + +impl CapabilityMismatches { + /// Returns `true` if there are no mismatches. + pub const fn is_empty(&self) -> bool { + self.missing_in_el.is_empty() && self.missing_in_cl.is_empty() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_no_mismatches() { + let el = EngineCapabilities::new(["method_a", "method_b"]); + let cl = vec!["method_a".to_string(), "method_b".to_string()]; + + let result = el.get_capability_mismatches(&cl); + assert!(result.is_empty()); + } + + #[test] + fn test_cl_has_extra_methods() { + let el = EngineCapabilities::new(["method_a"]); + let cl = vec!["method_a".to_string(), "method_b".to_string()]; + + let result = el.get_capability_mismatches(&cl); + assert_eq!(result.missing_in_el, vec!["method_b"]); + assert!(result.missing_in_cl.is_empty()); + } + + #[test] + fn test_el_has_extra_methods() { + let el = EngineCapabilities::new(["method_a", "method_b"]); + let cl = vec!["method_a".to_string()]; + + let result = el.get_capability_mismatches(&cl); + assert!(result.missing_in_el.is_empty()); + assert_eq!(result.missing_in_cl, vec!["method_b"]); + } + + #[test] + fn test_both_have_extra_methods() { + let el = EngineCapabilities::new(["method_a", "method_c"]); + let cl = vec!["method_a".to_string(), "method_b".to_string()]; + + let result = el.get_capability_mismatches(&cl); + assert_eq!(result.missing_in_el, vec!["method_b"]); + assert_eq!(result.missing_in_cl, vec!["method_c"]); + } + + #[test] + fn test_results_are_sorted() { + let el = EngineCapabilities::new(["z_method", "a_method"]); + let cl = vec!["z_other".to_string(), "a_other".to_string()]; + + let result = el.get_capability_mismatches(&cl); + assert_eq!(result.missing_in_el, vec!["a_other", "z_other"]); + assert_eq!(result.missing_in_cl, vec!["a_method", "z_method"]); } } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index b1e9986c41c..5a7b69dd9e1 100644 --- 
a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1134,8 +1134,13 @@ where /// Handler for `engine_exchangeCapabilitiesV1` /// See also - async fn exchange_capabilities(&self, _capabilities: Vec) -> RpcResult> { - Ok(self.capabilities().list()) + async fn exchange_capabilities(&self, capabilities: Vec) -> RpcResult> { + trace!(target: "rpc::engine", "Serving engine_exchangeCapabilities"); + + let el_caps = self.capabilities(); + el_caps.log_capability_mismatches(&capabilities); + + Ok(el_caps.list()) } async fn get_blobs_v1( From bd144a4c42758b4eec814807db562b1e1b4b64c8 Mon Sep 17 00:00:00 2001 From: YK Date: Tue, 20 Jan 2026 15:23:29 +0100 Subject: [PATCH 084/267] feat(stages): add RocksDB support for IndexAccountHistoryStage (#21165) Co-authored-by: Georgios Konstantopoulos Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- .../src/stages/index_account_history.rs | 210 ++++++- crates/stages/stages/src/stages/utils.rs | 188 ++++++- crates/storage/provider/src/either_writer.rs | 37 +- .../src/providers/database/provider.rs | 40 +- .../src/providers/rocksdb/provider.rs | 511 +++++++++++++++++- .../provider/src/traits/rocksdb_provider.rs | 29 +- 6 files changed, 970 insertions(+), 45 deletions(-) diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 25dbf104456..92fa5f3244c 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -1,11 +1,10 @@ -use crate::stages::utils::collect_history_indices; - -use super::{collect_account_history_indices, load_history_indices}; -use alloy_primitives::Address; +use super::collect_account_history_indices; +use crate::stages::utils::{collect_history_indices, load_account_history}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; -use reth_db_api::{models::ShardedKey, table::Decode, tables, 
transaction::DbTxMut}; +use reth_db_api::{models::ShardedKey, tables, transaction::DbTxMut}; use reth_provider::{ - DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, StorageSettingsCache, + DBProvider, EitherWriter, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, + RocksDBProviderFactory, StorageSettingsCache, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ @@ -53,7 +52,8 @@ where + PruneCheckpointWriter + reth_storage_api::ChangeSetReader + reth_provider::StaticFileProviderFactory - + StorageSettingsCache, + + StorageSettingsCache + + RocksDBProviderFactory, { /// Return the id of the stage fn id(&self) -> StageId { @@ -101,15 +101,25 @@ where let mut range = input.next_block_range(); let first_sync = input.checkpoint().block_number == 0; + let use_rocksdb = provider.cached_storage_settings().account_history_in_rocksdb; // On first sync we might have history coming from genesis. We clear the table since it's // faster to rebuild from scratch. if first_sync { - provider.tx_ref().clear::()?; + if use_rocksdb { + // Note: RocksDB clear() executes immediately (not deferred to commit like MDBX), + // but this is safe for first_sync because if we crash before commit, the + // checkpoint stays at 0 and we'll just clear and rebuild again on restart. The + // source data (changesets) is intact. + #[cfg(all(unix, feature = "rocksdb"))] + provider.rocksdb_provider().clear::()?; + } else { + provider.tx_ref().clear::()?; + } range = 0..=*input.next_block_range().end(); } - info!(target: "sync::stages::index_account_history::exec", ?first_sync, "Collecting indices"); + info!(target: "sync::stages::index_account_history::exec", ?first_sync, ?use_rocksdb, "Collecting indices"); let collector = if provider.cached_storage_settings().account_changesets_in_static_files { // Use the provider-based collection that can read from static files. 
@@ -125,14 +135,13 @@ where }; info!(target: "sync::stages::index_account_history::exec", "Loading indices into database"); - load_history_indices::<_, tables::AccountsHistory, _>( - provider, - collector, - first_sync, - ShardedKey::new, - ShardedKey::
<Address>
::decode_owned, - |key| key.key, - )?; + + provider.with_rocksdb_batch(|rocksdb_batch| { + let mut writer = EitherWriter::new_accounts_history(provider, rocksdb_batch)?; + load_account_history(collector, first_sync, &mut writer) + .map_err(|e| reth_provider::ProviderError::other(Box::new(e)))?; + Ok(((), writer.into_raw_rocksdb_batch())) + })?; Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true }) } @@ -160,7 +169,7 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; - use alloy_primitives::{address, BlockNumber, B256}; + use alloy_primitives::{address, Address, BlockNumber, B256}; use itertools::Itertools; use reth_db_api::{ cursor::DbCursorRO, @@ -646,4 +655,169 @@ mod tests { Ok(()) } } + + #[cfg(all(unix, feature = "rocksdb"))] + mod rocksdb_tests { + use super::*; + use reth_provider::RocksDBProviderFactory; + use reth_storage_api::StorageSettings; + + /// Test that when `account_history_in_rocksdb` is enabled, the stage + /// writes account history indices to `RocksDB` instead of MDBX. 
+ #[tokio::test] + async fn execute_writes_to_rocksdb_when_enabled() { + // init + let db = TestStageDB::default(); + + // Enable RocksDB for account history + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_account_history_in_rocksdb(true), + ); + + db.commit(|tx| { + for block in 0..=10 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::(block, acc())?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(10), ..Default::default() }; + let mut stage = IndexAccountHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }); + provider.commit().unwrap(); + + // Verify MDBX table is empty (data should be in RocksDB) + let mdbx_table = db.table::().unwrap(); + assert!( + mdbx_table.is_empty(), + "MDBX AccountsHistory should be empty when RocksDB is enabled" + ); + + // Verify RocksDB has the data + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should contain account history"); + + let block_list = result.unwrap(); + let blocks: Vec = block_list.iter().collect(); + assert_eq!(blocks, (0..=10).collect::>()); + } + + /// Test that unwind works correctly when `account_history_in_rocksdb` is enabled. 
+ #[tokio::test] + async fn unwind_works_when_rocksdb_enabled() { + let db = TestStageDB::default(); + + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_account_history_in_rocksdb(true), + ); + + db.commit(|tx| { + for block in 0..=10 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::(block, acc())?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(10), ..Default::default() }; + let mut stage = IndexAccountHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }); + provider.commit().unwrap(); + + // Verify RocksDB has blocks 0-10 before unwind + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should have data before unwind"); + let blocks_before: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks_before, (0..=10).collect::>()); + + // Unwind to block 5 (remove blocks 6-10) + let unwind_input = + UnwindInput { checkpoint: StageCheckpoint::new(10), unwind_to: 5, bad_block: None }; + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.unwind(&provider, unwind_input).unwrap(); + assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(5) }); + provider.commit().unwrap(); + + // Verify RocksDB now only has blocks 0-5 (blocks 6-10 removed) + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should still have data after unwind"); + let blocks_after: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks_after, (0..=5).collect::>(), "Should only have blocks 0-5"); + } + + /// Test incremental sync merges new data with existing shards. 
+ #[tokio::test] + async fn execute_incremental_sync() { + let db = TestStageDB::default(); + + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_account_history_in_rocksdb(true), + ); + + db.commit(|tx| { + for block in 0..=5 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::(block, acc())?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(5), ..Default::default() }; + let mut stage = IndexAccountHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some()); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, (0..=5).collect::>()); + + db.commit(|tx| { + for block in 6..=10 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::(block, acc())?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(10), checkpoint: Some(StageCheckpoint::new(5)) }; + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should have merged data"); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, (0..=10).collect::>()); + } + } } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 82760a09e25..7d7d5612b9f 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ 
b/crates/stages/stages/src/stages/utils.rs @@ -4,13 +4,14 @@ use reth_config::config::EtlConfig; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, models::{sharded_key::NUM_OF_INDICES_IN_SHARD, AccountBeforeTx, ShardedKey}, - table::{Decompress, Table}, + table::{Decode, Decompress, Table}, transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, }; use reth_etl::Collector; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - providers::StaticFileProvider, to_range, BlockReader, DBProvider, ProviderError, + providers::StaticFileProvider, to_range, BlockReader, DBProvider, EitherWriter, ProviderError, StaticFileProviderFactory, }; use reth_stages_api::StageError; @@ -108,7 +109,7 @@ where for (address, indices) in cache { insert_fn(address, indices)? } - Ok::<(), StageError>(()) + Ok(()) } /// Collects account history indices using a provider that implements `ChangeSetReader`. @@ -124,12 +125,12 @@ where let mut cache: HashMap> = HashMap::default(); let mut insert_fn = |address: Address, indices: Vec| { - let last = indices.last().expect("qed"); + let last = indices.last().expect("indices is non-empty"); collector.insert( ShardedKey::new(address, *last), BlockNumberList::new_pre_sorted(indices.into_iter()), )?; - Ok::<(), StageError>(()) + Ok(()) }; // Convert range bounds to concrete range @@ -320,6 +321,183 @@ impl LoadMode { } } +/// Loads account history indices into the database via `EitherWriter`. +/// +/// Similar to [`load_history_indices`] but works with [`EitherWriter`] to support +/// both MDBX and `RocksDB` backends. +/// +/// ## Process +/// Iterates over elements, grouping indices by their address. It flushes indices to disk +/// when reaching a shard's max length (`NUM_OF_INDICES_IN_SHARD`) or when the address changes, +/// ensuring the last previous address shard is stored. +/// +/// Uses `Option
` instead of `Address::default()` as the sentinel to avoid +/// incorrectly treating `Address::ZERO` as "no previous address". +pub(crate) fn load_account_history( + mut collector: Collector, BlockNumberList>, + append_only: bool, + writer: &mut EitherWriter<'_, CURSOR, N>, +) -> Result<(), StageError> +where + N: NodePrimitives, + CURSOR: DbCursorRW + + DbCursorRO, +{ + let mut current_address: Option
= None; + // Accumulator for block numbers where the current address changed. + let mut current_list = Vec::::new(); + + let total_entries = collector.len(); + let interval = (total_entries / 10).max(1); + + for (index, element) in collector.iter()?.enumerate() { + let (k, v) = element?; + let sharded_key = ShardedKey::
::decode_owned(k)?; + let new_list = BlockNumberList::decompress_owned(v)?; + + if index > 0 && index.is_multiple_of(interval) && total_entries > 10 { + info!(target: "sync::stages::index_history", progress = %format!("{:.2}%", (index as f64 / total_entries as f64) * 100.0), "Writing indices"); + } + + let address = sharded_key.key; + + // When address changes, flush the previous address's shards and start fresh. + if current_address != Some(address) { + // Flush all remaining shards for the previous address (uses u64::MAX for last shard). + if let Some(prev_addr) = current_address { + flush_account_history_shards(prev_addr, &mut current_list, append_only, writer)?; + } + + current_address = Some(address); + current_list.clear(); + + // On incremental sync, merge with the existing last shard from the database. + // The last shard is stored with key (address, u64::MAX) so we can find it. + if !append_only && + let Some(last_shard) = writer.get_last_account_history_shard(address)? + { + current_list.extend(last_shard.iter()); + } + } + + // Append new block numbers to the accumulator. + current_list.extend(new_list.iter()); + + // Flush complete shards, keeping the last (partial) shard buffered. + flush_account_history_shards_partial(address, &mut current_list, append_only, writer)?; + } + + // Flush the final address's remaining shard. + if let Some(addr) = current_address { + flush_account_history_shards(addr, &mut current_list, append_only, writer)?; + } + + Ok(()) +} + +/// Flushes complete shards for account history, keeping the trailing partial shard buffered. +/// +/// Only flushes when we have more than one shard's worth of data, keeping the last +/// (possibly partial) shard for continued accumulation. This avoids writing a shard +/// that may need to be updated when more indices arrive. +/// +/// Equivalent to [`load_indices`] with [`LoadMode::KeepLast`]. 
+fn flush_account_history_shards_partial( + address: Address, + list: &mut Vec, + append_only: bool, + writer: &mut EitherWriter<'_, CURSOR, N>, +) -> Result<(), StageError> +where + N: NodePrimitives, + CURSOR: DbCursorRW + + DbCursorRO, +{ + // Nothing to flush if we haven't filled a complete shard yet. + if list.len() <= NUM_OF_INDICES_IN_SHARD { + return Ok(()); + } + + let num_full_shards = list.len() / NUM_OF_INDICES_IN_SHARD; + + // Always keep at least one shard buffered for continued accumulation. + // If len is exact multiple of shard size, keep the last full shard. + let shards_to_flush = if list.len().is_multiple_of(NUM_OF_INDICES_IN_SHARD) { + num_full_shards - 1 + } else { + num_full_shards + }; + + if shards_to_flush == 0 { + return Ok(()); + } + + // Split: flush the first N shards, keep the remainder buffered. + let flush_len = shards_to_flush * NUM_OF_INDICES_IN_SHARD; + let remainder = list.split_off(flush_len); + + // Write each complete shard with its highest block number as the key. + for chunk in list.chunks(NUM_OF_INDICES_IN_SHARD) { + let highest = *chunk.last().expect("chunk is non-empty"); + let key = ShardedKey::new(address, highest); + let value = BlockNumberList::new_pre_sorted(chunk.iter().copied()); + + if append_only { + writer.append_account_history(key, &value)?; + } else { + writer.upsert_account_history(key, &value)?; + } + } + + // Keep the remaining indices for the next iteration. + *list = remainder; + Ok(()) +} + +/// Flushes all remaining shards for account history, using `u64::MAX` for the last shard. +/// +/// The `u64::MAX` key for the final shard is an invariant that allows `seek_exact(address, +/// u64::MAX)` to find the last shard during incremental sync for merging with new indices. +/// +/// Equivalent to [`load_indices`] with [`LoadMode::Flush`]. 
+fn flush_account_history_shards( + address: Address, + list: &mut Vec, + append_only: bool, + writer: &mut EitherWriter<'_, CURSOR, N>, +) -> Result<(), StageError> +where + N: NodePrimitives, + CURSOR: DbCursorRW + + DbCursorRO, +{ + if list.is_empty() { + return Ok(()); + } + + let num_chunks = list.len().div_ceil(NUM_OF_INDICES_IN_SHARD); + + for (i, chunk) in list.chunks(NUM_OF_INDICES_IN_SHARD).enumerate() { + let is_last = i == num_chunks - 1; + + // Use u64::MAX for the final shard's key. This invariant allows incremental sync + // to find the last shard via seek_exact(address, u64::MAX) for merging. + let highest = if is_last { u64::MAX } else { *chunk.last().expect("chunk is non-empty") }; + + let key = ShardedKey::new(address, highest); + let value = BlockNumberList::new_pre_sorted(chunk.iter().copied()); + + if append_only { + writer.append_account_history(key, &value)?; + } else { + writer.upsert_account_history(key, &value)?; + } + } + + list.clear(); + Ok(()) +} + /// Called when database is ahead of static files. Attempts to find the first block we are missing /// transactions for. pub(crate) fn missing_static_data_error( diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs index 5cc79d85227..16eced90dd6 100644 --- a/crates/storage/provider/src/either_writer.rs +++ b/crates/storage/provider/src/either_writer.rs @@ -518,8 +518,22 @@ impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> where CURSOR: DbCursorRW + DbCursorRO, { - /// Puts an account history entry. - pub fn put_account_history( + /// Appends an account history entry (for first sync - more efficient). + pub fn append_account_history( + &mut self, + key: ShardedKey
, + value: &BlockNumberList, + ) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.append(key, value)?), + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.put::(key, value), + } + } + + /// Upserts an account history entry (for incremental sync). + pub fn upsert_account_history( &mut self, key: ShardedKey
, value: &BlockNumberList, @@ -532,6 +546,21 @@ where } } + /// Gets the last shard for an address (keyed with `u64::MAX`). + pub fn get_last_account_history_shard( + &mut self, + address: Address, + ) -> ProviderResult> { + match self { + Self::Database(cursor) => { + Ok(cursor.seek_exact(ShardedKey::last(address))?.map(|(_, v)| v)) + } + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.get::(ShardedKey::last(address)), + } + } + /// Deletes an account history entry. pub fn delete_account_history(&mut self, key: ShardedKey
) -> ProviderResult<()> { match self { @@ -1266,8 +1295,8 @@ mod rocksdb_tests { for (highest_block, blocks) in shards { let key = ShardedKey::new(address, *highest_block); let value = IntegerList::new(blocks.clone()).unwrap(); - mdbx_writer.put_account_history(key.clone(), &value).unwrap(); - rocks_writer.put_account_history(key, &value).unwrap(); + mdbx_writer.upsert_account_history(key.clone(), &value).unwrap(); + rocks_writer.upsert_account_history(key, &value).unwrap(); } // Commit both backends diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 0fe7d854720..a8032ae66a3 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2939,25 +2939,33 @@ impl HistoryWriter for DatabaseProvi .into_iter() .map(|(index, account)| (account.address, *index)) .collect::>(); - last_indices.sort_by_key(|(a, _)| *a); + last_indices.sort_unstable_by_key(|(a, _)| *a); - // Unwind the account history index. - let mut cursor = self.tx.cursor_write::()?; - for &(address, rem_index) in &last_indices { - let partial_shard = unwind_history_shards::<_, tables::AccountsHistory, _>( - &mut cursor, - ShardedKey::last(address), - rem_index, - |sharded_key| sharded_key.key == address, - )?; - - // Check the last returned partial shard. - // If it's not empty, the shard needs to be reinserted. - if !partial_shard.is_empty() { - cursor.insert( + if self.cached_storage_settings().account_history_in_rocksdb { + #[cfg(all(unix, feature = "rocksdb"))] + { + let batch = self.rocksdb_provider.unwind_account_history_indices(&last_indices)?; + self.pending_rocksdb_batches.lock().push(batch); + } + } else { + // Unwind the account history index in MDBX. 
+ let mut cursor = self.tx.cursor_write::()?; + for &(address, rem_index) in &last_indices { + let partial_shard = unwind_history_shards::<_, tables::AccountsHistory, _>( + &mut cursor, ShardedKey::last(address), - &BlockNumberList::new_pre_sorted(partial_shard), + rem_index, + |sharded_key| sharded_key.key == address, )?; + + // Check the last returned partial shard. + // If it's not empty, the shard needs to be reinserted. + if !partial_shard.is_empty() { + cursor.insert( + ShardedKey::last(address), + &BlockNumberList::new_pre_sorted(partial_shard), + )?; + } } } diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index cc427fcb8b8..1a1ecbd8f6c 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -25,7 +25,7 @@ use rocksdb::{ OptimisticTransactionOptions, Options, Transaction, WriteBatchWithTransaction, WriteOptions, }; use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashMap}, fmt, path::{Path, PathBuf}, sync::Arc, @@ -430,6 +430,24 @@ impl RocksDBProvider { }) } + /// Clears all entries from the specified table. + /// + /// Uses `delete_range_cf` from empty key to a max key (256 bytes of 0xFF). + /// This end key must exceed the maximum encoded key size for any table. + /// Current max is ~60 bytes (`StorageShardedKey` = 20 + 32 + 8). + pub fn clear(&self) -> ProviderResult<()> { + let cf = self.get_cf_handle::()?; + + self.0.db.delete_range_cf(cf, &[] as &[u8], &[0xFF; 256]).map_err(|e| { + ProviderError::Database(DatabaseError::Delete(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + })?; + + Ok(()) + } + /// Gets the first (smallest key) entry from the specified table. 
pub fn first(&self) -> ProviderResult> { self.execute_with_operation_metric(RocksDBOperation::Get, T::NAME, |this| { @@ -489,6 +507,90 @@ impl RocksDBProvider { Ok(RocksDBIter { inner: iter, _marker: std::marker::PhantomData }) } + /// Returns all account history shards for the given address in ascending key order. + /// + /// This is used for unwind operations where we need to scan all shards for an address + /// and potentially delete or truncate them. + pub fn account_history_shards( + &self, + address: Address, + ) -> ProviderResult, BlockNumberList)>> { + // Get the column family handle for the AccountsHistory table. + let cf = self.get_cf_handle::()?; + + // Build a seek key starting at the first shard (highest_block_number = 0) for this address. + // ShardedKey is (address, highest_block_number) so this positions us at the beginning. + let start_key = ShardedKey::new(address, 0u64); + let start_bytes = start_key.encode(); + + // Create a forward iterator starting from our seek position. + let iter = self + .0 + .db + .iterator_cf(cf, IteratorMode::From(start_bytes.as_ref(), rocksdb::Direction::Forward)); + + let mut result = Vec::new(); + for item in iter { + match item { + Ok((key_bytes, value_bytes)) => { + // Decode the sharded key to check if we're still on the same address. + let key = ShardedKey::
::decode(&key_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + + // Stop when we reach a different address (keys are sorted by address first). + if key.key != address { + break; + } + + // Decompress the block number list stored in this shard. + let value = BlockNumberList::decompress(&value_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + + result.push((key, value)); + } + Err(e) => { + return Err(ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + }))); + } + } + } + + Ok(result) + } + + /// Unwinds account history indices for the given `(address, block_number)` pairs. + /// + /// Groups addresses by their minimum block number and calls the appropriate unwind + /// operations. For each address, keeps only blocks less than the minimum block + /// (i.e., removes the minimum block and all higher blocks). + /// + /// Returns a `WriteBatchWithTransaction` that can be committed later. + pub fn unwind_account_history_indices( + &self, + last_indices: &[(Address, BlockNumber)], + ) -> ProviderResult> { + let mut address_min_block: HashMap = + HashMap::with_capacity_and_hasher(last_indices.len(), Default::default()); + for &(address, block_number) in last_indices { + address_min_block + .entry(address) + .and_modify(|min| *min = (*min).min(block_number)) + .or_insert(block_number); + } + + let mut batch = self.batch(); + for (address, min_block) in address_min_block { + match min_block.checked_sub(1) { + Some(keep_to) => batch.unwind_account_history_to(address, keep_to)?, + None => batch.clear_account_history(address)?, + } + } + + Ok(batch.into_inner()) + } + /// Writes a batch of operations atomically. pub fn write_batch(&self, f: F) -> ProviderResult<()> where @@ -719,6 +821,14 @@ impl<'a> RocksDBBatch<'a> { self.inner } + /// Gets a value from the database. 
+ /// + /// **Important constraint:** This reads only committed state, not pending writes in this + /// batch or other pending batches in `pending_rocksdb_batches`. + pub fn get(&self, key: T::Key) -> ProviderResult> { + self.provider.get::(key) + } + /// Appends indices to an account history shard with proper shard management. /// /// Loads the existing shard (if any), appends new indices, and rechunks into @@ -841,6 +951,91 @@ impl<'a> RocksDBBatch<'a> { Ok(()) } + + /// Unwinds account history for the given address, keeping only blocks <= `keep_to`. + /// + /// Mirrors MDBX `unwind_history_shards` behavior: + /// - Deletes shards entirely above `keep_to` + /// - Truncates boundary shards and re-keys to `u64::MAX` sentinel + /// - Preserves shards entirely below `keep_to` + pub fn unwind_account_history_to( + &mut self, + address: Address, + keep_to: BlockNumber, + ) -> ProviderResult<()> { + let shards = self.provider.account_history_shards(address)?; + if shards.is_empty() { + return Ok(()); + } + + // Find the first shard that might contain blocks > keep_to. 
+ // A shard is affected if it's the sentinel (u64::MAX) or its highest_block_number > keep_to + let boundary_idx = shards.iter().position(|(key, _)| { + key.highest_block_number == u64::MAX || key.highest_block_number > keep_to + }); + + // Repair path: no shards affected means all blocks <= keep_to, just ensure sentinel exists + let Some(boundary_idx) = boundary_idx else { + let (last_key, last_value) = shards.last().expect("shards is non-empty"); + if last_key.highest_block_number != u64::MAX { + self.delete::(last_key.clone())?; + self.put::( + ShardedKey::new(address, u64::MAX), + last_value, + )?; + } + return Ok(()); + }; + + // Delete all shards strictly after the boundary (they are entirely > keep_to) + for (key, _) in shards.iter().skip(boundary_idx + 1) { + self.delete::(key.clone())?; + } + + // Process the boundary shard: filter out blocks > keep_to + let (boundary_key, boundary_list) = &shards[boundary_idx]; + + // Delete the boundary shard (we'll either drop it or rewrite at u64::MAX) + self.delete::(boundary_key.clone())?; + + // Build truncated list once; check emptiness directly (avoids double iteration) + let new_last = + BlockNumberList::new_pre_sorted(boundary_list.iter().take_while(|&b| b <= keep_to)); + + if new_last.is_empty() { + // Boundary shard is now empty. Previous shard becomes the last and must be keyed + // u64::MAX. + if boundary_idx == 0 { + // Nothing left for this address + return Ok(()); + } + + let (prev_key, prev_value) = &shards[boundary_idx - 1]; + if prev_key.highest_block_number != u64::MAX { + self.delete::(prev_key.clone())?; + self.put::( + ShardedKey::new(address, u64::MAX), + prev_value, + )?; + } + return Ok(()); + } + + self.put::(ShardedKey::new(address, u64::MAX), &new_last)?; + + Ok(()) + } + + /// Clears all account history shards for the given address. + /// + /// Used when unwinding from block 0 (i.e., removing all history). 
+ pub fn clear_account_history(&mut self, address: Address) -> ProviderResult<()> { + let shards = self.provider.account_history_shards(address)?; + for (key, _) in shards { + self.delete::(key)?; + } + Ok(()) + } } /// `RocksDB` transaction wrapper providing MDBX-like semantics. @@ -1720,4 +1915,318 @@ mod tests { "sentinel shard should exist" ); } + + #[test] + fn test_clear_table() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + let key = ShardedKey::new(address, u64::MAX); + let blocks = BlockNumberList::new_pre_sorted([1, 2, 3]); + + provider.put::(key.clone(), &blocks).unwrap(); + assert!(provider.get::(key.clone()).unwrap().is_some()); + + provider.clear::().unwrap(); + + assert!( + provider.get::(key).unwrap().is_none(), + "table should be empty after clear" + ); + assert!( + provider.first::().unwrap().is_none(), + "first() should return None after clear" + ); + } + + #[test] + fn test_clear_empty_table() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + assert!(provider.first::().unwrap().is_none()); + + provider.clear::().unwrap(); + + assert!(provider.first::().unwrap().is_none()); + } + + #[test] + fn test_unwind_account_history_to_basic() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Add blocks 0-10 + let mut batch = provider.batch(); + batch.append_account_history_shard(address, 0..=10).unwrap(); + batch.commit().unwrap(); + + // Verify we have blocks 0-10 + let key = ShardedKey::new(address, u64::MAX); + let result = provider.get::(key.clone()).unwrap(); + assert!(result.is_some()); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, (0..=10).collect::>()); + + 
// Unwind to block 5 (keep blocks 0-5, remove 6-10) + let mut batch = provider.batch(); + batch.unwind_account_history_to(address, 5).unwrap(); + batch.commit().unwrap(); + + // Verify only blocks 0-5 remain + let result = provider.get::(key).unwrap(); + assert!(result.is_some()); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, (0..=5).collect::>()); + } + + #[test] + fn test_unwind_account_history_to_removes_all() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Add blocks 5-10 + let mut batch = provider.batch(); + batch.append_account_history_shard(address, 5..=10).unwrap(); + batch.commit().unwrap(); + + // Unwind to block 4 (removes all blocks since they're all > 4) + let mut batch = provider.batch(); + batch.unwind_account_history_to(address, 4).unwrap(); + batch.commit().unwrap(); + + // Verify no data remains for this address + let key = ShardedKey::new(address, u64::MAX); + let result = provider.get::(key).unwrap(); + assert!(result.is_none(), "Should have no data after full unwind"); + } + + #[test] + fn test_unwind_account_history_to_no_op() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Add blocks 0-5 + let mut batch = provider.batch(); + batch.append_account_history_shard(address, 0..=5).unwrap(); + batch.commit().unwrap(); + + // Unwind to block 10 (no-op since all blocks are <= 10) + let mut batch = provider.batch(); + batch.unwind_account_history_to(address, 10).unwrap(); + batch.commit().unwrap(); + + // Verify blocks 0-5 still remain + let key = ShardedKey::new(address, u64::MAX); + let result = provider.get::(key).unwrap(); + assert!(result.is_some()); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, 
(0..=5).collect::>()); + } + + #[test] + fn test_unwind_account_history_to_block_zero() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Add blocks 0-5 (including block 0) + let mut batch = provider.batch(); + batch.append_account_history_shard(address, 0..=5).unwrap(); + batch.commit().unwrap(); + + // Unwind to block 0 (keep only block 0, remove 1-5) + // This simulates the caller doing: unwind_to = min_block.checked_sub(1) where min_block = 1 + let mut batch = provider.batch(); + batch.unwind_account_history_to(address, 0).unwrap(); + batch.commit().unwrap(); + + // Verify only block 0 remains + let key = ShardedKey::new(address, u64::MAX); + let result = provider.get::(key).unwrap(); + assert!(result.is_some()); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, vec![0]); + } + + #[test] + fn test_unwind_account_history_to_multi_shard() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Create multiple shards by adding more than NUM_OF_INDICES_IN_SHARD entries + // For testing, we'll manually create shards with specific keys + let mut batch = provider.batch(); + + // First shard: blocks 1-50, keyed by 50 + let shard1 = BlockNumberList::new_pre_sorted(1..=50); + batch.put::(ShardedKey::new(address, 50), &shard1).unwrap(); + + // Second shard: blocks 51-100, keyed by MAX (sentinel) + let shard2 = BlockNumberList::new_pre_sorted(51..=100); + batch.put::(ShardedKey::new(address, u64::MAX), &shard2).unwrap(); + + batch.commit().unwrap(); + + // Verify we have 2 shards + let shards = provider.account_history_shards(address).unwrap(); + assert_eq!(shards.len(), 2); + + // Unwind to block 75 (keep 1-75, remove 76-100) + let mut batch = provider.batch(); + 
batch.unwind_account_history_to(address, 75).unwrap(); + batch.commit().unwrap(); + + // Verify: shard1 should be untouched, shard2 should be truncated + let shards = provider.account_history_shards(address).unwrap(); + assert_eq!(shards.len(), 2); + + // First shard unchanged + assert_eq!(shards[0].0.highest_block_number, 50); + assert_eq!(shards[0].1.iter().collect::>(), (1..=50).collect::>()); + + // Second shard truncated and re-keyed to MAX + assert_eq!(shards[1].0.highest_block_number, u64::MAX); + assert_eq!(shards[1].1.iter().collect::>(), (51..=75).collect::>()); + } + + #[test] + fn test_unwind_account_history_to_multi_shard_boundary_empty() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Create two shards + let mut batch = provider.batch(); + + // First shard: blocks 1-50, keyed by 50 + let shard1 = BlockNumberList::new_pre_sorted(1..=50); + batch.put::(ShardedKey::new(address, 50), &shard1).unwrap(); + + // Second shard: blocks 75-100, keyed by MAX + let shard2 = BlockNumberList::new_pre_sorted(75..=100); + batch.put::(ShardedKey::new(address, u64::MAX), &shard2).unwrap(); + + batch.commit().unwrap(); + + // Unwind to block 60 (removes all of shard2 since 75 > 60, promotes shard1 to MAX) + let mut batch = provider.batch(); + batch.unwind_account_history_to(address, 60).unwrap(); + batch.commit().unwrap(); + + // Verify: only shard1 remains, now keyed as MAX + let shards = provider.account_history_shards(address).unwrap(); + assert_eq!(shards.len(), 1); + assert_eq!(shards[0].0.highest_block_number, u64::MAX); + assert_eq!(shards[0].1.iter().collect::>(), (1..=50).collect::>()); + } + + #[test] + fn test_account_history_shards_iterator() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 
20]); + let other_address = Address::from([0x43; 20]); + + // Add data for two addresses + let mut batch = provider.batch(); + batch.append_account_history_shard(address, 0..=5).unwrap(); + batch.append_account_history_shard(other_address, 10..=15).unwrap(); + batch.commit().unwrap(); + + // Query shards for first address only + let shards = provider.account_history_shards(address).unwrap(); + assert_eq!(shards.len(), 1); + assert_eq!(shards[0].0.key, address); + + // Query shards for second address only + let shards = provider.account_history_shards(other_address).unwrap(); + assert_eq!(shards.len(), 1); + assert_eq!(shards[0].0.key, other_address); + + // Query shards for non-existent address + let non_existent = Address::from([0x99; 20]); + let shards = provider.account_history_shards(non_existent).unwrap(); + assert!(shards.is_empty()); + } + + #[test] + fn test_clear_account_history() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Add blocks 0-10 + let mut batch = provider.batch(); + batch.append_account_history_shard(address, 0..=10).unwrap(); + batch.commit().unwrap(); + + // Clear all history (simulates unwind from block 0) + let mut batch = provider.batch(); + batch.clear_account_history(address).unwrap(); + batch.commit().unwrap(); + + // Verify no data remains + let shards = provider.account_history_shards(address).unwrap(); + assert!(shards.is_empty(), "All shards should be deleted"); + } + + #[test] + fn test_unwind_non_sentinel_boundary() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + let address = Address::from([0x42; 20]); + + // Create three shards with non-sentinel boundary + let mut batch = provider.batch(); + + // Shard 1: blocks 1-50, keyed by 50 + let shard1 = BlockNumberList::new_pre_sorted(1..=50); + 
batch.put::(ShardedKey::new(address, 50), &shard1).unwrap(); + + // Shard 2: blocks 51-100, keyed by 100 (non-sentinel, will be boundary) + let shard2 = BlockNumberList::new_pre_sorted(51..=100); + batch.put::(ShardedKey::new(address, 100), &shard2).unwrap(); + + // Shard 3: blocks 101-150, keyed by MAX (will be deleted) + let shard3 = BlockNumberList::new_pre_sorted(101..=150); + batch.put::(ShardedKey::new(address, u64::MAX), &shard3).unwrap(); + + batch.commit().unwrap(); + + // Verify 3 shards + let shards = provider.account_history_shards(address).unwrap(); + assert_eq!(shards.len(), 3); + + // Unwind to block 75 (truncates shard2, deletes shard3) + let mut batch = provider.batch(); + batch.unwind_account_history_to(address, 75).unwrap(); + batch.commit().unwrap(); + + // Verify: shard1 unchanged, shard2 truncated and re-keyed to MAX, shard3 deleted + let shards = provider.account_history_shards(address).unwrap(); + assert_eq!(shards.len(), 2); + + // First shard unchanged + assert_eq!(shards[0].0.highest_block_number, 50); + assert_eq!(shards[0].1.iter().collect::>(), (1..=50).collect::>()); + + // Second shard truncated and re-keyed to MAX + assert_eq!(shards[1].0.highest_block_number, u64::MAX); + assert_eq!(shards[1].1.iter().collect::>(), (51..=75).collect::>()); + } } diff --git a/crates/storage/provider/src/traits/rocksdb_provider.rs b/crates/storage/provider/src/traits/rocksdb_provider.rs index 3394fa16f67..b4abacd86e5 100644 --- a/crates/storage/provider/src/traits/rocksdb_provider.rs +++ b/crates/storage/provider/src/traits/rocksdb_provider.rs @@ -1,4 +1,7 @@ -use crate::{either_writer::RocksTxRefArg, providers::RocksDBProvider}; +use crate::{ + either_writer::{RawRocksDBBatch, RocksBatchArg, RocksTxRefArg}, + providers::RocksDBProvider, +}; use reth_storage_errors::provider::ProviderResult; /// `RocksDB` provider factory. 
@@ -31,4 +34,28 @@ pub trait RocksDBProviderFactory { #[cfg(not(all(unix, feature = "rocksdb")))] f(()) } + + /// Executes a closure with a `RocksDB` batch, automatically registering it for commit. + /// + /// This helper encapsulates all the cfg-gated `RocksDB` batch handling. + fn with_rocksdb_batch(&self, f: F) -> ProviderResult + where + F: FnOnce(RocksBatchArg<'_>) -> ProviderResult<(R, Option)>, + { + #[cfg(all(unix, feature = "rocksdb"))] + { + let rocksdb = self.rocksdb_provider(); + let batch = rocksdb.batch(); + let (result, raw_batch) = f(batch)?; + if let Some(b) = raw_batch { + self.set_pending_rocksdb_batch(b); + } + Ok(result) + } + #[cfg(not(all(unix, feature = "rocksdb")))] + { + let (result, _) = f(())?; + Ok(result) + } + } } From bb39cba50411bc76b91b1afba7dcdaccde4b1134 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 20 Jan 2026 14:29:48 +0000 Subject: [PATCH 085/267] ci: partition bench codspeed job (#20332) Co-authored-by: Matthias Seitz --- .github/scripts/codspeed-build.sh | 14 -------------- .github/workflows/bench.yml | 14 ++++++++++++-- 2 files changed, 12 insertions(+), 16 deletions(-) delete mode 100755 .github/scripts/codspeed-build.sh diff --git a/.github/scripts/codspeed-build.sh b/.github/scripts/codspeed-build.sh deleted file mode 100755 index 9976a3314c9..00000000000 --- a/.github/scripts/codspeed-build.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# TODO: Benchmarks run WAY too slow due to excessive amount of iterations. 
- -cmd=(cargo codspeed build --profile profiling) -crates=( - -p reth-primitives - -p reth-trie - -p reth-trie-common - -p reth-trie-sparse -) - -"${cmd[@]}" --features test-utils "${crates[@]}" diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 264b1059ab1..886919a9e5d 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -17,6 +17,16 @@ name: bench jobs: codspeed: runs-on: depot-ubuntu-latest + strategy: + matrix: + partition: [1, 2] + total_partitions: [2] + include: + - partition: 1 + crates: "-p reth-primitives -p reth-trie-common -p reth-trie-sparse" + - partition: 2 + crates: "-p reth-trie" + name: codspeed (${{ matrix.partition }}/${{ matrix.total_partitions }}) steps: - uses: actions/checkout@v6 with: @@ -32,10 +42,10 @@ jobs: with: tool: cargo-codspeed - name: Build the benchmark target(s) - run: ./.github/scripts/codspeed-build.sh + run: cargo codspeed build --profile profiling --features test-utils ${{ matrix.crates }} - name: Run the benchmarks uses: CodSpeedHQ/action@v4 with: - run: cargo codspeed run --workspace + run: cargo codspeed run ${{ matrix.crates }} mode: instrumentation token: ${{ secrets.CODSPEED_TOKEN }} From d002dacc13f55d20a41746766d9c4c4e9f5cb80c Mon Sep 17 00:00:00 2001 From: ethfanWilliam Date: Tue, 20 Jan 2026 19:06:26 +0400 Subject: [PATCH 086/267] chore: remove deprecated and unused ExecuteOutput struct (#20887) Co-authored-by: Matthias Seitz --- crates/evm/evm/src/execute.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index e70db5296b0..866099a996d 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -148,20 +148,6 @@ pub trait Executor: Sized { fn size_hint(&self) -> usize; } -/// Helper type for the output of executing a block. -/// -/// Deprecated: this type is unused within reth and will be removed in the next -/// major release. 
Use `reth_execution_types::BlockExecutionResult` or -/// `reth_execution_types::BlockExecutionOutput`. -#[deprecated(note = "Use reth_execution_types::BlockExecutionResult or BlockExecutionOutput")] -#[derive(Debug, Clone)] -pub struct ExecuteOutput { - /// Receipts obtained after executing a block. - pub receipts: Vec, - /// Cumulative gas used in the block execution. - pub gas_used: u64, -} - /// Input for block building. Consumed by [`BlockAssembler`]. /// /// This struct contains all the data needed by the [`BlockAssembler`] to create From 5ef200eaad20c1930d5856a55ffb5ed5d9e37311 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 07:58:43 -0800 Subject: [PATCH 087/267] perf(db): stack-allocate ShardedKey and StorageShardedKey encoding (#21200) Co-authored-by: Amp --- Cargo.lock | 1 + crates/storage/db-api/Cargo.toml | 5 + .../db-api/benches/sharded_key_encode.rs | 142 ++++++++++++++++++ .../storage/db-api/src/models/sharded_key.rs | 70 +++++++-- .../db-api/src/models/storage_sharded_key.rs | 60 +++++++- 5 files changed, 263 insertions(+), 15 deletions(-) create mode 100644 crates/storage/db-api/benches/sharded_key_encode.rs diff --git a/Cargo.lock b/Cargo.lock index e4fc4fc741f..66fee9d2e21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8099,6 +8099,7 @@ dependencies = [ "alloy-primitives", "arbitrary", "bytes", + "codspeed-criterion-compat", "derive_more", "metrics", "modular-bitfield", diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 49e4c84f7a0..e25595f1ac7 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -60,6 +60,11 @@ test-fuzz.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true +criterion.workspace = true + +[[bench]] +name = "sharded_key_encode" +harness = false [features] test-utils = [ diff --git a/crates/storage/db-api/benches/sharded_key_encode.rs 
b/crates/storage/db-api/benches/sharded_key_encode.rs new file mode 100644 index 00000000000..5366e234e09 --- /dev/null +++ b/crates/storage/db-api/benches/sharded_key_encode.rs @@ -0,0 +1,142 @@ +//! Benchmarks for `ShardedKey` and `StorageShardedKey` encoding. +//! +//! These benchmarks measure the performance of stack-allocated vs heap-allocated key encoding, +//! inspired by Anza Labs' PR #3603 which saved ~20k allocations/sec by moving `RocksDB` keys +//! from heap to stack. +//! +//! Run with: `cargo bench -p reth-db-api --bench sharded_key_encode` + +#![allow(missing_docs)] + +use alloy_primitives::{Address, B256}; +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use reth_db_api::{ + models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + table::Encode, +}; + +/// Number of keys to encode per iteration for throughput measurement. +const BATCH_SIZE: usize = 10_000; + +fn bench_sharded_key_address_encode(c: &mut Criterion) { + let mut group = c.benchmark_group("sharded_key_encode"); + group.throughput(Throughput::Elements(BATCH_SIZE as u64)); + + // Pre-generate test data + let keys: Vec> = (0..BATCH_SIZE) + .map(|i| { + let mut addr_bytes = [0u8; 20]; + addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); + ShardedKey::new(Address::from(addr_bytes), i as u64) + }) + .collect(); + + group.bench_function("ShardedKey
::encode", |b| { + b.iter_batched( + || keys.clone(), + |keys| { + for key in keys { + let encoded = black_box(key.encode()); + black_box(encoded.as_ref()); + } + }, + BatchSize::SmallInput, + ) + }); + + group.finish(); +} + +fn bench_storage_sharded_key_encode(c: &mut Criterion) { + let mut group = c.benchmark_group("storage_sharded_key_encode"); + group.throughput(Throughput::Elements(BATCH_SIZE as u64)); + + // Pre-generate test data + let keys: Vec = (0..BATCH_SIZE) + .map(|i| { + let mut addr_bytes = [0u8; 20]; + addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); + let mut key_bytes = [0u8; 32]; + key_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); + StorageShardedKey::new(Address::from(addr_bytes), B256::from(key_bytes), i as u64) + }) + .collect(); + + group.bench_function("StorageShardedKey::encode", |b| { + b.iter_batched( + || keys.clone(), + |keys| { + for key in keys { + let encoded = black_box(key.encode()); + black_box(encoded.as_ref()); + } + }, + BatchSize::SmallInput, + ) + }); + + group.finish(); +} + +fn bench_encode_decode_roundtrip(c: &mut Criterion) { + use reth_db_api::table::Decode; + + let mut group = c.benchmark_group("sharded_key_roundtrip"); + group.throughput(Throughput::Elements(BATCH_SIZE as u64)); + + let keys: Vec> = (0..BATCH_SIZE) + .map(|i| { + let mut addr_bytes = [0u8; 20]; + addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); + ShardedKey::new(Address::from(addr_bytes), i as u64) + }) + .collect(); + + group.bench_function("ShardedKey
::encode_then_decode", |b| { + b.iter_batched( + || keys.clone(), + |keys| { + for key in keys { + let encoded = key.encode(); + let decoded = black_box(ShardedKey::
::decode(&encoded).unwrap()); + black_box(decoded); + } + }, + BatchSize::SmallInput, + ) + }); + + let storage_keys: Vec = (0..BATCH_SIZE) + .map(|i| { + let mut addr_bytes = [0u8; 20]; + addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); + let mut key_bytes = [0u8; 32]; + key_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); + StorageShardedKey::new(Address::from(addr_bytes), B256::from(key_bytes), i as u64) + }) + .collect(); + + group.bench_function("StorageShardedKey::encode_then_decode", |b| { + b.iter_batched( + || storage_keys.clone(), + |keys| { + for key in keys { + let encoded = key.encode(); + let decoded = black_box(StorageShardedKey::decode(&encoded).unwrap()); + black_box(decoded); + } + }, + BatchSize::SmallInput, + ) + }); + + group.finish(); +} + +criterion_group!( + benches, + bench_sharded_key_address_encode, + bench_storage_sharded_key_encode, + bench_encode_decode_roundtrip, +); +criterion_main!(benches); diff --git a/crates/storage/db-api/src/models/sharded_key.rs b/crates/storage/db-api/src/models/sharded_key.rs index fdd583f0f55..ae8b0cf5ae3 100644 --- a/crates/storage/db-api/src/models/sharded_key.rs +++ b/crates/storage/db-api/src/models/sharded_key.rs @@ -3,13 +3,16 @@ use crate::{ table::{Decode, Encode}, DatabaseError, }; -use alloy_primitives::BlockNumber; +use alloy_primitives::{Address, BlockNumber}; use serde::{Deserialize, Serialize}; use std::hash::Hash; /// Number of indices in one shard. pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; +/// Size of `BlockNumber` in bytes (u64 = 8 bytes). +const BLOCK_NUMBER_SIZE: usize = std::mem::size_of::(); + /// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data /// into different shards. Example: /// @@ -43,21 +46,68 @@ impl ShardedKey { } } -impl Encode for ShardedKey { - type Encoded = Vec; +/// Stack-allocated encoded key for `ShardedKey
`. +/// +/// This avoids heap allocation in hot database paths. The key layout is: +/// - 20 bytes: `Address` +/// - 8 bytes: `BlockNumber` (big-endian) +pub type ShardedKeyAddressEncoded = [u8; 20 + BLOCK_NUMBER_SIZE]; + +impl Encode for ShardedKey
{ + type Encoded = ShardedKeyAddressEncoded; + #[inline] fn encode(self) -> Self::Encoded { - let mut buf: Vec = Encode::encode(self.key).into(); - buf.extend_from_slice(&self.highest_block_number.to_be_bytes()); + let mut buf = [0u8; 20 + BLOCK_NUMBER_SIZE]; + buf[..20].copy_from_slice(self.key.as_slice()); + buf[20..].copy_from_slice(&self.highest_block_number.to_be_bytes()); buf } } -impl Decode for ShardedKey { +impl Decode for ShardedKey
{ fn decode(value: &[u8]) -> Result { - let (key, highest_tx_number) = value.split_last_chunk().ok_or(DatabaseError::Decode)?; - let key = T::decode(key)?; - let highest_tx_number = u64::from_be_bytes(*highest_tx_number); - Ok(Self::new(key, highest_tx_number)) + if value.len() != 20 + BLOCK_NUMBER_SIZE { + return Err(DatabaseError::Decode); + } + let key = Address::from_slice(&value[..20]); + let highest_block_number = + u64::from_be_bytes(value[20..].try_into().map_err(|_| DatabaseError::Decode)?); + Ok(Self::new(key, highest_block_number)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::address; + + #[test] + fn sharded_key_address_encode_decode_roundtrip() { + let addr = address!("0102030405060708091011121314151617181920"); + let block_num = 0x123456789ABCDEF0u64; + let key = ShardedKey::new(addr, block_num); + + let encoded = key.encode(); + + // Verify it's stack-allocated (28 bytes) + assert_eq!(encoded.len(), 28); + assert_eq!(std::mem::size_of_val(&encoded), 28); + + // Verify roundtrip (check against expected values since key was consumed) + let decoded = ShardedKey::
::decode(&encoded).unwrap(); + assert_eq!(decoded.key, address!("0102030405060708091011121314151617181920")); + assert_eq!(decoded.highest_block_number, 0x123456789ABCDEF0u64); + } + + #[test] + fn sharded_key_last_works() { + let addr = address!("0102030405060708091011121314151617181920"); + let key = ShardedKey::
::last(addr); + assert_eq!(key.highest_block_number, u64::MAX); + + let encoded = key.encode(); + let decoded = ShardedKey::
::decode(&encoded).unwrap(); + assert_eq!(decoded.highest_block_number, u64::MAX); } } diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index 6c7e40e2730..d9f724cdf52 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -16,6 +16,14 @@ pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; /// The fields are: 20-byte address, 32-byte key, and 8-byte block number const STORAGE_SHARD_KEY_BYTES_SIZE: usize = 20 + 32 + 8; +/// Stack-allocated encoded key for `StorageShardedKey`. +/// +/// This avoids heap allocation in hot database paths. The key layout is: +/// - 20 bytes: `Address` +/// - 32 bytes: `B256` storage key +/// - 8 bytes: `BlockNumber` (big-endian) +pub type StorageShardedKeyEncoded = [u8; STORAGE_SHARD_KEY_BYTES_SIZE]; + /// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data /// into different shards. 
Example: /// @@ -54,13 +62,14 @@ impl StorageShardedKey { } impl Encode for StorageShardedKey { - type Encoded = Vec; + type Encoded = StorageShardedKeyEncoded; + #[inline] fn encode(self) -> Self::Encoded { - let mut buf: Vec = Vec::with_capacity(STORAGE_SHARD_KEY_BYTES_SIZE); - buf.extend_from_slice(&Encode::encode(self.address)); - buf.extend_from_slice(&Encode::encode(self.sharded_key.key)); - buf.extend_from_slice(&self.sharded_key.highest_block_number.to_be_bytes()); + let mut buf = [0u8; STORAGE_SHARD_KEY_BYTES_SIZE]; + buf[..20].copy_from_slice(self.address.as_slice()); + buf[20..52].copy_from_slice(self.sharded_key.key.as_slice()); + buf[52..].copy_from_slice(&self.sharded_key.highest_block_number.to_be_bytes()); buf } } @@ -81,3 +90,44 @@ impl Decode for StorageShardedKey { Ok(Self { address, sharded_key: ShardedKey::new(storage_key, highest_block_number) }) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{address, b256}; + + #[test] + fn storage_sharded_key_encode_decode_roundtrip() { + let addr = address!("0102030405060708091011121314151617181920"); + let storage_key = b256!("0001020304050607080910111213141516171819202122232425262728293031"); + let block_num = 0x123456789ABCDEFu64; + let key = StorageShardedKey::new(addr, storage_key, block_num); + + let encoded = key.encode(); + + // Verify it's stack-allocated (60 bytes) + assert_eq!(encoded.len(), 60); + assert_eq!(std::mem::size_of_val(&encoded), 60); + + // Verify roundtrip (check against expected values since key was consumed) + let decoded = StorageShardedKey::decode(&encoded).unwrap(); + assert_eq!(decoded.address, address!("0102030405060708091011121314151617181920")); + assert_eq!( + decoded.sharded_key.key, + b256!("0001020304050607080910111213141516171819202122232425262728293031") + ); + assert_eq!(decoded.sharded_key.highest_block_number, 0x123456789ABCDEFu64); + } + + #[test] + fn storage_sharded_key_last_works() { + let addr = 
address!("0102030405060708091011121314151617181920"); + let storage_key = b256!("0001020304050607080910111213141516171819202122232425262728293031"); + let key = StorageShardedKey::last(addr, storage_key); + assert_eq!(key.sharded_key.highest_block_number, u64::MAX); + + let encoded = key.encode(); + let decoded = StorageShardedKey::decode(&encoded).unwrap(); + assert_eq!(decoded.sharded_key.highest_block_number, u64::MAX); + } +} From 39d5ae73e84521b817ee48878555a5a9b214b0ac Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 08:09:51 -0800 Subject: [PATCH 088/267] feat(storage): add read-only mode for RocksDB provider (#21210) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/cli/commands/src/common.rs | 2 +- .../src/providers/rocksdb/provider.rs | 284 ++++++++++++++---- .../provider/src/providers/rocksdb_stub.rs | 5 + 3 files changed, 232 insertions(+), 59 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 8864bbf0e3b..56d574f74cc 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -121,10 +121,10 @@ impl EnvironmentArgs { }) } }; - // TransactionDB only support read-write mode let rocksdb_provider = RocksDBProvider::builder(data_dir.rocksdb()) .with_default_tables() .with_database_log_level(self.db.log_level) + .with_read_only(!access.is_read_write()) .build()?; let provider_factory = diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 1a1ecbd8f6c..55c040f7f27 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -23,6 +23,7 @@ use rocksdb::{ BlockBasedOptions, Cache, ColumnFamilyDescriptor, CompactionPri, DBCompressionType, DBRawIteratorWithThreadMode, IteratorMode, OptimisticTransactionDB, OptimisticTransactionOptions, Options, Transaction, 
WriteBatchWithTransaction, WriteOptions, + DB, }; use std::{ collections::{BTreeMap, HashMap}, @@ -89,6 +90,7 @@ pub struct RocksDBBuilder { enable_statistics: bool, log_level: rocksdb::LogLevel, block_cache: Cache, + read_only: bool, } impl fmt::Debug for RocksDBBuilder { @@ -112,6 +114,7 @@ impl RocksDBBuilder { enable_statistics: false, log_level: rocksdb::LogLevel::Info, block_cache: cache, + read_only: false, } } @@ -223,6 +226,14 @@ impl RocksDBBuilder { self } + /// Sets read-only mode. + /// + /// Note: Write operations on a read-only provider will panic at runtime. + pub const fn with_read_only(mut self, read_only: bool) -> Self { + self.read_only = read_only; + self + } + /// Builds the [`RocksDBProvider`]. pub fn build(self) -> ProviderResult { let options = @@ -239,21 +250,32 @@ impl RocksDBBuilder { }) .collect(); - // Use OptimisticTransactionDB for MDBX-like transaction semantics (read-your-writes, - // rollback) OptimisticTransactionDB uses optimistic concurrency control (conflict - // detection at commit) and is backed by DBCommon, giving us access to - // cancel_all_background_work for clean shutdown. 
- let db = OptimisticTransactionDB::open_cf_descriptors(&options, &self.path, cf_descriptors) - .map_err(|e| { - ProviderError::Database(DatabaseError::Open(DatabaseErrorInfo { - message: e.to_string().into(), - code: -1, - })) - })?; - let metrics = self.enable_metrics.then(RocksDBMetrics::default); - Ok(RocksDBProvider(Arc::new(RocksDBProviderInner { db, metrics }))) + if self.read_only { + let db = DB::open_cf_descriptors_read_only(&options, &self.path, cf_descriptors, false) + .map_err(|e| { + ProviderError::Database(DatabaseError::Open(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + })?; + Ok(RocksDBProvider(Arc::new(RocksDBProviderInner::ReadOnly { db, metrics }))) + } else { + // Use OptimisticTransactionDB for MDBX-like transaction semantics (read-your-writes, + // rollback) OptimisticTransactionDB uses optimistic concurrency control (conflict + // detection at commit) and is backed by DBCommon, giving us access to + // cancel_all_background_work for clean shutdown. + let db = + OptimisticTransactionDB::open_cf_descriptors(&options, &self.path, cf_descriptors) + .map_err(|e| { + ProviderError::Database(DatabaseError::Open(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + })?; + Ok(RocksDBProvider(Arc::new(RocksDBProviderInner::ReadWrite { db, metrics }))) + } } } @@ -276,27 +298,138 @@ macro_rules! compress_to_buf_or_ref { pub struct RocksDBProvider(Arc); /// Inner state for `RocksDB` provider. -struct RocksDBProviderInner { - /// `RocksDB` database instance with optimistic transaction support. - db: OptimisticTransactionDB, - /// Metrics latency & operations. - metrics: Option, +enum RocksDBProviderInner { + /// Read-write mode using `OptimisticTransactionDB`. + ReadWrite { + /// `RocksDB` database instance with optimistic transaction support. + db: OptimisticTransactionDB, + /// Metrics latency & operations. + metrics: Option, + }, + /// Read-only mode using `DB` opened with `open_cf_descriptors_read_only`. 
+ /// This doesn't acquire an exclusive lock, allowing concurrent reads. + ReadOnly { + /// Read-only `RocksDB` database instance. + db: DB, + /// Metrics latency & operations. + metrics: Option, + }, +} + +impl RocksDBProviderInner { + /// Returns the metrics for this provider. + const fn metrics(&self) -> Option<&RocksDBMetrics> { + match self { + Self::ReadWrite { metrics, .. } | Self::ReadOnly { metrics, .. } => metrics.as_ref(), + } + } + + /// Returns the read-write database, panicking if in read-only mode. + fn db_rw(&self) -> &OptimisticTransactionDB { + match self { + Self::ReadWrite { db, .. } => db, + Self::ReadOnly { .. } => { + panic!("Cannot perform write operation on read-only RocksDB provider") + } + } + } + + /// Gets the column family handle for a table. + fn cf_handle(&self) -> Result<&rocksdb::ColumnFamily, DatabaseError> { + let cf = match self { + Self::ReadWrite { db, .. } => db.cf_handle(T::NAME), + Self::ReadOnly { db, .. } => db.cf_handle(T::NAME), + }; + cf.ok_or_else(|| DatabaseError::Other(format!("Column family '{}' not found", T::NAME))) + } + + /// Gets the column family handle for a table from the read-write database. + /// + /// # Panics + /// Panics if in read-only mode. + fn cf_handle_rw(&self, name: &str) -> Result<&rocksdb::ColumnFamily, DatabaseError> { + self.db_rw() + .cf_handle(name) + .ok_or_else(|| DatabaseError::Other(format!("Column family '{}' not found", name))) + } + + /// Gets a value from a column family. + fn get_cf( + &self, + cf: &rocksdb::ColumnFamily, + key: impl AsRef<[u8]>, + ) -> Result>, rocksdb::Error> { + match self { + Self::ReadWrite { db, .. } => db.get_cf(cf, key), + Self::ReadOnly { db, .. } => db.get_cf(cf, key), + } + } + + /// Puts a value into a column family. + fn put_cf( + &self, + cf: &rocksdb::ColumnFamily, + key: impl AsRef<[u8]>, + value: impl AsRef<[u8]>, + ) -> Result<(), rocksdb::Error> { + self.db_rw().put_cf(cf, key, value) + } + + /// Deletes a value from a column family. 
+ fn delete_cf( + &self, + cf: &rocksdb::ColumnFamily, + key: impl AsRef<[u8]>, + ) -> Result<(), rocksdb::Error> { + self.db_rw().delete_cf(cf, key) + } + + /// Deletes a range of values from a column family. + fn delete_range_cf>( + &self, + cf: &rocksdb::ColumnFamily, + from: K, + to: K, + ) -> Result<(), rocksdb::Error> { + self.db_rw().delete_range_cf(cf, from, to) + } + + /// Returns an iterator over a column family. + fn iterator_cf( + &self, + cf: &rocksdb::ColumnFamily, + mode: IteratorMode<'_>, + ) -> RocksDBIterEnum<'_> { + match self { + Self::ReadWrite { db, .. } => RocksDBIterEnum::ReadWrite(db.iterator_cf(cf, mode)), + Self::ReadOnly { db, .. } => RocksDBIterEnum::ReadOnly(db.iterator_cf(cf, mode)), + } + } } impl fmt::Debug for RocksDBProviderInner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RocksDBProviderInner") - .field("db", &"") - .field("metrics", &self.metrics) - .finish() + match self { + Self::ReadWrite { metrics, .. } => f + .debug_struct("RocksDBProviderInner::ReadWrite") + .field("db", &"") + .field("metrics", metrics) + .finish(), + Self::ReadOnly { metrics, .. } => f + .debug_struct("RocksDBProviderInner::ReadOnly") + .field("db", &"") + .field("metrics", metrics) + .finish(), + } } } impl Drop for RocksDBProviderInner { fn drop(&mut self) { - // Cancel all background work (compaction, flush) before dropping. - // This prevents pthread lock errors during shutdown. - self.db.cancel_all_background_work(true); + match self { + Self::ReadWrite { db, .. } => db.cancel_all_background_work(true), + Self::ReadOnly { db, .. } => db.cancel_all_background_work(true), + } } } @@ -317,14 +450,22 @@ impl RocksDBProvider { RocksDBBuilder::new(path) } + /// Returns `true` if this provider is in read-only mode. + pub fn is_read_only(&self) -> bool { + matches!(self.0.as_ref(), RocksDBProviderInner::ReadOnly { .. }) + } + /// Creates a new transaction with MDBX-like semantics (read-your-writes, rollback). 
/// /// Note: With `OptimisticTransactionDB`, commits may fail if there are conflicts. /// Conflict detection happens at commit time, not at write time. + /// + /// # Panics + /// Panics if the provider is in read-only mode. pub fn tx(&self) -> RocksTx<'_> { let write_options = WriteOptions::default(); let txn_options = OptimisticTransactionOptions::default(); - let inner = self.0.db.transaction_opt(&write_options, &txn_options); + let inner = self.0.db_rw().transaction_opt(&write_options, &txn_options); RocksTx { inner, provider: self } } @@ -332,6 +473,9 @@ impl RocksDBProvider { /// /// Use [`Self::write_batch`] for closure-based atomic writes. /// Use this method when the batch needs to be held by [`crate::EitherWriter`]. + /// + /// # Panics + /// Panics if the provider is in read-only mode when attempting to commit. pub fn batch(&self) -> RocksDBBatch<'_> { RocksDBBatch { provider: self, @@ -342,23 +486,20 @@ impl RocksDBProvider { /// Gets the column family handle for a table. fn get_cf_handle(&self) -> Result<&rocksdb::ColumnFamily, DatabaseError> { - self.0 - .db - .cf_handle(T::NAME) - .ok_or_else(|| DatabaseError::Other(format!("Column family '{}' not found", T::NAME))) + self.0.cf_handle::() } /// Executes a function and records metrics with the given operation and table name. 
- fn execute_with_operation_metric( + fn execute_with_operation_metric( &self, operation: RocksDBOperation, table: &'static str, - f: impl FnOnce(&Self) -> T, - ) -> T { - let start = self.0.metrics.as_ref().map(|_| Instant::now()); + f: impl FnOnce(&Self) -> R, + ) -> R { + let start = self.0.metrics().map(|_| Instant::now()); let res = f(self); - if let (Some(start), Some(metrics)) = (start, &self.0.metrics) { + if let (Some(start), Some(metrics)) = (start, self.0.metrics()) { metrics.record_operation(operation, table, start.elapsed()); } @@ -376,25 +517,30 @@ impl RocksDBProvider { key: &::Encoded, ) -> ProviderResult> { self.execute_with_operation_metric(RocksDBOperation::Get, T::NAME, |this| { - let result = - this.0.db.get_cf(this.get_cf_handle::()?, key.as_ref()).map_err(|e| { - ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { - message: e.to_string().into(), - code: -1, - })) - })?; + let result = this.0.get_cf(this.get_cf_handle::()?, key.as_ref()).map_err(|e| { + ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + })?; Ok(result.and_then(|value| T::Value::decompress(&value).ok())) }) } /// Puts upsert a value into the specified table with the given key. + /// + /// # Panics + /// Panics if the provider is in read-only mode. pub fn put(&self, key: T::Key, value: &T::Value) -> ProviderResult<()> { let encoded_key = key.encode(); self.put_encoded::(&encoded_key, value) } /// Puts a value into the specified table using pre-encoded key. + /// + /// # Panics + /// Panics if the provider is in read-only mode. 
pub fn put_encoded( &self, key: &::Encoded, @@ -407,7 +553,7 @@ impl RocksDBProvider { let mut buf = Vec::new(); let value_bytes = compress_to_buf_or_ref!(buf, value).unwrap_or(&buf); - this.0.db.put_cf(this.get_cf_handle::()?, key, value_bytes).map_err(|e| { + this.0.put_cf(this.get_cf_handle::()?, key, value_bytes).map_err(|e| { ProviderError::Database(DatabaseError::Write(Box::new(DatabaseWriteError { info: DatabaseErrorInfo { message: e.to_string().into(), code: -1 }, operation: DatabaseWriteOperation::PutUpsert, @@ -419,9 +565,12 @@ impl RocksDBProvider { } /// Deletes a value from the specified table. + /// + /// # Panics + /// Panics if the provider is in read-only mode. pub fn delete(&self, key: T::Key) -> ProviderResult<()> { self.execute_with_operation_metric(RocksDBOperation::Delete, T::NAME, |this| { - this.0.db.delete_cf(this.get_cf_handle::()?, key.encode().as_ref()).map_err(|e| { + this.0.delete_cf(this.get_cf_handle::()?, key.encode().as_ref()).map_err(|e| { ProviderError::Database(DatabaseError::Delete(DatabaseErrorInfo { message: e.to_string().into(), code: -1, @@ -438,7 +587,7 @@ impl RocksDBProvider { pub fn clear(&self) -> ProviderResult<()> { let cf = self.get_cf_handle::()?; - self.0.db.delete_range_cf(cf, &[] as &[u8], &[0xFF; 256]).map_err(|e| { + self.0.delete_range_cf(cf, &[] as &[u8], &[0xFF; 256]).map_err(|e| { ProviderError::Database(DatabaseError::Delete(DatabaseErrorInfo { message: e.to_string().into(), code: -1, @@ -452,7 +601,7 @@ impl RocksDBProvider { pub fn first(&self) -> ProviderResult> { self.execute_with_operation_metric(RocksDBOperation::Get, T::NAME, |this| { let cf = this.get_cf_handle::()?; - let mut iter = this.0.db.iterator_cf(cf, IteratorMode::Start); + let mut iter = this.0.iterator_cf(cf, IteratorMode::Start); match iter.next() { Some(Ok((key_bytes, value_bytes))) => { @@ -477,7 +626,7 @@ impl RocksDBProvider { pub fn last(&self) -> ProviderResult> { self.execute_with_operation_metric(RocksDBOperation::Get, T::NAME, 
|this| { let cf = this.get_cf_handle::()?; - let mut iter = this.0.db.iterator_cf(cf, IteratorMode::End); + let mut iter = this.0.iterator_cf(cf, IteratorMode::End); match iter.next() { Some(Ok((key_bytes, value_bytes))) => { @@ -503,7 +652,7 @@ impl RocksDBProvider { /// Returns decoded `(Key, Value)` pairs in key order. pub fn iter(&self) -> ProviderResult> { let cf = self.get_cf_handle::()?; - let iter = self.0.db.iterator_cf(cf, IteratorMode::Start); + let iter = self.0.iterator_cf(cf, IteratorMode::Start); Ok(RocksDBIter { inner: iter, _marker: std::marker::PhantomData }) } @@ -526,7 +675,6 @@ impl RocksDBProvider { // Create a forward iterator starting from our seek position. let iter = self .0 - .db .iterator_cf(cf, IteratorMode::From(start_bytes.as_ref(), rocksdb::Direction::Forward)); let mut result = Vec::new(); @@ -607,8 +755,11 @@ impl RocksDBProvider { /// /// This is used when the batch was extracted via [`RocksDBBatch::into_inner`] /// and needs to be committed at a later point (e.g., at provider commit time). + /// + /// # Panics + /// Panics if the provider is in read-only mode. pub fn commit_batch(&self, batch: WriteBatchWithTransaction) -> ProviderResult<()> { - self.0.db.write_opt(batch, &WriteOptions::default()).map_err(|e| { + self.0.db_rw().write_opt(batch, &WriteOptions::default()).map_err(|e| { ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { message: e.to_string().into(), code: -1, @@ -790,8 +941,11 @@ impl<'a> RocksDBBatch<'a> { /// Commits the batch to the database. /// /// This consumes the batch and writes all operations atomically to `RocksDB`. + /// + /// # Panics + /// Panics if the provider is in read-only mode. 
pub fn commit(self) -> ProviderResult<()> { - self.provider.0.db.write_opt(self.inner, &WriteOptions::default()).map_err(|e| { + self.provider.0.db_rw().write_opt(self.inner, &WriteOptions::default()).map_err(|e| { ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { message: e.to_string().into(), code: -1, @@ -1233,12 +1387,7 @@ impl<'db> RocksTx<'db> { }) }; - let cf = self.provider.0.db.cf_handle(T::NAME).ok_or_else(|| { - ProviderError::Database(DatabaseError::Other(format!( - "column family not found: {}", - T::NAME - ))) - })?; + let cf = self.provider.0.cf_handle_rw(T::NAME)?; // Create a raw iterator to access key bytes directly. let mut iter: DBRawIteratorWithThreadMode<'_, Transaction<'_, OptimisticTransactionDB>> = @@ -1305,11 +1454,30 @@ impl<'db> RocksTx<'db> { } } +/// Wrapper enum for `RocksDB` iterators that works in both read-write and read-only modes. +enum RocksDBIterEnum<'db> { + /// Iterator from read-write `OptimisticTransactionDB`. + ReadWrite(rocksdb::DBIteratorWithThreadMode<'db, OptimisticTransactionDB>), + /// Iterator from read-only `DB`. + ReadOnly(rocksdb::DBIteratorWithThreadMode<'db, DB>), +} + +impl Iterator for RocksDBIterEnum<'_> { + type Item = Result<(Box<[u8]>, Box<[u8]>), rocksdb::Error>; + + fn next(&mut self) -> Option { + match self { + Self::ReadWrite(iter) => iter.next(), + Self::ReadOnly(iter) => iter.next(), + } + } +} + /// Iterator over a `RocksDB` table (non-transactional). /// /// Yields decoded `(Key, Value)` pairs in key order. 
pub struct RocksDBIter<'db, T: Table> { - inner: rocksdb::DBIteratorWithThreadMode<'db, OptimisticTransactionDB>, + inner: RocksDBIterEnum<'db>, _marker: std::marker::PhantomData, } diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs index 0160ef87021..a5b52605fa8 100644 --- a/crates/storage/provider/src/providers/rocksdb_stub.rs +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -102,6 +102,11 @@ impl RocksDBBuilder { self } + /// Sets read-only mode (stub implementation). + pub const fn with_read_only(self, _read_only: bool) -> Self { + self + } + /// Build the `RocksDB` provider (stub implementation). pub const fn build(self) -> ProviderResult { Ok(RocksDBProvider) From 4f009728e25057d72fa34da1985bab297392ee9a Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 08:11:51 -0800 Subject: [PATCH 089/267] feat(cli): add `reth db checksum mdbx/static-file` command (#21211) Co-authored-by: Amp Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/cli/commands/src/db/checksum.rs | 169 +++++++++++++--- crates/cli/commands/src/db/mod.rs | 2 +- docs/vocs/docs/pages/cli/SUMMARY.mdx | 4 + docs/vocs/docs/pages/cli/op-reth/db.mdx | 2 +- .../docs/pages/cli/op-reth/db/checksum.mdx | 20 +- .../pages/cli/op-reth/db/checksum/mdbx.mdx | 179 +++++++++++++++++ .../cli/op-reth/db/checksum/static-file.mdx | 186 ++++++++++++++++++ docs/vocs/docs/pages/cli/reth/db.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 20 +- .../docs/pages/cli/reth/db/checksum/mdbx.mdx | 179 +++++++++++++++++ .../cli/reth/db/checksum/static-file.mdx | 186 ++++++++++++++++++ docs/vocs/sidebar-cli-op-reth.ts | 13 +- docs/vocs/sidebar-cli-reth.ts | 13 +- 13 files changed, 920 insertions(+), 55 deletions(-) create mode 100644 docs/vocs/docs/pages/cli/op-reth/db/checksum/mdbx.mdx create mode 100644 docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx create mode 
100644 docs/vocs/docs/pages/cli/reth/db/checksum/mdbx.mdx create mode 100644 docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index e5ed9d909cd..b1c6c6c5574 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -4,15 +4,17 @@ use crate::{ }; use alloy_primitives::map::foldhash::fast::FixedState; use clap::Parser; +use itertools::Itertools; use reth_chainspec::EthereumHardforks; -use reth_db::DatabaseEnv; +use reth_db::{static_file::iter_static_files, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, table::Table, transaction::DbTx, RawKey, RawTable, RawValue, TableViewer, Tables, }; use reth_db_common::DbTool; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; -use reth_provider::{providers::ProviderNodeTypes, DBProvider}; +use reth_provider::{providers::ProviderNodeTypes, DBProvider, StaticFileProviderFactory}; +use reth_static_file_types::StaticFileSegment; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, @@ -20,24 +22,54 @@ use std::{ }; use tracing::{info, warn}; +/// Interval for logging progress during checksum computation. +const PROGRESS_LOG_INTERVAL: usize = 100_000; + #[derive(Parser, Debug)] /// The arguments for the `reth db checksum` command pub struct Command { - /// The table name - table: Tables, + #[command(subcommand)] + subcommand: Subcommand, +} - /// The start of the range to checksum. - #[arg(long, value_parser = maybe_json_value_parser)] - start_key: Option, +#[derive(clap::Subcommand, Debug)] +enum Subcommand { + /// Calculates the checksum of a database table + Mdbx { + /// The table name + table: Tables, - /// The end of the range to checksum. - #[arg(long, value_parser = maybe_json_value_parser)] - end_key: Option, + /// The start of the range to checksum. 
+ #[arg(long, value_parser = maybe_json_value_parser)] + start_key: Option, - /// The maximum number of records that are queried and used to compute the - /// checksum. - #[arg(long)] - limit: Option, + /// The end of the range to checksum. + #[arg(long, value_parser = maybe_json_value_parser)] + end_key: Option, + + /// The maximum number of records that are queried and used to compute the + /// checksum. + #[arg(long)] + limit: Option, + }, + /// Calculates the checksum of a static file segment + StaticFile { + /// The static file segment + #[arg(value_enum)] + segment: StaticFileSegment, + + /// The block number to start from (inclusive). + #[arg(long)] + start_block: Option, + + /// The block number to end at (inclusive). + #[arg(long)] + end_block: Option, + + /// The maximum number of rows to checksum. + #[arg(long)] + limit: Option, + }, } impl Command { @@ -47,16 +79,109 @@ impl Command { tool: &DbTool>>, ) -> eyre::Result<()> { warn!("This command should be run without the node running!"); - self.table.view(&ChecksumViewer { - tool, - start_key: self.start_key, - end_key: self.end_key, - limit: self.limit, - })?; + + match self.subcommand { + Subcommand::Mdbx { table, start_key, end_key, limit } => { + table.view(&ChecksumViewer { tool, start_key, end_key, limit })?; + } + Subcommand::StaticFile { segment, start_block, end_block, limit } => { + checksum_static_file(tool, segment, start_block, end_block, limit)?; + } + } + Ok(()) } } +/// Creates a new hasher with the standard seed used for checksum computation. +fn checksum_hasher() -> impl Hasher { + FixedState::with_seed(u64::from_be_bytes(*b"RETHRETH")).build_hasher() +} + +fn checksum_static_file>( + tool: &DbTool>>, + segment: StaticFileSegment, + start_block: Option, + end_block: Option, + limit: Option, +) -> eyre::Result<()> { + let static_file_provider = tool.provider_factory.static_file_provider(); + if let Err(err) = static_file_provider.check_consistency(&tool.provider_factory.provider()?) 
{ + warn!("Error checking consistency of static files: {err}"); + } + + let static_files = iter_static_files(static_file_provider.directory())?; + + let ranges = static_files + .get(segment) + .ok_or_else(|| eyre::eyre!("No static files found for segment: {}", segment))?; + + let start_time = Instant::now(); + let mut hasher = checksum_hasher(); + let mut total = 0usize; + let limit = limit.unwrap_or(usize::MAX); + + let start_block = start_block.unwrap_or(0); + let end_block = end_block.unwrap_or(u64::MAX); + + info!( + "Computing checksum for {} static files, start_block={}, end_block={}, limit={:?}", + segment, + start_block, + end_block, + if limit == usize::MAX { None } else { Some(limit) } + ); + + 'outer: for (block_range, _header) in ranges.iter().sorted_by_key(|(range, _)| range.start()) { + if block_range.end() < start_block || block_range.start() > end_block { + continue; + } + + let fixed_block_range = static_file_provider.find_fixed_range(segment, block_range.start()); + let jar_provider = static_file_provider + .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? 
+ .ok_or_else(|| { + eyre::eyre!( + "Failed to get segment provider for segment {} at range {}", + segment, + block_range + ) + })?; + + let mut cursor = jar_provider.cursor()?; + + while let Ok(Some(row)) = cursor.next_row() { + for col_data in row.iter() { + hasher.write(col_data); + } + + total += 1; + + if total.is_multiple_of(PROGRESS_LOG_INTERVAL) { + info!("Hashed {total} entries."); + } + + if total >= limit { + break 'outer; + } + } + + // Explicitly drop provider before removing from cache to avoid deadlock + drop(jar_provider); + static_file_provider.remove_cached_provider(segment, fixed_block_range.end()); + } + + let checksum = hasher.finish(); + let elapsed = start_time.elapsed(); + + info!( + "Checksum for static file segment `{}`: {:#x} ({} entries, elapsed: {:?})", + segment, checksum, total, elapsed + ); + + Ok(()) +} + pub(crate) struct ChecksumViewer<'a, N: NodeTypesWithDB> { tool: &'a DbTool, start_key: Option, @@ -102,7 +227,7 @@ impl TableViewer<(u64, Duration)> for ChecksumViewer<'_, N }; let start_time = Instant::now(); - let mut hasher = FixedState::with_seed(u64::from_be_bytes(*b"RETHRETH")).build_hasher(); + let mut hasher = checksum_hasher(); let mut total = 0; let limit = self.limit.unwrap_or(usize::MAX); @@ -111,7 +236,7 @@ impl TableViewer<(u64, Duration)> for ChecksumViewer<'_, N for (index, entry) in walker.enumerate() { let (k, v): (RawKey, RawValue) = entry?; - if index.is_multiple_of(100_000) { + if index.is_multiple_of(PROGRESS_LOG_INTERVAL) { info!("Hashed {index} entries."); } diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index 189bf3d72e3..f813924efb2 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -39,7 +39,7 @@ pub enum Subcommands { Stats(stats::Command), /// Lists the contents of a table List(list::Command), - /// Calculates the content checksum of a table + /// Calculates the content checksum of a table or static file segment 
Checksum(checksum::Command), /// Create a diff between two database tables or two entire databases. Diff(diff::Command), diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 4381ed78427..89a390f3f72 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -10,6 +10,8 @@ - [`reth db stats`](./reth/db/stats.mdx) - [`reth db list`](./reth/db/list.mdx) - [`reth db checksum`](./reth/db/checksum.mdx) + - [`reth db checksum mdbx`](./reth/db/checksum/mdbx.mdx) + - [`reth db checksum static-file`](./reth/db/checksum/static-file.mdx) - [`reth db diff`](./reth/db/diff.mdx) - [`reth db get`](./reth/db/get.mdx) - [`reth db get mdbx`](./reth/db/get/mdbx.mdx) @@ -66,6 +68,8 @@ - [`op-reth db stats`](./op-reth/db/stats.mdx) - [`op-reth db list`](./op-reth/db/list.mdx) - [`op-reth db checksum`](./op-reth/db/checksum.mdx) + - [`op-reth db checksum mdbx`](./op-reth/db/checksum/mdbx.mdx) + - [`op-reth db checksum static-file`](./op-reth/db/checksum/static-file.mdx) - [`op-reth db diff`](./op-reth/db/diff.mdx) - [`op-reth db get`](./op-reth/db/get.mdx) - [`op-reth db get mdbx`](./op-reth/db/get/mdbx.mdx) diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx index d6c8ef5669e..335b54cba6c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -11,7 +11,7 @@ Usage: op-reth db [OPTIONS] Commands: stats Lists all the tables, their entry count and their size list Lists the contents of a table - checksum Calculates the content checksum of a table + checksum Calculates the content checksum of a table or static file segment diff Create a diff between two database tables or two entire databases get Gets the content of a table for the given key drop Deletes all database entries diff --git a/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx index 45870209413..8027558cfbc 100644 --- 
a/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx @@ -1,27 +1,19 @@ # op-reth db checksum -Calculates the content checksum of a table +Calculates the content checksum of a table or static file segment ```bash $ op-reth db checksum --help ``` ```txt -Usage: op-reth db checksum [OPTIONS] +Usage: op-reth db checksum [OPTIONS] -Arguments: -
- The table name +Commands: + mdbx Calculates the checksum of a database table + static-file Calculates the checksum of a static file segment + help Print this message or the help of the given subcommand(s) Options: - --start-key - The start of the range to checksum - - --end-key - The end of the range to checksum - - --limit - The maximum number of records that are queried and used to compute the checksum - -h, --help Print help (see a summary with '-h') diff --git a/docs/vocs/docs/pages/cli/op-reth/db/checksum/mdbx.mdx b/docs/vocs/docs/pages/cli/op-reth/db/checksum/mdbx.mdx new file mode 100644 index 00000000000..aa34fef6941 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/checksum/mdbx.mdx @@ -0,0 +1,179 @@ +# op-reth db checksum mdbx + +Calculates the checksum of a database table + +```bash +$ op-reth db checksum mdbx --help +``` +```txt +Usage: op-reth db checksum mdbx [OPTIONS]
+ +Arguments: +
+ The table name + +Options: + --start-key + The start of the range to checksum + + --end-key + The end of the range to checksum + + --limit + The maximum number of records that are queried and used to compute the checksum + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. 
This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx new file mode 100644 index 00000000000..0515b998834 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx @@ -0,0 +1,186 @@ +# op-reth db checksum static-file + +Calculates the checksum of a static file segment + +```bash +$ op-reth db checksum static-file --help +``` +```txt +Usage: op-reth db checksum static-file [OPTIONS] + +Arguments: + + The static file segment + + Possible values: + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table + - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + +Options: + --start-block + The block number to start from (inclusive) + + --end-block + The block number to end at (inclusive) + + --limit + The maximum number of rows to checksum + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 4fda4538d16..ef1793696bd 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -11,7 +11,7 @@ Usage: reth db [OPTIONS] Commands: stats Lists all the tables, their entry count and their size list Lists the contents of a table - checksum Calculates the content checksum of a table + checksum Calculates the content checksum of a table or static file segment diff Create a diff between two database tables or two entire databases get Gets the content of a table for the given key drop Deletes all database entries diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index 067737df48b..31030442a31 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -1,27 +1,19 @@ # reth db checksum -Calculates the content checksum of a table +Calculates the content checksum of a table or static file segment ```bash $ reth db checksum --help ``` ```txt -Usage: reth db checksum [OPTIONS]
+Usage: reth db checksum [OPTIONS] -Arguments: -
- The table name +Commands: + mdbx Calculates the checksum of a database table + static-file Calculates the checksum of a static file segment + help Print this message or the help of the given subcommand(s) Options: - --start-key - The start of the range to checksum - - --end-key - The end of the range to checksum - - --limit - The maximum number of records that are queried and used to compute the checksum - -h, --help Print help (see a summary with '-h') diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum/mdbx.mdx new file mode 100644 index 00000000000..4608d1c2824 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/checksum/mdbx.mdx @@ -0,0 +1,179 @@ +# reth db checksum mdbx + +Calculates the checksum of a database table + +```bash +$ reth db checksum mdbx --help +``` +```txt +Usage: reth db checksum mdbx [OPTIONS]
+ +Arguments: +
+ The table name + +Options: + --start-key + The start of the range to checksum + + --end-key + The end of the range to checksum + + --limit + The maximum number of records that are queried and used to compute the checksum + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx new file mode 100644 index 00000000000..04bd067b27b --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx @@ -0,0 +1,186 @@ +# reth db checksum static-file + +Calculates the checksum of a static file segment + +```bash +$ reth db checksum static-file --help +``` +```txt +Usage: reth db checksum static-file [OPTIONS] + +Arguments: + + The static file segment + + Possible values: + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table + - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + +Options: + --start-block + The block number to start from (inclusive) + + --end-block + The block number to end at (inclusive) + + --limit + The 
maximum number of rows to checksum + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/sidebar-cli-op-reth.ts b/docs/vocs/sidebar-cli-op-reth.ts index ad2c6be69c7..58d814f0e31 100644 --- a/docs/vocs/sidebar-cli-op-reth.ts +++ b/docs/vocs/sidebar-cli-op-reth.ts @@ -44,7 +44,18 @@ export const opRethCliSidebar: SidebarItem = { }, { text: "op-reth db checksum", - link: "/cli/op-reth/db/checksum" + link: "/cli/op-reth/db/checksum", + collapsed: true, + items: [ + { + text: "op-reth db checksum mdbx", + link: "/cli/op-reth/db/checksum/mdbx" + }, + { + text: "op-reth db checksum static-file", + link: "/cli/op-reth/db/checksum/static-file" + } + ] }, { text: "op-reth db diff", diff --git a/docs/vocs/sidebar-cli-reth.ts b/docs/vocs/sidebar-cli-reth.ts index 1b0f88b4035..91c39eabb6b 100644 --- a/docs/vocs/sidebar-cli-reth.ts +++ b/docs/vocs/sidebar-cli-reth.ts @@ -48,7 +48,18 @@ export const rethCliSidebar: SidebarItem = { }, { text: "reth db checksum", - link: "/cli/reth/db/checksum" + link: "/cli/reth/db/checksum", + collapsed: true, + items: [ + { + text: "reth db checksum mdbx", + link: "/cli/reth/db/checksum/mdbx" + }, + { + text: "reth db checksum static-file", + link: 
"/cli/reth/db/checksum/static-file" + } + ] }, { text: "reth db diff", From 2e2cd67663c63deb34e0fb21c56b44cde1715909 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 20 Jan 2026 17:42:16 +0100 Subject: [PATCH 090/267] perf(chain-state): parallelize into_sorted with rayon (#21193) --- Cargo.lock | 1 + crates/chain-state/Cargo.toml | 2 ++ crates/chain-state/src/deferred_trie.rs | 31 ++++++++++++++++++------- crates/engine/tree/Cargo.toml | 2 +- 4 files changed, 27 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66fee9d2e21..dc39ba36641 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7788,6 +7788,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.9.2", + "rayon", "reth-chainspec", "reth-errors", "reth-ethereum-primitives", diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index d21c83ae7c4..313165d0982 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -41,6 +41,7 @@ derive_more.workspace = true metrics.workspace = true parking_lot.workspace = true pin-project.workspace = true +rayon = { workspace = true, optional = true } serde = { workspace = true, optional = true } # optional deps for test-utils @@ -84,6 +85,7 @@ test-utils = [ "reth-trie/test-utils", "reth-ethereum-primitives/test-utils", ] +rayon = ["dep:rayon"] [[bench]] name = "canonical_hashes_range" diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs index 6e758a12205..1b4a3d43a35 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -163,14 +163,29 @@ impl DeferredTrieData { anchor_hash: B256, ancestors: &[Self], ) -> ComputedTrieData { - let sorted_hashed_state = match Arc::try_unwrap(hashed_state) { - Ok(state) => state.into_sorted(), - Err(arc) => arc.clone_into_sorted(), - }; - let sorted_trie_updates = match Arc::try_unwrap(trie_updates) { - Ok(updates) => updates.into_sorted(), - Err(arc) => arc.clone_into_sorted(), - 
}; + #[cfg(feature = "rayon")] + let (sorted_hashed_state, sorted_trie_updates) = rayon::join( + || match Arc::try_unwrap(hashed_state) { + Ok(state) => state.into_sorted(), + Err(arc) => arc.clone_into_sorted(), + }, + || match Arc::try_unwrap(trie_updates) { + Ok(updates) => updates.into_sorted(), + Err(arc) => arc.clone_into_sorted(), + }, + ); + + #[cfg(not(feature = "rayon"))] + let (sorted_hashed_state, sorted_trie_updates) = ( + match Arc::try_unwrap(hashed_state) { + Ok(state) => state.into_sorted(), + Err(arc) => arc.clone_into_sorted(), + }, + match Arc::try_unwrap(trie_updates) { + Ok(updates) => updates.into_sorted(), + Err(arc) => arc.clone_into_sorted(), + }, + ); // Reuse parent's overlay if available and anchors match. // We can only reuse the parent's overlay if it was built on top of the same diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 50122c10ff0..b2124098eae 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] # reth -reth-chain-state.workspace = true +reth-chain-state = { workspace = true, features = ["rayon"] } reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true reth-db.workspace = true From 80980b8e4da28b65551b2a0c819e8c9b2023e9b5 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 08:58:29 -0800 Subject: [PATCH 091/267] feat(pruning): add `DefaultPruningValues` for overridable pruning defaults (#21207) Co-authored-by: Alexey Shekhirin --- crates/node/core/src/args/mod.rs | 2 +- crates/node/core/src/args/pruning.rs | 115 +++++++++++++++++++++------ 2 files changed, 92 insertions(+), 25 deletions(-) diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 9351128570d..f12fe7e322a 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -54,7 +54,7 @@ pub use dev::DevArgs; /// PruneArgs for configuring the 
pruning and full node mod pruning; -pub use pruning::PruningArgs; +pub use pruning::{DefaultPruningValues, PruningArgs}; /// DatadirArgs for configuring data storage paths mod datadir_args; diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 0bd65253724..24575a8ff75 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -6,7 +6,88 @@ use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthereumHardforks; use reth_config::config::PruneConfig; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; -use std::{collections::BTreeMap, ops::Not}; +use std::{collections::BTreeMap, ops::Not, sync::OnceLock}; + +/// Global static pruning defaults +static PRUNING_DEFAULTS: OnceLock = OnceLock::new(); + +/// Default values for `--full` and `--minimal` pruning modes that can be customized. +/// +/// Global defaults can be set via [`DefaultPruningValues::try_init`]. +#[derive(Debug, Clone)] +pub struct DefaultPruningValues { + /// Prune modes for `--full` flag. + /// + /// Note: `bodies_history` is ignored when `full_bodies_history_use_pre_merge` is `true`. + pub full_prune_modes: PruneModes, + /// If `true`, `--full` will set `bodies_history` to prune everything before the merge block + /// (Paris hardfork). If `false`, uses `full_prune_modes.bodies_history` directly. + pub full_bodies_history_use_pre_merge: bool, + /// Prune modes for `--minimal` flag. + pub minimal_prune_modes: PruneModes, +} + +impl DefaultPruningValues { + /// Initialize the global pruning defaults with this configuration. + /// + /// Returns `Err(self)` if already initialized. + pub fn try_init(self) -> Result<(), Self> { + PRUNING_DEFAULTS.set(self) + } + + /// Get a reference to the global pruning defaults. + pub fn get_global() -> &'static Self { + PRUNING_DEFAULTS.get_or_init(Self::default) + } + + /// Set the prune modes for `--full` flag. 
+ pub fn with_full_prune_modes(mut self, modes: PruneModes) -> Self { + self.full_prune_modes = modes; + self + } + + /// Set whether `--full` should use pre-merge pruning for bodies history. + /// + /// When `true` (default), bodies are pruned before the Paris hardfork block. + /// When `false`, uses `full_prune_modes.bodies_history` directly. + pub const fn with_full_bodies_history_use_pre_merge(mut self, use_pre_merge: bool) -> Self { + self.full_bodies_history_use_pre_merge = use_pre_merge; + self + } + + /// Set the prune modes for `--minimal` flag. + pub fn with_minimal_prune_modes(mut self, modes: PruneModes) -> Self { + self.minimal_prune_modes = modes; + self + } +} + +impl Default for DefaultPruningValues { + fn default() -> Self { + Self { + full_prune_modes: PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: None, + receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + // This field is ignored when full_bodies_history_use_pre_merge is true + bodies_history: None, + receipts_log_filter: Default::default(), + }, + full_bodies_history_use_pre_merge: true, + minimal_prune_modes: PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: Some(PruneMode::Full), + receipts: Some(PruneMode::Full), + account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + bodies_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + receipts_log_filter: Default::default(), + }, + } + } +} /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] @@ -128,36 +209,22 @@ impl PruningArgs { // If --full is set, use full node defaults. 
if self.full { - config = PruneConfig { - block_interval: config.block_interval, - segments: PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: None, - receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - bodies_history: chain_spec - .ethereum_fork_activation(EthereumHardfork::Paris) - .block_number() - .map(PruneMode::Before), - receipts_log_filter: Default::default(), - }, + let defaults = DefaultPruningValues::get_global(); + let mut segments = defaults.full_prune_modes.clone(); + if defaults.full_bodies_history_use_pre_merge { + segments.bodies_history = chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .block_number() + .map(PruneMode::Before); } + config = PruneConfig { block_interval: config.block_interval, segments } } // If --minimal is set, use minimal storage mode with aggressive pruning. 
if self.minimal { config = PruneConfig { block_interval: config.block_interval, - segments: PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: Some(PruneMode::Full), - receipts: Some(PruneMode::Full), - account_history: Some(PruneMode::Distance(10064)), - storage_history: Some(PruneMode::Distance(10064)), - bodies_history: Some(PruneMode::Distance(10064)), - receipts_log_filter: Default::default(), - }, + segments: DefaultPruningValues::get_global().minimal_prune_modes.clone(), } } From 7371bd3f2983a42f71cc41fcd4c039454f853cac Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 09:01:12 -0800 Subject: [PATCH 092/267] chore(db-api): remove sharded_key_encode benchmark (#21215) Co-authored-by: Amp --- Cargo.lock | 9 +- crates/storage/db-api/Cargo.toml | 5 - .../db-api/benches/sharded_key_encode.rs | 142 ------------------ 3 files changed, 4 insertions(+), 152 deletions(-) delete mode 100644 crates/storage/db-api/benches/sharded_key_encode.rs diff --git a/Cargo.lock b/Cargo.lock index dc39ba36641..31ad77a44bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.27" +version = "0.2.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db5bcdd086f0b1b9610140a12c59b757397be90bd130d8d836fc8da0815a34" +checksum = "3842d8c52fcd3378039f4703dba392dca8b546b1c8ed6183048f8dab95b2be78" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8100,7 +8100,6 @@ dependencies = [ "alloy-primitives", "arbitrary", "bytes", - "codspeed-criterion-compat", "derive_more", "metrics", "modular-bitfield", @@ -14607,9 +14606,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f63c051f4fe3c1509da62131a678643c5b6fbdc9273b2b79d4378ebda003d2" +checksum = 
"dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" [[package]] name = "zstd" diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index e25595f1ac7..49e4c84f7a0 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -60,11 +60,6 @@ test-fuzz.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -criterion.workspace = true - -[[bench]] -name = "sharded_key_encode" -harness = false [features] test-utils = [ diff --git a/crates/storage/db-api/benches/sharded_key_encode.rs b/crates/storage/db-api/benches/sharded_key_encode.rs deleted file mode 100644 index 5366e234e09..00000000000 --- a/crates/storage/db-api/benches/sharded_key_encode.rs +++ /dev/null @@ -1,142 +0,0 @@ -//! Benchmarks for `ShardedKey` and `StorageShardedKey` encoding. -//! -//! These benchmarks measure the performance of stack-allocated vs heap-allocated key encoding, -//! inspired by Anza Labs' PR #3603 which saved ~20k allocations/sec by moving `RocksDB` keys -//! from heap to stack. -//! -//! Run with: `cargo bench -p reth-db-api --bench sharded_key_encode` - -#![allow(missing_docs)] - -use alloy_primitives::{Address, B256}; -use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion, Throughput}; -use reth_db_api::{ - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, - table::Encode, -}; - -/// Number of keys to encode per iteration for throughput measurement. 
-const BATCH_SIZE: usize = 10_000; - -fn bench_sharded_key_address_encode(c: &mut Criterion) { - let mut group = c.benchmark_group("sharded_key_encode"); - group.throughput(Throughput::Elements(BATCH_SIZE as u64)); - - // Pre-generate test data - let keys: Vec> = (0..BATCH_SIZE) - .map(|i| { - let mut addr_bytes = [0u8; 20]; - addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); - ShardedKey::new(Address::from(addr_bytes), i as u64) - }) - .collect(); - - group.bench_function("ShardedKey
::encode", |b| { - b.iter_batched( - || keys.clone(), - |keys| { - for key in keys { - let encoded = black_box(key.encode()); - black_box(encoded.as_ref()); - } - }, - BatchSize::SmallInput, - ) - }); - - group.finish(); -} - -fn bench_storage_sharded_key_encode(c: &mut Criterion) { - let mut group = c.benchmark_group("storage_sharded_key_encode"); - group.throughput(Throughput::Elements(BATCH_SIZE as u64)); - - // Pre-generate test data - let keys: Vec = (0..BATCH_SIZE) - .map(|i| { - let mut addr_bytes = [0u8; 20]; - addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); - let mut key_bytes = [0u8; 32]; - key_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); - StorageShardedKey::new(Address::from(addr_bytes), B256::from(key_bytes), i as u64) - }) - .collect(); - - group.bench_function("StorageShardedKey::encode", |b| { - b.iter_batched( - || keys.clone(), - |keys| { - for key in keys { - let encoded = black_box(key.encode()); - black_box(encoded.as_ref()); - } - }, - BatchSize::SmallInput, - ) - }); - - group.finish(); -} - -fn bench_encode_decode_roundtrip(c: &mut Criterion) { - use reth_db_api::table::Decode; - - let mut group = c.benchmark_group("sharded_key_roundtrip"); - group.throughput(Throughput::Elements(BATCH_SIZE as u64)); - - let keys: Vec> = (0..BATCH_SIZE) - .map(|i| { - let mut addr_bytes = [0u8; 20]; - addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); - ShardedKey::new(Address::from(addr_bytes), i as u64) - }) - .collect(); - - group.bench_function("ShardedKey
::encode_then_decode", |b| { - b.iter_batched( - || keys.clone(), - |keys| { - for key in keys { - let encoded = key.encode(); - let decoded = black_box(ShardedKey::
::decode(&encoded).unwrap()); - black_box(decoded); - } - }, - BatchSize::SmallInput, - ) - }); - - let storage_keys: Vec = (0..BATCH_SIZE) - .map(|i| { - let mut addr_bytes = [0u8; 20]; - addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); - let mut key_bytes = [0u8; 32]; - key_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes()); - StorageShardedKey::new(Address::from(addr_bytes), B256::from(key_bytes), i as u64) - }) - .collect(); - - group.bench_function("StorageShardedKey::encode_then_decode", |b| { - b.iter_batched( - || storage_keys.clone(), - |keys| { - for key in keys { - let encoded = key.encode(); - let decoded = black_box(StorageShardedKey::decode(&encoded).unwrap()); - black_box(decoded); - } - }, - BatchSize::SmallInput, - ) - }); - - group.finish(); -} - -criterion_group!( - benches, - bench_sharded_key_address_encode, - bench_storage_sharded_key_encode, - bench_encode_decode_roundtrip, -); -criterion_main!(benches); From 79342949886ff2d10cbb976e8e1bc73abc9f7ac4 Mon Sep 17 00:00:00 2001 From: Ahsen Kamal <82591228+ahsenkamal@users.noreply.github.com> Date: Tue, 20 Jan 2026 22:39:20 +0530 Subject: [PATCH 093/267] perf(trie): dispatch storage proofs in lexicographical order (#21213) Signed-off-by: Ahsen Kamal --- crates/trie/parallel/src/proof_task.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 1d492e27755..eb6f8923469 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -1555,8 +1555,11 @@ fn dispatch_storage_proofs( let mut storage_proof_receivers = B256Map::with_capacity_and_hasher(targets.len(), Default::default()); + let mut sorted_targets: Vec<_> = targets.iter().collect(); + sorted_targets.sort_unstable_by_key(|(addr, _)| *addr); + // Dispatch all storage proofs to worker pool - for (hashed_address, target_slots) in targets.iter() { + for (hashed_address, target_slots) in 
sorted_targets { // Create channel for receiving ProofResultMessage let (result_tx, result_rx) = crossbeam_channel::unbounded(); From 3ba37082dc341d0999790835f27be8a67090a2d2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 20 Jan 2026 17:36:35 +0000 Subject: [PATCH 094/267] fix(reth-bench): replay-payloads prefix (#21219) --- bin/reth-bench/src/bench/replay_payloads.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth-bench/src/bench/replay_payloads.rs b/bin/reth-bench/src/bench/replay_payloads.rs index a2595f81f30..3de65c9b7ac 100644 --- a/bin/reth-bench/src/bench/replay_payloads.rs +++ b/bin/reth-bench/src/bench/replay_payloads.rs @@ -191,7 +191,7 @@ impl Command { let name = e.file_name(); let name_str = name.to_string_lossy(); // Extract index from "payload_NNN.json" - let index_str = name_str.strip_prefix("payload_")?.strip_suffix(".json")?; + let index_str = name_str.strip_prefix("payload_block_")?.strip_suffix(".json")?; let index: u64 = index_str.parse().ok()?; Some((index, e.path())) }) From 9662dc52717ba01976dbbb9d97a224ed70308306 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 20 Jan 2026 22:20:28 +0400 Subject: [PATCH 095/267] fix: properly save history indices in pipeline (#21222) --- crates/stages/stages/src/stages/utils.rs | 46 +++++++++++++----------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 7d7d5612b9f..93158a62ed9 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -193,7 +193,7 @@ where P: Copy + Default + Eq, { let mut write_cursor = provider.tx_ref().cursor_write::()?; - let mut current_partial = P::default(); + let mut current_partial = None; let mut current_list = Vec::::new(); // observability @@ -213,26 +213,28 @@ where // StorageHistory: `Address.StorageKey`. 
let partial_key = get_partial(sharded_key); - if current_partial != partial_key { + if current_partial != Some(partial_key) { // We have reached the end of this subset of keys so // we need to flush its last indice shard. - load_indices( - &mut write_cursor, - current_partial, - &mut current_list, - &sharded_key_factory, - append_only, - LoadMode::Flush, - )?; + if let Some(current) = current_partial { + load_indices( + &mut write_cursor, + current, + &mut current_list, + &sharded_key_factory, + append_only, + LoadMode::Flush, + )?; + } - current_partial = partial_key; + current_partial = Some(partial_key); current_list.clear(); // If it's not the first sync, there might an existing shard already, so we need to // merge it with the one coming from the collector if !append_only && let Some((_, last_database_shard)) = - write_cursor.seek_exact(sharded_key_factory(current_partial, u64::MAX))? + write_cursor.seek_exact(sharded_key_factory(partial_key, u64::MAX))? { current_list.extend(last_database_shard.iter()); } @@ -241,7 +243,7 @@ where current_list.extend(new_list.iter()); load_indices( &mut write_cursor, - current_partial, + partial_key, &mut current_list, &sharded_key_factory, append_only, @@ -250,14 +252,16 @@ where } // There will be one remaining shard that needs to be flushed to DB. 
- load_indices( - &mut write_cursor, - current_partial, - &mut current_list, - &sharded_key_factory, - append_only, - LoadMode::Flush, - )?; + if let Some(current) = current_partial { + load_indices( + &mut write_cursor, + current, + &mut current_list, + &sharded_key_factory, + append_only, + LoadMode::Flush, + )?; + } Ok(()) } From ff8f434dcdae69beac52aa73b7810f03627a8e33 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 11:10:34 -0800 Subject: [PATCH 096/267] feat(cli): add `reth db checksum rocksdb` command (#21217) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/cli/commands/Cargo.toml | 2 +- .../src/db/{checksum.rs => checksum/mod.rs} | 18 +++ .../cli/commands/src/db/checksum/rocksdb.rs | 106 ++++++++++++++++++ crates/storage/provider/src/providers/mod.rs | 2 +- .../provider/src/providers/rocksdb/mod.rs | 2 +- .../src/providers/rocksdb/provider.rs | 36 ++++++ .../provider/src/providers/rocksdb_stub.rs | 4 + 7 files changed, 167 insertions(+), 3 deletions(-) rename crates/cli/commands/src/db/{checksum.rs => checksum/mod.rs} (94%) create mode 100644 crates/cli/commands/src/db/checksum/rocksdb.rs diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 2b044f26baf..fa9d54b4fa5 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -131,4 +131,4 @@ arbitrary = [ "reth-ethereum-primitives/arbitrary", ] -edge = ["reth-db-common/edge", "reth-stages/rocksdb"] +edge = ["reth-db-common/edge", "reth-stages/rocksdb", "reth-provider/rocksdb"] diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum/mod.rs similarity index 94% rename from crates/cli/commands/src/db/checksum.rs rename to crates/cli/commands/src/db/checksum/mod.rs index b1c6c6c5574..37181a5d9be 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum/mod.rs @@ -22,6 +22,9 @@ use std::{ }; use tracing::{info, warn}; 
+#[cfg(all(unix, feature = "edge"))] +mod rocksdb; + /// Interval for logging progress during checksum computation. const PROGRESS_LOG_INTERVAL: usize = 100_000; @@ -70,6 +73,17 @@ enum Subcommand { #[arg(long)] limit: Option, }, + /// Calculates the checksum of a RocksDB table + #[cfg(all(unix, feature = "edge"))] + Rocksdb { + /// The RocksDB table + #[arg(value_enum)] + table: rocksdb::RocksDbTable, + + /// The maximum number of records to checksum. + #[arg(long)] + limit: Option, + }, } impl Command { @@ -87,6 +101,10 @@ impl Command { Subcommand::StaticFile { segment, start_block, end_block, limit } => { checksum_static_file(tool, segment, start_block, end_block, limit)?; } + #[cfg(all(unix, feature = "edge"))] + Subcommand::Rocksdb { table, limit } => { + rocksdb::checksum_rocksdb(tool, table, limit)?; + } } Ok(()) diff --git a/crates/cli/commands/src/db/checksum/rocksdb.rs b/crates/cli/commands/src/db/checksum/rocksdb.rs new file mode 100644 index 00000000000..4b4fe3bd6b3 --- /dev/null +++ b/crates/cli/commands/src/db/checksum/rocksdb.rs @@ -0,0 +1,106 @@ +//! RocksDB checksum implementation. + +use super::{checksum_hasher, PROGRESS_LOG_INTERVAL}; +use crate::common::CliNodeTypes; +use clap::ValueEnum; +use reth_chainspec::EthereumHardforks; +use reth_db::{tables, DatabaseEnv}; +use reth_db_api::table::Table; +use reth_db_common::DbTool; +use reth_node_builder::NodeTypesWithDBAdapter; +use reth_provider::RocksDBProviderFactory; +use std::{hash::Hasher, sync::Arc, time::Instant}; +use tracing::info; + +/// RocksDB tables that can be checksummed. 
+#[derive(Debug, Clone, Copy, ValueEnum)] +pub enum RocksDbTable { + /// Transaction hash to transaction number mapping + TransactionHashNumbers, + /// Account history indices + AccountsHistory, + /// Storage history indices + StoragesHistory, +} + +impl RocksDbTable { + /// Returns the table name as a string + const fn name(&self) -> &'static str { + match self { + Self::TransactionHashNumbers => tables::TransactionHashNumbers::NAME, + Self::AccountsHistory => tables::AccountsHistory::NAME, + Self::StoragesHistory => tables::StoragesHistory::NAME, + } + } +} + +/// Computes a checksum for a RocksDB table. +pub fn checksum_rocksdb>( + tool: &DbTool>>, + table: RocksDbTable, + limit: Option, +) -> eyre::Result<()> { + let rocksdb = tool.provider_factory.rocksdb_provider(); + + let start_time = Instant::now(); + let limit = limit.unwrap_or(usize::MAX); + + info!( + "Computing checksum for RocksDB table `{}`, limit={:?}", + table.name(), + if limit == usize::MAX { None } else { Some(limit) } + ); + + let (checksum, total) = match table { + RocksDbTable::TransactionHashNumbers => { + checksum_rocksdb_table::(&rocksdb, limit)? + } + RocksDbTable::AccountsHistory => { + checksum_rocksdb_table::(&rocksdb, limit)? + } + RocksDbTable::StoragesHistory => { + checksum_rocksdb_table::(&rocksdb, limit)? + } + }; + + let elapsed = start_time.elapsed(); + + info!( + "Checksum for RocksDB table `{}`: {:#x} ({} entries, elapsed: {:?})", + table.name(), + checksum, + total, + elapsed + ); + + Ok(()) +} + +/// Computes checksum for a specific RocksDB table by iterating over rows. 
+fn checksum_rocksdb_table( + rocksdb: &reth_provider::providers::RocksDBProvider, + limit: usize, +) -> eyre::Result<(u64, usize)> { + let iter = rocksdb.raw_iter::()?; + let mut hasher = checksum_hasher(); + let mut total = 0usize; + + for entry in iter { + let (key_bytes, value_bytes) = entry?; + + hasher.write(&key_bytes); + hasher.write(&value_bytes); + + total += 1; + + if total.is_multiple_of(PROGRESS_LOG_INTERVAL) { + info!("Hashed {total} entries."); + } + + if total >= limit { + break; + } + } + + Ok((hasher.finish(), total)) +} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 1047e58c063..7cdf32a8ade 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -38,7 +38,7 @@ pub use consistent::ConsistentProvider; #[cfg_attr(not(all(unix, feature = "rocksdb")), path = "rocksdb_stub.rs")] pub(crate) mod rocksdb; -pub use rocksdb::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksTx}; +pub use rocksdb::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksTx}; /// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy /// [`ProviderNodeTypes`]. 
diff --git a/crates/storage/provider/src/providers/rocksdb/mod.rs b/crates/storage/provider/src/providers/rocksdb/mod.rs index f9b4ff83041..49a332ccce5 100644 --- a/crates/storage/provider/src/providers/rocksdb/mod.rs +++ b/crates/storage/provider/src/providers/rocksdb/mod.rs @@ -5,4 +5,4 @@ mod metrics; mod provider; pub(crate) use provider::{PendingRocksDBBatches, RocksDBWriteCtx}; -pub use provider::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksTx}; +pub use provider::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksTx}; diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 55c040f7f27..142486697e7 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -656,6 +656,15 @@ impl RocksDBProvider { Ok(RocksDBIter { inner: iter, _marker: std::marker::PhantomData }) } + /// Creates a raw iterator over all entries in the specified table. + /// + /// Returns raw `(key_bytes, value_bytes)` pairs without decoding. + pub fn raw_iter(&self) -> ProviderResult> { + let cf = self.get_cf_handle::()?; + let iter = self.0.iterator_cf(cf, IteratorMode::Start); + Ok(RocksDBRawIter { inner: iter }) + } + /// Returns all account history shards for the given address in ascending key order. /// /// This is used for unwind operations where we need to scan all shards for an address @@ -1517,6 +1526,33 @@ impl Iterator for RocksDBIter<'_, T> { } } +/// Raw iterator over a `RocksDB` table (non-transactional). +/// +/// Yields raw `(key_bytes, value_bytes)` pairs without decoding. 
+pub struct RocksDBRawIter<'db> { + inner: RocksDBIterEnum<'db>, +} + +impl fmt::Debug for RocksDBRawIter<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksDBRawIter").finish_non_exhaustive() + } +} + +impl Iterator for RocksDBRawIter<'_> { + type Item = ProviderResult<(Box<[u8]>, Box<[u8]>)>; + + fn next(&mut self) -> Option { + match self.inner.next()? { + Ok(kv) => Some(Ok(kv)), + Err(e) => Some(Err(ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })))), + } + } +} + /// Iterator over a `RocksDB` table within a transaction. /// /// Yields decoded `(Key, Value)` pairs. Sees uncommitted writes. diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs index a5b52605fa8..d46cd15e2fb 100644 --- a/crates/storage/provider/src/providers/rocksdb_stub.rs +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -116,3 +116,7 @@ impl RocksDBBuilder { /// A stub transaction for `RocksDB`. #[derive(Debug)] pub struct RocksTx; + +/// A stub raw iterator for `RocksDB`. 
+#[derive(Debug)] +pub struct RocksDBRawIter; From bc79cc44c9284e331f182cbbddd2d953811c27d8 Mon Sep 17 00:00:00 2001 From: YK Date: Tue, 20 Jan 2026 20:29:05 +0100 Subject: [PATCH 097/267] feat(cli): add --rocksdb.* flags for RocksDB table routing (#21191) --- crates/cli/commands/src/node.rs | 12 ++- crates/node/core/src/args/mod.rs | 4 + crates/node/core/src/args/rocksdb.rs | 122 ++++++++++++++++++++++ crates/node/core/src/node_config.rs | 27 ++++- docs/vocs/docs/pages/cli/op-reth/node.mdx | 21 ++++ docs/vocs/docs/pages/cli/reth/node.mdx | 21 ++++ 6 files changed, 205 insertions(+), 2 deletions(-) create mode 100644 crates/node/core/src/args/rocksdb.rs diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index cba857a3a84..a0d729499d3 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -10,7 +10,8 @@ use reth_node_builder::NodeBuilder; use reth_node_core::{ args::{ DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs, - NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, StaticFilesArgs, TxPoolArgs, + NetworkArgs, PayloadBuilderArgs, PruningArgs, RocksDbArgs, RpcServerArgs, StaticFilesArgs, + TxPoolArgs, }, node_config::NodeConfig, version, @@ -102,6 +103,10 @@ pub struct NodeCommand number table to `RocksDB` instead of MDBX. + #[arg(long = "rocksdb.tx-hash", action = ArgAction::Set)] + pub tx_hash: Option, + + /// Route storages history tables to `RocksDB` instead of MDBX. + #[arg(long = "rocksdb.storages-history", action = ArgAction::Set)] + pub storages_history: Option, + + /// Route account history tables to `RocksDB` instead of MDBX. + #[arg(long = "rocksdb.account-history", action = ArgAction::Set)] + pub account_history: Option, +} + +impl RocksDbArgs { + /// Validates the `RocksDB` arguments. + /// + /// Returns an error if `--rocksdb.all` is used with any individual flag set to `false`. 
+ pub fn validate(&self) -> Result<(), RocksDbArgsError> { + if self.all { + if self.tx_hash == Some(false) { + return Err(RocksDbArgsError::ConflictingFlags("tx-hash")); + } + if self.storages_history == Some(false) { + return Err(RocksDbArgsError::ConflictingFlags("storages-history")); + } + if self.account_history == Some(false) { + return Err(RocksDbArgsError::ConflictingFlags("account-history")); + } + } + Ok(()) + } +} + +/// Error type for `RocksDB` argument validation. +#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] +pub enum RocksDbArgsError { + /// `--rocksdb.all` cannot be combined with an individual flag set to false. + #[error("--rocksdb.all cannot be combined with --rocksdb.{0}=false")] + ConflictingFlags(&'static str), +} + +#[cfg(test)] +mod tests { + use super::*; + use clap::Parser; + + #[derive(Parser)] + struct CommandParser { + #[command(flatten)] + args: T, + } + + #[test] + fn test_default_rocksdb_args() { + let args = CommandParser::::parse_from(["reth"]).args; + assert_eq!(args, RocksDbArgs::default()); + } + + #[test] + fn test_parse_all_flag() { + let args = CommandParser::::parse_from(["reth", "--rocksdb.all"]).args; + assert!(args.all); + assert_eq!(args.tx_hash, None); + } + + #[test] + fn test_parse_individual_flags() { + let args = CommandParser::::parse_from([ + "reth", + "--rocksdb.tx-hash=true", + "--rocksdb.storages-history=false", + "--rocksdb.account-history=true", + ]) + .args; + assert!(!args.all); + assert_eq!(args.tx_hash, Some(true)); + assert_eq!(args.storages_history, Some(false)); + assert_eq!(args.account_history, Some(true)); + } + + #[test] + fn test_validate_all_alone_ok() { + let args = RocksDbArgs { all: true, ..Default::default() }; + assert!(args.validate().is_ok()); + } + + #[test] + fn test_validate_all_with_true_ok() { + let args = RocksDbArgs { all: true, tx_hash: Some(true), ..Default::default() }; + assert!(args.validate().is_ok()); + } + + #[test] + fn test_validate_all_with_false_errors() { + 
let args = RocksDbArgs { all: true, tx_hash: Some(false), ..Default::default() }; + assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("tx-hash"))); + + let args = RocksDbArgs { all: true, storages_history: Some(false), ..Default::default() }; + assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("storages-history"))); + + let args = RocksDbArgs { all: true, account_history: Some(false), ..Default::default() }; + assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("account-history"))); + } +} diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 1d5b1700cbe..aeff14a8755 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -3,7 +3,7 @@ use crate::{ args::{ DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, NetworkArgs, PayloadBuilderArgs, - PruningArgs, RpcServerArgs, StaticFilesArgs, TxPoolArgs, + PruningArgs, RocksDbArgs, RpcServerArgs, StaticFilesArgs, TxPoolArgs, }, dirs::{ChainPath, DataDirPath}, utils::get_single_header, @@ -21,6 +21,7 @@ use reth_primitives_traits::SealedHeader; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, + StorageSettings, }; use reth_storage_errors::provider::ProviderResult; use reth_transaction_pool::TransactionPool; @@ -150,6 +151,9 @@ pub struct NodeConfig { /// All static files related arguments pub static_files: StaticFilesArgs, + + /// All `RocksDB` table routing arguments + pub rocksdb: RocksDbArgs, } impl NodeConfig { @@ -181,6 +185,7 @@ impl NodeConfig { engine: EngineArgs::default(), era: EraArgs::default(), static_files: StaticFilesArgs::default(), + rocksdb: RocksDbArgs::default(), } } @@ -255,6 +260,7 @@ impl NodeConfig { engine, era, static_files, + rocksdb, .. 
} = self; NodeConfig { @@ -274,6 +280,7 @@ impl NodeConfig { engine, era, static_files, + rocksdb, } } @@ -350,6 +357,22 @@ impl NodeConfig { self.pruning.prune_config(&self.chain) } + /// Returns the effective storage settings derived from static-file and `RocksDB` CLI args. + pub fn storage_settings(&self) -> StorageSettings { + let tx_hash = self.rocksdb.all || self.rocksdb.tx_hash.unwrap_or(false); + let storages_history = self.rocksdb.all || self.rocksdb.storages_history.unwrap_or(false); + let account_history = self.rocksdb.all || self.rocksdb.account_history.unwrap_or(false); + + StorageSettings { + receipts_in_static_files: self.static_files.receipts, + transaction_senders_in_static_files: self.static_files.transaction_senders, + account_changesets_in_static_files: self.static_files.account_changesets, + transaction_hash_numbers_in_rocksdb: tx_hash, + storages_history_in_rocksdb: storages_history, + account_history_in_rocksdb: account_history, + } + } + /// Returns the max block that the node should run to, looking it up from the network if /// necessary pub async fn max_block( @@ -544,6 +567,7 @@ impl NodeConfig { engine: self.engine, era: self.era, static_files: self.static_files, + rocksdb: self.rocksdb, } } @@ -585,6 +609,7 @@ impl Clone for NodeConfig { engine: self.engine.clone(), era: self.era.clone(), static_files: self.static_files, + rocksdb: self.rocksdb, } } } diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 25e248076c4..cba041cbe78 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -897,6 +897,27 @@ Pruning: --prune.bodies.before Prune storage history before the specified block number. The specified block number is not pruned +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. 
Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX + + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX + + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX + + [possible values: true, false] + Engine: --engine.persistence-threshold Configure persistence threshold for the engine. This determines how many canonical blocks must be in-memory, ahead of the last persisted block, before flushing canonical blocks to disk again. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 9eb5b2ddbf6..328a22c445c 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -897,6 +897,27 @@ Pruning: --prune.bodies.before Prune storage history before the specified block number. The specified block number is not pruned +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX + + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX + + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX + + [possible values: true, false] + Engine: --engine.persistence-threshold Configure persistence threshold for the engine. This determines how many canonical blocks must be in-memory, ahead of the last persisted block, before flushing canonical blocks to disk again. 
From 78de3d8f610a61596d80738aec12367c3b0c40ee Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 11:31:50 -0800 Subject: [PATCH 098/267] perf(db): use Cow::Borrowed in walk_dup to avoid allocation (#21220) --- .../db/src/implementation/mdbx/cursor.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index f432e76642d..61214a857d5 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -215,27 +215,26 @@ impl DbDupCursorRO for Cursor { ) -> Result, DatabaseError> { let start = match (key, subkey) { (Some(key), Some(subkey)) => { - // encode key and decode it after. - let key: Vec = key.encode().into(); + let encoded_key = key.encode(); self.inner - .get_both_range(key.as_ref(), subkey.encode().as_ref()) + .get_both_range(encoded_key.as_ref(), subkey.encode().as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? - .map(|val| decoder::((Cow::Owned(key), val))) + .map(|val| decoder::((Cow::Borrowed(encoded_key.as_ref()), val))) } (Some(key), None) => { - let key: Vec = key.encode().into(); + let encoded_key = key.encode(); self.inner - .set(key.as_ref()) + .set(encoded_key.as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? - .map(|val| decoder::((Cow::Owned(key), val))) + .map(|val| decoder::((Cow::Borrowed(encoded_key.as_ref()), val))) } (None, Some(subkey)) => { if let Some((key, _)) = self.first()? { - let key: Vec = key.encode().into(); + let encoded_key = key.encode(); self.inner - .get_both_range(key.as_ref(), subkey.encode().as_ref()) + .get_both_range(encoded_key.as_ref(), subkey.encode().as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? 
- .map(|val| decoder::((Cow::Owned(key), val))) + .map(|val| decoder::((Cow::Borrowed(encoded_key.as_ref()), val))) } else { Some(Err(DatabaseError::Read(MDBXError::NotFound.into()))) } From 869b5d085147e7bc6e410e49ec633822d536ac8d Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 12:02:02 -0800 Subject: [PATCH 099/267] feat(edge): enable transaction_hash_numbers_in_rocksdb for edge builds (#21224) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/storage/db-api/src/models/metadata.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs index 6586c8b7f46..b3dc4710936 100644 --- a/crates/storage/db-api/src/models/metadata.rs +++ b/crates/storage/db-api/src/models/metadata.rs @@ -45,7 +45,7 @@ impl StorageSettings { transaction_senders_in_static_files: true, account_changesets_in_static_files: true, storages_history_in_rocksdb: false, - transaction_hash_numbers_in_rocksdb: false, + transaction_hash_numbers_in_rocksdb: true, account_history_in_rocksdb: false, } } From d12752dc8a432657765b8bef0f0c46b3d91ed6c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E3=81=8B=E3=82=8A=E3=82=93=E3=81=A8=E3=81=86?= Date: Tue, 20 Jan 2026 22:06:11 +0100 Subject: [PATCH 100/267] feat(engine): add time_between_forkchoice_updated metric (#21227) --- crates/engine/tree/src/tree/metrics.rs | 27 ++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 303f3c62985..3d05cee8e0c 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -210,6 +210,12 @@ pub(crate) struct EngineMetrics { #[derive(Metrics)] #[metrics(scope = "consensus.engine.beacon")] pub(crate) struct ForkchoiceUpdatedMetrics { + /// Finish time of the latest forkchoice updated call. 
+ #[metric(skip)] + pub(crate) latest_finish_at: Option, + /// Start time of the latest forkchoice updated call. + #[metric(skip)] + pub(crate) latest_start_at: Option, /// The total count of forkchoice updated messages received. pub(crate) forkchoice_updated_messages: Counter, /// The total count of forkchoice updated messages with payload received. @@ -232,18 +238,35 @@ pub(crate) struct ForkchoiceUpdatedMetrics { pub(crate) forkchoice_updated_last: Gauge, /// Time diff between new payload call response and the next forkchoice updated call request. pub(crate) new_payload_forkchoice_updated_time_diff: Histogram, + /// Time from previous forkchoice updated finish to current forkchoice updated start (idle + /// time). + pub(crate) time_between_forkchoice_updated: Histogram, + /// Time from previous forkchoice updated start to current forkchoice updated start (total + /// interval). + pub(crate) forkchoice_updated_interval: Histogram, } impl ForkchoiceUpdatedMetrics { /// Increment the forkchoiceUpdated counter based on the given result pub(crate) fn update_response_metrics( - &self, + &mut self, start: Instant, latest_new_payload_at: &mut Option, has_attrs: bool, result: &Result, ProviderError>, ) { - let elapsed = start.elapsed(); + let finish = Instant::now(); + let elapsed = finish - start; + + if let Some(prev_finish) = self.latest_finish_at { + self.time_between_forkchoice_updated.record(start - prev_finish); + } + if let Some(prev_start) = self.latest_start_at { + self.forkchoice_updated_interval.record(start - prev_start); + } + self.latest_finish_at = Some(finish); + self.latest_start_at = Some(start); + match result { Ok(outcome) => match outcome.outcome.forkchoice_status() { ForkchoiceStatus::Valid => self.forkchoice_updated_valid.increment(1), From 3ff575b877c7a7b083abd21e5b1917365b53877e Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 14:03:12 -0800 Subject: [PATCH 101/267] feat(engine): add --engine.disable-cache-metrics 
flag (#21228) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/engine/primitives/src/config.rs | 16 +++++++++++++ crates/engine/tree/src/tree/cached_state.rs | 23 +++++++++++++++---- .../tree/src/tree/payload_processor/mod.rs | 13 ++++++++--- .../src/tree/payload_processor/prewarm.rs | 5 ++-- crates/node/core/src/args/engine.rs | 17 ++++++++++++++ docs/vocs/docs/pages/cli/op-reth/node.mdx | 3 +++ docs/vocs/docs/pages/cli/reth/node.mdx | 3 +++ 7 files changed, 71 insertions(+), 9 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 1eacfef6c1a..2870d3dccc4 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -137,6 +137,8 @@ pub struct TreeConfig { account_worker_count: usize, /// Whether to enable V2 storage proofs. enable_proof_v2: bool, + /// Whether to disable cache metrics recording (can be expensive with large cached state). + disable_cache_metrics: bool, } impl Default for TreeConfig { @@ -166,6 +168,7 @@ impl Default for TreeConfig { storage_worker_count: default_storage_worker_count(), account_worker_count: default_account_worker_count(), enable_proof_v2: false, + disable_cache_metrics: false, } } } @@ -198,6 +201,7 @@ impl TreeConfig { storage_worker_count: usize, account_worker_count: usize, enable_proof_v2: bool, + disable_cache_metrics: bool, ) -> Self { Self { persistence_threshold, @@ -224,6 +228,7 @@ impl TreeConfig { storage_worker_count, account_worker_count, enable_proof_v2, + disable_cache_metrics, } } @@ -516,4 +521,15 @@ impl TreeConfig { self.enable_proof_v2 = enable_proof_v2; self } + + /// Returns whether cache metrics recording is disabled. + pub const fn disable_cache_metrics(&self) -> bool { + self.disable_cache_metrics + } + + /// Setter for whether to disable cache metrics recording. 
+ pub const fn without_cache_metrics(mut self, disable_cache_metrics: bool) -> Self { + self.disable_cache_metrics = disable_cache_metrics; + self + } } diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index 2a45b15c18a..0f0b23b4ea2 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -606,12 +606,21 @@ pub(crate) struct SavedCache { /// A guard to track in-flight usage of this cache. /// The cache is considered available if the strong count is 1. usage_guard: Arc<()>, + + /// Whether to skip cache metrics recording (can be expensive with large cached state). + disable_cache_metrics: bool, } impl SavedCache { /// Creates a new instance with the internals pub(super) fn new(hash: B256, caches: ExecutionCache, metrics: CachedStateMetrics) -> Self { - Self { hash, caches, metrics, usage_guard: Arc::new(()) } + Self { hash, caches, metrics, usage_guard: Arc::new(()), disable_cache_metrics: false } + } + + /// Sets whether to disable cache metrics recording. + pub(super) const fn with_disable_cache_metrics(mut self, disable: bool) -> Self { + self.disable_cache_metrics = disable; + self } /// Returns the hash for this cache @@ -619,9 +628,9 @@ impl SavedCache { self.hash } - /// Splits the cache into its caches and metrics, consuming it. - pub(crate) fn split(self) -> (ExecutionCache, CachedStateMetrics) { - (self.caches, self.metrics) + /// Splits the cache into its caches, metrics, and `disable_cache_metrics` flag, consuming it. + pub(crate) fn split(self) -> (ExecutionCache, CachedStateMetrics, bool) { + (self.caches, self.metrics, self.disable_cache_metrics) } /// Returns true if the cache is available for use (no other tasks are currently using it). @@ -645,7 +654,13 @@ impl SavedCache { } /// Updates the metrics for the [`ExecutionCache`]. + /// + /// Note: This can be expensive with large cached state as it iterates over + /// all storage entries. 
Use `with_disable_cache_metrics(true)` to skip. pub(crate) fn update_metrics(&self) { + if self.disable_cache_metrics { + return; + } self.metrics.storage_cache_size.set(self.caches.total_storage_slots() as f64); self.metrics.account_cache_size.set(self.caches.account_cache.entry_count() as f64); self.metrics.code_cache_size.set(self.caches.code_cache.entry_count() as f64); diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index ed179afa8b2..1803929c89a 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -139,6 +139,8 @@ where disable_parallel_sparse_trie: bool, /// Maximum concurrency for prewarm task. prewarm_max_concurrency: usize, + /// Whether to disable cache metrics recording. + disable_cache_metrics: bool, } impl PayloadProcessor @@ -171,6 +173,7 @@ where sparse_state_trie: Arc::default(), disable_parallel_sparse_trie: config.disable_parallel_sparse_trie(), prewarm_max_concurrency: config.prewarm_max_concurrency(), + disable_cache_metrics: config.disable_cache_metrics(), } } } @@ -300,7 +303,7 @@ where // Build a state provider for the multiproof task let provider = provider_builder.build().expect("failed to build provider"); let provider = if let Some(saved_cache) = saved_cache { - let (cache, metrics) = saved_cache.split(); + let (cache, metrics, _) = saved_cache.split(); Box::new(CachedStateProvider::new(provider, cache, metrics)) as Box } else { @@ -477,6 +480,7 @@ where debug!("creating new execution cache on cache miss"); let cache = ExecutionCacheBuilder::default().build_caches(self.cross_block_cache_size); SavedCache::new(parent_hash, cache, CachedStateMetrics::zeroed()) + .with_disable_cache_metrics(self.disable_cache_metrics) } } @@ -558,6 +562,7 @@ where block_with_parent: BlockWithParent, bundle_state: &BundleState, ) { + let disable_cache_metrics = self.disable_cache_metrics; 
self.execution_cache.update_with_guard(|cached| { if cached.as_ref().is_some_and(|c| c.executed_block_hash() != block_with_parent.parent) { debug!( @@ -571,7 +576,8 @@ where // Take existing cache (if any) or create fresh caches let (caches, cache_metrics) = match cached.take() { Some(existing) => { - existing.split() + let (c, m, _) = existing.split(); + (c, m) } None => ( ExecutionCacheBuilder::default().build_caches(self.cross_block_cache_size), @@ -580,7 +586,8 @@ where }; // Insert the block's bundle state into cache - let new_cache = SavedCache::new(block_with_parent.block.hash, caches, cache_metrics); + let new_cache = SavedCache::new(block_with_parent.block.hash, caches, cache_metrics) + .with_disable_cache_metrics(disable_cache_metrics); if new_cache.cache().insert_state(bundle_state).is_err() { *cached = None; debug!(target: "engine::caching", "cleared execution cache on update error"); diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 494e2d0f261..6021098627c 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -278,8 +278,9 @@ where execution_cache.update_with_guard(|cached| { // consumes the `SavedCache` held by the prewarming task, which releases its usage // guard - let (caches, cache_metrics) = saved_cache.split(); - let new_cache = SavedCache::new(hash, caches, cache_metrics); + let (caches, cache_metrics, disable_cache_metrics) = saved_cache.split(); + let new_cache = SavedCache::new(hash, caches, cache_metrics) + .with_disable_cache_metrics(disable_cache_metrics); // Insert state into cache while holding the lock // Access the BundleState through the shared ExecutionOutcome diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 8662f797c73..d7c320fc52d 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ 
-37,6 +37,7 @@ pub struct DefaultEngineValues { storage_worker_count: Option, account_worker_count: Option, enable_proof_v2: bool, + cache_metrics_disabled: bool, } impl DefaultEngineValues { @@ -172,6 +173,12 @@ impl DefaultEngineValues { self.enable_proof_v2 = v; self } + + /// Set whether to disable cache metrics by default + pub const fn with_cache_metrics_disabled(mut self, v: bool) -> Self { + self.cache_metrics_disabled = v; + self + } } impl Default for DefaultEngineValues { @@ -197,6 +204,7 @@ impl Default for DefaultEngineValues { storage_worker_count: None, account_worker_count: None, enable_proof_v2: false, + cache_metrics_disabled: false, } } } @@ -320,6 +328,10 @@ pub struct EngineArgs { /// Enable V2 storage proofs for state root calculations #[arg(long = "engine.enable-proof-v2", default_value_t = DefaultEngineValues::get_global().enable_proof_v2)] pub enable_proof_v2: bool, + + /// Disable cache metrics recording, which can take up to 50ms with large cached state. + #[arg(long = "engine.disable-cache-metrics", default_value_t = DefaultEngineValues::get_global().cache_metrics_disabled)] + pub cache_metrics_disabled: bool, } #[allow(deprecated)] @@ -346,6 +358,7 @@ impl Default for EngineArgs { storage_worker_count, account_worker_count, enable_proof_v2, + cache_metrics_disabled, } = DefaultEngineValues::get_global().clone(); Self { persistence_threshold, @@ -371,6 +384,7 @@ impl Default for EngineArgs { storage_worker_count, account_worker_count, enable_proof_v2, + cache_metrics_disabled, } } } @@ -407,6 +421,7 @@ impl EngineArgs { } config = config.with_enable_proof_v2(self.enable_proof_v2); + config = config.without_cache_metrics(self.cache_metrics_disabled); config } @@ -458,6 +473,7 @@ mod tests { storage_worker_count: Some(16), account_worker_count: Some(8), enable_proof_v2: false, + cache_metrics_disabled: true, }; let parsed_args = CommandParser::::parse_from([ @@ -488,6 +504,7 @@ mod tests { "16", "--engine.account-worker-count", "8", + 
"--engine.disable-cache-metrics", ]) .args; diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index cba041cbe78..c2de9fae562 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -993,6 +993,9 @@ Engine: --engine.enable-proof-v2 Enable V2 storage proofs for state root calculations + --engine.disable-cache-metrics + Disable cache metrics recording, which can take up to 50ms with large cached state + ERA: --era.enable Enable import from ERA1 files diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 328a22c445c..3766d7ed9df 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -993,6 +993,9 @@ Engine: --engine.enable-proof-v2 Enable V2 storage proofs for state root calculations + --engine.disable-cache-metrics + Disable cache metrics recording, which can take up to 50ms with large cached state + ERA: --era.enable Enable import from ERA1 files From 22b465dd64100164f27b7e88b0b9a6a69c49c1b2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 20 Jan 2026 23:57:08 +0100 Subject: [PATCH 102/267] chore(trie): remove unnecessary clone in into_sorted_ref (#21232) --- crates/trie/common/src/updates.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 08c62cee3f7..26985108089 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -206,7 +206,7 @@ impl TrieUpdates { } /// Converts trie updates into [`TrieUpdatesSortedRef`]. 
- pub fn into_sorted_ref<'a>(&'a self) -> TrieUpdatesSortedRef<'a> { + pub fn into_sorted_ref(&self) -> TrieUpdatesSortedRef<'_> { let mut account_nodes = self.account_nodes.iter().collect::>(); account_nodes.sort_unstable_by(|a, b| a.0.cmp(b.0)); @@ -216,7 +216,7 @@ impl TrieUpdates { storage_tries: self .storage_tries .iter() - .map(|m| (*m.0, m.1.into_sorted_ref().clone())) + .map(|m| (*m.0, m.1.into_sorted_ref())) .collect(), } } From 660964a0f5eb413450e41802285dd648e84b9fa1 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 20 Jan 2026 16:58:23 -0800 Subject: [PATCH 103/267] feat(node): log storage settings after genesis init (#21229) --- crates/node/builder/src/launch/engine.rs | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index dcefbeeab68..319aa7a1ac4 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -32,7 +32,7 @@ use reth_node_core::{ use reth_node_events::node; use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider}, - BlockNumReader, MetadataProvider, + BlockNumReader, StorageSettingsCache, }; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; @@ -41,7 +41,6 @@ use reth_trie_db::ChangesetCache; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::warn; /// The engine node launcher. #[derive(Debug)] @@ -104,24 +103,8 @@ impl EngineNodeLauncher { .with_adjusted_configs() // Create the provider factory with changeset cache .with_provider_factory::<_, >::Evm>(changeset_cache.clone()).await? 
- .inspect(|ctx| { + .inspect(|_| { info!(target: "reth::cli", "Database opened"); - match ctx.provider_factory().storage_settings() { - Ok(settings) => { - info!( - target: "reth::cli", - ?settings, - "Storage settings" - ); - }, - Err(err) => { - warn!( - target: "reth::cli", - ?err, - "Failed to get storage settings" - ); - }, - } }) .with_prometheus_server().await? .inspect(|this| { @@ -130,6 +113,8 @@ impl EngineNodeLauncher { .with_genesis()? .inspect(|this: &LaunchContextWith::ChainSpec>, _>>| { info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); + let settings = this.provider_factory().cached_storage_settings(); + info!(target: "reth::cli", ?settings, "Loaded storage settings"); }) .with_metrics_task() // passing FullNodeTypes as type parameter here so that we can build From 238433e146d82c432a9096d8bbd7c8c9f8d11fed Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 21 Jan 2026 02:19:36 +0000 Subject: [PATCH 104/267] fix(rocksdb): flush memtables before dropping (#21234) --- .../provider/src/providers/rocksdb/provider.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 142486697e7..a673fa62913 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -427,7 +427,17 @@ impl fmt::Debug for RocksDBProviderInner { impl Drop for RocksDBProviderInner { fn drop(&mut self) { match self { - Self::ReadWrite { db, .. } => db.cancel_all_background_work(true), + Self::ReadWrite { db, .. } => { + // Flush all memtables if possible. 
If not, they will be rebuilt from the WAL on + // restart + if let Err(e) = db.flush_wal(true) { + tracing::warn!(target: "storage::rocksdb", ?e, "Failed to flush WAL on drop"); + } + if let Err(e) = db.flush() { + tracing::warn!(target: "storage::rocksdb", ?e, "Failed to flush memtables on drop"); + } + db.cancel_all_background_work(true); + } Self::ReadOnly { db, .. } => db.cancel_all_background_work(true), } } From 37b5db0d475963395ae669992de2e210b90e2afd Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 21 Jan 2026 00:45:17 -0800 Subject: [PATCH 105/267] feat(cli): add RocksDB table stats to `reth db stats` command (#21221) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/cli/commands/src/db/stats.rs | 49 ++++++++++- crates/storage/provider/src/providers/mod.rs | 4 +- .../provider/src/providers/rocksdb/mod.rs | 4 +- .../src/providers/rocksdb/provider.rs | 83 +++++++++++++++++++ .../provider/src/providers/rocksdb_stub.rs | 20 +++++ 5 files changed, 157 insertions(+), 3 deletions(-) diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index d84091c2d6e..0e46bfdbb38 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -11,7 +11,10 @@ use reth_db_common::DbTool; use reth_fs_util as fs; use reth_node_builder::{NodePrimitives, NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::providers::{ProviderNodeTypes, StaticFileProvider}; +use reth_provider::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + RocksDBProviderFactory, +}; use reth_static_file_types::SegmentRangeInclusive; use std::{sync::Arc, time::Duration}; @@ -61,6 +64,11 @@ impl Command { let db_stats_table = self.db_stats_table(tool)?; println!("{db_stats_table}"); + println!("\n"); + + let rocksdb_stats_table = self.rocksdb_stats_table(tool); + println!("{rocksdb_stats_table}"); + Ok(()) } @@ -148,6 +156,45 @@ impl 
Command { Ok(table) } + fn rocksdb_stats_table(&self, tool: &DbTool) -> ComfyTable { + let mut table = ComfyTable::new(); + table.load_preset(comfy_table::presets::ASCII_MARKDOWN); + table.set_header(["RocksDB Table Name", "# Entries", "Total Size", "Pending Compaction"]); + + let stats = tool.provider_factory.rocksdb_provider().table_stats(); + let mut total_size: u64 = 0; + let mut total_pending: u64 = 0; + + for stat in &stats { + total_size += stat.estimated_size_bytes; + total_pending += stat.pending_compaction_bytes; + let mut row = Row::new(); + row.add_cell(Cell::new(&stat.name)) + .add_cell(Cell::new(stat.estimated_num_keys)) + .add_cell(Cell::new(human_bytes(stat.estimated_size_bytes as f64))) + .add_cell(Cell::new(human_bytes(stat.pending_compaction_bytes as f64))); + table.add_row(row); + } + + if !stats.is_empty() { + let max_widths = table.column_max_content_widths(); + let mut separator = Row::new(); + for width in max_widths { + separator.add_cell(Cell::new("-".repeat(width as usize))); + } + table.add_row(separator); + + let mut row = Row::new(); + row.add_cell(Cell::new("RocksDB Total")) + .add_cell(Cell::new("")) + .add_cell(Cell::new(human_bytes(total_size as f64))) + .add_cell(Cell::new(human_bytes(total_pending as f64))); + table.add_row(row); + } + + table + } + fn static_files_stats_table( &self, data_dir: ChainPath, diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 7cdf32a8ade..c477ccbb987 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -38,7 +38,9 @@ pub use consistent::ConsistentProvider; #[cfg_attr(not(all(unix, feature = "rocksdb")), path = "rocksdb_stub.rs")] pub(crate) mod rocksdb; -pub use rocksdb::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksTx}; +pub use rocksdb::{ + RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBTableStats, RocksTx, +}; /// Helper trait to bound 
[`NodeTypes`] so that combined with database they satisfy /// [`ProviderNodeTypes`]. diff --git a/crates/storage/provider/src/providers/rocksdb/mod.rs b/crates/storage/provider/src/providers/rocksdb/mod.rs index 49a332ccce5..efab03e2afd 100644 --- a/crates/storage/provider/src/providers/rocksdb/mod.rs +++ b/crates/storage/provider/src/providers/rocksdb/mod.rs @@ -5,4 +5,6 @@ mod metrics; mod provider; pub(crate) use provider::{PendingRocksDBBatches, RocksDBWriteCtx}; -pub use provider::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksTx}; +pub use provider::{ + RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBTableStats, RocksTx, +}; diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index a673fa62913..d1423ba1716 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -38,6 +38,19 @@ use tracing::instrument; /// Pending `RocksDB` batches type alias. pub(crate) type PendingRocksDBBatches = Arc>>>; +/// Statistics for a single `RocksDB` table (column family). +#[derive(Debug, Clone)] +pub struct RocksDBTableStats { + /// Name of the table/column family. + pub name: String, + /// Estimated number of keys in the table. + pub estimated_num_keys: u64, + /// Estimated size of live data in bytes (SST files + memtables). + pub estimated_size_bytes: u64, + /// Estimated bytes pending compaction (reclaimable space). + pub pending_compaction_bytes: u64, +} + /// Context for `RocksDB` block writes. #[derive(Clone)] pub(crate) struct RocksDBWriteCtx { @@ -405,6 +418,69 @@ impl RocksDBProviderInner { Self::ReadOnly { db, .. } => RocksDBIterEnum::ReadOnly(db.iterator_cf(cf, mode)), } } + + /// Returns statistics for all column families in the database. 
+ fn table_stats(&self) -> Vec { + let cf_names = [ + tables::TransactionHashNumbers::NAME, + tables::AccountsHistory::NAME, + tables::StoragesHistory::NAME, + ]; + + let mut stats = Vec::new(); + + macro_rules! collect_stats { + ($db:expr) => { + for cf_name in cf_names { + if let Some(cf) = $db.cf_handle(cf_name) { + let estimated_num_keys = $db + .property_int_value_cf(cf, rocksdb::properties::ESTIMATE_NUM_KEYS) + .ok() + .flatten() + .unwrap_or(0); + + // SST files size (on-disk) + memtable size (in-memory) + let sst_size = $db + .property_int_value_cf(cf, rocksdb::properties::LIVE_SST_FILES_SIZE) + .ok() + .flatten() + .unwrap_or(0); + + let memtable_size = $db + .property_int_value_cf(cf, rocksdb::properties::SIZE_ALL_MEM_TABLES) + .ok() + .flatten() + .unwrap_or(0); + + let estimated_size_bytes = sst_size + memtable_size; + + let pending_compaction_bytes = $db + .property_int_value_cf( + cf, + rocksdb::properties::ESTIMATE_PENDING_COMPACTION_BYTES, + ) + .ok() + .flatten() + .unwrap_or(0); + + stats.push(RocksDBTableStats { + name: cf_name.to_string(), + estimated_num_keys, + estimated_size_bytes, + pending_compaction_bytes, + }); + } + } + }; + } + + match self { + Self::ReadWrite { db, .. } => collect_stats!(db), + Self::ReadOnly { db, .. } => collect_stats!(db), + } + + stats + } } impl fmt::Debug for RocksDBProviderInner { @@ -666,6 +742,13 @@ impl RocksDBProvider { Ok(RocksDBIter { inner: iter, _marker: std::marker::PhantomData }) } + /// Returns statistics for all column families in the database. + /// + /// Returns a vector of (`table_name`, `estimated_keys`, `estimated_size_bytes`) tuples. + pub fn table_stats(&self) -> Vec { + self.0.table_stats() + } + /// Creates a raw iterator over all entries in the specified table. /// /// Returns raw `(key_bytes, value_bytes)` pairs without decoding. 
diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs index d46cd15e2fb..ff121131677 100644 --- a/crates/storage/provider/src/providers/rocksdb_stub.rs +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -14,6 +14,19 @@ use std::{path::Path, sync::Arc}; /// Pending `RocksDB` batches type alias (stub - uses unit type). pub(crate) type PendingRocksDBBatches = Arc>>; +/// Statistics for a single `RocksDB` table (column family) - stub. +#[derive(Debug, Clone)] +pub struct RocksDBTableStats { + /// Name of the table/column family. + pub name: String, + /// Estimated number of keys in the table. + pub estimated_num_keys: u64, + /// Estimated size of live data in bytes (SST files + memtables). + pub estimated_size_bytes: u64, + /// Estimated bytes pending compaction (reclaimable space). + pub pending_compaction_bytes: u64, +} + /// Context for `RocksDB` block writes (stub). #[derive(Debug, Clone)] #[allow(dead_code)] @@ -56,6 +69,13 @@ impl RocksDBProvider { ) -> ProviderResult> { Ok(None) } + + /// Returns statistics for all column families in the database (stub implementation). + /// + /// Returns an empty vector since there is no `RocksDB` when the feature is disabled. + pub const fn table_stats(&self) -> Vec { + Vec::new() + } } /// A stub batch writer for `RocksDB`. 
From 2cae438642a7ccf6ed7101acfaf600c41e5dd83d Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Wed, 21 Jan 2026 09:42:36 +0000 Subject: [PATCH 106/267] fix: sigsegv handler (#21231) Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- crates/cli/util/src/sigsegv_handler.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/cli/util/src/sigsegv_handler.rs b/crates/cli/util/src/sigsegv_handler.rs index 78e37cf1578..c7087233ae2 100644 --- a/crates/cli/util/src/sigsegv_handler.rs +++ b/crates/cli/util/src/sigsegv_handler.rs @@ -121,7 +121,16 @@ pub fn install() { unsafe { let alt_stack_size: usize = min_sigstack_size() + 64 * 1024; let mut alt_stack: libc::stack_t = mem::zeroed(); - alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast(); + // Both SysV AMD64 ABI and aarch64 ABI require 16 bytes alignment. We are going to be + // generous here and just use a size of a page. + let raw_page_sz = libc::sysconf(libc::_SC_PAGESIZE); + let page_sz = if raw_page_sz == -1 { + // Fallback alignment in case sysconf fails. 
+ 4096_usize + } else { + raw_page_sz as usize + }; + alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, page_sz).unwrap()).cast(); alt_stack.ss_size = alt_stack_size; libc::sigaltstack(&raw const alt_stack, ptr::null_mut()); From 6f73c2447d35cc171da1b06fb8c2f8ec50205fa0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 21 Jan 2026 10:42:52 +0100 Subject: [PATCH 107/267] feat(trie): Add `serde-bincode-compat` feature to `reth-trie` (#21235) --- crates/trie/trie/Cargo.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index d3540adda88..b08d7d89b46 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -96,6 +96,13 @@ test-utils = [ "reth-trie-sparse/test-utils", "reth-stages-types/test-utils", ] +serde-bincode-compat = [ + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "reth-trie-common/serde-bincode-compat", +] [[bench]] name = "hash_post_state" From 8a8a9126d66bacc603ec37c7fb399cb5455182d5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 21 Jan 2026 10:59:15 +0100 Subject: [PATCH 108/267] feat(execution-types): add receipts_iter and logs_iter helpers to Chain (#21240) --- crates/evm/execution-types/src/chain.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 3cde0eaa796..e235141cf26 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -2,9 +2,9 @@ use crate::ExecutionOutcome; use alloc::{borrow::Cow, collections::BTreeMap, vec::Vec}; -use alloy_consensus::{transaction::Recovered, BlockHeader}; +use alloy_consensus::{transaction::Recovered, BlockHeader, TxReceipt}; use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash}; -use 
alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Log, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_primitives_traits::{ transaction::signed::SignedTransaction, Block, BlockBody, IndexedTx, NodePrimitives, @@ -184,6 +184,19 @@ impl Chain { self.execution_outcome.receipts().iter() } + /// Returns an iterator over all receipts in the chain. + pub fn receipts_iter(&self) -> impl Iterator + '_ { + self.block_receipts_iter().flatten() + } + + /// Returns an iterator over all logs in the chain. + pub fn logs_iter(&self) -> impl Iterator + '_ + where + N::Receipt: TxReceipt, + { + self.receipts_iter().flat_map(|receipt| receipt.logs()) + } + /// Returns an iterator over all blocks in the chain with increasing block number. pub fn blocks_iter(&self) -> impl Iterator> + '_ { self.blocks().iter().map(|block| block.1) From 5a5c21cc1b5847072dfec1a399d5c3426ae9ceef Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 21 Jan 2026 11:01:32 +0100 Subject: [PATCH 109/267] feat(txpool): add IntoIterator for AllPoolTransactions (#21241) --- crates/transaction-pool/src/traits.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 5f6b0a5cf68..f70e74f7837 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -746,6 +746,18 @@ impl Default for AllPoolTransactions { } } +impl IntoIterator for AllPoolTransactions { + type Item = Arc>; + type IntoIter = std::iter::Chain< + std::vec::IntoIter>>, + std::vec::IntoIter>>, + >; + + fn into_iter(self) -> Self::IntoIter { + self.pending.into_iter().chain(self.queued) + } +} + /// Represents transactions that were propagated over the network. 
#[derive(Debug, Clone, Eq, PartialEq, Default)] pub struct PropagatedTransactions(pub HashMap>); From 43a84f1231ea26f4a0605ed5eb2ea7c88fcc3fd3 Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Wed, 21 Jan 2026 11:17:30 +0000 Subject: [PATCH 110/267] refactor(engine): move execution logic from metrics to payload_validator (#21226) --- crates/engine/tree/src/tree/metrics.rs | 372 +++--------------- .../engine/tree/src/tree/payload_validator.rs | 136 +++++-- 2 files changed, 168 insertions(+), 340 deletions(-) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 3d05cee8e0c..ea17ff23148 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -1,25 +1,15 @@ -use crate::tree::{error::InsertBlockFatalError, MeteredStateHook, TreeOutcome}; -use alloy_consensus::transaction::TxHashRef; -use alloy_evm::{ - block::{BlockExecutor, ExecutableTx}, - Evm, -}; +use crate::tree::{error::InsertBlockFatalError, TreeOutcome}; use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; -use core::borrow::BorrowMut; use reth_engine_primitives::{ForkchoiceStatus, OnForkChoiceUpdated}; -use reth_errors::{BlockExecutionError, ProviderError}; -use reth_evm::{metrics::ExecutorMetrics, OnStateHook}; +use reth_errors::ProviderError; +use reth_evm::metrics::ExecutorMetrics; use reth_execution_types::BlockExecutionOutput; use reth_metrics::{ metrics::{Counter, Gauge, Histogram}, Metrics, }; -use reth_primitives_traits::SignedTransaction; use reth_trie::updates::TrieUpdates; -use revm::database::{states::bundle_state::BundleRetention, State}; -use revm_primitives::Address; -use std::time::Instant; -use tracing::{debug_span, trace}; +use std::time::{Duration, Instant}; /// Metrics for the `EngineApi`. 
#[derive(Debug, Default)] @@ -35,114 +25,24 @@ pub(crate) struct EngineApiMetrics { } impl EngineApiMetrics { - /// Helper function for metered execution - fn metered(&self, f: F) -> R - where - F: FnOnce() -> (u64, R), - { - // Execute the block and record the elapsed time. - let execute_start = Instant::now(); - let (gas_used, output) = f(); - let execution_duration = execute_start.elapsed().as_secs_f64(); - - // Update gas metrics. - self.executor.gas_processed_total.increment(gas_used); - self.executor.gas_per_second.set(gas_used as f64 / execution_duration); - self.executor.gas_used_histogram.record(gas_used as f64); - self.executor.execution_histogram.record(execution_duration); - self.executor.execution_duration.set(execution_duration); - - output - } - - /// Execute the given block using the provided [`BlockExecutor`] and update metrics for the - /// execution. + /// Records metrics for block execution. /// /// This method updates metrics for execution time, gas usage, and the number - /// of accounts, storage slots and bytecodes loaded and updated. - /// - /// The optional `on_receipt` callback is invoked after each transaction with the receipt - /// index and a reference to all receipts collected so far. This allows callers to stream - /// receipts to a background task for incremental receipt root computation. - pub(crate) fn execute_metered( + /// of accounts, storage slots and bytecodes updated. + pub(crate) fn record_block_execution( &self, - executor: E, - mut transactions: impl Iterator, BlockExecutionError>>, - transaction_count: usize, - state_hook: Box, - mut on_receipt: F, - ) -> Result<(BlockExecutionOutput, Vec
), BlockExecutionError> - where - DB: alloy_evm::Database, - E: BlockExecutor>>, Transaction: SignedTransaction>, - F: FnMut(&[E::Receipt]), - { - // clone here is cheap, all the metrics are Option>. additionally - // they are globally registered so that the data recorded in the hook will - // be accessible. - let wrapper = MeteredStateHook { metrics: self.executor.clone(), inner_hook: state_hook }; - - let mut senders = Vec::with_capacity(transaction_count); - let mut executor = executor.with_state_hook(Some(Box::new(wrapper))); - - let f = || { - let start = Instant::now(); - debug_span!(target: "engine::tree", "pre execution") - .entered() - .in_scope(|| executor.apply_pre_execution_changes())?; - self.executor.pre_execution_histogram.record(start.elapsed()); - - let exec_span = debug_span!(target: "engine::tree", "execution").entered(); - loop { - let start = Instant::now(); - let Some(tx) = transactions.next() else { break }; - self.executor.transaction_wait_histogram.record(start.elapsed()); - - let tx = tx?; - senders.push(*tx.signer()); - - let span = debug_span!( - target: "engine::tree", - "execute tx", - tx_hash = ?tx.tx().tx_hash(), - gas_used = tracing::field::Empty, - ); - let enter = span.entered(); - trace!(target: "engine::tree", "Executing transaction"); - let start = Instant::now(); - let gas_used = executor.execute_transaction(tx)?; - self.executor.transaction_execution_histogram.record(start.elapsed()); - - // Invoke callback with the latest receipt - on_receipt(executor.receipts()); - - // record the tx gas used - enter.record("gas_used", gas_used); - } - drop(exec_span); - - let start = Instant::now(); - let result = debug_span!(target: "engine::tree", "finish") - .entered() - .in_scope(|| executor.finish()) - .map(|(evm, result)| (evm.into_db(), result)); - self.executor.post_execution_histogram.record(start.elapsed()); - - result - }; - - // Use metered to execute and track timing/gas metrics - let (mut db, result) = self.metered(|| { - let 
res = f(); - let gas_used = res.as_ref().map(|r| r.1.gas_used).unwrap_or(0); - (gas_used, res) - })?; + output: &BlockExecutionOutput, + execution_duration: Duration, + ) { + let execution_secs = execution_duration.as_secs_f64(); + let gas_used = output.result.gas_used; - // merge transitions into bundle state - debug_span!(target: "engine::tree", "merge transitions") - .entered() - .in_scope(|| db.borrow_mut().merge_transitions(BundleRetention::Reverts)); - let output = BlockExecutionOutput { result, state: db.borrow_mut().take_bundle() }; + // Update gas metrics + self.executor.gas_processed_total.increment(gas_used); + self.executor.gas_per_second.set(gas_used as f64 / execution_secs); + self.executor.gas_used_histogram.record(gas_used as f64); + self.executor.execution_histogram.record(execution_secs); + self.executor.execution_duration.set(execution_secs); // Update the metrics for the number of accounts, storage slots and bytecodes updated let accounts = output.state.state.len(); @@ -153,8 +53,31 @@ impl EngineApiMetrics { self.executor.accounts_updated_histogram.record(accounts as f64); self.executor.storage_slots_updated_histogram.record(storage_slots as f64); self.executor.bytecodes_updated_histogram.record(bytecodes as f64); + } - Ok((output, senders)) + /// Returns a reference to the executor metrics for use in state hooks. + pub(crate) const fn executor_metrics(&self) -> &ExecutorMetrics { + &self.executor + } + + /// Records the duration of block pre-execution changes (e.g., beacon root update). + pub(crate) fn record_pre_execution(&self, elapsed: Duration) { + self.executor.pre_execution_histogram.record(elapsed); + } + + /// Records the duration of block post-execution changes (e.g., finalization). + pub(crate) fn record_post_execution(&self, elapsed: Duration) { + self.executor.post_execution_histogram.record(elapsed); + } + + /// Records the time spent waiting for the next transaction from the iterator. 
+ pub(crate) fn record_transaction_wait(&self, elapsed: Duration) { + self.executor.transaction_wait_histogram.record(elapsed); + } + + /// Records the duration of a single transaction execution. + pub(crate) fn record_transaction_execution(&self, elapsed: Duration) { + self.executor.transaction_execution_histogram.record(elapsed); } } @@ -433,138 +356,10 @@ pub(crate) struct BlockBufferMetrics { mod tests { use super::*; use alloy_eips::eip7685::Requests; - use alloy_evm::block::StateChangeSource; - use alloy_primitives::{B256, U256}; use metrics_util::debugging::{DebuggingRecorder, Snapshotter}; - use reth_ethereum_primitives::{Receipt, TransactionSigned}; - use reth_evm_ethereum::EthEvm; + use reth_ethereum_primitives::Receipt; use reth_execution_types::BlockExecutionResult; - use reth_primitives_traits::RecoveredBlock; - use revm::{ - context::result::{ExecutionResult, Output, ResultAndState, SuccessReason}, - database::State, - database_interface::EmptyDB, - inspector::NoOpInspector, - state::{Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot}, - Context, MainBuilder, MainContext, - }; - use revm_primitives::Bytes; - use std::sync::mpsc; - - /// A simple mock executor for testing that doesn't require complex EVM setup - struct MockExecutor { - state: EvmState, - receipts: Vec, - hook: Option>, - } - - impl MockExecutor { - fn new(state: EvmState) -> Self { - Self { state, receipts: vec![], hook: None } - } - } - - // Mock Evm type for testing - type MockEvm = EthEvm, NoOpInspector>; - - impl BlockExecutor for MockExecutor { - type Transaction = TransactionSigned; - type Receipt = Receipt; - type Evm = MockEvm; - - fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { - Ok(()) - } - - fn execute_transaction_without_commit( - &mut self, - _tx: impl ExecutableTx, - ) -> Result::HaltReason>, BlockExecutionError> { - // Call hook with our mock state for each transaction - if let Some(hook) = self.hook.as_mut() { - 
hook.on_state(StateChangeSource::Transaction(0), &self.state); - } - - Ok(ResultAndState::new( - ExecutionResult::Success { - reason: SuccessReason::Return, - gas_used: 1000, // Mock gas used - gas_refunded: 0, - logs: vec![], - output: Output::Call(Bytes::from(vec![])), - }, - Default::default(), - )) - } - - fn commit_transaction( - &mut self, - _output: ResultAndState<::HaltReason>, - _tx: impl ExecutableTx, - ) -> Result { - Ok(1000) - } - - fn finish( - self, - ) -> Result<(Self::Evm, BlockExecutionResult), BlockExecutionError> { - let Self { hook, state, .. } = self; - - // Call hook with our mock state - if let Some(mut hook) = hook { - hook.on_state(StateChangeSource::Transaction(0), &state); - } - - // Create a mock EVM - let db = State::builder() - .with_database(EmptyDB::default()) - .with_bundle_update() - .without_state_clear() - .build(); - let evm = EthEvm::new( - Context::mainnet().with_db(db).build_mainnet_with_inspector(NoOpInspector {}), - false, - ); - - // Return successful result like the original tests - Ok(( - evm, - BlockExecutionResult { - receipts: vec![], - requests: Requests::default(), - gas_used: 1000, - blob_gas_used: 0, - }, - )) - } - - fn set_state_hook(&mut self, hook: Option>) { - self.hook = hook; - } - - fn evm_mut(&mut self) -> &mut Self::Evm { - panic!("Mock executor evm_mut() not implemented") - } - - fn evm(&self) -> &Self::Evm { - panic!("Mock executor evm() not implemented") - } - - fn receipts(&self) -> &[Self::Receipt] { - &self.receipts - } - } - - struct ChannelStateHook { - output: i32, - sender: mpsc::Sender, - } - - impl OnStateHook for ChannelStateHook { - fn on_state(&mut self, _source: StateChangeSource, _state: &EvmState) { - let _ = self.sender.send(self.output); - } - } + use reth_revm::db::BundleState; fn setup_test_recorder() -> Snapshotter { let recorder = DebuggingRecorder::new(); @@ -574,38 +369,7 @@ mod tests { } #[test] - fn test_executor_metrics_hook_called() { - let metrics = 
EngineApiMetrics::default(); - let input = RecoveredBlock::::default(); - - let (tx, rx) = mpsc::channel(); - let expected_output = 42; - let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); - - let state = EvmState::default(); - let executor = MockExecutor::new(state); - - // This will fail to create the EVM but should still call the hook - let _result = metrics.execute_metered::<_, EmptyDB, _>( - executor, - input.clone_transactions_recovered().map(Ok::<_, BlockExecutionError>), - input.transaction_count(), - state_hook, - |_| {}, - ); - - // Check if hook was called (it might not be if finish() fails early) - match rx.try_recv() { - Ok(actual_output) => assert_eq!(actual_output, expected_output), - Err(_) => { - // Hook wasn't called, which is expected if the mock fails early - // The test still validates that the code compiles and runs - } - } - } - - #[test] - fn test_executor_metrics_hook_metrics_recorded() { + fn test_record_block_execution_metrics() { let snapshotter = setup_test_recorder(); let metrics = EngineApiMetrics::default(); @@ -614,45 +378,17 @@ mod tests { metrics.executor.gas_per_second.set(0.0); metrics.executor.gas_used_histogram.record(0.0); - let input = RecoveredBlock::::default(); - - let (tx, _rx) = mpsc::channel(); - let state_hook = Box::new(ChannelStateHook { sender: tx, output: 42 }); - - // Create a state with some data - let state = { - let mut state = EvmState::default(); - let storage = - EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2), 0))]); - state.insert( - Default::default(), - Account { - info: AccountInfo { - balance: U256::from(100), - nonce: 10, - code_hash: B256::random(), - code: Default::default(), - account_id: None, - }, - original_info: Box::new(AccountInfo::default()), - storage, - status: AccountStatus::default(), - transaction_id: 0, - }, - ); - state + let output = BlockExecutionOutput:: { + state: BundleState::default(), + result: BlockExecutionResult { + 
receipts: vec![], + requests: Requests::default(), + gas_used: 21000, + blob_gas_used: 0, + }, }; - let executor = MockExecutor::new(state); - - // Execute (will fail but should still update some metrics) - let _result = metrics.execute_metered::<_, EmptyDB, _>( - executor, - input.clone_transactions_recovered().map(Ok::<_, BlockExecutionError>), - input.transaction_count(), - state_hook, - |_| {}, - ); + metrics.record_block_execution(&output, Duration::from_millis(100)); let snapshot = snapshotter.snapshot().into_vec(); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 573962a8e5c..e02fb2f9e4f 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -7,10 +7,10 @@ use crate::tree::{ payload_processor::{executor::WorkloadExecutor, PayloadProcessor}, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, sparse_trie::StateRootComputeOutcome, - EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, StateProviderBuilder, - StateProviderDatabase, TreeConfig, + EngineApiMetrics, EngineApiTreeState, ExecutionEnv, MeteredStateHook, PayloadHandle, + StateProviderBuilder, StateProviderDatabase, TreeConfig, }; -use alloy_consensus::transaction::Either; +use alloy_consensus::transaction::{Either, TxHashRef}; use alloy_eip7928::BlockAccessList; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; @@ -41,7 +41,7 @@ use reth_provider::{ ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider, StateProviderFactory, StateReader, }; -use reth_revm::db::State; +use reth_revm::db::{states::bundle_state::BundleRetention, State}; use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot}; use reth_trie_db::ChangesetCache; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; @@ -638,7 +638,13 @@ where Ok(()) } - /// Executes a block with the given 
state provider + /// Executes a block with the given state provider. + /// + /// This method orchestrates block execution: + /// 1. Sets up the EVM with state database and precompile caching + /// 2. Spawns a background task for incremental receipt root computation + /// 3. Executes transactions with metrics collection via state hooks + /// 4. Merges state transitions and records execution metrics #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] #[expect(clippy::type_complexity)] fn execute_block( @@ -701,31 +707,117 @@ where let task_handle = ReceiptRootTaskHandle::new(receipt_rx, result_tx); self.payload_processor.executor().spawn_blocking(move || task_handle.run(receipts_len)); + // Wrap the state hook with metrics collection + let inner_hook = Box::new(handle.state_hook()); + let state_hook = + MeteredStateHook { metrics: self.metrics.executor_metrics().clone(), inner_hook }; + + let transaction_count = input.transaction_count(); + let executor = executor.with_state_hook(Some(Box::new(state_hook))); + let execution_start = Instant::now(); - let state_hook = Box::new(handle.state_hook()); - let (output, senders) = self.metrics.execute_metered( + + // Execute all transactions and finalize + let (executor, senders) = self.execute_transactions( executor, - handle.iter_transactions().map(|res| res.map_err(BlockExecutionError::other)), - input.transaction_count(), - state_hook, - |receipts| { - // Send the latest receipt to the background task for incremental root computation. - // The receipt is cloned here; encoding happens in the background thread. 
- if let Some(receipt) = receipts.last() { - // Infer tx_index from the number of receipts collected so far - let tx_index = receipts.len() - 1; - let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); - } - }, + transaction_count, + handle.iter_transactions(), + &receipt_tx, )?; drop(receipt_tx); - let execution_finish = Instant::now(); - let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); + // Finish execution and get the result + let post_exec_start = Instant::now(); + let (_evm, result) = debug_span!(target: "engine::tree", "finish") + .in_scope(|| executor.finish()) + .map(|(evm, result)| (evm.into_db(), result))?; + self.metrics.record_post_execution(post_exec_start.elapsed()); + + // Merge transitions into bundle state + debug_span!(target: "engine::tree", "merge transitions") + .in_scope(|| db.merge_transitions(BundleRetention::Reverts)); + + let output = BlockExecutionOutput { result, state: db.take_bundle() }; + + let execution_duration = execution_start.elapsed(); + self.metrics.record_block_execution(&output, execution_duration); + + debug!(target: "engine::tree::payload_validator", elapsed = ?execution_duration, "Executed block"); Ok((output, senders, result_rx)) } + /// Executes transactions and collects senders, streaming receipts to a background task. + /// + /// This method handles: + /// - Applying pre-execution changes (e.g., beacon root updates) + /// - Executing each transaction with timing metrics + /// - Streaming receipts to the receipt root computation task + /// - Collecting transaction senders for later use + /// + /// Returns the executor (for finalization) and the collected senders. + fn execute_transactions( + &self, + mut executor: E, + transaction_count: usize, + transactions: impl Iterator>, + receipt_tx: &crossbeam_channel::Sender>, + ) -> Result<(E, Vec
), BlockExecutionError> + where + E: BlockExecutor, + Tx: alloy_evm::block::ExecutableTx + alloy_evm::RecoveredTx, + InnerTx: TxHashRef, + Err: core::error::Error + Send + Sync + 'static, + { + let mut senders = Vec::with_capacity(transaction_count); + + // Apply pre-execution changes (e.g., beacon root update) + let pre_exec_start = Instant::now(); + debug_span!(target: "engine::tree", "pre execution") + .in_scope(|| executor.apply_pre_execution_changes())?; + self.metrics.record_pre_execution(pre_exec_start.elapsed()); + + // Execute transactions + let exec_span = debug_span!(target: "engine::tree", "execution").entered(); + let mut transactions = transactions.into_iter(); + loop { + // Measure time spent waiting for next transaction from iterator + // (e.g., parallel signature recovery) + let wait_start = Instant::now(); + let Some(tx_result) = transactions.next() else { break }; + self.metrics.record_transaction_wait(wait_start.elapsed()); + + let tx = tx_result.map_err(BlockExecutionError::other)?; + let tx_signer = *>::signer(&tx); + let tx_hash = >::tx(&tx).tx_hash(); + + senders.push(tx_signer); + + let span = debug_span!( + target: "engine::tree", + "execute tx", + ?tx_hash, + gas_used = tracing::field::Empty, + ); + let enter = span.entered(); + trace!(target: "engine::tree", "Executing transaction"); + + let tx_start = Instant::now(); + let gas_used = executor.execute_transaction(tx)?; + self.metrics.record_transaction_execution(tx_start.elapsed()); + + // Send the latest receipt to the background task for incremental root computation + if let Some(receipt) = executor.receipts().last() { + let tx_index = executor.receipts().len() - 1; + let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); + } + + enter.record("gas_used", gas_used); + } + drop(exec_span); + + Ok((executor, senders)) + } + /// Compute state root for the given hashed post state in parallel. 
/// /// Uses an overlay factory which provides the state of the parent block, along with the From 3065a328f9ee9d4b6e90db8f224ee112c565fee3 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 21 Jan 2026 16:08:24 +0400 Subject: [PATCH 111/267] fix: clear `overlay_cache` in `with_extended_hashed_state_overlay` (#21233) --- crates/storage/provider/src/providers/state/overlay.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index b233d621b7e..97baab150e5 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -129,6 +129,8 @@ impl OverlayStateProviderFactory { /// This overlay will be applied on top of any reverts applied via `with_block_hash`. pub fn with_overlay_source(mut self, source: Option) -> Self { self.overlay_source = source; + // Clear the overlay cache since we've updated the source. + self.overlay_cache = Default::default(); self } @@ -137,6 +139,8 @@ impl OverlayStateProviderFactory { /// Convenience method that wraps the lazy overlay in `OverlaySource::Lazy`. pub fn with_lazy_overlay(mut self, lazy_overlay: Option) -> Self { self.overlay_source = lazy_overlay.map(OverlaySource::Lazy); + // Clear the overlay cache since we've updated the source. + self.overlay_cache = Default::default(); self } @@ -152,6 +156,8 @@ impl OverlayStateProviderFactory { trie: Arc::new(TrieUpdatesSorted::default()), state, }); + // Clear the overlay cache since we've updated the source. + self.overlay_cache = Default::default(); } self } @@ -178,6 +184,8 @@ impl OverlayStateProviderFactory { }); } } + // Clear the overlay cache since we've updated the source. 
+ self.overlay_cache = Default::default(); self } } From 04d4c9a02fb61164e42e1667c2fd7343b13ebe44 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 21 Jan 2026 12:44:08 +0000 Subject: [PATCH 112/267] fix(rocksdb): flush all column families on drop and show SST/memtable sizes (#21251) Co-authored-by: Amp --- crates/cli/commands/src/db/stats.rs | 17 ++++++++++++- .../provider/src/providers/rocksdb/metrics.rs | 2 +- .../src/providers/rocksdb/provider.rs | 24 +++++++++++-------- .../provider/src/providers/rocksdb_stub.rs | 4 ++++ 4 files changed, 35 insertions(+), 12 deletions(-) diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 0e46bfdbb38..0b73727a608 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -159,18 +159,31 @@ impl Command { fn rocksdb_stats_table(&self, tool: &DbTool) -> ComfyTable { let mut table = ComfyTable::new(); table.load_preset(comfy_table::presets::ASCII_MARKDOWN); - table.set_header(["RocksDB Table Name", "# Entries", "Total Size", "Pending Compaction"]); + table.set_header([ + "RocksDB Table Name", + "# Entries", + "SST Size", + "Memtable Size", + "Total Size", + "Pending Compaction", + ]); let stats = tool.provider_factory.rocksdb_provider().table_stats(); + let mut total_sst: u64 = 0; + let mut total_memtable: u64 = 0; let mut total_size: u64 = 0; let mut total_pending: u64 = 0; for stat in &stats { + total_sst += stat.sst_size_bytes; + total_memtable += stat.memtable_size_bytes; total_size += stat.estimated_size_bytes; total_pending += stat.pending_compaction_bytes; let mut row = Row::new(); row.add_cell(Cell::new(&stat.name)) .add_cell(Cell::new(stat.estimated_num_keys)) + .add_cell(Cell::new(human_bytes(stat.sst_size_bytes as f64))) + .add_cell(Cell::new(human_bytes(stat.memtable_size_bytes as f64))) .add_cell(Cell::new(human_bytes(stat.estimated_size_bytes as f64))) 
.add_cell(Cell::new(human_bytes(stat.pending_compaction_bytes as f64))); table.add_row(row); @@ -187,6 +200,8 @@ impl Command { let mut row = Row::new(); row.add_cell(Cell::new("RocksDB Total")) .add_cell(Cell::new("")) + .add_cell(Cell::new(human_bytes(total_sst as f64))) + .add_cell(Cell::new(human_bytes(total_memtable as f64))) .add_cell(Cell::new(human_bytes(total_size as f64))) .add_cell(Cell::new(human_bytes(total_pending as f64))); table.add_row(row); diff --git a/crates/storage/provider/src/providers/rocksdb/metrics.rs b/crates/storage/provider/src/providers/rocksdb/metrics.rs index 913016a1f34..3971ed978ba 100644 --- a/crates/storage/provider/src/providers/rocksdb/metrics.rs +++ b/crates/storage/provider/src/providers/rocksdb/metrics.rs @@ -6,7 +6,7 @@ use reth_db::Tables; use reth_metrics::Metrics; use strum::{EnumIter, IntoEnumIterator}; -const ROCKSDB_TABLES: &[&str] = &[ +pub(super) const ROCKSDB_TABLES: &[&str] = &[ Tables::TransactionHashNumbers.name(), Tables::StoragesHistory.name(), Tables::AccountsHistory.name(), diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index d1423ba1716..45b049caf46 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -1,4 +1,4 @@ -use super::metrics::{RocksDBMetrics, RocksDBOperation}; +use super::metrics::{RocksDBMetrics, RocksDBOperation, ROCKSDB_TABLES}; use crate::providers::{compute_history_rank, needs_prev_shard_check, HistoryInfo}; use alloy_consensus::transaction::TxHashRef; use alloy_primitives::{Address, BlockNumber, TxNumber, B256}; @@ -41,6 +41,10 @@ pub(crate) type PendingRocksDBBatches = Arc Vec { - let cf_names = [ - tables::TransactionHashNumbers::NAME, - tables::AccountsHistory::NAME, - tables::StoragesHistory::NAME, - ]; - let mut stats = Vec::new(); macro_rules! 
collect_stats { ($db:expr) => { - for cf_name in cf_names { + for cf_name in ROCKSDB_TABLES { if let Some(cf) = $db.cf_handle(cf_name) { let estimated_num_keys = $db .property_int_value_cf(cf, rocksdb::properties::ESTIMATE_NUM_KEYS) @@ -464,6 +462,8 @@ impl RocksDBProviderInner { .unwrap_or(0); stats.push(RocksDBTableStats { + sst_size_bytes: sst_size, + memtable_size_bytes: memtable_size, name: cf_name.to_string(), estimated_num_keys, estimated_size_bytes, @@ -509,8 +509,12 @@ impl Drop for RocksDBProviderInner { if let Err(e) = db.flush_wal(true) { tracing::warn!(target: "storage::rocksdb", ?e, "Failed to flush WAL on drop"); } - if let Err(e) = db.flush() { - tracing::warn!(target: "storage::rocksdb", ?e, "Failed to flush memtables on drop"); + for cf_name in ROCKSDB_TABLES { + if let Some(cf) = db.cf_handle(cf_name) && + let Err(e) = db.flush_cf(&cf) + { + tracing::warn!(target: "storage::rocksdb", cf = cf_name, ?e, "Failed to flush CF on drop"); + } } db.cancel_all_background_work(true); } diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs index ff121131677..c671964c9ea 100644 --- a/crates/storage/provider/src/providers/rocksdb_stub.rs +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -17,6 +17,10 @@ pub(crate) type PendingRocksDBBatches = Arc>>; /// Statistics for a single `RocksDB` table (column family) - stub. #[derive(Debug, Clone)] pub struct RocksDBTableStats { + /// Size of SST files on disk in bytes. + pub sst_size_bytes: u64, + /// Size of memtables in memory in bytes. + pub memtable_size_bytes: u64, /// Name of the table/column family. pub name: String, /// Estimated number of keys in the table. 
From ebaa4bda3af18c3c506624351916eb22b4ed565b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 21 Jan 2026 14:14:34 +0000 Subject: [PATCH 113/267] feat(rocksdb): add missing observability (#21253) --- .../provider/src/providers/rocksdb/provider.rs | 18 ++++++++++++------ .../src/providers/static_file/manager.rs | 12 ++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 45b049caf46..129d8f1100a 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -507,13 +507,13 @@ impl Drop for RocksDBProviderInner { // Flush all memtables if possible. If not, they will be rebuilt from the WAL on // restart if let Err(e) = db.flush_wal(true) { - tracing::warn!(target: "storage::rocksdb", ?e, "Failed to flush WAL on drop"); + tracing::warn!(target: "providers::rocksdb", ?e, "Failed to flush WAL on drop"); } for cf_name in ROCKSDB_TABLES { if let Some(cf) = db.cf_handle(cf_name) && let Err(e) = db.flush_cf(&cf) { - tracing::warn!(target: "storage::rocksdb", cf = cf_name, ?e, "Failed to flush CF on drop"); + tracing::warn!(target: "providers::rocksdb", cf = cf_name, ?e, "Failed to flush CF on drop"); } } db.cancel_all_background_work(true); @@ -821,6 +821,7 @@ impl RocksDBProvider { /// (i.e., removes the minimum block and all higher blocks). /// /// Returns a `WriteBatchWithTransaction` that can be committed later. + #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] pub fn unwind_account_history_indices( &self, last_indices: &[(Address, BlockNumber)], @@ -846,6 +847,7 @@ impl RocksDBProvider { } /// Writes a batch of operations atomically. 
+ #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] pub fn write_batch(&self, f: F) -> ProviderResult<()> where F: FnOnce(&mut RocksDBBatch<'_>) -> ProviderResult<()>, @@ -864,6 +866,7 @@ impl RocksDBProvider { /// /// # Panics /// Panics if the provider is in read-only mode. + #[instrument(level = "debug", target = "providers::rocksdb", skip_all, fields(batch_len = batch.len(), batch_size = batch.size_in_bytes()))] pub fn commit_batch(&self, batch: WriteBatchWithTransaction) -> ProviderResult<()> { self.0.db_rw().write_opt(batch, &WriteOptions::default()).map_err(|e| { ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { @@ -878,7 +881,7 @@ impl RocksDBProvider { /// This handles transaction hash numbers, account history, and storage history based on /// the provided storage settings. Each operation runs in parallel with its own batch, /// pushing to `ctx.pending_batches` for later commit. - #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::rocksdb", skip_all, fields(num_blocks = blocks.len(), first_block = ctx.first_block_number))] pub(crate) fn write_blocks_data( &self, blocks: &[ExecutedBlock], @@ -919,7 +922,7 @@ impl RocksDBProvider { } /// Writes transaction hash to number mappings for the given blocks. - #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] fn write_tx_hash_numbers( &self, blocks: &[ExecutedBlock], @@ -940,7 +943,7 @@ impl RocksDBProvider { } /// Writes account history indices for the given blocks. - #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] fn write_account_history( &self, blocks: &[ExecutedBlock], @@ -965,7 +968,7 @@ impl RocksDBProvider { } /// Writes storage history indices for the given blocks. 
- #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] fn write_storage_history( &self, blocks: &[ExecutedBlock], @@ -1050,6 +1053,7 @@ impl<'a> RocksDBBatch<'a> { /// /// # Panics /// Panics if the provider is in read-only mode. + #[instrument(level = "debug", target = "providers::rocksdb", skip_all, fields(batch_len = self.inner.len(), batch_size = self.inner.size_in_bytes()))] pub fn commit(self) -> ProviderResult<()> { self.provider.0.db_rw().write_opt(self.inner, &WriteOptions::default()).map_err(|e| { ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { @@ -1398,6 +1402,7 @@ impl<'db> RocksTx<'db> { } /// Commits the transaction, persisting all changes. + #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] pub fn commit(self) -> ProviderResult<()> { self.inner.commit().map_err(|e| { ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { @@ -1408,6 +1413,7 @@ impl<'db> RocksTx<'db> { } /// Rolls back the transaction, discarding all changes. + #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] pub fn rollback(self) -> ProviderResult<()> { self.inner.rollback().map_err(|e| { ProviderError::Database(DatabaseError::Other(format!("rollback failed: {e}"))) diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index b835a4491de..8c7b5fb50a0 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -528,7 +528,7 @@ impl StaticFileProvider { } /// Writes headers for all blocks to the static file segment. 
- #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::static_file", skip_all)] fn write_headers( w: &mut StaticFileProviderRWRefMut<'_, N>, blocks: &[ExecutedBlock], @@ -541,7 +541,7 @@ impl StaticFileProvider { } /// Writes transactions for all blocks to the static file segment. - #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::static_file", skip_all)] fn write_transactions( w: &mut StaticFileProviderRWRefMut<'_, N>, blocks: &[ExecutedBlock], @@ -558,7 +558,7 @@ impl StaticFileProvider { } /// Writes transaction senders for all blocks to the static file segment. - #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::static_file", skip_all)] fn write_transaction_senders( w: &mut StaticFileProviderRWRefMut<'_, N>, blocks: &[ExecutedBlock], @@ -575,7 +575,7 @@ impl StaticFileProvider { } /// Writes receipts for all blocks to the static file segment. - #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::static_file", skip_all)] fn write_receipts( w: &mut StaticFileProviderRWRefMut<'_, N>, blocks: &[ExecutedBlock], @@ -602,7 +602,7 @@ impl StaticFileProvider { } /// Writes account changesets for all blocks to the static file segment. - #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::static_file", skip_all)] fn write_account_changesets( w: &mut StaticFileProviderRWRefMut<'_, N>, blocks: &[ExecutedBlock], @@ -647,7 +647,7 @@ impl StaticFileProvider { /// /// This spawns separate threads for each segment type and each thread calls `sync_all()` on its /// writer when done. 
- #[instrument(level = "debug", target = "providers::db", skip_all)] + #[instrument(level = "debug", target = "providers::static_file", skip_all)] pub fn write_blocks_data( &self, blocks: &[ExecutedBlock], From f85fcba8721a19c6a9d03683727fb2a5149daaa5 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 21 Jan 2026 15:18:44 +0100 Subject: [PATCH 114/267] feat(trie): add V2 account proof computation and refactor proof types (#21214) Co-authored-by: Amp --- crates/engine/primitives/src/config.rs | 16 + .../tree/src/tree/payload_processor/mod.rs | 19 +- .../src/tree/payload_processor/multiproof.rs | 507 ++++++++++++++---- .../src/tree/payload_processor/prewarm.rs | 89 ++- .../src/tree/payload_processor/sparse_trie.rs | 15 +- crates/trie/parallel/Cargo.toml | 2 +- crates/trie/parallel/src/proof.rs | 9 +- crates/trie/parallel/src/proof_task.rs | 444 ++++++++++----- crates/trie/parallel/src/stats.rs | 5 - crates/trie/parallel/src/value_encoder.rs | 2 - 10 files changed, 839 insertions(+), 269 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 2870d3dccc4..0b72e1d6243 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -34,6 +34,11 @@ fn default_account_worker_count() -> usize { /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 60; +/// The size of proof targets chunk to spawn in one multiproof calculation when V2 proofs are +/// enabled. This is 4x the default chunk size to take advantage of more efficient V2 proof +/// computation. +pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2: usize = DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE * 4; + /// Default number of reserved CPU cores for non-reth processes. /// /// This will be deducted from the thread count of main reth global threadpool. 
@@ -267,6 +272,17 @@ impl TreeConfig { self.multiproof_chunk_size } + /// Return the multiproof task chunk size, using the V2 default if V2 proofs are enabled + /// and the chunk size is at the default value. + pub const fn effective_multiproof_chunk_size(&self) -> usize { + if self.enable_proof_v2 && self.multiproof_chunk_size == DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE + { + DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2 + } else { + self.multiproof_chunk_size + } + } + /// Return the number of reserved CPU cores for non-reth processes pub const fn reserved_cpu_cores(&self) -> usize { self.reserved_cpu_cores diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 1803929c89a..1fa4232b0e2 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -245,6 +245,9 @@ where let (to_sparse_trie, sparse_trie_rx) = channel(); let (to_multi_proof, from_multi_proof) = crossbeam_channel::unbounded(); + // Extract V2 proofs flag early so we can pass it to prewarm + let v2_proofs_enabled = config.enable_proof_v2(); + // Handle BAL-based optimization if available let prewarm_handle = if let Some(bal) = bal { // When BAL is present, use BAL prewarming and send BAL to multiproof @@ -261,6 +264,7 @@ where provider_builder.clone(), None, // Don't send proof targets when BAL is present Some(bal), + v2_proofs_enabled, ) } else { // Normal path: spawn with transaction prewarming @@ -271,6 +275,7 @@ where provider_builder.clone(), Some(to_multi_proof.clone()), None, + v2_proofs_enabled, ) }; @@ -278,7 +283,6 @@ where let task_ctx = ProofTaskCtx::new(multiproof_provider_factory); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); - let v2_proofs_enabled = config.enable_proof_v2(); let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), task_ctx, @@ -290,10 +294,13 @@ where let 
multi_proof_task = MultiProofTask::new( proof_handle.clone(), to_sparse_trie, - config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), + config + .multiproof_chunking_enabled() + .then_some(config.effective_multiproof_chunk_size()), to_multi_proof.clone(), from_multi_proof, - ); + ) + .with_v2_proofs_enabled(v2_proofs_enabled); // spawn multi-proof task let parent_span = span.clone(); @@ -342,8 +349,9 @@ where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, { let (prewarm_rx, execution_rx, size_hint) = self.spawn_tx_iterator(transactions); + // This path doesn't use multiproof, so V2 proofs flag doesn't matter let prewarm_handle = - self.spawn_caching_with(env, prewarm_rx, size_hint, provider_builder, None, bal); + self.spawn_caching_with(env, prewarm_rx, size_hint, provider_builder, None, bal, false); PayloadHandle { to_multi_proof: None, prewarm_handle, @@ -410,6 +418,7 @@ where } /// Spawn prewarming optionally wired to the multiproof task for target updates. + #[expect(clippy::too_many_arguments)] fn spawn_caching_with

( &self, env: ExecutionEnv, @@ -418,6 +427,7 @@ where provider_builder: StateProviderBuilder, to_multi_proof: Option>, bal: Option>, + v2_proofs_enabled: bool, ) -> CacheTaskHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, @@ -440,6 +450,7 @@ where terminate_execution: Arc::new(AtomicBool::new(false)), precompile_cache_disabled: self.precompile_cache_disabled, precompile_cache_map: self.precompile_cache_map.clone(), + v2_proofs_enabled, }; let (prewarm_task, to_prewarm_task) = PrewarmCacheTask::new( diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index b5f1272b67e..823c3e54e9b 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -11,14 +11,18 @@ use reth_metrics::Metrics; use reth_provider::AccountReader; use reth_revm::state::EvmState; use reth_trie::{ - added_removed_keys::MultiAddedRemovedKeys, DecodedMultiProof, HashedPostState, HashedStorage, + added_removed_keys::MultiAddedRemovedKeys, proof_v2, HashedPostState, HashedStorage, MultiProofTargets, }; +#[cfg(test)] +use reth_trie_parallel::stats::ParallelTrieTracker; use reth_trie_parallel::{ proof::ParallelProof, proof_task::{ - AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, + AccountMultiproofInput, ProofResult, ProofResultContext, ProofResultMessage, + ProofWorkerHandle, }, + targets_v2::{ChunkedMultiProofTargetsV2, MultiProofTargetsV2}, }; use revm_primitives::map::{hash_map, B256Map}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -63,12 +67,12 @@ const DEFAULT_MAX_TARGETS_FOR_CHUNKING: usize = 300; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. 
-#[derive(Default, Debug)] +#[derive(Debug)] pub struct SparseTrieUpdate { /// The state update that was used to calculate the proof pub(crate) state: HashedPostState, /// The calculated multiproof - pub(crate) multiproof: DecodedMultiProof, + pub(crate) multiproof: ProofResult, } impl SparseTrieUpdate { @@ -80,7 +84,11 @@ impl SparseTrieUpdate { /// Construct update from multiproof. #[cfg(test)] pub(super) fn from_multiproof(multiproof: reth_trie::MultiProof) -> alloy_rlp::Result { - Ok(Self { multiproof: multiproof.try_into()?, ..Default::default() }) + let stats = ParallelTrieTracker::default().finish(); + Ok(Self { + state: HashedPostState::default(), + multiproof: ProofResult::Legacy(multiproof.try_into()?, stats), + }) } /// Extend update with contents of the other. @@ -94,7 +102,7 @@ impl SparseTrieUpdate { #[derive(Debug)] pub(super) enum MultiProofMessage { /// Prefetch proof targets - PrefetchProofs(MultiProofTargets), + PrefetchProofs(VersionedMultiProofTargets), /// New state update from transaction execution with its source StateUpdate(Source, EvmState), /// State update that can be applied to the sparse trie without any new proofs. @@ -223,12 +231,155 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat hashed_state } +/// Extends a `MultiProofTargets` with the contents of a `VersionedMultiProofTargets`, +/// regardless of which variant the latter is. 
+fn extend_multiproof_targets(dest: &mut MultiProofTargets, src: &VersionedMultiProofTargets) { + match src { + VersionedMultiProofTargets::Legacy(targets) => { + dest.extend_ref(targets); + } + VersionedMultiProofTargets::V2(targets) => { + // Add all account targets + for target in &targets.account_targets { + dest.entry(target.key()).or_default(); + } + + // Add all storage targets + for (hashed_address, slots) in &targets.storage_targets { + let slot_set = dest.entry(*hashed_address).or_default(); + for slot in slots { + slot_set.insert(slot.key()); + } + } + } + } +} + +/// A set of multiproof targets which can be either in the legacy or V2 representations. +#[derive(Debug)] +pub(super) enum VersionedMultiProofTargets { + /// Legacy targets + Legacy(MultiProofTargets), + /// V2 targets + V2(MultiProofTargetsV2), +} + +impl VersionedMultiProofTargets { + /// Returns true if there are no account or storage targets. + fn is_empty(&self) -> bool { + match self { + Self::Legacy(targets) => targets.is_empty(), + Self::V2(targets) => targets.is_empty(), + } + } + + /// Returns the number of account targets in the multiproof target + fn account_targets_len(&self) -> usize { + match self { + Self::Legacy(targets) => targets.len(), + Self::V2(targets) => targets.account_targets.len(), + } + } + + /// Returns the number of storage targets in the multiproof target + fn storage_targets_len(&self) -> usize { + match self { + Self::Legacy(targets) => targets.values().map(|slots| slots.len()).sum::(), + Self::V2(targets) => { + targets.storage_targets.values().map(|slots| slots.len()).sum::() + } + } + } + + /// Returns the number of accounts in the multiproof targets. + fn len(&self) -> usize { + match self { + Self::Legacy(targets) => targets.len(), + Self::V2(targets) => targets.account_targets.len(), + } + } + + /// Returns the total storage slot count across all accounts. 
+ fn storage_count(&self) -> usize { + match self { + Self::Legacy(targets) => targets.values().map(|slots| slots.len()).sum(), + Self::V2(targets) => targets.storage_targets.values().map(|slots| slots.len()).sum(), + } + } + + /// Returns the number of items that will be considered during chunking. + fn chunking_length(&self) -> usize { + match self { + Self::Legacy(targets) => targets.chunking_length(), + Self::V2(targets) => { + // For V2, count accounts + storage slots + targets.account_targets.len() + + targets.storage_targets.values().map(|slots| slots.len()).sum::() + } + } + } + + /// Retains the targets representing the difference with another `MultiProofTargets`. + /// Removes all targets that are already present in `other`. + fn retain_difference(&mut self, other: &MultiProofTargets) { + match self { + Self::Legacy(targets) => { + targets.retain_difference(other); + } + Self::V2(targets) => { + // Remove account targets that exist in other + targets.account_targets.retain(|target| !other.contains_key(&target.key())); + + // For each account in storage_targets, remove slots that exist in other + targets.storage_targets.retain(|hashed_address, slots| { + if let Some(other_slots) = other.get(hashed_address) { + slots.retain(|slot| !other_slots.contains(&slot.key())); + !slots.is_empty() + } else { + true + } + }); + } + } + } + + /// Extends this `VersionedMultiProofTargets` with the contents of another. + /// + /// Panics if the variants do not match. 
+ fn extend(&mut self, other: Self) { + match (self, other) { + (Self::Legacy(dest), Self::Legacy(src)) => { + dest.extend(src); + } + (Self::V2(dest), Self::V2(src)) => { + dest.account_targets.extend(src.account_targets); + for (addr, slots) in src.storage_targets { + dest.storage_targets.entry(addr).or_default().extend(slots); + } + } + _ => panic!("Cannot extend VersionedMultiProofTargets with mismatched variants"), + } + } + + /// Chunks this `VersionedMultiProofTargets` into smaller chunks of the given size. + fn chunks(self, chunk_size: usize) -> Box> { + match self { + Self::Legacy(targets) => { + Box::new(MultiProofTargets::chunks(targets, chunk_size).map(Self::Legacy)) + } + Self::V2(targets) => { + Box::new(ChunkedMultiProofTargetsV2::new(targets, chunk_size).map(Self::V2)) + } + } + } +} + /// Input parameters for dispatching a multiproof calculation. #[derive(Debug)] struct MultiproofInput { source: Option, hashed_state_update: HashedPostState, - proof_targets: MultiProofTargets, + proof_targets: VersionedMultiProofTargets, proof_sequence_number: u64, state_root_message_sender: CrossbeamSender, multi_added_removed_keys: Option>, @@ -263,8 +414,6 @@ pub struct MultiproofManager { proof_result_tx: CrossbeamSender, /// Metrics metrics: MultiProofTaskMetrics, - /// Whether to use V2 storage proofs - v2_proofs_enabled: bool, } impl MultiproofManager { @@ -278,9 +427,7 @@ impl MultiproofManager { metrics.max_storage_workers.set(proof_worker_handle.total_storage_workers() as f64); metrics.max_account_workers.set(proof_worker_handle.total_account_workers() as f64); - let v2_proofs_enabled = proof_worker_handle.v2_proofs_enabled(); - - Self { metrics, proof_worker_handle, proof_result_tx, v2_proofs_enabled } + Self { metrics, proof_worker_handle, proof_result_tx } } /// Dispatches a new multiproof calculation to worker pools. 
@@ -325,41 +472,48 @@ impl MultiproofManager { multi_added_removed_keys, } = multiproof_input; - let account_targets = proof_targets.len(); - let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); - trace!( target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?proof_targets, - account_targets, - storage_targets, + account_targets = proof_targets.account_targets_len(), + storage_targets = proof_targets.storage_targets_len(), ?source, "Dispatching multiproof to workers" ); let start = Instant::now(); - // Extend prefix sets with targets - let frozen_prefix_sets = - ParallelProof::extend_prefix_sets_with_targets(&Default::default(), &proof_targets); + // Workers will send ProofResultMessage directly to proof_result_rx + let proof_result_sender = ProofResultContext::new( + self.proof_result_tx.clone(), + proof_sequence_number, + hashed_state_update, + start, + ); - // Dispatch account multiproof to worker pool with result sender - let input = AccountMultiproofInput { - targets: proof_targets, - prefix_sets: frozen_prefix_sets, - collect_branch_node_masks: true, - multi_added_removed_keys, - // Workers will send ProofResultMessage directly to proof_result_rx - proof_result_sender: ProofResultContext::new( - self.proof_result_tx.clone(), - proof_sequence_number, - hashed_state_update, - start, - ), - v2_proofs_enabled: self.v2_proofs_enabled, + let input = match proof_targets { + VersionedMultiProofTargets::Legacy(proof_targets) => { + // Extend prefix sets with targets + let frozen_prefix_sets = ParallelProof::extend_prefix_sets_with_targets( + &Default::default(), + &proof_targets, + ); + + AccountMultiproofInput::Legacy { + targets: proof_targets, + prefix_sets: frozen_prefix_sets, + collect_branch_node_masks: true, + multi_added_removed_keys, + proof_result_sender, + } + } + VersionedMultiProofTargets::V2(proof_targets) => { + AccountMultiproofInput::V2 { targets: proof_targets, proof_result_sender } + } }; + // 
Dispatch account multiproof to worker pool with result sender if let Err(e) = self.proof_worker_handle.dispatch_account_multiproof(input) { error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch account multiproof"); return; @@ -561,6 +715,9 @@ pub(super) struct MultiProofTask { /// there are any active workers and force chunking across workers. This is to prevent tasks /// which are very long from hitting a single worker. max_targets_for_chunking: usize, + /// Whether or not V2 proof calculation is enabled. If enabled then [`MultiProofTargetsV2`] + /// will be produced by state updates. + v2_proofs_enabled: bool, } impl MultiProofTask { @@ -592,9 +749,16 @@ impl MultiProofTask { ), metrics, max_targets_for_chunking: DEFAULT_MAX_TARGETS_FOR_CHUNKING, + v2_proofs_enabled: false, } } + /// Enables V2 proof target generation on state updates. + pub(super) const fn with_v2_proofs_enabled(mut self, v2_proofs_enabled: bool) -> Self { + self.v2_proofs_enabled = v2_proofs_enabled; + self + } + /// Handles request for proof prefetch. /// /// Returns how many multiproof tasks were dispatched for the prefetch request. @@ -602,37 +766,29 @@ impl MultiProofTask { level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, - fields(accounts = targets.len(), chunks = 0) + fields(accounts = targets.account_targets_len(), chunks = 0) )] - fn on_prefetch_proof(&mut self, mut targets: MultiProofTargets) -> u64 { + fn on_prefetch_proof(&mut self, mut targets: VersionedMultiProofTargets) -> u64 { // Remove already fetched proof targets to avoid redundant work. targets.retain_difference(&self.fetched_proof_targets); - self.fetched_proof_targets.extend_ref(&targets); + extend_multiproof_targets(&mut self.fetched_proof_targets, &targets); - // Make sure all target accounts have an `AddedRemovedKeySet` in the + // For Legacy multiproofs, make sure all target accounts have an `AddedRemovedKeySet` in the // [`MultiAddedRemovedKeys`]. 
Even if there are not any known removed keys for the account, // we still want to optimistically fetch extension children for the leaf addition case. - self.multi_added_removed_keys.touch_accounts(targets.keys().copied()); - - // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks - let multi_added_removed_keys = Arc::new(MultiAddedRemovedKeys { - account: self.multi_added_removed_keys.account.clone(), - storages: targets - .keys() - .filter_map(|account| { - self.multi_added_removed_keys - .storages - .get(account) - .cloned() - .map(|keys| (*account, keys)) - }) - .collect(), - }); + // V2 multiproofs don't need this. + let multi_added_removed_keys = + if let VersionedMultiProofTargets::Legacy(legacy_targets) = &targets { + self.multi_added_removed_keys.touch_accounts(legacy_targets.keys().copied()); + Some(Arc::new(self.multi_added_removed_keys.clone())) + } else { + None + }; self.metrics.prefetch_proof_targets_accounts_histogram.record(targets.len() as f64); self.metrics .prefetch_proof_targets_storages_histogram - .record(targets.values().map(|slots| slots.len()).sum::() as f64); + .record(targets.storage_count() as f64); let chunking_len = targets.chunking_length(); let available_account_workers = @@ -646,7 +802,7 @@ impl MultiProofTask { self.max_targets_for_chunking, available_account_workers, available_storage_workers, - MultiProofTargets::chunks, + VersionedMultiProofTargets::chunks, |proof_targets| { self.multiproof_manager.dispatch(MultiproofInput { source: None, @@ -654,7 +810,7 @@ impl MultiProofTask { proof_targets, proof_sequence_number: self.proof_sequencer.next_sequence(), state_root_message_sender: self.tx.clone(), - multi_added_removed_keys: Some(multi_added_removed_keys.clone()), + multi_added_removed_keys: multi_added_removed_keys.clone(), }); }, ); @@ -757,6 +913,7 @@ impl MultiProofTask { self.multiproof_manager.proof_worker_handle.available_account_workers(); let available_storage_workers = 
self.multiproof_manager.proof_worker_handle.available_storage_workers(); + let num_chunks = dispatch_with_chunking( not_fetched_state_update, chunking_len, @@ -770,8 +927,9 @@ impl MultiProofTask { &hashed_state_update, &self.fetched_proof_targets, &multi_added_removed_keys, + self.v2_proofs_enabled, ); - spawned_proof_targets.extend_ref(&proof_targets); + extend_multiproof_targets(&mut spawned_proof_targets, &proof_targets); self.multiproof_manager.dispatch(MultiproofInput { source: Some(source), @@ -871,7 +1029,10 @@ impl MultiProofTask { batch_metrics.proofs_processed += 1; if let Some(combined_update) = self.on_proof( sequence_number, - SparseTrieUpdate { state, multiproof: Default::default() }, + SparseTrieUpdate { + state, + multiproof: ProofResult::empty(self.v2_proofs_enabled), + }, ) { let _ = self.to_sparse_trie.send(combined_update); } @@ -898,8 +1059,7 @@ impl MultiProofTask { } let account_targets = merged_targets.len(); - let storage_targets = - merged_targets.values().map(|slots| slots.len()).sum::(); + let storage_targets = merged_targets.storage_count(); batch_metrics.prefetch_proofs_requested += self.on_prefetch_proof(merged_targets); trace!( target: "engine::tree::payload_processor::multiproof", @@ -1003,7 +1163,10 @@ impl MultiProofTask { if let Some(combined_update) = self.on_proof( sequence_number, - SparseTrieUpdate { state, multiproof: Default::default() }, + SparseTrieUpdate { + state, + multiproof: ProofResult::empty(self.v2_proofs_enabled), + }, ) { let _ = self.to_sparse_trie.send(combined_update); } @@ -1106,7 +1269,7 @@ impl MultiProofTask { let update = SparseTrieUpdate { state: proof_result.state, - multiproof: proof_result_data.proof, + multiproof: proof_result_data, }; if let Some(combined_update) = @@ -1196,7 +1359,7 @@ struct MultiproofBatchCtx { /// received. updates_finished_time: Option, /// Reusable buffer for accumulating prefetch targets during batching. 
- accumulated_prefetch_targets: Vec, + accumulated_prefetch_targets: Vec, } impl MultiproofBatchCtx { @@ -1242,40 +1405,77 @@ fn get_proof_targets( state_update: &HashedPostState, fetched_proof_targets: &MultiProofTargets, multi_added_removed_keys: &MultiAddedRemovedKeys, -) -> MultiProofTargets { - let mut targets = MultiProofTargets::default(); + v2_enabled: bool, +) -> VersionedMultiProofTargets { + if v2_enabled { + let mut targets = MultiProofTargetsV2::default(); + + // first collect all new accounts (not previously fetched) + for &hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(&hashed_address) { + targets.account_targets.push(hashed_address.into()); + } + } + + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + + // If the storage is wiped, we still need to fetch the account proof. + if storage.wiped && fetched.is_none() { + targets.account_targets.push(Into::::into(*hashed_address)); + continue + } + + let changed_slots = storage + .storage + .keys() + .filter(|slot| !fetched.is_some_and(|f| f.contains(*slot))) + .map(|slot| Into::::into(*slot)) + .collect::>(); - // first collect all new accounts (not previously fetched) - for hashed_address in state_update.accounts.keys() { - if !fetched_proof_targets.contains_key(hashed_address) { - targets.insert(*hashed_address, HashSet::default()); + if !changed_slots.is_empty() { + targets.account_targets.push((*hashed_address).into()); + targets.storage_targets.insert(*hashed_address, changed_slots); + } } - } - // then process storage slots for all accounts in the state update - for (hashed_address, storage) in &state_update.storages { - let fetched = fetched_proof_targets.get(hashed_address); - let storage_added_removed_keys = multi_added_removed_keys.get_storage(hashed_address); - let mut changed_slots = storage - .storage - .keys() - 
.filter(|slot| { - !fetched.is_some_and(|f| f.contains(*slot)) || - storage_added_removed_keys.is_some_and(|k| k.is_removed(slot)) - }) - .peekable(); + VersionedMultiProofTargets::V2(targets) + } else { + let mut targets = MultiProofTargets::default(); - // If the storage is wiped, we still need to fetch the account proof. - if storage.wiped && fetched.is_none() { - targets.entry(*hashed_address).or_default(); + // first collect all new accounts (not previously fetched) + for hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(hashed_address) { + targets.insert(*hashed_address, HashSet::default()); + } } - if changed_slots.peek().is_some() { - targets.entry(*hashed_address).or_default().extend(changed_slots); + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + let storage_added_removed_keys = multi_added_removed_keys.get_storage(hashed_address); + let mut changed_slots = storage + .storage + .keys() + .filter(|slot| { + !fetched.is_some_and(|f| f.contains(*slot)) || + storage_added_removed_keys.is_some_and(|k| k.is_removed(slot)) + }) + .peekable(); + + // If the storage is wiped, we still need to fetch the account proof. 
+ if storage.wiped && fetched.is_none() { + targets.entry(*hashed_address).or_default(); + } + + if changed_slots.peek().is_some() { + targets.entry(*hashed_address).or_default().extend(changed_slots); + } } - } - targets + VersionedMultiProofTargets::Legacy(targets) + } } /// Dispatches work items as a single unit or in chunks based on target size and worker @@ -1481,12 +1681,24 @@ mod tests { state } + fn unwrap_legacy_targets(targets: VersionedMultiProofTargets) -> MultiProofTargets { + match targets { + VersionedMultiProofTargets::Legacy(targets) => targets, + VersionedMultiProofTargets::V2(_) => panic!("Expected Legacy targets"), + } + } + #[test] fn test_get_proof_targets_new_account_targets() { let state = create_get_proof_targets_state(); let fetched = MultiProofTargets::default(); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // should return all accounts as targets since nothing was fetched before assert_eq!(targets.len(), state.accounts.len()); @@ -1500,7 +1712,12 @@ mod tests { let state = create_get_proof_targets_state(); let fetched = MultiProofTargets::default(); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // verify storage slots are included for accounts with storage for (addr, storage) in &state.storages { @@ -1528,7 +1745,12 @@ mod tests { // mark the account as already fetched fetched.insert(*fetched_addr, HashSet::default()); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // should not include the already fetched account since it has no storage updates 
assert!(!targets.contains_key(fetched_addr)); @@ -1548,7 +1770,12 @@ mod tests { fetched_slots.insert(fetched_slot); fetched.insert(*addr, fetched_slots); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // should not include the already fetched storage slot let target_slots = &targets[addr]; @@ -1561,7 +1788,12 @@ mod tests { let state = HashedPostState::default(); let fetched = MultiProofTargets::default(); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); assert!(targets.is_empty()); } @@ -1588,7 +1820,12 @@ mod tests { fetched_slots.insert(slot1); fetched.insert(addr1, fetched_slots); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); assert!(targets.contains_key(&addr2)); assert!(!targets[&addr1].contains(&slot1)); @@ -1614,7 +1851,12 @@ mod tests { assert!(!state.accounts.contains_key(&addr)); assert!(!fetched.contains_key(&addr)); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // verify that we still get the storage slots for the unmodified account assert!(targets.contains_key(&addr)); @@ -1656,7 +1898,12 @@ mod tests { removed_state.storages.insert(addr, removed_storage); multi_added_removed_keys.update_with_state(&removed_state); - let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &multi_added_removed_keys, + 
false, + )); // slot1 should be included despite being fetched, because it's marked as removed assert!(targets.contains_key(&addr)); @@ -1683,7 +1930,12 @@ mod tests { storage.storage.insert(slot1, U256::from(100)); state.storages.insert(addr, storage); - let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &multi_added_removed_keys, + false, + )); // account should be included because storage is wiped and account wasn't fetched assert!(targets.contains_key(&addr)); @@ -1726,7 +1978,12 @@ mod tests { removed_state.storages.insert(addr, removed_storage); multi_added_removed_keys.update_with_state(&removed_state); - let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &multi_added_removed_keys, + false, + )); // only slots in the state update can be included, so slot3 should not appear assert!(!targets.contains_key(&addr)); @@ -1753,9 +2010,12 @@ mod tests { targets3.insert(addr3, HashSet::default()); let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets3)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets1))) + .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets2))) + .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets3))) + .unwrap(); let proofs_requested = if let Ok(MultiProofMessage::PrefetchProofs(targets)) = task.rx.recv() { @@ -1769,11 +2029,12 @@ mod tests { assert_eq!(num_batched, 3); assert_eq!(merged_targets.len(), 3); - assert!(merged_targets.contains_key(&addr1)); - assert!(merged_targets.contains_key(&addr2)); - assert!(merged_targets.contains_key(&addr3)); 
+ let legacy_targets = unwrap_legacy_targets(merged_targets); + assert!(legacy_targets.contains_key(&addr1)); + assert!(legacy_targets.contains_key(&addr2)); + assert!(legacy_targets.contains_key(&addr3)); - task.on_prefetch_proof(merged_targets) + task.on_prefetch_proof(VersionedMultiProofTargets::Legacy(legacy_targets)) } else { panic!("Expected PrefetchProofs message"); }; @@ -1848,11 +2109,16 @@ mod tests { // Queue: [PrefetchProofs1, PrefetchProofs2, StateUpdate1, StateUpdate2, PrefetchProofs3] let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets1))) + .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets2))) + .unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update1)).unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update2)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets3.clone())).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy( + targets3.clone(), + ))) + .unwrap(); // Step 1: Receive and batch PrefetchProofs (should get targets1 + targets2) let mut pending_msg: Option = None; @@ -1878,9 +2144,10 @@ mod tests { // Should have batched exactly 2 PrefetchProofs (not 3!) 
assert_eq!(num_batched, 2, "Should batch only until different message type"); assert_eq!(merged_targets.len(), 2); - assert!(merged_targets.contains_key(&addr1)); - assert!(merged_targets.contains_key(&addr2)); - assert!(!merged_targets.contains_key(&addr3), "addr3 should NOT be in first batch"); + let legacy_targets = unwrap_legacy_targets(merged_targets); + assert!(legacy_targets.contains_key(&addr1)); + assert!(legacy_targets.contains_key(&addr2)); + assert!(!legacy_targets.contains_key(&addr3), "addr3 should NOT be in first batch"); } else { panic!("Expected PrefetchProofs message"); } @@ -1905,7 +2172,8 @@ mod tests { match task.rx.try_recv() { Ok(MultiProofMessage::PrefetchProofs(targets)) => { assert_eq!(targets.len(), 1); - assert!(targets.contains_key(&addr3)); + let legacy_targets = unwrap_legacy_targets(targets); + assert!(legacy_targets.contains_key(&addr3)); } _ => panic!("PrefetchProofs3 was lost!"), } @@ -1951,9 +2219,13 @@ mod tests { let source = StateChangeSource::Transaction(99); let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(prefetch1)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(prefetch1))) + .unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(prefetch2.clone())).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy( + prefetch2.clone(), + ))) + .unwrap(); let mut ctx = MultiproofBatchCtx::new(Instant::now()); let mut batch_metrics = MultiproofBatchMetrics::default(); @@ -1986,7 +2258,8 @@ mod tests { match task.rx.try_recv() { Ok(MultiProofMessage::PrefetchProofs(targets)) => { assert_eq!(targets.len(), 1); - assert!(targets.contains_key(&prefetch_addr2)); + let legacy_targets = unwrap_legacy_targets(targets); + assert!(legacy_targets.contains_key(&prefetch_addr2)); } other => panic!("Expected PrefetchProofs2 in channel, got {:?}", other), } diff --git 
a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 6021098627c..1083450549d 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -16,7 +16,7 @@ use crate::tree::{ payload_processor::{ bal::{total_slots, BALSlotIter}, executor::WorkloadExecutor, - multiproof::MultiProofMessage, + multiproof::{MultiProofMessage, VersionedMultiProofTargets}, ExecutionCache as PayloadExecutionCache, }, precompile_cache::{CachedPrecompile, PrecompileCacheMap}, @@ -237,7 +237,7 @@ where } /// If configured and the tx returned proof targets, emit the targets the transaction produced - fn send_multi_proof_targets(&self, targets: Option) { + fn send_multi_proof_targets(&self, targets: Option) { if self.is_execution_terminated() { // if execution is already terminated then we dont need to send more proof fetch // messages @@ -479,6 +479,8 @@ where pub(super) terminate_execution: Arc, pub(super) precompile_cache_disabled: bool, pub(super) precompile_cache_map: PrecompileCacheMap>, + /// Whether V2 proof calculation is enabled. + pub(super) v2_proofs_enabled: bool, } impl PrewarmContext @@ -487,10 +489,12 @@ where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, Evm: ConfigureEvm + 'static, { - /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating - /// execution. + /// Splits this context into an evm, an evm config, metrics, the atomic bool for terminating + /// execution, and whether V2 proofs are enabled. 
#[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] - fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { + fn evm_for_ctx( + self, + ) -> Option<(EvmFor, PrewarmMetrics, Arc, bool)> { let Self { env, evm_config, @@ -500,6 +504,7 @@ where terminate_execution, precompile_cache_disabled, precompile_cache_map, + v2_proofs_enabled, } = self; let mut state_provider = match provider.build() { @@ -549,7 +554,7 @@ where }); } - Some((evm, metrics, terminate_execution)) + Some((evm, metrics, terminate_execution, v2_proofs_enabled)) } /// Accepts an [`mpsc::Receiver`] of transactions and a handle to prewarm task. Executes @@ -570,7 +575,10 @@ where ) where Tx: ExecutableTxFor, { - let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; + let Some((mut evm, metrics, terminate_execution, v2_proofs_enabled)) = self.evm_for_ctx() + else { + return + }; while let Ok(IndexedTransaction { index, tx }) = { let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") @@ -633,7 +641,8 @@ where let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) .entered(); - let (targets, storage_targets) = multiproof_targets_from_state(res.state); + let (targets, storage_targets) = + multiproof_targets_from_state(res.state, v2_proofs_enabled); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); drop(_enter); @@ -778,9 +787,22 @@ where } } -/// Returns a set of [`MultiProofTargets`] and the total amount of storage targets, based on the +/// Returns a set of [`VersionedMultiProofTargets`] and the total amount of storage targets, based +/// on the given state. 
+fn multiproof_targets_from_state( + state: EvmState, + v2_enabled: bool, +) -> (VersionedMultiProofTargets, usize) { + if v2_enabled { + multiproof_targets_v2_from_state(state) + } else { + multiproof_targets_legacy_from_state(state) + } +} + +/// Returns legacy [`MultiProofTargets`] and the total amount of storage targets, based on the /// given state. -fn multiproof_targets_from_state(state: EvmState) -> (MultiProofTargets, usize) { +fn multiproof_targets_legacy_from_state(state: EvmState) -> (VersionedMultiProofTargets, usize) { let mut targets = MultiProofTargets::with_capacity(state.len()); let mut storage_targets = 0; for (addr, account) in state { @@ -810,7 +832,50 @@ fn multiproof_targets_from_state(state: EvmState) -> (MultiProofTargets, usize) targets.insert(keccak256(addr), storage_set); } - (targets, storage_targets) + (VersionedMultiProofTargets::Legacy(targets), storage_targets) +} + +/// Returns V2 [`reth_trie_parallel::targets_v2::MultiProofTargetsV2`] and the total amount of +/// storage targets, based on the given state. 
+fn multiproof_targets_v2_from_state(state: EvmState) -> (VersionedMultiProofTargets, usize) { + use reth_trie::proof_v2; + use reth_trie_parallel::targets_v2::MultiProofTargetsV2; + + let mut targets = MultiProofTargetsV2::default(); + let mut storage_target_count = 0; + for (addr, account) in state { + // if the account was not touched, or if the account was selfdestructed, do not + // fetch proofs for it + // + // Since selfdestruct can only happen in the same transaction, we can skip + // prefetching proofs for selfdestructed accounts + // + // See: https://eips.ethereum.org/EIPS/eip-6780 + if !account.is_touched() || account.is_selfdestructed() { + continue + } + + let hashed_address = keccak256(addr); + targets.account_targets.push(hashed_address.into()); + + let mut storage_slots = Vec::with_capacity(account.storage.len()); + for (key, slot) in account.storage { + // do nothing if unchanged + if !slot.is_changed() { + continue + } + + let hashed_slot = keccak256(B256::new(key.to_be_bytes())); + storage_slots.push(proof_v2::Target::from(hashed_slot)); + } + + storage_target_count += storage_slots.len(); + if !storage_slots.is_empty() { + targets.storage_targets.insert(hashed_address, storage_slots); + } + } + + (VersionedMultiProofTargets::V2(targets), storage_target_count) } /// The events the pre-warm task can handle. 
@@ -835,7 +900,7 @@ pub(super) enum PrewarmTaskEvent { /// The outcome of a pre-warm task Outcome { /// The prepared proof targets based on the evm state outcome - proof_targets: Option, + proof_targets: Option, }, /// Finished executing all transactions FinishedTxExecution { diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index b4c150cfa9a..052fd8672b2 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -4,7 +4,7 @@ use crate::tree::payload_processor::multiproof::{MultiProofTaskMetrics, SparseTr use alloy_primitives::B256; use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_trie::{updates::TrieUpdates, Nibbles}; -use reth_trie_parallel::root::ParallelStateRootError; +use reth_trie_parallel::{proof_task::ProofResult, root::ParallelStateRootError}; use reth_trie_sparse::{ errors::{SparseStateTrieResult, SparseTrieErrorKind}, provider::{TrieNodeProvider, TrieNodeProviderFactory}, @@ -97,8 +97,8 @@ where debug!( target: "engine::root", num_updates, - account_proofs = update.multiproof.account_subtree.len(), - storage_proofs = update.multiproof.storages.len(), + account_proofs = update.multiproof.account_proofs_len(), + storage_proofs = update.multiproof.storage_proofs_len(), "Updating sparse trie" ); @@ -157,7 +157,14 @@ where let started_at = Instant::now(); // Reveal new accounts and storage slots. 
- trie.reveal_decoded_multiproof(multiproof)?; + match multiproof { + ProofResult::Legacy(decoded, _) => { + trie.reveal_decoded_multiproof(decoded)?; + } + ProofResult::V2(decoded_v2) => { + trie.reveal_decoded_multiproof_v2(decoded_v2)?; + } + } let reveal_multiproof_elapsed = started_at.elapsed(); trace!( target: "engine::root::sparse", diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index d64f2dfb519..812dd2b85b1 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth -reth-execution-errors.workspace = true reth-primitives-traits.workspace = true +reth-execution-errors.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 7bf936bad3a..d42534c2713 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -197,7 +197,7 @@ impl ParallelProof { let (result_tx, result_rx) = crossbeam_unbounded(); let account_multiproof_start_time = Instant::now(); - let input = AccountMultiproofInput { + let input = AccountMultiproofInput::Legacy { targets, prefix_sets, collect_branch_node_masks: self.collect_branch_node_masks, @@ -208,7 +208,6 @@ impl ParallelProof { HashedPostState::default(), account_multiproof_start_time, ), - v2_proofs_enabled: self.v2_proofs_enabled, }; self.proof_worker_handle @@ -222,7 +221,9 @@ impl ParallelProof { ) })?; - let ProofResult { proof: multiproof, stats } = proof_result_msg.result?; + let ProofResult::Legacy(multiproof, stats) = proof_result_msg.result? 
else { + panic!("AccountMultiproofInput::Legacy was submitted, expected legacy result") + }; #[cfg(feature = "metrics")] self.metrics.record(stats); @@ -235,7 +236,7 @@ impl ParallelProof { leaves_added = stats.leaves_added(), missed_leaves = stats.missed_leaves(), precomputed_storage_roots = stats.precomputed_storage_roots(), - "Calculated decoded proof" + "Calculated decoded proof", ); Ok(multiproof) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index eb6f8923469..076931f48c7 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -32,6 +32,8 @@ use crate::{ root::ParallelStateRootError, stats::{ParallelTrieStats, ParallelTrieTracker}, + targets_v2::MultiProofTargetsV2, + value_encoder::AsyncAccountValueEncoder, StorageRootTargets, }; use alloy_primitives::{ @@ -49,11 +51,11 @@ use reth_trie::{ node_iter::{TrieElement, TrieNodeIter}, prefix_set::TriePrefixSets, proof::{ProofBlindedAccountProvider, ProofBlindedStorageProvider, StorageProof}, - proof_v2::{self, StorageProofCalculator}, + proof_v2, trie_cursor::{InstrumentedTrieCursor, TrieCursorFactory, TrieCursorMetricsCache}, walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, MultiProofTargets, - Nibbles, ProofTrieNode, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedMultiProofV2, DecodedStorageMultiProof, HashBuilder, HashedPostState, + MultiProofTargets, Nibbles, ProofTrieNode, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, @@ -220,7 +222,8 @@ impl ProofWorkerHandle { metrics, #[cfg(feature = "metrics")] cursor_metrics, - ); + ) + .with_v2_proofs(v2_proofs_enabled); if let Err(error) = worker.run() { error!( target: "trie::proof_task", @@ -333,16 +336,12 @@ impl ProofWorkerHandle { ProviderError::other(std::io::Error::other("account workers unavailable")); if let AccountWorkerJob::AccountMultiproof { input } = err.0 { - 
let AccountMultiproofInput { - proof_result_sender: - ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - }, - .. - } = *input; + let ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + } = input.into_proof_result_sender(); let _ = result_tx.send(ProofResultMessage { sequence_number: seq, @@ -605,11 +604,65 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { /// Result of a multiproof calculation. #[derive(Debug)] -pub struct ProofResult { - /// The account multiproof - pub proof: DecodedMultiProof, - /// Statistics collected during proof computation - pub stats: ParallelTrieStats, +pub enum ProofResult { + /// Legacy multiproof calculation result. + Legacy(DecodedMultiProof, ParallelTrieStats), + /// V2 multiproof calculation result. + V2(DecodedMultiProofV2), +} + +impl ProofResult { + /// Creates an empty [`ProofResult`] of the appropriate variant based on `v2_enabled`. + /// + /// Use this when constructing empty proofs (e.g., for state updates where all targets + /// were already fetched) to ensure consistency with the proof version being used. + pub fn empty(v2_enabled: bool) -> Self { + if v2_enabled { + Self::V2(DecodedMultiProofV2::default()) + } else { + let stats = ParallelTrieTracker::default().finish(); + Self::Legacy(DecodedMultiProof::default(), stats) + } + } + + /// Returns true if the result contains no proofs + pub fn is_empty(&self) -> bool { + match self { + Self::Legacy(proof, _) => proof.is_empty(), + Self::V2(proof) => proof.is_empty(), + } + } + + /// Extends the receiver with the value of the given results. + /// + /// # Panics + /// + /// This method panics if the two [`ProofResult`]s are not the same variant. 
+ pub fn extend(&mut self, other: Self) { + match (self, other) { + (Self::Legacy(proof, _), Self::Legacy(other, _)) => proof.extend(other), + (Self::V2(proof), Self::V2(other)) => proof.extend(other), + _ => panic!("mismatched ProofResults, cannot extend one with the other"), + } + } + + /// Returns the number of account proofs. + pub fn account_proofs_len(&self) -> usize { + match self { + Self::Legacy(proof, _) => proof.account_subtree.len(), + Self::V2(proof) => proof.account_proofs.len(), + } + } + + /// Returns the total number of storage proofs + pub fn storage_proofs_len(&self) -> usize { + match self { + Self::Legacy(proof, _) => { + proof.storages.values().map(|p| p.subtree.len()).sum::() + } + Self::V2(proof) => proof.storage_proofs.values().map(|p| p.len()).sum::(), + } + } } /// Channel used by worker threads to deliver `ProofResultMessage` items back to @@ -889,7 +942,7 @@ where &self, proof_tx: &ProofTaskTx, v2_calculator: Option< - &mut StorageProofCalculator< + &mut proof_v2::StorageProofCalculator< ::StorageTrieCursor<'_>, ::StorageCursor<'_>, >, @@ -1053,6 +1106,8 @@ struct AccountProofWorker { /// Cursor metrics for this worker #[cfg(feature = "metrics")] cursor_metrics: ProofTaskCursorMetrics, + /// Set to true if V2 proofs are enabled. + v2_enabled: bool, } impl AccountProofWorker @@ -1082,9 +1137,16 @@ where metrics, #[cfg(feature = "metrics")] cursor_metrics, + v2_enabled: false, } } + /// Changes whether or not V2 proofs are enabled. + const fn with_v2_proofs(mut self, v2_enabled: bool) -> Self { + self.v2_enabled = v2_enabled; + self + } + /// Runs the worker loop, processing jobs until the channel closes. 
/// /// # Lifecycle @@ -1117,6 +1179,17 @@ where let mut account_nodes_processed = 0u64; let mut cursor_metrics_cache = ProofTaskCursorMetricsCache::default(); + let mut v2_calculator = if self.v2_enabled { + let trie_cursor = proof_tx.provider.account_trie_cursor()?; + let hashed_cursor = proof_tx.provider.hashed_account_cursor()?; + Some(proof_v2::ProofCalculator::<_, _, AsyncAccountValueEncoder>::new( + trie_cursor, + hashed_cursor, + )) + } else { + None + }; + // Count this worker as available only after successful initialization. self.available_workers.fetch_add(1, Ordering::Relaxed); @@ -1128,6 +1201,7 @@ where AccountWorkerJob::AccountMultiproof { input } => { self.process_account_multiproof( &proof_tx, + v2_calculator.as_mut(), *input, &mut account_proofs_processed, &mut cursor_metrics_cache, @@ -1166,26 +1240,18 @@ where Ok(()) } - /// Processes an account multiproof request. - fn process_account_multiproof( + fn compute_legacy_account_multiproof( &self, proof_tx: &ProofTaskTx, - input: AccountMultiproofInput, - account_proofs_processed: &mut u64, - cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, - ) where + targets: MultiProofTargets, + mut prefix_sets: TriePrefixSets, + collect_branch_node_masks: bool, + multi_added_removed_keys: Option>, + proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, + ) -> Result + where Provider: TrieCursorFactory + HashedCursorFactory, { - let AccountMultiproofInput { - targets, - mut prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys, - proof_result_sender: - ProofResultContext { sender: result_tx, sequence_number: seq, state, start_time: start }, - v2_proofs_enabled, - } = input; - let span = debug_span!( target: "trie::proof_task", "Account multiproof calculation", @@ -1199,8 +1265,6 @@ where "Processing account multiproof" ); - let proof_start = Instant::now(); - let mut tracker = ParallelTrieTracker::default(); let mut storage_prefix_sets = std::mem::take(&mut 
prefix_sets.storage_prefix_sets); @@ -1210,29 +1274,14 @@ where tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - let storage_proof_receivers = match dispatch_storage_proofs( + let storage_proof_receivers = dispatch_storage_proofs( &self.storage_work_tx, &targets, &mut storage_prefix_sets, collect_branch_node_masks, multi_added_removed_keys.as_ref(), - v2_proofs_enabled, - ) { - Ok(receivers) => receivers, - Err(error) => { - // Send error through result channel - error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); - let _ = result_tx.send(ProofResultMessage { - sequence_number: seq, - result: Err(error), - elapsed: start.elapsed(), - state, - }); - return; - } - }; + )?; - // Use the missed leaves cache passed from the multiproof manager let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); let ctx = AccountMultiproofParams { @@ -1244,17 +1293,115 @@ where cached_storage_roots: &self.cached_storage_roots, }; - let result = - build_account_multiproof_with_storage_roots(&proof_tx.provider, ctx, &mut tracker); - - let now = Instant::now(); - let proof_elapsed = now.duration_since(proof_start); - let total_elapsed = now.duration_since(start); - let proof_cursor_metrics = tracker.cursor_metrics; - proof_cursor_metrics.record_spans(); + let result = build_account_multiproof_with_storage_roots( + &proof_tx.provider, + ctx, + &mut tracker, + proof_cursor_metrics, + ); let stats = tracker.finish(); - let result = result.map(|proof| ProofResult { proof, stats }); + result.map(|proof| ProofResult::Legacy(proof, stats)) + } + + fn compute_v2_account_multiproof( + &self, + v2_calculator: &mut proof_v2::ProofCalculator< + ::AccountTrieCursor<'_>, + ::AccountCursor<'_>, + AsyncAccountValueEncoder, + >, + targets: MultiProofTargetsV2, + ) -> Result + where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let MultiProofTargetsV2 { mut account_targets, storage_targets } = targets; + + let span = 
debug_span!( + target: "trie::proof_task", + "Account V2 multiproof calculation", + account_targets = account_targets.len(), + storage_targets = storage_targets.values().map(|t| t.len()).sum::(), + worker_id = self.worker_id, + ); + let _span_guard = span.enter(); + + trace!(target: "trie::proof_task", "Processing V2 account multiproof"); + + let storage_proof_receivers = + dispatch_v2_storage_proofs(&self.storage_work_tx, &account_targets, storage_targets)?; + + let mut value_encoder = AsyncAccountValueEncoder::new( + self.storage_work_tx.clone(), + storage_proof_receivers, + self.cached_storage_roots.clone(), + ); + + let proof = DecodedMultiProofV2 { + account_proofs: v2_calculator.proof(&mut value_encoder, &mut account_targets)?, + storage_proofs: value_encoder.into_storage_proofs()?, + }; + + Ok(ProofResult::V2(proof)) + } + + /// Processes an account multiproof request. + fn process_account_multiproof( + &self, + proof_tx: &ProofTaskTx, + v2_calculator: Option< + &mut proof_v2::ProofCalculator< + ::AccountTrieCursor<'_>, + ::AccountCursor<'_>, + AsyncAccountValueEncoder, + >, + >, + input: AccountMultiproofInput, + account_proofs_processed: &mut u64, + cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let mut proof_cursor_metrics = ProofTaskCursorMetricsCache::default(); + let proof_start = Instant::now(); + + let (proof_result_sender, result) = match input { + AccountMultiproofInput::Legacy { + targets, + prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + proof_result_sender, + } => ( + proof_result_sender, + self.compute_legacy_account_multiproof( + proof_tx, + targets, + prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + &mut proof_cursor_metrics, + ), + ), + AccountMultiproofInput::V2 { targets, proof_result_sender } => ( + proof_result_sender, + self.compute_v2_account_multiproof::( + v2_calculator.expect("v2 calculator provided"), + 
targets, + ), + ), + }; + + let ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + } = proof_result_sender; + + let proof_elapsed = proof_start.elapsed(); + let total_elapsed = start.elapsed(); *account_proofs_processed += 1; // Send result to MultiProofTask @@ -1275,6 +1422,8 @@ where ); } + proof_cursor_metrics.record_spans(); + trace!( target: "trie::proof_task", proof_time_us = proof_elapsed.as_micros(), @@ -1355,6 +1504,7 @@ fn build_account_multiproof_with_storage_roots

( provider: &P, ctx: AccountMultiproofParams<'_>, tracker: &mut ParallelTrieTracker, + proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, ) -> Result where P: TrieCursorFactory + HashedCursorFactory, @@ -1362,15 +1512,12 @@ where let accounts_added_removed_keys = ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); - // Create local metrics caches for account cursors. We can't directly use the metrics caches in - // the tracker due to the call to `inc_missed_leaves` which occurs on it. - let mut account_trie_cursor_metrics = TrieCursorMetricsCache::default(); - let mut account_hashed_cursor_metrics = HashedCursorMetricsCache::default(); - // Wrap account trie cursor with instrumented cursor let account_trie_cursor = provider.account_trie_cursor().map_err(ProviderError::Database)?; - let account_trie_cursor = - InstrumentedTrieCursor::new(account_trie_cursor, &mut account_trie_cursor_metrics); + let account_trie_cursor = InstrumentedTrieCursor::new( + account_trie_cursor, + &mut proof_cursor_metrics.account_trie_cursor, + ); // Create the walker. 
let walker = TrieWalker::<_>::state_trie(account_trie_cursor, ctx.prefix_set) @@ -1397,8 +1544,10 @@ where // Wrap account hashed cursor with instrumented cursor let account_hashed_cursor = provider.hashed_account_cursor().map_err(ProviderError::Database)?; - let account_hashed_cursor = - InstrumentedHashedCursor::new(account_hashed_cursor, &mut account_hashed_cursor_metrics); + let account_hashed_cursor = InstrumentedHashedCursor::new( + account_hashed_cursor, + &mut proof_cursor_metrics.account_hashed_cursor, + ); let mut account_node_iter = TrieNodeIter::state_trie(walker, account_hashed_cursor); @@ -1462,10 +1611,10 @@ where StorageProof::new_hashed(provider, provider, hashed_address) .with_prefix_set_mut(Default::default()) .with_trie_cursor_metrics( - &mut tracker.cursor_metrics.storage_trie_cursor, + &mut proof_cursor_metrics.storage_trie_cursor, ) .with_hashed_cursor_metrics( - &mut tracker.cursor_metrics.storage_hashed_cursor, + &mut proof_cursor_metrics.storage_hashed_cursor, ) .storage_multiproof( ctx.targets @@ -1516,21 +1665,6 @@ where BranchNodeMasksMap::default() }; - // Extend tracker with accumulated metrics from account cursors - tracker.cursor_metrics.account_trie_cursor.extend(&account_trie_cursor_metrics); - tracker.cursor_metrics.account_hashed_cursor.extend(&account_hashed_cursor_metrics); - - // Consume remaining storage proof receivers for accounts not encountered during trie walk. - // Done last to allow storage workers more time to complete while we finalized the account trie. 
- for (hashed_address, receiver) in storage_proof_receivers { - if let Ok(proof_msg) = receiver.recv() { - let proof_result = proof_msg.result?; - let proof = Into::>::into(proof_result) - .expect("Partial proofs are not yet supported"); - collected_decoded_storages.insert(hashed_address, proof); - } - } - Ok(DecodedMultiProof { account_subtree: decoded_account_subtree, branch_node_masks, @@ -1550,7 +1684,6 @@ fn dispatch_storage_proofs( storage_prefix_sets: &mut B256Map, with_branch_node_masks: bool, multi_added_removed_keys: Option<&Arc>, - use_v2_proofs: bool, ) -> Result>, ParallelStateRootError> { let mut storage_proof_receivers = B256Map::with_capacity_and_hasher(targets.len(), Default::default()); @@ -1564,20 +1697,14 @@ fn dispatch_storage_proofs( let (result_tx, result_rx) = crossbeam_channel::unbounded(); // Create computation input based on V2 flag - let input = if use_v2_proofs { - // Convert target slots to V2 targets - let v2_targets = target_slots.iter().copied().map(Into::into).collect(); - StorageProofInput::new(*hashed_address, v2_targets) - } else { - let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); - StorageProofInput::legacy( - *hashed_address, - prefix_set, - target_slots.clone(), - with_branch_node_masks, - multi_added_removed_keys.cloned(), - ) - }; + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + let input = StorageProofInput::legacy( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); // Always dispatch a storage proof so we obtain the storage root even when no slots are // requested. @@ -1595,6 +1722,64 @@ fn dispatch_storage_proofs( Ok(storage_proof_receivers) } + +/// Queues V2 storage proofs for all accounts in the targets and returns receivers. 
+/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. +fn dispatch_v2_storage_proofs( + storage_work_tx: &CrossbeamSender, + account_targets: &Vec, + storage_targets: B256Map>, +) -> Result>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(account_targets.len(), Default::default()); + + // Dispatch all proofs for targeted storage slots + for (hashed_address, targets) in storage_targets { + // Create channel for receiving StorageProofResultMessage + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let input = StorageProofInput::new(hashed_address, targets); + + storage_work_tx + .send(StorageWorkerJob::StorageProof { input, proof_result_sender: result_tx }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {hashed_address:?}: storage worker pool unavailable", + )) + })?; + + storage_proof_receivers.insert(hashed_address, result_rx); + } + + // If there are any targeted accounts which did not have storage targets then we generate a + // single proof target for them so that we get their root. 
+ for target in account_targets { + let hashed_address = target.key(); + if storage_proof_receivers.contains_key(&hashed_address) { + continue + } + + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let input = StorageProofInput::new(hashed_address, vec![proof_v2::Target::new(B256::ZERO)]); + + storage_work_tx + .send(StorageWorkerJob::StorageProof { input, proof_result_sender: result_tx }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {hashed_address:?}: storage worker pool unavailable", + )) + })?; + + storage_proof_receivers.insert(hashed_address, result_rx); + } + + Ok(storage_proof_receivers) +} + /// Input parameters for storage proof computation. #[derive(Debug)] pub enum StorageProofInput { @@ -1639,7 +1824,7 @@ impl StorageProofInput { } } - /// Creates a new [`StorageProofInput`] with the given hashed address and target slots. + /// Creates a new [`StorageProofInput`] with the given hashed address and target slots. pub const fn new(hashed_address: B256, targets: Vec) -> Self { Self::V2 { hashed_address, targets } } @@ -1655,20 +1840,39 @@ impl StorageProofInput { } /// Input parameters for account multiproof computation. -#[derive(Debug, Clone)] -pub struct AccountMultiproofInput { - /// The targets for which to compute the multiproof. - pub targets: MultiProofTargets, - /// The prefix sets for the proof calculation. - pub prefix_sets: TriePrefixSets, - /// Whether or not to collect branch node masks. - pub collect_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - pub multi_added_removed_keys: Option>, - /// Context for sending the proof result. - pub proof_result_sender: ProofResultContext, - /// Whether to use V2 storage proofs. - pub v2_proofs_enabled: bool, +#[derive(Debug)] +pub enum AccountMultiproofInput { + /// Legacy account multiproof proof variant + Legacy { + /// The targets for which to compute the multiproof. 
+ targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option>, + /// Context for sending the proof result. + proof_result_sender: ProofResultContext, + }, + /// V2 account multiproof variant + V2 { + /// The targets for which to compute the multiproof. + targets: MultiProofTargetsV2, + /// Context for sending the proof result. + proof_result_sender: ProofResultContext, + }, +} + +impl AccountMultiproofInput { + /// Returns the [`ProofResultContext`] for this input, consuming the input. + fn into_proof_result_sender(self) -> ProofResultContext { + match self { + Self::Legacy { proof_result_sender, .. } | Self::V2 { proof_result_sender, .. } => { + proof_result_sender + } + } + } } /// Parameters for building an account multiproof with pre-computed storage roots. 
diff --git a/crates/trie/parallel/src/stats.rs b/crates/trie/parallel/src/stats.rs index 088b95c9708..de5b0a628ef 100644 --- a/crates/trie/parallel/src/stats.rs +++ b/crates/trie/parallel/src/stats.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "metrics")] -use crate::proof_task_metrics::ProofTaskCursorMetricsCache; use derive_more::Deref; use reth_trie::stats::{TrieStats, TrieTracker}; @@ -36,9 +34,6 @@ pub struct ParallelTrieTracker { trie: TrieTracker, precomputed_storage_roots: u64, missed_leaves: u64, - #[cfg(feature = "metrics")] - /// Local tracking of cursor-related metrics - pub cursor_metrics: ProofTaskCursorMetricsCache, } impl ParallelTrieTracker { diff --git a/crates/trie/parallel/src/value_encoder.rs b/crates/trie/parallel/src/value_encoder.rs index 13c611922db..7b08d3e1b5e 100644 --- a/crates/trie/parallel/src/value_encoder.rs +++ b/crates/trie/parallel/src/value_encoder.rs @@ -86,7 +86,6 @@ pub(crate) struct AsyncAccountValueEncoder { impl AsyncAccountValueEncoder { /// Initializes a [`Self`] using a `ProofWorkerHandle` which will be used to calculate storage /// roots asynchronously. - #[expect(dead_code)] pub(crate) fn new( storage_work_tx: CrossbeamSender, dispatched: B256Map>, @@ -106,7 +105,6 @@ impl AsyncAccountValueEncoder { /// /// This method panics if any deferred encoders produced by [`Self::deferred_encoder`] have not /// been dropped. 
- #[expect(dead_code)] pub(crate) fn into_storage_proofs( self, ) -> Result>, StateProofError> { From 9cf82c840320f6845b2ec4a333202f899d09464b Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Wed, 21 Jan 2026 14:23:26 +0000 Subject: [PATCH 115/267] fix: supply a real ptr to mdbx_dbi_flags_ex (#21230) --- crates/storage/libmdbx-rs/src/transaction.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index f0f4f120ae1..67f28ae9e31 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -223,7 +223,11 @@ where let mut flags: c_uint = 0; unsafe { self.txn_execute(|txn| { - mdbx_result(ffi::mdbx_dbi_flags_ex(txn, dbi, &mut flags, ptr::null_mut())) + // `mdbx_dbi_flags_ex` requires `status` to be a non-NULL ptr, otherwise it will + // return an EINVAL and panic below, so we just provide a placeholder variable + // which we discard immediately. 
+ let mut _status: c_uint = 0; + mdbx_result(ffi::mdbx_dbi_flags_ex(txn, dbi, &mut flags, &mut _status)) })??; } From 1954c91a603ce8f6d3c6e7bb1a9897b590065e5d Mon Sep 17 00:00:00 2001 From: Emma Jamieson-Hoare Date: Wed, 21 Jan 2026 14:40:54 +0000 Subject: [PATCH 116/267] chore: update CODEOWNERS (#21223) Co-authored-by: Emma Jamieson-Hoare Co-authored-by: YK Co-authored-by: Arsenii Kulikov Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- .github/CODEOWNERS | 43 +++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5e334d13c65..4c3ce10c7ab 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,45 +1,52 @@ * @gakonst -crates/blockchain-tree-api/ @rakita @mattsse @Rjected -crates/blockchain-tree/ @rakita @mattsse @Rjected crates/chain-state/ @fgimenez @mattsse crates/chainspec/ @Rjected @joshieDo @mattsse crates/cli/ @mattsse +crates/config/ @shekhirin @mattsse @Rjected crates/consensus/ @mattsse @Rjected crates/e2e-test-utils/ @mattsse @Rjected @klkvr @fgimenez -crates/engine/ @mattsse @Rjected @fgimenez @mediocregopher @yongkangc -crates/era/ @mattsse @RomanHodulak +crates/engine/ @mattsse @Rjected @mediocregopher @yongkangc +crates/era/ @mattsse +crates/era-downloader/ @mattsse +crates/era-utils/ @mattsse crates/errors/ @mattsse -crates/ethereum-forks/ @mattsse @Rjected crates/ethereum/ @mattsse @Rjected crates/etl/ @joshieDo @shekhirin -crates/evm/ @rakita @mattsse @Rjected +crates/evm/ @mattsse @Rjected @klkvr crates/exex/ @shekhirin +crates/fs-util/ @mattsse +crates/metrics/ @mattsse @Rjected crates/net/ @mattsse @Rjected crates/net/downloaders/ @Rjected crates/node/ @mattsse @Rjected @klkvr -crates/optimism/ @mattsse @Rjected @fgimenez +crates/optimism/ @mattsse @Rjected crates/payload/ @mattsse @Rjected -crates/primitives-traits/ @Rjected @RomanHodulak @mattsse @klkvr +crates/primitives-traits/ @Rjected @mattsse @klkvr 
crates/primitives/ @Rjected @mattsse @klkvr crates/prune/ @shekhirin @joshieDo -crates/ress @shekhirin @Rjected -crates/revm/ @mattsse @rakita -crates/rpc/ @mattsse @Rjected @RomanHodulak +crates/ress/ @shekhirin @Rjected +crates/revm/ @mattsse +crates/rpc/ @mattsse @Rjected crates/stages/ @shekhirin @mediocregopher crates/static-file/ @joshieDo @shekhirin +crates/stateless/ @mattsse crates/storage/codecs/ @joshieDo -crates/storage/db-api/ @joshieDo @rakita +crates/storage/db-api/ @joshieDo crates/storage/db-common/ @Rjected -crates/storage/db/ @joshieDo @rakita -crates/storage/errors/ @rakita -crates/storage/libmdbx-rs/ @rakita @shekhirin +crates/storage/db/ @joshieDo +crates/storage/errors/ @joshieDo +crates/storage/libmdbx-rs/ @shekhirin crates/storage/nippy-jar/ @joshieDo @shekhirin -crates/storage/provider/ @rakita @joshieDo @shekhirin +crates/storage/provider/ @joshieDo @shekhirin crates/storage/storage-api/ @joshieDo crates/tasks/ @mattsse -crates/tokio-util/ @fgimenez +crates/tokio-util/ @mattsse +crates/tracing/ @mattsse @shekhirin +crates/tracing-otlp/ @mattsse @Rjected crates/transaction-pool/ @mattsse @yongkangc -crates/trie/ @Rjected @shekhirin @mediocregopher +crates/trie/ @Rjected @shekhirin @mediocregopher @yongkangc +bin/reth/ @mattsse @shekhirin @Rjected +bin/reth-bench/ @mattsse @Rjected @shekhirin @yongkangc bin/reth-bench-compare/ @mediocregopher @shekhirin @yongkangc etc/ @Rjected @shekhirin .github/ @gakonst @DaniPopes From 274394e777472c6a656fc9773f6f8c02b7eadc35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E3=81=8B=E3=82=8A=E3=82=93=E3=81=A8=E3=81=86?= Date: Wed, 21 Jan 2026 17:11:03 +0100 Subject: [PATCH 117/267] fix: fix payload file filter prefix in replay-payloads (#21255) --- bin/reth-bench/src/bench/replay_payloads.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth-bench/src/bench/replay_payloads.rs b/bin/reth-bench/src/bench/replay_payloads.rs index 3de65c9b7ac..e6d388d9efe 100644 --- 
a/bin/reth-bench/src/bench/replay_payloads.rs +++ b/bin/reth-bench/src/bench/replay_payloads.rs @@ -180,7 +180,7 @@ impl Command { .filter_map(|e| e.ok()) .filter(|e| { e.path().extension().and_then(|s| s.to_str()) == Some("json") && - e.file_name().to_string_lossy().starts_with("payload_") + e.file_name().to_string_lossy().starts_with("payload_block_") }) .collect(); From 097448586332eba2826bb538d8c9bd4a19e9c342 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 21 Jan 2026 16:19:22 +0000 Subject: [PATCH 118/267] feat(reth-bench): add --target-gas-limit option to gas-limit-ramp (#21262) --- bin/reth-bench/src/bench/gas_limit_ramp.rs | 78 ++++++++++++++++++---- 1 file changed, 66 insertions(+), 12 deletions(-) diff --git a/bin/reth-bench/src/bench/gas_limit_ramp.rs b/bin/reth-bench/src/bench/gas_limit_ramp.rs index 4f0d58dbd96..7c9e894ea3f 100644 --- a/bin/reth-bench/src/bench/gas_limit_ramp.rs +++ b/bin/reth-bench/src/bench/gas_limit_ramp.rs @@ -25,9 +25,14 @@ use tracing::info; /// `reth benchmark gas-limit-ramp` command. #[derive(Debug, Parser)] pub struct Command { - /// Number of blocks to generate. - #[arg(long, value_name = "BLOCKS")] - blocks: u64, + /// Number of blocks to generate. Mutually exclusive with --target-gas-limit. + #[arg(long, value_name = "BLOCKS", conflicts_with = "target_gas_limit")] + blocks: Option, + + /// Target gas limit to ramp up to. The benchmark will generate blocks until the gas limit + /// reaches or exceeds this value. Mutually exclusive with --blocks. + #[arg(long, value_name = "TARGET_GAS_LIMIT", conflicts_with = "blocks")] + target_gas_limit: Option, /// The Engine API RPC URL. #[arg(long = "engine-rpc-url", value_name = "ENGINE_RPC_URL")] @@ -42,12 +47,37 @@ pub struct Command { output: PathBuf, } +/// Mode for determining when to stop ramping. +#[derive(Debug, Clone, Copy)] +enum RampMode { + /// Ramp for a fixed number of blocks. + Blocks(u64), + /// Ramp until reaching or exceeding target gas limit. 
+ TargetGasLimit(u64), +} + impl Command { /// Execute `benchmark gas-limit-ramp` command. pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - if self.blocks == 0 { - return Err(eyre::eyre!("--blocks must be greater than 0")); - } + let mode = match (self.blocks, self.target_gas_limit) { + (Some(blocks), None) => { + if blocks == 0 { + return Err(eyre::eyre!("--blocks must be greater than 0")); + } + RampMode::Blocks(blocks) + } + (None, Some(target)) => { + if target == 0 { + return Err(eyre::eyre!("--target-gas-limit must be greater than 0")); + } + RampMode::TargetGasLimit(target) + } + _ => { + return Err(eyre::eyre!( + "Exactly one of --blocks or --target-gas-limit must be specified" + )); + } + }; // Ensure output directory exists if self.output.is_file() { @@ -84,14 +114,31 @@ impl Command { let canonical_parent = parent_header.number; let start_block = canonical_parent + 1; - let end_block = start_block + self.blocks - 1; - info!(canonical_parent, start_block, end_block, "Starting gas limit ramp benchmark"); + match mode { + RampMode::Blocks(blocks) => { + info!( + canonical_parent, + start_block, + end_block = start_block + blocks - 1, + "Starting gas limit ramp benchmark (block count mode)" + ); + } + RampMode::TargetGasLimit(target) => { + info!( + canonical_parent, + start_block, + current_gas_limit = parent_header.gas_limit, + target_gas_limit = target, + "Starting gas limit ramp benchmark (target gas limit mode)" + ); + } + } - let mut next_block_number = start_block; + let mut blocks_processed = 0u64; let total_benchmark_duration = Instant::now(); - while next_block_number <= end_block { + while !should_stop(mode, blocks_processed, parent_header.gas_limit) { let timestamp = parent_header.timestamp.saturating_add(1); let request = prepare_payload_request(&chain_spec, timestamp, parent_hash); @@ -140,13 +187,13 @@ impl Command { parent_header = block.header; parent_hash = block_hash; - next_block_number += 1; + blocks_processed += 1; } 
let final_gas_limit = parent_header.gas_limit; info!( total_duration=?total_benchmark_duration.elapsed(), - blocks_processed = self.blocks, + blocks_processed, final_gas_limit, "Benchmark complete" ); @@ -158,3 +205,10 @@ impl Command { const fn max_gas_limit_increase(parent_gas_limit: u64) -> u64 { (parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR).saturating_sub(1) } + +const fn should_stop(mode: RampMode, blocks_processed: u64, current_gas_limit: u64) -> bool { + match mode { + RampMode::Blocks(target_blocks) => blocks_processed >= target_blocks, + RampMode::TargetGasLimit(target) => current_gas_limit >= target, + } +} From ff8ac97e33de13a659400ef1e5b694db4e91ccb4 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 21 Jan 2026 16:27:30 +0000 Subject: [PATCH 119/267] fix(stages): clear ETL collectors on HeaderStage error paths (#21258) --- crates/stages/stages/src/stages/headers.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 360b34b5db9..6927622766e 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -86,6 +86,14 @@ where } } + /// Clear all ETL state. Called on error paths to prevent buffer pollution on retry. + fn clear_etl_state(&mut self) { + self.sync_gap = None; + self.hash_collector.clear(); + self.header_collector.clear(); + self.is_etl_ready = false; + } + /// Write downloaded headers to storage from ETL. 
/// /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`] @@ -258,7 +266,7 @@ where } Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => { error!(target: "sync::stages::headers", %error, "Cannot attach header to head"); - self.sync_gap = None; + self.clear_etl_state(); return Poll::Ready(Err(StageError::DetachedHead { local_head: Box::new(local_head.block_with_parent()), header: Box::new(header.block_with_parent()), @@ -266,7 +274,7 @@ where })) } None => { - self.sync_gap = None; + self.clear_etl_state(); return Poll::Ready(Err(StageError::ChannelClosed)) } } @@ -324,7 +332,7 @@ where provider: &Provider, input: UnwindInput, ) -> Result { - self.sync_gap.take(); + self.clear_etl_state(); // First unwind the db tables, until the unwind_to block number. use the walker to unwind // HeaderNumbers based on the index in CanonicalHeaders From dd72cfe23ee1626d8c490cf9e32b88e3931abab0 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 21 Jan 2026 08:52:24 -0800 Subject: [PATCH 120/267] refactor: remove static_files.to_settings() and add edge feature to RocksDB flags (#21225) --- Cargo.lock | 1 - crates/cli/commands/src/common.rs | 21 ++++- crates/node/builder/src/launch/common.rs | 10 +-- crates/node/core/Cargo.toml | 3 +- crates/node/core/src/args/rocksdb.rs | 88 +++++++++++++------ crates/node/core/src/args/static_files.rs | 13 --- crates/node/core/src/node_config.rs | 21 ++--- crates/storage/db-api/src/models/metadata.rs | 15 ++++ docs/vocs/docs/pages/cli/op-reth/db.mdx | 30 +++++++ .../vocs/docs/pages/cli/op-reth/import-op.mdx | 30 +++++++ .../pages/cli/op-reth/import-receipts-op.mdx | 30 +++++++ .../docs/pages/cli/op-reth/init-state.mdx | 30 +++++++ docs/vocs/docs/pages/cli/op-reth/init.mdx | 30 +++++++ docs/vocs/docs/pages/cli/op-reth/node.mdx | 15 +++- docs/vocs/docs/pages/cli/op-reth/prune.mdx | 30 +++++++ .../docs/pages/cli/op-reth/re-execute.mdx | 30 +++++++ 
.../docs/pages/cli/op-reth/stage/drop.mdx | 30 +++++++ .../docs/pages/cli/op-reth/stage/dump.mdx | 30 +++++++ .../vocs/docs/pages/cli/op-reth/stage/run.mdx | 30 +++++++ .../docs/pages/cli/op-reth/stage/unwind.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/db.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/download.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/export-era.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/import-era.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/import.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/init-state.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/init.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/node.mdx | 15 +++- docs/vocs/docs/pages/cli/reth/prune.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/re-execute.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 30 +++++++ docs/vocs/docs/pages/cli/reth/stage/run.mdx | 30 +++++++ .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 30 +++++++ 34 files changed, 851 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 31ad77a44bb..c2ea2d86702 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9442,7 +9442,6 @@ dependencies = [ "reth-network-p2p", "reth-network-peers", "reth-primitives-traits", - "reth-provider", "reth-prune-types", "reth-rpc-convert", "reth-rpc-eth-types", diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 56d574f74cc..4cd8c29d7a3 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -19,7 +19,7 @@ use reth_node_builder::{ Node, NodeComponents, NodeComponentsBuilder, NodeTypes, NodeTypesWithDBAdapter, }; use reth_node_core::{ - args::{DatabaseArgs, DatadirArgs, StaticFilesArgs}, + args::{DatabaseArgs, DatadirArgs, RocksDbArgs, StaticFilesArgs}, dirs::{ChainPath, DataDirPath}, }; use reth_provider::{ @@ -27,7 +27,7 @@ use reth_provider::{ BlockchainProvider, NodeTypesForProvider, RocksDBProvider, StaticFileProvider, 
StaticFileProviderBuilder, }, - ProviderFactory, StaticFileProviderFactory, + ProviderFactory, StaticFileProviderFactory, StorageSettings, }; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; @@ -66,9 +66,24 @@ pub struct EnvironmentArgs { /// All static files related arguments #[command(flatten)] pub static_files: StaticFilesArgs, + + /// All `RocksDB` related arguments + #[command(flatten)] + pub rocksdb: RocksDbArgs, } impl EnvironmentArgs { + /// Returns the effective storage settings derived from static-file and `RocksDB` CLI args. + pub fn storage_settings(&self) -> StorageSettings { + StorageSettings::base() + .with_receipts_in_static_files(self.static_files.receipts) + .with_transaction_senders_in_static_files(self.static_files.transaction_senders) + .with_account_changesets_in_static_files(self.static_files.account_changesets) + .with_transaction_hash_numbers_in_rocksdb(self.rocksdb.all || self.rocksdb.tx_hash) + .with_storages_history_in_rocksdb(self.rocksdb.all || self.rocksdb.storages_history) + .with_account_history_in_rocksdb(self.rocksdb.all || self.rocksdb.account_history) + } + /// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`]. 
pub fn init(&self, access: AccessRights) -> eyre::Result> @@ -131,7 +146,7 @@ impl EnvironmentArgs { self.create_provider_factory(&config, db, sfp, rocksdb_provider, access)?; if access.is_read_write() { debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis"); - init_genesis_with_settings(&provider_factory, self.static_files.to_settings())?; + init_genesis_with_settings(&provider_factory, self.storage_settings())?; } Ok(Environment { config, provider_factory, data_dir }) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index e351fe96f65..d97ecab876b 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -676,19 +676,13 @@ where /// Convenience function to [`Self::init_genesis`] pub fn with_genesis(self) -> Result { - init_genesis_with_settings( - self.provider_factory(), - self.node_config().static_files.to_settings(), - )?; + init_genesis_with_settings(self.provider_factory(), self.node_config().storage_settings())?; Ok(self) } /// Write the genesis block and state if it has not already been written pub fn init_genesis(&self) -> Result { - init_genesis_with_settings( - self.provider_factory(), - self.node_config().static_files.to_settings(), - ) + init_genesis_with_settings(self.provider_factory(), self.node_config().storage_settings()) } /// Creates a new `WithMeteredProvider` container and attaches it to the diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 2be74b17613..676f507c136 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -19,7 +19,6 @@ reth-cli-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-storage-errors.workspace = true reth-storage-api = { workspace = true, features = ["std", "db-api"] } -reth-provider.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true 
reth-rpc-eth-types.workspace = true @@ -92,7 +91,7 @@ min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] # Marker feature for edge/unstable builds - captured by vergen in build.rs -edge = ["reth-provider/edge"] +edge = ["reth-storage-api/edge"] [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } diff --git a/crates/node/core/src/args/rocksdb.rs b/crates/node/core/src/args/rocksdb.rs index 61bc33bc90d..ad3b5dc8d31 100644 --- a/crates/node/core/src/args/rocksdb.rs +++ b/crates/node/core/src/args/rocksdb.rs @@ -2,11 +2,19 @@ use clap::{ArgAction, Args}; +/// Default value for `RocksDB` routing flags. +/// +/// When the `edge` feature is enabled, defaults to `true` to enable edge storage features. +/// Otherwise defaults to `false` for legacy behavior. +const fn default_rocksdb_flag() -> bool { + cfg!(feature = "edge") +} + /// Parameters for `RocksDB` table routing configuration. /// /// These flags control which database tables are stored in `RocksDB` instead of MDBX. /// All flags are genesis-initialization-only: changing them after genesis requires a re-sync. -#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)] +#[derive(Debug, Args, PartialEq, Eq, Clone, Copy)] #[command(next_help_heading = "RocksDB")] pub struct RocksDbArgs { /// Route all supported tables to `RocksDB` instead of MDBX. @@ -17,31 +25,51 @@ pub struct RocksDbArgs { pub all: bool, /// Route tx hash -> number table to `RocksDB` instead of MDBX. - #[arg(long = "rocksdb.tx-hash", action = ArgAction::Set)] - pub tx_hash: Option, + /// + /// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. + /// Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + #[arg(long = "rocksdb.tx-hash", default_value_t = default_rocksdb_flag(), action = ArgAction::Set)] + pub tx_hash: bool, /// Route storages history tables to `RocksDB` instead of MDBX. 
- #[arg(long = "rocksdb.storages-history", action = ArgAction::Set)] - pub storages_history: Option, + /// + /// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. + /// Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + #[arg(long = "rocksdb.storages-history", default_value_t = default_rocksdb_flag(), action = ArgAction::Set)] + pub storages_history: bool, /// Route account history tables to `RocksDB` instead of MDBX. - #[arg(long = "rocksdb.account-history", action = ArgAction::Set)] - pub account_history: Option, + /// + /// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. + /// Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + #[arg(long = "rocksdb.account-history", default_value_t = default_rocksdb_flag(), action = ArgAction::Set)] + pub account_history: bool, +} + +impl Default for RocksDbArgs { + fn default() -> Self { + Self { + all: false, + tx_hash: default_rocksdb_flag(), + storages_history: default_rocksdb_flag(), + account_history: default_rocksdb_flag(), + } + } } impl RocksDbArgs { /// Validates the `RocksDB` arguments. /// /// Returns an error if `--rocksdb.all` is used with any individual flag set to `false`. 
- pub fn validate(&self) -> Result<(), RocksDbArgsError> { + pub const fn validate(&self) -> Result<(), RocksDbArgsError> { if self.all { - if self.tx_hash == Some(false) { + if !self.tx_hash { return Err(RocksDbArgsError::ConflictingFlags("tx-hash")); } - if self.storages_history == Some(false) { + if !self.storages_history { return Err(RocksDbArgsError::ConflictingFlags("storages-history")); } - if self.account_history == Some(false) { + if !self.account_history { return Err(RocksDbArgsError::ConflictingFlags("account-history")); } } @@ -78,7 +106,7 @@ mod tests { fn test_parse_all_flag() { let args = CommandParser::::parse_from(["reth", "--rocksdb.all"]).args; assert!(args.all); - assert_eq!(args.tx_hash, None); + assert_eq!(args.tx_hash, default_rocksdb_flag()); } #[test] @@ -91,32 +119,42 @@ mod tests { ]) .args; assert!(!args.all); - assert_eq!(args.tx_hash, Some(true)); - assert_eq!(args.storages_history, Some(false)); - assert_eq!(args.account_history, Some(true)); - } - - #[test] - fn test_validate_all_alone_ok() { - let args = RocksDbArgs { all: true, ..Default::default() }; - assert!(args.validate().is_ok()); + assert!(args.tx_hash); + assert!(!args.storages_history); + assert!(args.account_history); } #[test] fn test_validate_all_with_true_ok() { - let args = RocksDbArgs { all: true, tx_hash: Some(true), ..Default::default() }; + let args = + RocksDbArgs { all: true, tx_hash: true, storages_history: true, account_history: true }; assert!(args.validate().is_ok()); } #[test] fn test_validate_all_with_false_errors() { - let args = RocksDbArgs { all: true, tx_hash: Some(false), ..Default::default() }; + let args = RocksDbArgs { + all: true, + tx_hash: false, + storages_history: true, + account_history: true, + }; assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("tx-hash"))); - let args = RocksDbArgs { all: true, storages_history: Some(false), ..Default::default() }; + let args = RocksDbArgs { + all: true, + tx_hash: true, + 
storages_history: false, + account_history: true, + }; assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("storages-history"))); - let args = RocksDbArgs { all: true, account_history: Some(false), ..Default::default() }; + let args = RocksDbArgs { + all: true, + tx_hash: true, + storages_history: true, + account_history: false, + }; assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("account-history"))); } } diff --git a/crates/node/core/src/args/static_files.rs b/crates/node/core/src/args/static_files.rs index aa52164f2f2..d0048022cfc 100644 --- a/crates/node/core/src/args/static_files.rs +++ b/crates/node/core/src/args/static_files.rs @@ -2,7 +2,6 @@ use clap::Args; use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig}; -use reth_provider::StorageSettings; /// Blocks per static file when running in `--minimal` node. /// @@ -102,18 +101,6 @@ impl StaticFilesArgs { }, } } - - /// Converts the static files arguments into [`StorageSettings`]. - pub const fn to_settings(&self) -> StorageSettings { - #[cfg(feature = "edge")] - let base = StorageSettings::edge(); - #[cfg(not(feature = "edge"))] - let base = StorageSettings::legacy(); - - base.with_receipts_in_static_files(self.receipts) - .with_transaction_senders_in_static_files(self.transaction_senders) - .with_account_changesets_in_static_files(self.account_changesets) - } } impl Default for StaticFilesArgs { diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index aeff14a8755..98502fdd115 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -358,19 +358,14 @@ impl NodeConfig { } /// Returns the effective storage settings derived from static-file and `RocksDB` CLI args. 
- pub fn storage_settings(&self) -> StorageSettings { - let tx_hash = self.rocksdb.all || self.rocksdb.tx_hash.unwrap_or(false); - let storages_history = self.rocksdb.all || self.rocksdb.storages_history.unwrap_or(false); - let account_history = self.rocksdb.all || self.rocksdb.account_history.unwrap_or(false); - - StorageSettings { - receipts_in_static_files: self.static_files.receipts, - transaction_senders_in_static_files: self.static_files.transaction_senders, - account_changesets_in_static_files: self.static_files.account_changesets, - transaction_hash_numbers_in_rocksdb: tx_hash, - storages_history_in_rocksdb: storages_history, - account_history_in_rocksdb: account_history, - } + pub const fn storage_settings(&self) -> StorageSettings { + StorageSettings::base() + .with_receipts_in_static_files(self.static_files.receipts) + .with_transaction_senders_in_static_files(self.static_files.transaction_senders) + .with_account_changesets_in_static_files(self.static_files.account_changesets) + .with_transaction_hash_numbers_in_rocksdb(self.rocksdb.all || self.rocksdb.tx_hash) + .with_storages_history_in_rocksdb(self.rocksdb.all || self.rocksdb.storages_history) + .with_account_history_in_rocksdb(self.rocksdb.all || self.rocksdb.account_history) } /// Returns the max block that the node should run to, looking it up from the network if diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs index b3dc4710936..a12e9b6dab6 100644 --- a/crates/storage/db-api/src/models/metadata.rs +++ b/crates/storage/db-api/src/models/metadata.rs @@ -34,6 +34,21 @@ pub struct StorageSettings { } impl StorageSettings { + /// Returns the default base `StorageSettings` for this build. + /// + /// When the `edge` feature is enabled, returns [`Self::edge()`]. + /// Otherwise, returns [`Self::legacy()`]. 
+ pub const fn base() -> Self { + #[cfg(feature = "edge")] + { + Self::edge() + } + #[cfg(not(feature = "edge"))] + { + Self::legacy() + } + } + /// Creates `StorageSettings` for edge nodes with all storage features enabled: /// - Receipts and transaction senders in static files /// - History indices in `RocksDB` (storages, accounts, transaction hashes) diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx index 335b54cba6c..9d5fd0032a4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -154,6 +154,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx index 891439b4f6d..95ef59d63e6 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --chunk-len Chunk byte length to read from file. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx index cabcf3b0401..499017a379f 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --chunk-len Chunk byte length to read from file. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx index 429c4fe1f0d..9637e30cd6e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --without-evm Specifies whether to initialize the state without relying on EVM historical data. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/init.mdx b/docs/vocs/docs/pages/cli/op-reth/init.mdx index 1094918f334..01cd9d866a2 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index c2de9fae562..3493c9ac4b4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -904,18 +904,27 @@ RocksDB: This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. 
--rocksdb.tx-hash - Route tx hash -> number table to `RocksDB` instead of MDBX + Route tx hash -> number table to `RocksDB` instead of MDBX. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] [possible values: true, false] --rocksdb.storages-history - Route storages history tables to `RocksDB` instead of MDBX + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + [default: false] [possible values: true, false] --rocksdb.account-history - Route account history tables to `RocksDB` instead of MDBX + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + [default: false] [possible values: true, false] Engine: diff --git a/docs/vocs/docs/pages/cli/op-reth/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/prune.mdx index 953e77d6cac..1409abf05f4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/prune.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx index 8e40a32b9ea..484805486e1 100644 --- a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --from The height to start at diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx index e176564435b..effc3dfe6bd 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + Possible values: - headers: The headers stage within the pipeline diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx index 99d18f48ea7..9843a022569 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx @@ -145,6 +145,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx index 13a1599bd76..3130a06819f 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --metrics Enable Prometheus metrics. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx index 3e380975e52..496417aeb4c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx @@ -143,6 +143,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index ef1793696bd..5fd0ef4199b 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -154,6 +154,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 8c8b047d94e..dfc81c0bf57 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + -u, --url Specify a snapshot URL or let the command propose a default one. 
diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 4dcbbd18aa3..64bc8038242 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --first-block-number Optional first block number to export from the db. It is by default 0. 
diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index fb7a3d394c0..38218816626 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --path The path to a directory for import. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index c3482e2a46c..ad81cc3d187 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. 
+ + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --no-state Disables stages that require state. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 16aa7f61482..b1d05e4b5ff 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --without-evm Specifies whether to initialize the state without relying on EVM historical data. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index d2da76d31c6..b6c5ee05399 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 3766d7ed9df..b076f3eee47 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -904,18 +904,27 @@ RocksDB: This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. --rocksdb.tx-hash - Route tx hash -> number table to `RocksDB` instead of MDBX + Route tx hash -> number table to `RocksDB` instead of MDBX. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] [possible values: true, false] --rocksdb.storages-history - Route storages history tables to `RocksDB` instead of MDBX + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + [default: false] [possible values: true, false] --rocksdb.account-history - Route account history tables to `RocksDB` instead of MDBX + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ [default: false] [possible values: true, false] Engine: diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index c2d1e830099..8e33c025044 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index c4a254ed511..aa0615070c7 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + --from The height to start at diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 26178aad354..15318efa4cd 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + Possible values: - headers: The headers stage within the pipeline diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 5750798c6fe..f78ed561f99 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -145,6 +145,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 1213a272647..8752c5e526a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -138,6 +138,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + --metrics Enable Prometheus metrics. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index ed16cfb48fc..8c4f9ef9f7a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -143,6 +143,36 @@ Static Files: [default: false] [possible values: true, false] +RocksDB: + --rocksdb.all + Route all supported tables to `RocksDB` instead of MDBX. + + This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false. + + --rocksdb.tx-hash + Route tx hash -> number table to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.storages-history + Route storages history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + + [default: false] + [possible values: true, false] + + --rocksdb.account-history + Route account history tables to `RocksDB` instead of MDBX. + + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ + [default: false] + [possible values: true, false] + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound From 624ddc57799c1c1bf06e0d6af3e945b57f5d3e32 Mon Sep 17 00:00:00 2001 From: YK Date: Wed, 21 Jan 2026 18:05:19 +0100 Subject: [PATCH 121/267] feat(stages): add RocksDB support for IndexStorageHistoryStage (#21175) --- .../src/stages/index_storage_history.rs | 356 +++++++++++++++-- crates/stages/stages/src/stages/utils.rs | 360 ++++++++++-------- crates/storage/provider/src/either_writer.rs | 45 ++- .../src/providers/database/provider.rs | 44 ++- .../src/providers/rocksdb/provider.rs | 173 +++++++++ 5 files changed, 770 insertions(+), 208 deletions(-) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 2ec4094c1ec..e37dbaa4411 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -1,19 +1,21 @@ -use super::{collect_history_indices, load_history_indices}; -use crate::{StageCheckpoint, StageId}; +use super::collect_history_indices; +use crate::{stages::utils::load_storage_history, StageCheckpoint, StageId}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db_api::{ models::{storage_sharded_key::StorageShardedKey, AddressStorageKey, BlockNumberAddress}, - table::Decode, tables, transaction::DbTxMut, }; -use reth_provider::{DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter}; +use reth_provider::{ + DBProvider, EitherWriter, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, + RocksDBProviderFactory, StorageSettingsCache, +}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use std::fmt::Debug; use tracing::info; -/// Stage is indexing history the account changesets generated in 
+/// Stage is indexing history the storage changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information /// on index sharding take a look at [`tables::StoragesHistory`]. #[derive(Debug)] @@ -34,7 +36,7 @@ impl IndexStorageHistoryStage { etl_config: EtlConfig, prune_mode: Option, ) -> Self { - Self { commit_threshold: config.commit_threshold, prune_mode, etl_config } + Self { commit_threshold: config.commit_threshold, etl_config, prune_mode } } } @@ -46,8 +48,13 @@ impl Default for IndexStorageHistoryStage { impl Stage for IndexStorageHistoryStage where - Provider: - DBProvider + PruneCheckpointWriter + HistoryWriter + PruneCheckpointReader, + Provider: DBProvider + + HistoryWriter + + PruneCheckpointReader + + PruneCheckpointWriter + + StorageSettingsCache + + RocksDBProviderFactory + + reth_provider::NodePrimitivesProvider, { /// Return the id of the stage fn id(&self) -> StageId { @@ -95,15 +102,25 @@ where let mut range = input.next_block_range(); let first_sync = input.checkpoint().block_number == 0; + let use_rocksdb = provider.cached_storage_settings().storages_history_in_rocksdb; // On first sync we might have history coming from genesis. We clear the table since it's // faster to rebuild from scratch. if first_sync { - provider.tx_ref().clear::()?; + if use_rocksdb { + // Note: RocksDB clear() executes immediately (not deferred to commit like MDBX), + // but this is safe for first_sync because if we crash before commit, the + // checkpoint stays at 0 and we'll just clear and rebuild again on restart. The + // source data (changesets) is intact. 
+ #[cfg(all(unix, feature = "rocksdb"))] + provider.rocksdb_provider().clear::()?; + } else { + provider.tx_ref().clear::()?; + } range = 0..=*input.next_block_range().end(); } - info!(target: "sync::stages::index_storage_history::exec", ?first_sync, "Collecting indices"); + info!(target: "sync::stages::index_storage_history::exec", ?first_sync, ?use_rocksdb, "Collecting indices"); let collector = collect_history_indices::<_, tables::StorageChangeSets, tables::StoragesHistory, _>( provider, @@ -116,16 +133,13 @@ where )?; info!(target: "sync::stages::index_storage_history::exec", "Loading indices into database"); - load_history_indices::<_, tables::StoragesHistory, _>( - provider, - collector, - first_sync, - |AddressStorageKey((address, storage_key)), highest_block_number| { - StorageShardedKey::new(address, storage_key, highest_block_number) - }, - StorageShardedKey::decode_owned, - |key| AddressStorageKey((key.address, key.sharded_key.key)), - )?; + + provider.with_rocksdb_batch(|rocksdb_batch| { + let mut writer = EitherWriter::new_storages_history(provider, rocksdb_batch)?; + load_storage_history(collector, first_sync, &mut writer) + .map_err(|e| reth_provider::ProviderError::other(Box::new(e)))?; + Ok(((), writer.into_raw_rocksdb_batch())) + })?; Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true }) } @@ -382,12 +396,12 @@ mod tests { async fn insert_index_second_half_shard() { // init let db = TestStageDB::default(); - let mut close_full_list = (1..=LAST_BLOCK_IN_FULL_SHARD - 1).collect::>(); + let mut almost_full_list = (1..=LAST_BLOCK_IN_FULL_SHARD - 1).collect::>(); // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&close_full_list)).unwrap(); + tx.put::(shard(u64::MAX), list(&almost_full_list)).unwrap(); Ok(()) }) .unwrap(); @@ -396,12 +410,12 @@ mod tests { run(&db, LAST_BLOCK_IN_FULL_SHARD + 1, Some(LAST_BLOCK_IN_FULL_SHARD - 1)); // verify - close_full_list.push(LAST_BLOCK_IN_FULL_SHARD); + 
almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD); let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ - (shard(LAST_BLOCK_IN_FULL_SHARD), close_full_list.clone()), + (shard(LAST_BLOCK_IN_FULL_SHARD), almost_full_list.clone()), (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1]) ]) ); @@ -410,9 +424,9 @@ mod tests { unwind(&db, LAST_BLOCK_IN_FULL_SHARD, LAST_BLOCK_IN_FULL_SHARD - 1); // verify initial state - close_full_list.pop(); + almost_full_list.pop(); let table = cast(db.table::().unwrap()); - assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list)])); + assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list)])); } #[tokio::test] @@ -663,4 +677,294 @@ mod tests { Ok(()) } } + + #[cfg(all(unix, feature = "rocksdb"))] + mod rocksdb_tests { + use super::*; + use reth_provider::RocksDBProviderFactory; + use reth_storage_api::StorageSettings; + + /// Test that when `storages_history_in_rocksdb` is enabled, the stage + /// writes storage history indices to `RocksDB` instead of MDBX. 
+ #[tokio::test] + async fn execute_writes_to_rocksdb_when_enabled() { + let db = TestStageDB::default(); + + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + db.commit(|tx| { + for block in 0..=10 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::( + block_number_address(block), + storage(STORAGE_KEY), + )?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(10), ..Default::default() }; + let mut stage = IndexStorageHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }); + provider.commit().unwrap(); + + let mdbx_table = db.table::().unwrap(); + assert!( + mdbx_table.is_empty(), + "MDBX StoragesHistory should be empty when RocksDB is enabled" + ); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should contain storage history"); + + let block_list = result.unwrap(); + let blocks: Vec = block_list.iter().collect(); + assert_eq!(blocks, (0..=10).collect::>()); + } + + /// Test that unwind works correctly when `storages_history_in_rocksdb` is enabled. 
+ #[tokio::test] + async fn unwind_works_when_rocksdb_enabled() { + let db = TestStageDB::default(); + + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + db.commit(|tx| { + for block in 0..=10 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::( + block_number_address(block), + storage(STORAGE_KEY), + )?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(10), ..Default::default() }; + let mut stage = IndexStorageHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should have data before unwind"); + let blocks_before: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks_before, (0..=10).collect::>()); + + let unwind_input = + UnwindInput { checkpoint: StageCheckpoint::new(10), unwind_to: 5, bad_block: None }; + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.unwind(&provider, unwind_input).unwrap(); + assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(5) }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should still have data after partial unwind"); + let blocks_after: Vec = result.unwrap().iter().collect(); + assert_eq!( + blocks_after, + (0..=5).collect::>(), + "Should only have blocks 0-5 after unwind to block 5" + ); + } + + /// Test that unwind to block 0 keeps only block 0's history. 
+ #[tokio::test] + async fn unwind_to_zero_keeps_block_zero() { + let db = TestStageDB::default(); + + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + db.commit(|tx| { + for block in 0..=5 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::( + block_number_address(block), + storage(STORAGE_KEY), + )?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(5), ..Default::default() }; + let mut stage = IndexStorageHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should have data before unwind"); + + let unwind_input = + UnwindInput { checkpoint: StageCheckpoint::new(5), unwind_to: 0, bad_block: None }; + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.unwind(&provider, unwind_input).unwrap(); + assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(0) }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should still have block 0 history"); + let blocks_after: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks_after, vec![0], "Should only have block 0 after unwinding to 0"); + } + + /// Test incremental sync merges new data with existing shards. 
+ #[tokio::test] + async fn execute_incremental_sync() { + let db = TestStageDB::default(); + + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + db.commit(|tx| { + for block in 0..=5 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::( + block_number_address(block), + storage(STORAGE_KEY), + )?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(5), ..Default::default() }; + let mut stage = IndexStorageHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some()); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, (0..=5).collect::>()); + + db.commit(|tx| { + for block in 6..=10 { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::( + block_number_address(block), + storage(STORAGE_KEY), + )?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(10), checkpoint: Some(StageCheckpoint::new(5)) }; + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let result = rocksdb.get::(shard(u64::MAX)).unwrap(); + assert!(result.is_some(), "RocksDB should have merged data"); + let blocks: Vec = result.unwrap().iter().collect(); + assert_eq!(blocks, (0..=10).collect::>()); + } + + /// Test multi-shard unwind correctly handles shards that span across unwind boundary. 
+ #[tokio::test] + async fn unwind_multi_shard() { + use reth_db_api::models::sharded_key::NUM_OF_INDICES_IN_SHARD; + + let db = TestStageDB::default(); + + db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + let num_blocks = (NUM_OF_INDICES_IN_SHARD * 2 + 100) as u64; + + db.commit(|tx| { + for block in 0..num_blocks { + tx.put::( + block, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + )?; + tx.put::( + block_number_address(block), + storage(STORAGE_KEY), + )?; + } + Ok(()) + }) + .unwrap(); + + let input = ExecInput { target: Some(num_blocks - 1), ..Default::default() }; + let mut stage = IndexStorageHistoryStage::default(); + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.execute(&provider, input).unwrap(); + assert_eq!( + out, + ExecOutput { checkpoint: StageCheckpoint::new(num_blocks - 1), done: true } + ); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let shards = rocksdb.storage_history_shards(ADDRESS, STORAGE_KEY).unwrap(); + assert!(shards.len() >= 2, "Should have at least 2 shards for {} blocks", num_blocks); + + let unwind_to = NUM_OF_INDICES_IN_SHARD as u64 + 50; + let unwind_input = UnwindInput { + checkpoint: StageCheckpoint::new(num_blocks - 1), + unwind_to, + bad_block: None, + }; + let provider = db.factory.database_provider_rw().unwrap(); + let out = stage.unwind(&provider, unwind_input).unwrap(); + assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); + provider.commit().unwrap(); + + let rocksdb = db.factory.rocksdb_provider(); + let shards_after = rocksdb.storage_history_shards(ADDRESS, STORAGE_KEY).unwrap(); + assert!(!shards_after.is_empty(), "Should still have shards after unwind"); + + let all_blocks: Vec = + shards_after.iter().flat_map(|(_, list)| list.iter()).collect(); + assert_eq!( + all_blocks, + (0..=unwind_to).collect::>(), + "Should only have blocks 0 to {} 
after unwind", + unwind_to + ); + } + } } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 93158a62ed9..c5a8dee347c 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -1,12 +1,15 @@ //! Utils for `stages`. -use alloy_primitives::{Address, BlockNumber, TxNumber}; +use alloy_primitives::{Address, BlockNumber, TxNumber, B256}; use reth_config::config::EtlConfig; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, - models::{sharded_key::NUM_OF_INDICES_IN_SHARD, AccountBeforeTx, ShardedKey}, + models::{ + sharded_key::NUM_OF_INDICES_IN_SHARD, storage_sharded_key::StorageShardedKey, + AccountBeforeTx, ShardedKey, + }, table::{Decode, Decompress, Table}, - transaction::{DbTx, DbTxMut}, - BlockNumberList, DatabaseError, + transaction::DbTx, + BlockNumberList, }; use reth_etl::Collector; use reth_primitives_traits::NodePrimitives; @@ -171,164 +174,9 @@ where Ok(collector) } -/// Given a [`Collector`] created by [`collect_history_indices`] it iterates all entries, loading -/// the indices into the database in shards. -/// -/// ## Process -/// Iterates over elements, grouping indices by their partial keys (e.g., `Address` or -/// `Address.StorageKey`). It flushes indices to disk when reaching a shard's max length -/// (`NUM_OF_INDICES_IN_SHARD`) or when the partial key changes, ensuring the last previous partial -/// key shard is stored. 
-pub(crate) fn load_history_indices( - provider: &Provider, - mut collector: Collector, - append_only: bool, - sharded_key_factory: impl Clone + Fn(P, u64) -> ::Key, - decode_key: impl Fn(Vec) -> Result<::Key, DatabaseError>, - get_partial: impl Fn(::Key) -> P, -) -> Result<(), StageError> -where - Provider: DBProvider, - H: Table, - P: Copy + Default + Eq, -{ - let mut write_cursor = provider.tx_ref().cursor_write::()?; - let mut current_partial = None; - let mut current_list = Vec::::new(); - - // observability - let total_entries = collector.len(); - let interval = (total_entries / 10).max(1); - - for (index, element) in collector.iter()?.enumerate() { - let (k, v) = element?; - let sharded_key = decode_key(k)?; - let new_list = BlockNumberList::decompress_owned(v)?; - - if index > 0 && index.is_multiple_of(interval) && total_entries > 10 { - info!(target: "sync::stages::index_history", progress = %format!("{:.2}%", (index as f64 / total_entries as f64) * 100.0), "Writing indices"); - } - - // AccountsHistory: `Address`. - // StorageHistory: `Address.StorageKey`. - let partial_key = get_partial(sharded_key); - - if current_partial != Some(partial_key) { - // We have reached the end of this subset of keys so - // we need to flush its last indice shard. - if let Some(current) = current_partial { - load_indices( - &mut write_cursor, - current, - &mut current_list, - &sharded_key_factory, - append_only, - LoadMode::Flush, - )?; - } - - current_partial = Some(partial_key); - current_list.clear(); - - // If it's not the first sync, there might an existing shard already, so we need to - // merge it with the one coming from the collector - if !append_only && - let Some((_, last_database_shard)) = - write_cursor.seek_exact(sharded_key_factory(partial_key, u64::MAX))? 
- { - current_list.extend(last_database_shard.iter()); - } - } - - current_list.extend(new_list.iter()); - load_indices( - &mut write_cursor, - partial_key, - &mut current_list, - &sharded_key_factory, - append_only, - LoadMode::KeepLast, - )?; - } - - // There will be one remaining shard that needs to be flushed to DB. - if let Some(current) = current_partial { - load_indices( - &mut write_cursor, - current, - &mut current_list, - &sharded_key_factory, - append_only, - LoadMode::Flush, - )?; - } - - Ok(()) -} - -/// Shard and insert the indices list according to [`LoadMode`] and its length. -pub(crate) fn load_indices( - cursor: &mut C, - partial_key: P, - list: &mut Vec, - sharded_key_factory: &impl Fn(P, BlockNumber) -> ::Key, - append_only: bool, - mode: LoadMode, -) -> Result<(), StageError> -where - C: DbCursorRO + DbCursorRW, - H: Table, - P: Copy, -{ - if list.len() > NUM_OF_INDICES_IN_SHARD || mode.is_flush() { - let chunks = list - .chunks(NUM_OF_INDICES_IN_SHARD) - .map(|chunks| chunks.to_vec()) - .collect::>>(); - - let mut iter = chunks.into_iter().peekable(); - while let Some(chunk) = iter.next() { - let mut highest = *chunk.last().expect("at least one index"); - - if !mode.is_flush() && iter.peek().is_none() { - *list = chunk; - } else { - if iter.peek().is_none() { - highest = u64::MAX; - } - let key = sharded_key_factory(partial_key, highest); - let value = BlockNumberList::new_pre_sorted(chunk); - - if append_only { - cursor.append(key, &value)?; - } else { - cursor.upsert(key, &value)?; - } - } - } - } - - Ok(()) -} - -/// Mode on how to load index shards into the database. -pub(crate) enum LoadMode { - /// Keep the last shard in memory and don't flush it to the database. - KeepLast, - /// Flush all shards into the database. - Flush, -} - -impl LoadMode { - const fn is_flush(&self) -> bool { - matches!(self, Self::Flush) - } -} - /// Loads account history indices into the database via `EitherWriter`. 
/// -/// Similar to [`load_history_indices`] but works with [`EitherWriter`] to support -/// both MDBX and `RocksDB` backends. +/// Works with [`EitherWriter`] to support both MDBX and `RocksDB` backends. /// /// ## Process /// Iterates over elements, grouping indices by their address. It flushes indices to disk @@ -404,8 +252,6 @@ where /// Only flushes when we have more than one shard's worth of data, keeping the last /// (possibly partial) shard for continued accumulation. This avoids writing a shard /// that may need to be updated when more indices arrive. -/// -/// Equivalent to [`load_indices`] with [`LoadMode::KeepLast`]. fn flush_account_history_shards_partial( address: Address, list: &mut Vec, @@ -462,8 +308,6 @@ where /// /// The `u64::MAX` key for the final shard is an invariant that allows `seek_exact(address, /// u64::MAX)` to find the last shard during incremental sync for merging with new indices. -/// -/// Equivalent to [`load_indices`] with [`LoadMode::Flush`]. fn flush_account_history_shards( address: Address, list: &mut Vec, @@ -537,3 +381,191 @@ where segment, }) } + +/// Loads storage history indices into the database via `EitherWriter`. +/// +/// Works with [`EitherWriter`] to support both MDBX and `RocksDB` backends. +/// +/// ## Process +/// Iterates over elements, grouping indices by their (address, `storage_key`) pairs. It flushes +/// indices to disk when reaching a shard's max length (`NUM_OF_INDICES_IN_SHARD`) or when the +/// (address, `storage_key`) pair changes, ensuring the last previous shard is stored. +/// +/// Uses `Option<(Address, B256)>` instead of default values as the sentinel to avoid +/// incorrectly treating `(Address::ZERO, B256::ZERO)` as "no previous key". 
+pub(crate) fn load_storage_history( + mut collector: Collector, + append_only: bool, + writer: &mut EitherWriter<'_, CURSOR, N>, +) -> Result<(), StageError> +where + N: NodePrimitives, + CURSOR: DbCursorRW + + DbCursorRO, +{ + let mut current_key: Option<(Address, B256)> = None; + // Accumulator for block numbers where the current (address, storage_key) changed. + let mut current_list = Vec::::new(); + + let total_entries = collector.len(); + let interval = (total_entries / 10).max(1); + + for (index, element) in collector.iter()?.enumerate() { + let (k, v) = element?; + let sharded_key = StorageShardedKey::decode_owned(k)?; + let new_list = BlockNumberList::decompress_owned(v)?; + + if index > 0 && index.is_multiple_of(interval) && total_entries > 10 { + info!(target: "sync::stages::index_history", progress = %format!("{:.2}%", (index as f64 / total_entries as f64) * 100.0), "Writing indices"); + } + + let partial_key = (sharded_key.address, sharded_key.sharded_key.key); + + // When (address, storage_key) changes, flush the previous key's shards and start fresh. + if current_key != Some(partial_key) { + // Flush all remaining shards for the previous key (uses u64::MAX for last shard). + if let Some((prev_addr, prev_storage_key)) = current_key { + flush_storage_history_shards( + prev_addr, + prev_storage_key, + &mut current_list, + append_only, + writer, + )?; + } + + current_key = Some(partial_key); + current_list.clear(); + + // On incremental sync, merge with the existing last shard from the database. + // The last shard is stored with key (address, storage_key, u64::MAX) so we can find it. + if !append_only && + let Some(last_shard) = + writer.get_last_storage_history_shard(partial_key.0, partial_key.1)? + { + current_list.extend(last_shard.iter()); + } + } + + // Append new block numbers to the accumulator. + current_list.extend(new_list.iter()); + + // Flush complete shards, keeping the last (partial) shard buffered. 
+ flush_storage_history_shards_partial( + partial_key.0, + partial_key.1, + &mut current_list, + append_only, + writer, + )?; + } + + // Flush the final key's remaining shard. + if let Some((addr, storage_key)) = current_key { + flush_storage_history_shards(addr, storage_key, &mut current_list, append_only, writer)?; + } + + Ok(()) +} + +/// Flushes complete shards for storage history, keeping the trailing partial shard buffered. +/// +/// Only flushes when we have more than one shard's worth of data, keeping the last +/// (possibly partial) shard for continued accumulation. This avoids writing a shard +/// that may need to be updated when more indices arrive. +fn flush_storage_history_shards_partial( + address: Address, + storage_key: B256, + list: &mut Vec, + append_only: bool, + writer: &mut EitherWriter<'_, CURSOR, N>, +) -> Result<(), StageError> +where + N: NodePrimitives, + CURSOR: DbCursorRW + + DbCursorRO, +{ + // Nothing to flush if we haven't filled a complete shard yet. + if list.len() <= NUM_OF_INDICES_IN_SHARD { + return Ok(()); + } + + let num_full_shards = list.len() / NUM_OF_INDICES_IN_SHARD; + + // Always keep at least one shard buffered for continued accumulation. + // If len is exact multiple of shard size, keep the last full shard. + let shards_to_flush = if list.len().is_multiple_of(NUM_OF_INDICES_IN_SHARD) { + num_full_shards - 1 + } else { + num_full_shards + }; + + if shards_to_flush == 0 { + return Ok(()); + } + + // Split: flush the first N shards, keep the remainder buffered. + let flush_len = shards_to_flush * NUM_OF_INDICES_IN_SHARD; + let remainder = list.split_off(flush_len); + + // Write each complete shard with its highest block number as the key. 
+ for chunk in list.chunks(NUM_OF_INDICES_IN_SHARD) { + let highest = *chunk.last().expect("chunk is non-empty"); + let key = StorageShardedKey::new(address, storage_key, highest); + let value = BlockNumberList::new_pre_sorted(chunk.iter().copied()); + + if append_only { + writer.append_storage_history(key, &value)?; + } else { + writer.upsert_storage_history(key, &value)?; + } + } + + // Keep the remaining indices for the next iteration. + *list = remainder; + Ok(()) +} + +/// Flushes all remaining shards for storage history, using `u64::MAX` for the last shard. +/// +/// The `u64::MAX` key for the final shard is an invariant that allows +/// `seek_exact(address, storage_key, u64::MAX)` to find the last shard during incremental +/// sync for merging with new indices. +fn flush_storage_history_shards( + address: Address, + storage_key: B256, + list: &mut Vec, + append_only: bool, + writer: &mut EitherWriter<'_, CURSOR, N>, +) -> Result<(), StageError> +where + N: NodePrimitives, + CURSOR: DbCursorRW + + DbCursorRO, +{ + if list.is_empty() { + return Ok(()); + } + + let num_chunks = list.len().div_ceil(NUM_OF_INDICES_IN_SHARD); + + for (i, chunk) in list.chunks(NUM_OF_INDICES_IN_SHARD).enumerate() { + let is_last = i == num_chunks - 1; + + // Use u64::MAX for the final shard's key. This invariant allows incremental sync + // to find the last shard via seek_exact(address, storage_key, u64::MAX) for merging. 
+ let highest = if is_last { u64::MAX } else { *chunk.last().expect("chunk is non-empty") }; + + let key = StorageShardedKey::new(address, storage_key, highest); + let value = BlockNumberList::new_pre_sorted(chunk.iter().copied()); + + if append_only { + writer.append_storage_history(key, &value)?; + } else { + writer.upsert_storage_history(key, &value)?; + } + } + + list.clear(); + Ok(()) +} diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs index 16eced90dd6..c6ba79d0311 100644 --- a/crates/storage/provider/src/either_writer.rs +++ b/crates/storage/provider/src/either_writer.rs @@ -13,7 +13,7 @@ use crate::{ providers::{history_info, HistoryInfo, StaticFileProvider, StaticFileProviderRWRefMut}, StaticFileProviderFactory, }; -use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber}; +use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber, B256}; use rayon::slice::ParallelSliceMut; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRW}, @@ -512,6 +512,49 @@ where Self::RocksDB(batch) => batch.delete::(key), } } + + /// Appends a storage history entry (for first sync - more efficient). + pub fn append_storage_history( + &mut self, + key: StorageShardedKey, + value: &BlockNumberList, + ) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.append(key, value)?), + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.put::(key, value), + } + } + + /// Upserts a storage history entry (for incremental sync). 
+ pub fn upsert_storage_history( + &mut self, + key: StorageShardedKey, + value: &BlockNumberList, + ) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.upsert(key, value)?), + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.put::(key, value), + } + } + + /// Gets the last shard for an address and storage key (keyed with `u64::MAX`). + pub fn get_last_storage_history_shard( + &mut self, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + let key = StorageShardedKey::last(address, storage_key); + match self { + Self::Database(cursor) => Ok(cursor.seek_exact(key)?.map(|(_, v)| v)), + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.get::(key), + } + } } impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index a8032ae66a3..9a41bc243d4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -3005,25 +3005,35 @@ impl HistoryWriter for DatabaseProvi .collect::>(); storage_changesets.sort_by_key(|(address, key, _)| (*address, *key)); - let mut cursor = self.tx.cursor_write::()?; - for &(address, storage_key, rem_index) in &storage_changesets { - let partial_shard = unwind_history_shards::<_, tables::StoragesHistory, _>( - &mut cursor, - StorageShardedKey::last(address, storage_key), - rem_index, - |storage_sharded_key| { - storage_sharded_key.address == address && - storage_sharded_key.sharded_key.key == storage_key - }, - )?; - - // Check the last returned partial shard. - // If it's not empty, the shard needs to be reinserted. 
- if !partial_shard.is_empty() { - cursor.insert( + if self.cached_storage_settings().storages_history_in_rocksdb { + #[cfg(all(unix, feature = "rocksdb"))] + { + let batch = + self.rocksdb_provider.unwind_storage_history_indices(&storage_changesets)?; + self.pending_rocksdb_batches.lock().push(batch); + } + } else { + // Unwind the storage history index in MDBX. + let mut cursor = self.tx.cursor_write::()?; + for &(address, storage_key, rem_index) in &storage_changesets { + let partial_shard = unwind_history_shards::<_, tables::StoragesHistory, _>( + &mut cursor, StorageShardedKey::last(address, storage_key), - &BlockNumberList::new_pre_sorted(partial_shard), + rem_index, + |storage_sharded_key| { + storage_sharded_key.address == address && + storage_sharded_key.sharded_key.key == storage_key + }, )?; + + // Check the last returned partial shard. + // If it's not empty, the shard needs to be reinserted. + if !partial_shard.is_empty() { + cursor.insert( + StorageShardedKey::last(address, storage_key), + &BlockNumberList::new_pre_sorted(partial_shard), + )?; + } } } diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 129d8f1100a..7824059086c 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -814,6 +814,52 @@ impl RocksDBProvider { Ok(result) } + /// Returns all storage history shards for the given `(address, storage_key)` pair. + /// + /// Iterates through all shards in ascending `highest_block_number` order until + /// a different `(address, storage_key)` is encountered. 
+ pub fn storage_history_shards( + &self, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + let cf = self.get_cf_handle::()?; + + let start_key = StorageShardedKey::new(address, storage_key, 0u64); + let start_bytes = start_key.encode(); + + let iter = self + .0 + .iterator_cf(cf, IteratorMode::From(start_bytes.as_ref(), rocksdb::Direction::Forward)); + + let mut result = Vec::new(); + for item in iter { + match item { + Ok((key_bytes, value_bytes)) => { + let key = StorageShardedKey::decode(&key_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + + if key.address != address || key.sharded_key.key != storage_key { + break; + } + + let value = BlockNumberList::decompress(&value_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + + result.push((key, value)); + } + Err(e) => { + return Err(ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + }))); + } + } + } + + Ok(result) + } + /// Unwinds account history indices for the given `(address, block_number)` pairs. /// /// Groups addresses by their minimum block number and calls the appropriate unwind @@ -846,6 +892,37 @@ impl RocksDBProvider { Ok(batch.into_inner()) } + /// Unwinds storage history indices for the given `(address, storage_key, block_number)` tuples. + /// + /// Groups by `(address, storage_key)` and finds the minimum block number for each. + /// For each key, keeps only blocks less than the minimum block + /// (i.e., removes the minimum block and all higher blocks). + /// + /// Returns a `WriteBatchWithTransaction` that can be committed later. 
+ pub fn unwind_storage_history_indices( + &self, + storage_changesets: &[(Address, B256, BlockNumber)], + ) -> ProviderResult> { + let mut key_min_block: HashMap<(Address, B256), BlockNumber> = + HashMap::with_capacity_and_hasher(storage_changesets.len(), Default::default()); + for &(address, storage_key, block_number) in storage_changesets { + key_min_block + .entry((address, storage_key)) + .and_modify(|min| *min = (*min).min(block_number)) + .or_insert(block_number); + } + + let mut batch = self.batch(); + for ((address, storage_key), min_block) in key_min_block { + match min_block.checked_sub(1) { + Some(keep_to) => batch.unwind_storage_history_to(address, storage_key, keep_to)?, + None => batch.clear_storage_history(address, storage_key)?, + } + } + + Ok(batch.into_inner()) + } + /// Writes a batch of operations atomically. #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] pub fn write_batch(&self, f: F) -> ProviderResult<()> @@ -1290,6 +1367,87 @@ impl<'a> RocksDBBatch<'a> { Ok(()) } + /// Unwinds storage history to keep only blocks `<= keep_to`. + /// + /// Handles multi-shard scenarios by: + /// 1. Loading all shards for the `(address, storage_key)` pair + /// 2. Finding the boundary shard containing `keep_to` + /// 3. Deleting all shards after the boundary + /// 4. Truncating the boundary shard to keep only indices `<= keep_to` + /// 5. Ensuring the last shard is keyed with `u64::MAX` + pub fn unwind_storage_history_to( + &mut self, + address: Address, + storage_key: B256, + keep_to: BlockNumber, + ) -> ProviderResult<()> { + let shards = self.provider.storage_history_shards(address, storage_key)?; + if shards.is_empty() { + return Ok(()); + } + + // Find the first shard that might contain blocks > keep_to. 
+ // A shard is affected if it's the sentinel (u64::MAX) or its highest_block_number > keep_to + let boundary_idx = shards.iter().position(|(key, _)| { + key.sharded_key.highest_block_number == u64::MAX || + key.sharded_key.highest_block_number > keep_to + }); + + // Repair path: no shards affected means all blocks <= keep_to, just ensure sentinel exists + let Some(boundary_idx) = boundary_idx else { + let (last_key, last_value) = shards.last().expect("shards is non-empty"); + if last_key.sharded_key.highest_block_number != u64::MAX { + self.delete::(last_key.clone())?; + self.put::( + StorageShardedKey::last(address, storage_key), + last_value, + )?; + } + return Ok(()); + }; + + // Delete all shards strictly after the boundary (they are entirely > keep_to) + for (key, _) in shards.iter().skip(boundary_idx + 1) { + self.delete::(key.clone())?; + } + + // Process the boundary shard: filter out blocks > keep_to + let (boundary_key, boundary_list) = &shards[boundary_idx]; + + // Delete the boundary shard (we'll either drop it or rewrite at u64::MAX) + self.delete::(boundary_key.clone())?; + + // Build truncated list once; check emptiness directly (avoids double iteration) + let new_last = + BlockNumberList::new_pre_sorted(boundary_list.iter().take_while(|&b| b <= keep_to)); + + if new_last.is_empty() { + // Boundary shard is now empty. Previous shard becomes the last and must be keyed + // u64::MAX. + if boundary_idx == 0 { + // Nothing left for this (address, storage_key) pair + return Ok(()); + } + + let (prev_key, prev_value) = &shards[boundary_idx - 1]; + if prev_key.sharded_key.highest_block_number != u64::MAX { + self.delete::(prev_key.clone())?; + self.put::( + StorageShardedKey::last(address, storage_key), + prev_value, + )?; + } + return Ok(()); + } + + self.put::( + StorageShardedKey::last(address, storage_key), + &new_last, + )?; + + Ok(()) + } + /// Clears all account history shards for the given address. 
/// /// Used when unwinding from block 0 (i.e., removing all history). @@ -1300,6 +1458,21 @@ impl<'a> RocksDBBatch<'a> { } Ok(()) } + + /// Clears all storage history shards for the given `(address, storage_key)` pair. + /// + /// Used when unwinding from block 0 (i.e., removing all history for this storage slot). + pub fn clear_storage_history( + &mut self, + address: Address, + storage_key: B256, + ) -> ProviderResult<()> { + let shards = self.provider.storage_history_shards(address, storage_key)?; + for (key, _) in shards { + self.delete::(key)?; + } + Ok(()) + } } /// `RocksDB` transaction wrapper providing MDBX-like semantics. From ec50fd40b3d7568eb17d0d18844e625e267ff124 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 21 Jan 2026 20:19:24 +0100 Subject: [PATCH 122/267] chore(chainspec): use ..Default::default() in create_chain_config (#21266) --- crates/chainspec/src/spec.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 5927c262c9c..e64ba98367c 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -278,6 +278,7 @@ pub fn create_chain_config( // Check if DAO fork is supported (it has an activation block) let dao_fork_support = hardforks.fork(EthereumHardfork::Dao) != ForkCondition::Never; + #[allow(clippy::needless_update)] ChainConfig { chain_id: chain.map(|c| c.id()).unwrap_or(0), homestead_block: block_num(EthereumHardfork::Homestead), @@ -313,6 +314,7 @@ pub fn create_chain_config( extra_fields: Default::default(), deposit_contract_address, blob_schedule, + ..Default::default() } } From 7609deddda79ff4cca0d337d4a4eaf1d3f928dae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 21 Jan 2026 21:08:03 +0100 Subject: [PATCH 123/267] perf(trie): parallelize merge_ancestors_into_overlay (#21202) --- Cargo.lock | 135 ++++++++++++++++++++++++ Cargo.toml | 29 +++++ crates/chain-state/src/deferred_trie.rs | 49 ++++++++- 
crates/trie/common/src/hashed_state.rs | 32 +++++- crates/trie/common/src/updates.rs | 31 ++++++ 5 files changed, 273 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2ea2d86702..d3b28b3a92d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14636,3 +14636,138 @@ dependencies = [ "cc", "pkg-config", ] + +[[patch.unused]] +name = "alloy-consensus" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-contract" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-eips" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-genesis" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-json-rpc" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-network" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-network-primitives" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-provider" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-pubsub" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-client" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + 
+[[patch.unused]] +name = "alloy-rpc-types" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-admin" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-anvil" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-beacon" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-debug" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-engine" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-eth" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-mev" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-trace" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-rpc-types-txpool" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-serde" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-signer" +version = "1.5.1" +source = 
"git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-signer-local" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-transport" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-transport-http" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-transport-ipc" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" + +[[patch.unused]] +name = "alloy-transport-ws" +version = "1.5.1" +source = "git+https://github.com/alloy-rs/alloy?branch=main#05fd66e6f05399b71dfc9c802e6ee182b19e8575" diff --git a/Cargo.toml b/Cargo.toml index 316f5ee2b57..ef91483a79f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -797,3 +797,32 @@ ipnet = "2.11" # alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "072c248" } # alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "072c248" } + +# Patched by patch-alloy.sh +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-contract = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-network-primitives = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", branch = 
"main" } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-serde = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-transport = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", branch = "main" } +alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", branch = "main" } diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs index 1b4a3d43a35..9755b54b99d 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -243,8 +243,53 @@ impl DeferredTrieData { /// In normal operation, the parent always has a cached overlay and this /// function is never called. 
/// - /// Iterates ancestors oldest -> newest, then extends with current block's data, - /// so later state takes precedence. + /// When the `rayon` feature is enabled, uses parallel collection and merge: + /// 1. Collects ancestor data in parallel (each `wait_cloned()` may compute) + /// 2. Merges hashed state and trie updates in parallel with each other + /// 3. Uses tree reduction within each merge for O(log n) depth + #[cfg(feature = "rayon")] + fn merge_ancestors_into_overlay( + ancestors: &[Self], + sorted_hashed_state: &HashedPostStateSorted, + sorted_trie_updates: &TrieUpdatesSorted, + ) -> TrieInputSorted { + // Early exit: no ancestors means just wrap current block's data + if ancestors.is_empty() { + return TrieInputSorted::new( + Arc::new(sorted_trie_updates.clone()), + Arc::new(sorted_hashed_state.clone()), + Default::default(), + ); + } + + // Collect ancestor data, unzipping states and updates into Arc slices + let (states, updates): (Vec<_>, Vec<_>) = ancestors + .iter() + .map(|a| { + let data = a.wait_cloned(); + (data.hashed_state, data.trie_updates) + }) + .unzip(); + + // Merge state and nodes in parallel with each other using tree reduction + let (state, nodes) = rayon::join( + || { + let mut merged = HashedPostStateSorted::merge_parallel(&states); + merged.extend_ref_and_sort(sorted_hashed_state); + merged + }, + || { + let mut merged = TrieUpdatesSorted::merge_parallel(&updates); + merged.extend_ref_and_sort(sorted_trie_updates); + merged + }, + ); + + TrieInputSorted::new(Arc::new(nodes), Arc::new(state), Default::default()) + } + + /// Merge all ancestors and current block's data into a single overlay (sequential fallback). 
+ #[cfg(not(feature = "rayon"))] fn merge_ancestors_into_overlay( ancestors: &[Self], sorted_hashed_state: &HashedPostStateSorted, diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 315bda49a45..3273e65829b 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -6,7 +6,7 @@ use crate::{ utils::{extend_sorted_vec, kway_merge_sorted}, KeyHasher, MultiProofTargets, Nibbles, }; -use alloc::{borrow::Cow, vec::Vec}; +use alloc::{borrow::Cow, sync::Arc, vec::Vec}; use alloy_primitives::{ keccak256, map::{hash_map, B256Map, HashMap, HashSet}, @@ -710,6 +710,36 @@ impl HashedPostStateSorted { self.accounts.clear(); self.storages.clear(); } + + /// Parallel batch-merge sorted hashed post states. Slice is **oldest to newest**. + /// + /// This is more efficient than sequential `extend_ref` calls when merging many states, + /// as it processes all states in parallel with tree reduction using divide-and-conquer. 
+ #[cfg(feature = "rayon")] + pub fn merge_parallel(states: &[Arc]) -> Self { + fn parallel_merge_tree(states: &[Arc]) -> HashedPostStateSorted { + match states.len() { + 0 => HashedPostStateSorted::default(), + 1 => states[0].as_ref().clone(), + 2 => { + let mut acc = states[0].as_ref().clone(); + acc.extend_ref_and_sort(&states[1]); + acc + } + n => { + let mid = n / 2; + let (mut left, right) = rayon::join( + || parallel_merge_tree(&states[..mid]), + || parallel_merge_tree(&states[mid..]), + ); + left.extend_ref_and_sort(&right); + left + } + } + } + + parallel_merge_tree(states) + } } impl AsRef for HashedPostStateSorted { diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 26985108089..0155e0e4846 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -4,6 +4,7 @@ use crate::{ }; use alloc::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + sync::Arc, vec::Vec, }; use alloy_primitives::{ @@ -697,6 +698,36 @@ impl TrieUpdatesSorted { Self { account_nodes, storage_tries }.into() } + + /// Parallel batch-merge sorted trie updates. Slice is **oldest to newest**. + /// + /// This is more efficient than sequential `extend_ref` calls when merging many updates, + /// as it processes all updates in parallel with tree reduction using divide-and-conquer. 
+ #[cfg(feature = "rayon")] + pub fn merge_parallel(updates: &[Arc]) -> Self { + fn parallel_merge_tree(updates: &[Arc]) -> TrieUpdatesSorted { + match updates.len() { + 0 => TrieUpdatesSorted::default(), + 1 => updates[0].as_ref().clone(), + 2 => { + let mut acc = updates[0].as_ref().clone(); + acc.extend_ref_and_sort(&updates[1]); + acc + } + n => { + let mid = n / 2; + let (mut left, right) = rayon::join( + || parallel_merge_tree(&updates[..mid]), + || parallel_merge_tree(&updates[mid..]), + ); + left.extend_ref_and_sort(&right); + left + } + } + } + + parallel_merge_tree(updates) + } } impl AsRef for TrieUpdatesSorted { From b7d2ee25666ac21034664c9cd6704d388e3d7c07 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 21 Jan 2026 12:17:45 -0800 Subject: [PATCH 124/267] feat(engine): add metric for execution cache unavailability due to concurrent use (#21265) Co-authored-by: Tempo AI Co-authored-by: Alexey Shekhirin --- .../tree/src/tree/payload_processor/mod.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 1fa4232b0e2..6d61578f636 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -19,6 +19,7 @@ use alloy_evm::{block::StateChangeSource, ToTxEnv}; use alloy_primitives::B256; use crossbeam_channel::Sender as CrossbeamSender; use executor::WorkloadExecutor; +use metrics::Counter; use multiproof::{SparseTrieUpdate, *}; use parking_lot::RwLock; use prewarm::PrewarmMetrics; @@ -28,6 +29,7 @@ use reth_evm::{ ConfigureEvm, EvmEnvFor, ExecutableTxIterator, ExecutableTxTuple, OnStateHook, SpecFor, TxEnvFor, }; +use reth_metrics::Metrics; use reth_primitives_traits::NodePrimitives; use reth_provider::{ BlockExecutionOutput, BlockReader, DatabaseProviderROFactory, StateProvider, @@ -788,6 +790,8 @@ impl Drop for CacheTaskHandle { struct 
ExecutionCache { /// Guarded cloneable cache identified by a block hash. inner: Arc>>, + /// Metrics for cache operations. + metrics: ExecutionCacheMetrics, } impl ExecutionCache { @@ -829,6 +833,10 @@ impl ExecutionCache { if hash_matches && available { return Some(c.clone()); } + + if hash_matches && !available { + self.metrics.execution_cache_in_use.increment(1); + } } else { debug!(target: "engine::caching", %parent_hash, "No cache found"); } @@ -864,6 +872,15 @@ impl ExecutionCache { } } +/// Metrics for execution cache operations. +#[derive(Metrics, Clone)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct ExecutionCacheMetrics { + /// Counter for when the execution cache was unavailable because other threads + /// (e.g., prewarming) are still using it. + pub(crate) execution_cache_in_use: Counter, +} + /// EVM context required to execute a block. #[derive(Debug, Clone)] pub struct ExecutionEnv { From 8c645d57628e6df3c842b024ec642d86bbe428a8 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 21 Jan 2026 13:04:10 -0800 Subject: [PATCH 125/267] feat(reth-bench): accept short notation for --target-gas-limit (#21273) --- bin/reth-bench/src/bench/gas_limit_ramp.rs | 74 +++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/bin/reth-bench/src/bench/gas_limit_ramp.rs b/bin/reth-bench/src/bench/gas_limit_ramp.rs index 7c9e894ea3f..3a969d17cb0 100644 --- a/bin/reth-bench/src/bench/gas_limit_ramp.rs +++ b/bin/reth-bench/src/bench/gas_limit_ramp.rs @@ -22,6 +22,29 @@ use reth_primitives_traits::constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIM use std::{path::PathBuf, time::Instant}; use tracing::info; +/// Parses a gas limit value with optional suffix: K for thousand, M for million, G for billion. 
+/// +/// Examples: "30000000", "30M", "1G", "2G" +fn parse_gas_limit(s: &str) -> eyre::Result { + let s = s.trim(); + if s.is_empty() { + return Err(eyre::eyre!("empty value")); + } + + let (num_str, multiplier) = if let Some(prefix) = s.strip_suffix(['G', 'g']) { + (prefix, 1_000_000_000u64) + } else if let Some(prefix) = s.strip_suffix(['M', 'm']) { + (prefix, 1_000_000u64) + } else if let Some(prefix) = s.strip_suffix(['K', 'k']) { + (prefix, 1_000u64) + } else { + (s, 1u64) + }; + + let base: u64 = num_str.trim().parse()?; + base.checked_mul(multiplier).ok_or_else(|| eyre::eyre!("value overflow")) +} + /// `reth benchmark gas-limit-ramp` command. #[derive(Debug, Parser)] pub struct Command { @@ -31,7 +54,9 @@ pub struct Command { /// Target gas limit to ramp up to. The benchmark will generate blocks until the gas limit /// reaches or exceeds this value. Mutually exclusive with --blocks. - #[arg(long, value_name = "TARGET_GAS_LIMIT", conflicts_with = "blocks")] + /// Accepts short notation: K for thousand, M for million, G for billion (e.g., 2G = 2 + /// billion). + #[arg(long, value_name = "TARGET_GAS_LIMIT", conflicts_with = "blocks", value_parser = parse_gas_limit)] target_gas_limit: Option, /// The Engine API RPC URL. 
@@ -212,3 +237,50 @@ const fn should_stop(mode: RampMode, blocks_processed: u64, current_gas_limit: u RampMode::TargetGasLimit(target) => current_gas_limit >= target, } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_gas_limit_plain_number() { + assert_eq!(parse_gas_limit("30000000").unwrap(), 30_000_000); + assert_eq!(parse_gas_limit("1").unwrap(), 1); + assert_eq!(parse_gas_limit("0").unwrap(), 0); + } + + #[test] + fn test_parse_gas_limit_k_suffix() { + assert_eq!(parse_gas_limit("1K").unwrap(), 1_000); + assert_eq!(parse_gas_limit("30k").unwrap(), 30_000); + assert_eq!(parse_gas_limit("100K").unwrap(), 100_000); + } + + #[test] + fn test_parse_gas_limit_m_suffix() { + assert_eq!(parse_gas_limit("1M").unwrap(), 1_000_000); + assert_eq!(parse_gas_limit("30m").unwrap(), 30_000_000); + assert_eq!(parse_gas_limit("100M").unwrap(), 100_000_000); + } + + #[test] + fn test_parse_gas_limit_g_suffix() { + assert_eq!(parse_gas_limit("1G").unwrap(), 1_000_000_000); + assert_eq!(parse_gas_limit("2g").unwrap(), 2_000_000_000); + assert_eq!(parse_gas_limit("10G").unwrap(), 10_000_000_000); + } + + #[test] + fn test_parse_gas_limit_with_whitespace() { + assert_eq!(parse_gas_limit(" 1G ").unwrap(), 1_000_000_000); + assert_eq!(parse_gas_limit("2 M").unwrap(), 2_000_000); + } + + #[test] + fn test_parse_gas_limit_errors() { + assert!(parse_gas_limit("").is_err()); + assert!(parse_gas_limit("abc").is_err()); + assert!(parse_gas_limit("G").is_err()); + assert!(parse_gas_limit("-1G").is_err()); + } +} From 74edce0089c29e19b250382c8dc641ef62925bc6 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 21 Jan 2026 21:07:13 +0000 Subject: [PATCH 126/267] revert: feat(trie): add V2 account proof computation and refactor proof types (#21214) (#21274) --- crates/engine/primitives/src/config.rs | 16 - .../tree/src/tree/payload_processor/mod.rs | 19 +- .../src/tree/payload_processor/multiproof.rs | 507 ++++-------------- 
.../src/tree/payload_processor/prewarm.rs | 89 +-- .../src/tree/payload_processor/sparse_trie.rs | 15 +- crates/trie/parallel/Cargo.toml | 2 +- crates/trie/parallel/src/proof.rs | 9 +- crates/trie/parallel/src/proof_task.rs | 444 +++++---------- crates/trie/parallel/src/stats.rs | 5 + crates/trie/parallel/src/value_encoder.rs | 2 + 10 files changed, 269 insertions(+), 839 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 0b72e1d6243..2870d3dccc4 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -34,11 +34,6 @@ fn default_account_worker_count() -> usize { /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 60; -/// The size of proof targets chunk to spawn in one multiproof calculation when V2 proofs are -/// enabled. This is 4x the default chunk size to take advantage of more efficient V2 proof -/// computation. -pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2: usize = DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE * 4; - /// Default number of reserved CPU cores for non-reth processes. /// /// This will be deducted from the thread count of main reth global threadpool. @@ -272,17 +267,6 @@ impl TreeConfig { self.multiproof_chunk_size } - /// Return the multiproof task chunk size, using the V2 default if V2 proofs are enabled - /// and the chunk size is at the default value. 
- pub const fn effective_multiproof_chunk_size(&self) -> usize { - if self.enable_proof_v2 && self.multiproof_chunk_size == DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE - { - DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2 - } else { - self.multiproof_chunk_size - } - } - /// Return the number of reserved CPU cores for non-reth processes pub const fn reserved_cpu_cores(&self) -> usize { self.reserved_cpu_cores diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 6d61578f636..f606fb1091c 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -247,9 +247,6 @@ where let (to_sparse_trie, sparse_trie_rx) = channel(); let (to_multi_proof, from_multi_proof) = crossbeam_channel::unbounded(); - // Extract V2 proofs flag early so we can pass it to prewarm - let v2_proofs_enabled = config.enable_proof_v2(); - // Handle BAL-based optimization if available let prewarm_handle = if let Some(bal) = bal { // When BAL is present, use BAL prewarming and send BAL to multiproof @@ -266,7 +263,6 @@ where provider_builder.clone(), None, // Don't send proof targets when BAL is present Some(bal), - v2_proofs_enabled, ) } else { // Normal path: spawn with transaction prewarming @@ -277,7 +273,6 @@ where provider_builder.clone(), Some(to_multi_proof.clone()), None, - v2_proofs_enabled, ) }; @@ -285,6 +280,7 @@ where let task_ctx = ProofTaskCtx::new(multiproof_provider_factory); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); + let v2_proofs_enabled = config.enable_proof_v2(); let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), task_ctx, @@ -296,13 +292,10 @@ where let multi_proof_task = MultiProofTask::new( proof_handle.clone(), to_sparse_trie, - config - .multiproof_chunking_enabled() - .then_some(config.effective_multiproof_chunk_size()), + 
config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), to_multi_proof.clone(), from_multi_proof, - ) - .with_v2_proofs_enabled(v2_proofs_enabled); + ); // spawn multi-proof task let parent_span = span.clone(); @@ -351,9 +344,8 @@ where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, { let (prewarm_rx, execution_rx, size_hint) = self.spawn_tx_iterator(transactions); - // This path doesn't use multiproof, so V2 proofs flag doesn't matter let prewarm_handle = - self.spawn_caching_with(env, prewarm_rx, size_hint, provider_builder, None, bal, false); + self.spawn_caching_with(env, prewarm_rx, size_hint, provider_builder, None, bal); PayloadHandle { to_multi_proof: None, prewarm_handle, @@ -420,7 +412,6 @@ where } /// Spawn prewarming optionally wired to the multiproof task for target updates. - #[expect(clippy::too_many_arguments)] fn spawn_caching_with

( &self, env: ExecutionEnv, @@ -429,7 +420,6 @@ where provider_builder: StateProviderBuilder, to_multi_proof: Option>, bal: Option>, - v2_proofs_enabled: bool, ) -> CacheTaskHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, @@ -452,7 +442,6 @@ where terminate_execution: Arc::new(AtomicBool::new(false)), precompile_cache_disabled: self.precompile_cache_disabled, precompile_cache_map: self.precompile_cache_map.clone(), - v2_proofs_enabled, }; let (prewarm_task, to_prewarm_task) = PrewarmCacheTask::new( diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 823c3e54e9b..b5f1272b67e 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -11,18 +11,14 @@ use reth_metrics::Metrics; use reth_provider::AccountReader; use reth_revm::state::EvmState; use reth_trie::{ - added_removed_keys::MultiAddedRemovedKeys, proof_v2, HashedPostState, HashedStorage, + added_removed_keys::MultiAddedRemovedKeys, DecodedMultiProof, HashedPostState, HashedStorage, MultiProofTargets, }; -#[cfg(test)] -use reth_trie_parallel::stats::ParallelTrieTracker; use reth_trie_parallel::{ proof::ParallelProof, proof_task::{ - AccountMultiproofInput, ProofResult, ProofResultContext, ProofResultMessage, - ProofWorkerHandle, + AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, }, - targets_v2::{ChunkedMultiProofTargetsV2, MultiProofTargetsV2}, }; use revm_primitives::map::{hash_map, B256Map}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -67,12 +63,12 @@ const DEFAULT_MAX_TARGETS_FOR_CHUNKING: usize = 300; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. 
-#[derive(Debug)] +#[derive(Default, Debug)] pub struct SparseTrieUpdate { /// The state update that was used to calculate the proof pub(crate) state: HashedPostState, /// The calculated multiproof - pub(crate) multiproof: ProofResult, + pub(crate) multiproof: DecodedMultiProof, } impl SparseTrieUpdate { @@ -84,11 +80,7 @@ impl SparseTrieUpdate { /// Construct update from multiproof. #[cfg(test)] pub(super) fn from_multiproof(multiproof: reth_trie::MultiProof) -> alloy_rlp::Result { - let stats = ParallelTrieTracker::default().finish(); - Ok(Self { - state: HashedPostState::default(), - multiproof: ProofResult::Legacy(multiproof.try_into()?, stats), - }) + Ok(Self { multiproof: multiproof.try_into()?, ..Default::default() }) } /// Extend update with contents of the other. @@ -102,7 +94,7 @@ impl SparseTrieUpdate { #[derive(Debug)] pub(super) enum MultiProofMessage { /// Prefetch proof targets - PrefetchProofs(VersionedMultiProofTargets), + PrefetchProofs(MultiProofTargets), /// New state update from transaction execution with its source StateUpdate(Source, EvmState), /// State update that can be applied to the sparse trie without any new proofs. @@ -231,155 +223,12 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat hashed_state } -/// Extends a `MultiProofTargets` with the contents of a `VersionedMultiProofTargets`, -/// regardless of which variant the latter is. 
-fn extend_multiproof_targets(dest: &mut MultiProofTargets, src: &VersionedMultiProofTargets) { - match src { - VersionedMultiProofTargets::Legacy(targets) => { - dest.extend_ref(targets); - } - VersionedMultiProofTargets::V2(targets) => { - // Add all account targets - for target in &targets.account_targets { - dest.entry(target.key()).or_default(); - } - - // Add all storage targets - for (hashed_address, slots) in &targets.storage_targets { - let slot_set = dest.entry(*hashed_address).or_default(); - for slot in slots { - slot_set.insert(slot.key()); - } - } - } - } -} - -/// A set of multiproof targets which can be either in the legacy or V2 representations. -#[derive(Debug)] -pub(super) enum VersionedMultiProofTargets { - /// Legacy targets - Legacy(MultiProofTargets), - /// V2 targets - V2(MultiProofTargetsV2), -} - -impl VersionedMultiProofTargets { - /// Returns true if there are no account or storage targets. - fn is_empty(&self) -> bool { - match self { - Self::Legacy(targets) => targets.is_empty(), - Self::V2(targets) => targets.is_empty(), - } - } - - /// Returns the number of account targets in the multiproof target - fn account_targets_len(&self) -> usize { - match self { - Self::Legacy(targets) => targets.len(), - Self::V2(targets) => targets.account_targets.len(), - } - } - - /// Returns the number of storage targets in the multiproof target - fn storage_targets_len(&self) -> usize { - match self { - Self::Legacy(targets) => targets.values().map(|slots| slots.len()).sum::(), - Self::V2(targets) => { - targets.storage_targets.values().map(|slots| slots.len()).sum::() - } - } - } - - /// Returns the number of accounts in the multiproof targets. - fn len(&self) -> usize { - match self { - Self::Legacy(targets) => targets.len(), - Self::V2(targets) => targets.account_targets.len(), - } - } - - /// Returns the total storage slot count across all accounts. 
- fn storage_count(&self) -> usize { - match self { - Self::Legacy(targets) => targets.values().map(|slots| slots.len()).sum(), - Self::V2(targets) => targets.storage_targets.values().map(|slots| slots.len()).sum(), - } - } - - /// Returns the number of items that will be considered during chunking. - fn chunking_length(&self) -> usize { - match self { - Self::Legacy(targets) => targets.chunking_length(), - Self::V2(targets) => { - // For V2, count accounts + storage slots - targets.account_targets.len() + - targets.storage_targets.values().map(|slots| slots.len()).sum::() - } - } - } - - /// Retains the targets representing the difference with another `MultiProofTargets`. - /// Removes all targets that are already present in `other`. - fn retain_difference(&mut self, other: &MultiProofTargets) { - match self { - Self::Legacy(targets) => { - targets.retain_difference(other); - } - Self::V2(targets) => { - // Remove account targets that exist in other - targets.account_targets.retain(|target| !other.contains_key(&target.key())); - - // For each account in storage_targets, remove slots that exist in other - targets.storage_targets.retain(|hashed_address, slots| { - if let Some(other_slots) = other.get(hashed_address) { - slots.retain(|slot| !other_slots.contains(&slot.key())); - !slots.is_empty() - } else { - true - } - }); - } - } - } - - /// Extends this `VersionedMultiProofTargets` with the contents of another. - /// - /// Panics if the variants do not match. 
- fn extend(&mut self, other: Self) { - match (self, other) { - (Self::Legacy(dest), Self::Legacy(src)) => { - dest.extend(src); - } - (Self::V2(dest), Self::V2(src)) => { - dest.account_targets.extend(src.account_targets); - for (addr, slots) in src.storage_targets { - dest.storage_targets.entry(addr).or_default().extend(slots); - } - } - _ => panic!("Cannot extend VersionedMultiProofTargets with mismatched variants"), - } - } - - /// Chunks this `VersionedMultiProofTargets` into smaller chunks of the given size. - fn chunks(self, chunk_size: usize) -> Box> { - match self { - Self::Legacy(targets) => { - Box::new(MultiProofTargets::chunks(targets, chunk_size).map(Self::Legacy)) - } - Self::V2(targets) => { - Box::new(ChunkedMultiProofTargetsV2::new(targets, chunk_size).map(Self::V2)) - } - } - } -} - /// Input parameters for dispatching a multiproof calculation. #[derive(Debug)] struct MultiproofInput { source: Option, hashed_state_update: HashedPostState, - proof_targets: VersionedMultiProofTargets, + proof_targets: MultiProofTargets, proof_sequence_number: u64, state_root_message_sender: CrossbeamSender, multi_added_removed_keys: Option>, @@ -414,6 +263,8 @@ pub struct MultiproofManager { proof_result_tx: CrossbeamSender, /// Metrics metrics: MultiProofTaskMetrics, + /// Whether to use V2 storage proofs + v2_proofs_enabled: bool, } impl MultiproofManager { @@ -427,7 +278,9 @@ impl MultiproofManager { metrics.max_storage_workers.set(proof_worker_handle.total_storage_workers() as f64); metrics.max_account_workers.set(proof_worker_handle.total_account_workers() as f64); - Self { metrics, proof_worker_handle, proof_result_tx } + let v2_proofs_enabled = proof_worker_handle.v2_proofs_enabled(); + + Self { metrics, proof_worker_handle, proof_result_tx, v2_proofs_enabled } } /// Dispatches a new multiproof calculation to worker pools. 
@@ -472,48 +325,41 @@ impl MultiproofManager { multi_added_removed_keys, } = multiproof_input; + let account_targets = proof_targets.len(); + let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); + trace!( target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?proof_targets, - account_targets = proof_targets.account_targets_len(), - storage_targets = proof_targets.storage_targets_len(), + account_targets, + storage_targets, ?source, "Dispatching multiproof to workers" ); let start = Instant::now(); - // Workers will send ProofResultMessage directly to proof_result_rx - let proof_result_sender = ProofResultContext::new( - self.proof_result_tx.clone(), - proof_sequence_number, - hashed_state_update, - start, - ); - - let input = match proof_targets { - VersionedMultiProofTargets::Legacy(proof_targets) => { - // Extend prefix sets with targets - let frozen_prefix_sets = ParallelProof::extend_prefix_sets_with_targets( - &Default::default(), - &proof_targets, - ); + // Extend prefix sets with targets + let frozen_prefix_sets = + ParallelProof::extend_prefix_sets_with_targets(&Default::default(), &proof_targets); - AccountMultiproofInput::Legacy { - targets: proof_targets, - prefix_sets: frozen_prefix_sets, - collect_branch_node_masks: true, - multi_added_removed_keys, - proof_result_sender, - } - } - VersionedMultiProofTargets::V2(proof_targets) => { - AccountMultiproofInput::V2 { targets: proof_targets, proof_result_sender } - } + // Dispatch account multiproof to worker pool with result sender + let input = AccountMultiproofInput { + targets: proof_targets, + prefix_sets: frozen_prefix_sets, + collect_branch_node_masks: true, + multi_added_removed_keys, + // Workers will send ProofResultMessage directly to proof_result_rx + proof_result_sender: ProofResultContext::new( + self.proof_result_tx.clone(), + proof_sequence_number, + hashed_state_update, + start, + ), + v2_proofs_enabled: self.v2_proofs_enabled, }; - // 
Dispatch account multiproof to worker pool with result sender if let Err(e) = self.proof_worker_handle.dispatch_account_multiproof(input) { error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch account multiproof"); return; @@ -715,9 +561,6 @@ pub(super) struct MultiProofTask { /// there are any active workers and force chunking across workers. This is to prevent tasks /// which are very long from hitting a single worker. max_targets_for_chunking: usize, - /// Whether or not V2 proof calculation is enabled. If enabled then [`MultiProofTargetsV2`] - /// will be produced by state updates. - v2_proofs_enabled: bool, } impl MultiProofTask { @@ -749,16 +592,9 @@ impl MultiProofTask { ), metrics, max_targets_for_chunking: DEFAULT_MAX_TARGETS_FOR_CHUNKING, - v2_proofs_enabled: false, } } - /// Enables V2 proof target generation on state updates. - pub(super) const fn with_v2_proofs_enabled(mut self, v2_proofs_enabled: bool) -> Self { - self.v2_proofs_enabled = v2_proofs_enabled; - self - } - /// Handles request for proof prefetch. /// /// Returns how many multiproof tasks were dispatched for the prefetch request. @@ -766,29 +602,37 @@ impl MultiProofTask { level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, - fields(accounts = targets.account_targets_len(), chunks = 0) + fields(accounts = targets.len(), chunks = 0) )] - fn on_prefetch_proof(&mut self, mut targets: VersionedMultiProofTargets) -> u64 { + fn on_prefetch_proof(&mut self, mut targets: MultiProofTargets) -> u64 { // Remove already fetched proof targets to avoid redundant work. targets.retain_difference(&self.fetched_proof_targets); - extend_multiproof_targets(&mut self.fetched_proof_targets, &targets); + self.fetched_proof_targets.extend_ref(&targets); - // For Legacy multiproofs, make sure all target accounts have an `AddedRemovedKeySet` in the + // Make sure all target accounts have an `AddedRemovedKeySet` in the // [`MultiAddedRemovedKeys`]. 
Even if there are not any known removed keys for the account, // we still want to optimistically fetch extension children for the leaf addition case. - // V2 multiproofs don't need this. - let multi_added_removed_keys = - if let VersionedMultiProofTargets::Legacy(legacy_targets) = &targets { - self.multi_added_removed_keys.touch_accounts(legacy_targets.keys().copied()); - Some(Arc::new(self.multi_added_removed_keys.clone())) - } else { - None - }; + self.multi_added_removed_keys.touch_accounts(targets.keys().copied()); + + // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks + let multi_added_removed_keys = Arc::new(MultiAddedRemovedKeys { + account: self.multi_added_removed_keys.account.clone(), + storages: targets + .keys() + .filter_map(|account| { + self.multi_added_removed_keys + .storages + .get(account) + .cloned() + .map(|keys| (*account, keys)) + }) + .collect(), + }); self.metrics.prefetch_proof_targets_accounts_histogram.record(targets.len() as f64); self.metrics .prefetch_proof_targets_storages_histogram - .record(targets.storage_count() as f64); + .record(targets.values().map(|slots| slots.len()).sum::() as f64); let chunking_len = targets.chunking_length(); let available_account_workers = @@ -802,7 +646,7 @@ impl MultiProofTask { self.max_targets_for_chunking, available_account_workers, available_storage_workers, - VersionedMultiProofTargets::chunks, + MultiProofTargets::chunks, |proof_targets| { self.multiproof_manager.dispatch(MultiproofInput { source: None, @@ -810,7 +654,7 @@ impl MultiProofTask { proof_targets, proof_sequence_number: self.proof_sequencer.next_sequence(), state_root_message_sender: self.tx.clone(), - multi_added_removed_keys: multi_added_removed_keys.clone(), + multi_added_removed_keys: Some(multi_added_removed_keys.clone()), }); }, ); @@ -913,7 +757,6 @@ impl MultiProofTask { self.multiproof_manager.proof_worker_handle.available_account_workers(); let available_storage_workers = 
self.multiproof_manager.proof_worker_handle.available_storage_workers(); - let num_chunks = dispatch_with_chunking( not_fetched_state_update, chunking_len, @@ -927,9 +770,8 @@ impl MultiProofTask { &hashed_state_update, &self.fetched_proof_targets, &multi_added_removed_keys, - self.v2_proofs_enabled, ); - extend_multiproof_targets(&mut spawned_proof_targets, &proof_targets); + spawned_proof_targets.extend_ref(&proof_targets); self.multiproof_manager.dispatch(MultiproofInput { source: Some(source), @@ -1029,10 +871,7 @@ impl MultiProofTask { batch_metrics.proofs_processed += 1; if let Some(combined_update) = self.on_proof( sequence_number, - SparseTrieUpdate { - state, - multiproof: ProofResult::empty(self.v2_proofs_enabled), - }, + SparseTrieUpdate { state, multiproof: Default::default() }, ) { let _ = self.to_sparse_trie.send(combined_update); } @@ -1059,7 +898,8 @@ impl MultiProofTask { } let account_targets = merged_targets.len(); - let storage_targets = merged_targets.storage_count(); + let storage_targets = + merged_targets.values().map(|slots| slots.len()).sum::(); batch_metrics.prefetch_proofs_requested += self.on_prefetch_proof(merged_targets); trace!( target: "engine::tree::payload_processor::multiproof", @@ -1163,10 +1003,7 @@ impl MultiProofTask { if let Some(combined_update) = self.on_proof( sequence_number, - SparseTrieUpdate { - state, - multiproof: ProofResult::empty(self.v2_proofs_enabled), - }, + SparseTrieUpdate { state, multiproof: Default::default() }, ) { let _ = self.to_sparse_trie.send(combined_update); } @@ -1269,7 +1106,7 @@ impl MultiProofTask { let update = SparseTrieUpdate { state: proof_result.state, - multiproof: proof_result_data, + multiproof: proof_result_data.proof, }; if let Some(combined_update) = @@ -1359,7 +1196,7 @@ struct MultiproofBatchCtx { /// received. updates_finished_time: Option, /// Reusable buffer for accumulating prefetch targets during batching. 
- accumulated_prefetch_targets: Vec, + accumulated_prefetch_targets: Vec, } impl MultiproofBatchCtx { @@ -1405,77 +1242,40 @@ fn get_proof_targets( state_update: &HashedPostState, fetched_proof_targets: &MultiProofTargets, multi_added_removed_keys: &MultiAddedRemovedKeys, - v2_enabled: bool, -) -> VersionedMultiProofTargets { - if v2_enabled { - let mut targets = MultiProofTargetsV2::default(); - - // first collect all new accounts (not previously fetched) - for &hashed_address in state_update.accounts.keys() { - if !fetched_proof_targets.contains_key(&hashed_address) { - targets.account_targets.push(hashed_address.into()); - } - } - - // then process storage slots for all accounts in the state update - for (hashed_address, storage) in &state_update.storages { - let fetched = fetched_proof_targets.get(hashed_address); - - // If the storage is wiped, we still need to fetch the account proof. - if storage.wiped && fetched.is_none() { - targets.account_targets.push(Into::::into(*hashed_address)); - continue - } - - let changed_slots = storage - .storage - .keys() - .filter(|slot| !fetched.is_some_and(|f| f.contains(*slot))) - .map(|slot| Into::::into(*slot)) - .collect::>(); +) -> MultiProofTargets { + let mut targets = MultiProofTargets::default(); - if !changed_slots.is_empty() { - targets.account_targets.push((*hashed_address).into()); - targets.storage_targets.insert(*hashed_address, changed_slots); - } + // first collect all new accounts (not previously fetched) + for hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(hashed_address) { + targets.insert(*hashed_address, HashSet::default()); } + } - VersionedMultiProofTargets::V2(targets) - } else { - let mut targets = MultiProofTargets::default(); + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + let storage_added_removed_keys = 
multi_added_removed_keys.get_storage(hashed_address); + let mut changed_slots = storage + .storage + .keys() + .filter(|slot| { + !fetched.is_some_and(|f| f.contains(*slot)) || + storage_added_removed_keys.is_some_and(|k| k.is_removed(slot)) + }) + .peekable(); - // first collect all new accounts (not previously fetched) - for hashed_address in state_update.accounts.keys() { - if !fetched_proof_targets.contains_key(hashed_address) { - targets.insert(*hashed_address, HashSet::default()); - } + // If the storage is wiped, we still need to fetch the account proof. + if storage.wiped && fetched.is_none() { + targets.entry(*hashed_address).or_default(); } - // then process storage slots for all accounts in the state update - for (hashed_address, storage) in &state_update.storages { - let fetched = fetched_proof_targets.get(hashed_address); - let storage_added_removed_keys = multi_added_removed_keys.get_storage(hashed_address); - let mut changed_slots = storage - .storage - .keys() - .filter(|slot| { - !fetched.is_some_and(|f| f.contains(*slot)) || - storage_added_removed_keys.is_some_and(|k| k.is_removed(slot)) - }) - .peekable(); - - // If the storage is wiped, we still need to fetch the account proof. 
- if storage.wiped && fetched.is_none() { - targets.entry(*hashed_address).or_default(); - } - - if changed_slots.peek().is_some() { - targets.entry(*hashed_address).or_default().extend(changed_slots); - } + if changed_slots.peek().is_some() { + targets.entry(*hashed_address).or_default().extend(changed_slots); } - - VersionedMultiProofTargets::Legacy(targets) } + + targets } /// Dispatches work items as a single unit or in chunks based on target size and worker @@ -1681,24 +1481,12 @@ mod tests { state } - fn unwrap_legacy_targets(targets: VersionedMultiProofTargets) -> MultiProofTargets { - match targets { - VersionedMultiProofTargets::Legacy(targets) => targets, - VersionedMultiProofTargets::V2(_) => panic!("Expected Legacy targets"), - } - } - #[test] fn test_get_proof_targets_new_account_targets() { let state = create_get_proof_targets_state(); let fetched = MultiProofTargets::default(); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &MultiAddedRemovedKeys::new(), - false, - )); + let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); // should return all accounts as targets since nothing was fetched before assert_eq!(targets.len(), state.accounts.len()); @@ -1712,12 +1500,7 @@ mod tests { let state = create_get_proof_targets_state(); let fetched = MultiProofTargets::default(); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &MultiAddedRemovedKeys::new(), - false, - )); + let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); // verify storage slots are included for accounts with storage for (addr, storage) in &state.storages { @@ -1745,12 +1528,7 @@ mod tests { // mark the account as already fetched fetched.insert(*fetched_addr, HashSet::default()); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &MultiAddedRemovedKeys::new(), - false, - )); + let targets = get_proof_targets(&state, &fetched, 
&MultiAddedRemovedKeys::new()); // should not include the already fetched account since it has no storage updates assert!(!targets.contains_key(fetched_addr)); @@ -1770,12 +1548,7 @@ mod tests { fetched_slots.insert(fetched_slot); fetched.insert(*addr, fetched_slots); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &MultiAddedRemovedKeys::new(), - false, - )); + let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); // should not include the already fetched storage slot let target_slots = &targets[addr]; @@ -1788,12 +1561,7 @@ mod tests { let state = HashedPostState::default(); let fetched = MultiProofTargets::default(); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &MultiAddedRemovedKeys::new(), - false, - )); + let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); assert!(targets.is_empty()); } @@ -1820,12 +1588,7 @@ mod tests { fetched_slots.insert(slot1); fetched.insert(addr1, fetched_slots); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &MultiAddedRemovedKeys::new(), - false, - )); + let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); assert!(targets.contains_key(&addr2)); assert!(!targets[&addr1].contains(&slot1)); @@ -1851,12 +1614,7 @@ mod tests { assert!(!state.accounts.contains_key(&addr)); assert!(!fetched.contains_key(&addr)); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &MultiAddedRemovedKeys::new(), - false, - )); + let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); // verify that we still get the storage slots for the unmodified account assert!(targets.contains_key(&addr)); @@ -1898,12 +1656,7 @@ mod tests { removed_state.storages.insert(addr, removed_storage); multi_added_removed_keys.update_with_state(&removed_state); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - 
&multi_added_removed_keys, - false, - )); + let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); // slot1 should be included despite being fetched, because it's marked as removed assert!(targets.contains_key(&addr)); @@ -1930,12 +1683,7 @@ mod tests { storage.storage.insert(slot1, U256::from(100)); state.storages.insert(addr, storage); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &multi_added_removed_keys, - false, - )); + let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); // account should be included because storage is wiped and account wasn't fetched assert!(targets.contains_key(&addr)); @@ -1978,12 +1726,7 @@ mod tests { removed_state.storages.insert(addr, removed_storage); multi_added_removed_keys.update_with_state(&removed_state); - let targets = unwrap_legacy_targets(get_proof_targets( - &state, - &fetched, - &multi_added_removed_keys, - false, - )); + let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); // only slots in the state update can be included, so slot3 should not appear assert!(!targets.contains_key(&addr)); @@ -2010,12 +1753,9 @@ mod tests { targets3.insert(addr3, HashSet::default()); let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets1))) - .unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets2))) - .unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets3))) - .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets3)).unwrap(); let proofs_requested = if let Ok(MultiProofMessage::PrefetchProofs(targets)) = task.rx.recv() { @@ -2029,12 +1769,11 @@ mod tests { assert_eq!(num_batched, 3); assert_eq!(merged_targets.len(), 3); - let legacy_targets = 
unwrap_legacy_targets(merged_targets); - assert!(legacy_targets.contains_key(&addr1)); - assert!(legacy_targets.contains_key(&addr2)); - assert!(legacy_targets.contains_key(&addr3)); + assert!(merged_targets.contains_key(&addr1)); + assert!(merged_targets.contains_key(&addr2)); + assert!(merged_targets.contains_key(&addr3)); - task.on_prefetch_proof(VersionedMultiProofTargets::Legacy(legacy_targets)) + task.on_prefetch_proof(merged_targets) } else { panic!("Expected PrefetchProofs message"); }; @@ -2109,16 +1848,11 @@ mod tests { // Queue: [PrefetchProofs1, PrefetchProofs2, StateUpdate1, StateUpdate2, PrefetchProofs3] let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets1))) - .unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets2))) - .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update1)).unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update2)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy( - targets3.clone(), - ))) - .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets3.clone())).unwrap(); // Step 1: Receive and batch PrefetchProofs (should get targets1 + targets2) let mut pending_msg: Option = None; @@ -2144,10 +1878,9 @@ mod tests { // Should have batched exactly 2 PrefetchProofs (not 3!) 
assert_eq!(num_batched, 2, "Should batch only until different message type"); assert_eq!(merged_targets.len(), 2); - let legacy_targets = unwrap_legacy_targets(merged_targets); - assert!(legacy_targets.contains_key(&addr1)); - assert!(legacy_targets.contains_key(&addr2)); - assert!(!legacy_targets.contains_key(&addr3), "addr3 should NOT be in first batch"); + assert!(merged_targets.contains_key(&addr1)); + assert!(merged_targets.contains_key(&addr2)); + assert!(!merged_targets.contains_key(&addr3), "addr3 should NOT be in first batch"); } else { panic!("Expected PrefetchProofs message"); } @@ -2172,8 +1905,7 @@ mod tests { match task.rx.try_recv() { Ok(MultiProofMessage::PrefetchProofs(targets)) => { assert_eq!(targets.len(), 1); - let legacy_targets = unwrap_legacy_targets(targets); - assert!(legacy_targets.contains_key(&addr3)); + assert!(targets.contains_key(&addr3)); } _ => panic!("PrefetchProofs3 was lost!"), } @@ -2219,13 +1951,9 @@ mod tests { let source = StateChangeSource::Transaction(99); let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(prefetch1))) - .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(prefetch1)).unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy( - prefetch2.clone(), - ))) - .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(prefetch2.clone())).unwrap(); let mut ctx = MultiproofBatchCtx::new(Instant::now()); let mut batch_metrics = MultiproofBatchMetrics::default(); @@ -2258,8 +1986,7 @@ mod tests { match task.rx.try_recv() { Ok(MultiProofMessage::PrefetchProofs(targets)) => { assert_eq!(targets.len(), 1); - let legacy_targets = unwrap_legacy_targets(targets); - assert!(legacy_targets.contains_key(&prefetch_addr2)); + assert!(targets.contains_key(&prefetch_addr2)); } other => panic!("Expected PrefetchProofs2 in channel, got {:?}", other), } diff --git 
a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 1083450549d..6021098627c 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -16,7 +16,7 @@ use crate::tree::{ payload_processor::{ bal::{total_slots, BALSlotIter}, executor::WorkloadExecutor, - multiproof::{MultiProofMessage, VersionedMultiProofTargets}, + multiproof::MultiProofMessage, ExecutionCache as PayloadExecutionCache, }, precompile_cache::{CachedPrecompile, PrecompileCacheMap}, @@ -237,7 +237,7 @@ where } /// If configured and the tx returned proof targets, emit the targets the transaction produced - fn send_multi_proof_targets(&self, targets: Option) { + fn send_multi_proof_targets(&self, targets: Option) { if self.is_execution_terminated() { // if execution is already terminated then we dont need to send more proof fetch // messages @@ -479,8 +479,6 @@ where pub(super) terminate_execution: Arc, pub(super) precompile_cache_disabled: bool, pub(super) precompile_cache_map: PrecompileCacheMap>, - /// Whether V2 proof calculation is enabled. - pub(super) v2_proofs_enabled: bool, } impl PrewarmContext @@ -489,12 +487,10 @@ where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, Evm: ConfigureEvm + 'static, { - /// Splits this context into an evm, an evm config, metrics, the atomic bool for terminating - /// execution, and whether V2 proofs are enabled. + /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating + /// execution. 
#[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] - fn evm_for_ctx( - self, - ) -> Option<(EvmFor, PrewarmMetrics, Arc, bool)> { + fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { let Self { env, evm_config, @@ -504,7 +500,6 @@ where terminate_execution, precompile_cache_disabled, precompile_cache_map, - v2_proofs_enabled, } = self; let mut state_provider = match provider.build() { @@ -554,7 +549,7 @@ where }); } - Some((evm, metrics, terminate_execution, v2_proofs_enabled)) + Some((evm, metrics, terminate_execution)) } /// Accepts an [`mpsc::Receiver`] of transactions and a handle to prewarm task. Executes @@ -575,10 +570,7 @@ where ) where Tx: ExecutableTxFor, { - let Some((mut evm, metrics, terminate_execution, v2_proofs_enabled)) = self.evm_for_ctx() - else { - return - }; + let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; while let Ok(IndexedTransaction { index, tx }) = { let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") @@ -641,8 +633,7 @@ where let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) .entered(); - let (targets, storage_targets) = - multiproof_targets_from_state(res.state, v2_proofs_enabled); + let (targets, storage_targets) = multiproof_targets_from_state(res.state); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); drop(_enter); @@ -787,22 +778,9 @@ where } } -/// Returns a set of [`VersionedMultiProofTargets`] and the total amount of storage targets, based -/// on the given state. 
-fn multiproof_targets_from_state( - state: EvmState, - v2_enabled: bool, -) -> (VersionedMultiProofTargets, usize) { - if v2_enabled { - multiproof_targets_v2_from_state(state) - } else { - multiproof_targets_legacy_from_state(state) - } -} - -/// Returns legacy [`MultiProofTargets`] and the total amount of storage targets, based on the +/// Returns a set of [`MultiProofTargets`] and the total amount of storage targets, based on the /// given state. -fn multiproof_targets_legacy_from_state(state: EvmState) -> (VersionedMultiProofTargets, usize) { +fn multiproof_targets_from_state(state: EvmState) -> (MultiProofTargets, usize) { let mut targets = MultiProofTargets::with_capacity(state.len()); let mut storage_targets = 0; for (addr, account) in state { @@ -832,50 +810,7 @@ fn multiproof_targets_legacy_from_state(state: EvmState) -> (VersionedMultiProof targets.insert(keccak256(addr), storage_set); } - (VersionedMultiProofTargets::Legacy(targets), storage_targets) -} - -/// Returns V2 [`reth_trie_parallel::targets_v2::MultiProofTargetsV2`] and the total amount of -/// storage targets, based on the given state. 
-fn multiproof_targets_v2_from_state(state: EvmState) -> (VersionedMultiProofTargets, usize) { - use reth_trie::proof_v2; - use reth_trie_parallel::targets_v2::MultiProofTargetsV2; - - let mut targets = MultiProofTargetsV2::default(); - let mut storage_target_count = 0; - for (addr, account) in state { - // if the account was not touched, or if the account was selfdestructed, do not - // fetch proofs for it - // - // Since selfdestruct can only happen in the same transaction, we can skip - // prefetching proofs for selfdestructed accounts - // - // See: https://eips.ethereum.org/EIPS/eip-6780 - if !account.is_touched() || account.is_selfdestructed() { - continue - } - - let hashed_address = keccak256(addr); - targets.account_targets.push(hashed_address.into()); - - let mut storage_slots = Vec::with_capacity(account.storage.len()); - for (key, slot) in account.storage { - // do nothing if unchanged - if !slot.is_changed() { - continue - } - - let hashed_slot = keccak256(B256::new(key.to_be_bytes())); - storage_slots.push(proof_v2::Target::from(hashed_slot)); - } - - storage_target_count += storage_slots.len(); - if !storage_slots.is_empty() { - targets.storage_targets.insert(hashed_address, storage_slots); - } - } - - (VersionedMultiProofTargets::V2(targets), storage_target_count) + (targets, storage_targets) } /// The events the pre-warm task can handle. 
@@ -900,7 +835,7 @@ pub(super) enum PrewarmTaskEvent { /// The outcome of a pre-warm task Outcome { /// The prepared proof targets based on the evm state outcome - proof_targets: Option, + proof_targets: Option, }, /// Finished executing all transactions FinishedTxExecution { diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 052fd8672b2..b4c150cfa9a 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -4,7 +4,7 @@ use crate::tree::payload_processor::multiproof::{MultiProofTaskMetrics, SparseTr use alloy_primitives::B256; use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_trie::{updates::TrieUpdates, Nibbles}; -use reth_trie_parallel::{proof_task::ProofResult, root::ParallelStateRootError}; +use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{ errors::{SparseStateTrieResult, SparseTrieErrorKind}, provider::{TrieNodeProvider, TrieNodeProviderFactory}, @@ -97,8 +97,8 @@ where debug!( target: "engine::root", num_updates, - account_proofs = update.multiproof.account_proofs_len(), - storage_proofs = update.multiproof.storage_proofs_len(), + account_proofs = update.multiproof.account_subtree.len(), + storage_proofs = update.multiproof.storages.len(), "Updating sparse trie" ); @@ -157,14 +157,7 @@ where let started_at = Instant::now(); // Reveal new accounts and storage slots. 
- match multiproof { - ProofResult::Legacy(decoded, _) => { - trie.reveal_decoded_multiproof(decoded)?; - } - ProofResult::V2(decoded_v2) => { - trie.reveal_decoded_multiproof_v2(decoded_v2)?; - } - } + trie.reveal_decoded_multiproof(multiproof)?; let reveal_multiproof_elapsed = started_at.elapsed(); trace!( target: "engine::root::sparse", diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 812dd2b85b1..d64f2dfb519 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth -reth-primitives-traits.workspace = true reth-execution-errors.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index d42534c2713..7bf936bad3a 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -197,7 +197,7 @@ impl ParallelProof { let (result_tx, result_rx) = crossbeam_unbounded(); let account_multiproof_start_time = Instant::now(); - let input = AccountMultiproofInput::Legacy { + let input = AccountMultiproofInput { targets, prefix_sets, collect_branch_node_masks: self.collect_branch_node_masks, @@ -208,6 +208,7 @@ impl ParallelProof { HashedPostState::default(), account_multiproof_start_time, ), + v2_proofs_enabled: self.v2_proofs_enabled, }; self.proof_worker_handle @@ -221,9 +222,7 @@ impl ParallelProof { ) })?; - let ProofResult::Legacy(multiproof, stats) = proof_result_msg.result? 
else { - panic!("AccountMultiproofInput::Legacy was submitted, expected legacy result") - }; + let ProofResult { proof: multiproof, stats } = proof_result_msg.result?; #[cfg(feature = "metrics")] self.metrics.record(stats); @@ -236,7 +235,7 @@ impl ParallelProof { leaves_added = stats.leaves_added(), missed_leaves = stats.missed_leaves(), precomputed_storage_roots = stats.precomputed_storage_roots(), - "Calculated decoded proof", + "Calculated decoded proof" ); Ok(multiproof) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 076931f48c7..eb6f8923469 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -32,8 +32,6 @@ use crate::{ root::ParallelStateRootError, stats::{ParallelTrieStats, ParallelTrieTracker}, - targets_v2::MultiProofTargetsV2, - value_encoder::AsyncAccountValueEncoder, StorageRootTargets, }; use alloy_primitives::{ @@ -51,11 +49,11 @@ use reth_trie::{ node_iter::{TrieElement, TrieNodeIter}, prefix_set::TriePrefixSets, proof::{ProofBlindedAccountProvider, ProofBlindedStorageProvider, StorageProof}, - proof_v2, + proof_v2::{self, StorageProofCalculator}, trie_cursor::{InstrumentedTrieCursor, TrieCursorFactory, TrieCursorMetricsCache}, walker::TrieWalker, - DecodedMultiProof, DecodedMultiProofV2, DecodedStorageMultiProof, HashBuilder, HashedPostState, - MultiProofTargets, Nibbles, ProofTrieNode, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, MultiProofTargets, + Nibbles, ProofTrieNode, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, @@ -222,8 +220,7 @@ impl ProofWorkerHandle { metrics, #[cfg(feature = "metrics")] cursor_metrics, - ) - .with_v2_proofs(v2_proofs_enabled); + ); if let Err(error) = worker.run() { error!( target: "trie::proof_task", @@ -336,12 +333,16 @@ impl ProofWorkerHandle { ProviderError::other(std::io::Error::other("account workers 
unavailable")); if let AccountWorkerJob::AccountMultiproof { input } = err.0 { - let ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - } = input.into_proof_result_sender(); + let AccountMultiproofInput { + proof_result_sender: + ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + }, + .. + } = *input; let _ = result_tx.send(ProofResultMessage { sequence_number: seq, @@ -604,65 +605,11 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { /// Result of a multiproof calculation. #[derive(Debug)] -pub enum ProofResult { - /// Legacy multiproof calculation result. - Legacy(DecodedMultiProof, ParallelTrieStats), - /// V2 multiproof calculation result. - V2(DecodedMultiProofV2), -} - -impl ProofResult { - /// Creates an empty [`ProofResult`] of the appropriate variant based on `v2_enabled`. - /// - /// Use this when constructing empty proofs (e.g., for state updates where all targets - /// were already fetched) to ensure consistency with the proof version being used. - pub fn empty(v2_enabled: bool) -> Self { - if v2_enabled { - Self::V2(DecodedMultiProofV2::default()) - } else { - let stats = ParallelTrieTracker::default().finish(); - Self::Legacy(DecodedMultiProof::default(), stats) - } - } - - /// Returns true if the result contains no proofs - pub fn is_empty(&self) -> bool { - match self { - Self::Legacy(proof, _) => proof.is_empty(), - Self::V2(proof) => proof.is_empty(), - } - } - - /// Extends the receiver with the value of the given results. - /// - /// # Panics - /// - /// This method panics if the two [`ProofResult`]s are not the same variant. 
- pub fn extend(&mut self, other: Self) { - match (self, other) { - (Self::Legacy(proof, _), Self::Legacy(other, _)) => proof.extend(other), - (Self::V2(proof), Self::V2(other)) => proof.extend(other), - _ => panic!("mismatched ProofResults, cannot extend one with the other"), - } - } - - /// Returns the number of account proofs. - pub fn account_proofs_len(&self) -> usize { - match self { - Self::Legacy(proof, _) => proof.account_subtree.len(), - Self::V2(proof) => proof.account_proofs.len(), - } - } - - /// Returns the total number of storage proofs - pub fn storage_proofs_len(&self) -> usize { - match self { - Self::Legacy(proof, _) => { - proof.storages.values().map(|p| p.subtree.len()).sum::() - } - Self::V2(proof) => proof.storage_proofs.values().map(|p| p.len()).sum::(), - } - } +pub struct ProofResult { + /// The account multiproof + pub proof: DecodedMultiProof, + /// Statistics collected during proof computation + pub stats: ParallelTrieStats, } /// Channel used by worker threads to deliver `ProofResultMessage` items back to @@ -942,7 +889,7 @@ where &self, proof_tx: &ProofTaskTx, v2_calculator: Option< - &mut proof_v2::StorageProofCalculator< + &mut StorageProofCalculator< ::StorageTrieCursor<'_>, ::StorageCursor<'_>, >, @@ -1106,8 +1053,6 @@ struct AccountProofWorker { /// Cursor metrics for this worker #[cfg(feature = "metrics")] cursor_metrics: ProofTaskCursorMetrics, - /// Set to true if V2 proofs are enabled. - v2_enabled: bool, } impl AccountProofWorker @@ -1137,16 +1082,9 @@ where metrics, #[cfg(feature = "metrics")] cursor_metrics, - v2_enabled: false, } } - /// Changes whether or not V2 proofs are enabled. - const fn with_v2_proofs(mut self, v2_enabled: bool) -> Self { - self.v2_enabled = v2_enabled; - self - } - /// Runs the worker loop, processing jobs until the channel closes. 
/// /// # Lifecycle @@ -1179,17 +1117,6 @@ where let mut account_nodes_processed = 0u64; let mut cursor_metrics_cache = ProofTaskCursorMetricsCache::default(); - let mut v2_calculator = if self.v2_enabled { - let trie_cursor = proof_tx.provider.account_trie_cursor()?; - let hashed_cursor = proof_tx.provider.hashed_account_cursor()?; - Some(proof_v2::ProofCalculator::<_, _, AsyncAccountValueEncoder>::new( - trie_cursor, - hashed_cursor, - )) - } else { - None - }; - // Count this worker as available only after successful initialization. self.available_workers.fetch_add(1, Ordering::Relaxed); @@ -1201,7 +1128,6 @@ where AccountWorkerJob::AccountMultiproof { input } => { self.process_account_multiproof( &proof_tx, - v2_calculator.as_mut(), *input, &mut account_proofs_processed, &mut cursor_metrics_cache, @@ -1240,18 +1166,26 @@ where Ok(()) } - fn compute_legacy_account_multiproof( + /// Processes an account multiproof request. + fn process_account_multiproof( &self, proof_tx: &ProofTaskTx, - targets: MultiProofTargets, - mut prefix_sets: TriePrefixSets, - collect_branch_node_masks: bool, - multi_added_removed_keys: Option>, - proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, - ) -> Result - where + input: AccountMultiproofInput, + account_proofs_processed: &mut u64, + cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, + ) where Provider: TrieCursorFactory + HashedCursorFactory, { + let AccountMultiproofInput { + targets, + mut prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + proof_result_sender: + ProofResultContext { sender: result_tx, sequence_number: seq, state, start_time: start }, + v2_proofs_enabled, + } = input; + let span = debug_span!( target: "trie::proof_task", "Account multiproof calculation", @@ -1265,6 +1199,8 @@ where "Processing account multiproof" ); + let proof_start = Instant::now(); + let mut tracker = ParallelTrieTracker::default(); let mut storage_prefix_sets = std::mem::take(&mut 
prefix_sets.storage_prefix_sets); @@ -1274,14 +1210,29 @@ where tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - let storage_proof_receivers = dispatch_storage_proofs( + let storage_proof_receivers = match dispatch_storage_proofs( &self.storage_work_tx, &targets, &mut storage_prefix_sets, collect_branch_node_masks, multi_added_removed_keys.as_ref(), - )?; + v2_proofs_enabled, + ) { + Ok(receivers) => receivers, + Err(error) => { + // Send error through result channel + error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(error), + elapsed: start.elapsed(), + state, + }); + return; + } + }; + // Use the missed leaves cache passed from the multiproof manager let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); let ctx = AccountMultiproofParams { @@ -1293,115 +1244,17 @@ where cached_storage_roots: &self.cached_storage_roots, }; - let result = build_account_multiproof_with_storage_roots( - &proof_tx.provider, - ctx, - &mut tracker, - proof_cursor_metrics, - ); - - let stats = tracker.finish(); - result.map(|proof| ProofResult::Legacy(proof, stats)) - } - - fn compute_v2_account_multiproof( - &self, - v2_calculator: &mut proof_v2::ProofCalculator< - ::AccountTrieCursor<'_>, - ::AccountCursor<'_>, - AsyncAccountValueEncoder, - >, - targets: MultiProofTargetsV2, - ) -> Result - where - Provider: TrieCursorFactory + HashedCursorFactory, - { - let MultiProofTargetsV2 { mut account_targets, storage_targets } = targets; - - let span = debug_span!( - target: "trie::proof_task", - "Account V2 multiproof calculation", - account_targets = account_targets.len(), - storage_targets = storage_targets.values().map(|t| t.len()).sum::(), - worker_id = self.worker_id, - ); - let _span_guard = span.enter(); - - trace!(target: "trie::proof_task", "Processing V2 account multiproof"); - - let storage_proof_receivers = - 
dispatch_v2_storage_proofs(&self.storage_work_tx, &account_targets, storage_targets)?; - - let mut value_encoder = AsyncAccountValueEncoder::new( - self.storage_work_tx.clone(), - storage_proof_receivers, - self.cached_storage_roots.clone(), - ); - - let proof = DecodedMultiProofV2 { - account_proofs: v2_calculator.proof(&mut value_encoder, &mut account_targets)?, - storage_proofs: value_encoder.into_storage_proofs()?, - }; - - Ok(ProofResult::V2(proof)) - } + let result = + build_account_multiproof_with_storage_roots(&proof_tx.provider, ctx, &mut tracker); - /// Processes an account multiproof request. - fn process_account_multiproof( - &self, - proof_tx: &ProofTaskTx, - v2_calculator: Option< - &mut proof_v2::ProofCalculator< - ::AccountTrieCursor<'_>, - ::AccountCursor<'_>, - AsyncAccountValueEncoder, - >, - >, - input: AccountMultiproofInput, - account_proofs_processed: &mut u64, - cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, - ) where - Provider: TrieCursorFactory + HashedCursorFactory, - { - let mut proof_cursor_metrics = ProofTaskCursorMetricsCache::default(); - let proof_start = Instant::now(); - - let (proof_result_sender, result) = match input { - AccountMultiproofInput::Legacy { - targets, - prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys, - proof_result_sender, - } => ( - proof_result_sender, - self.compute_legacy_account_multiproof( - proof_tx, - targets, - prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys, - &mut proof_cursor_metrics, - ), - ), - AccountMultiproofInput::V2 { targets, proof_result_sender } => ( - proof_result_sender, - self.compute_v2_account_multiproof::( - v2_calculator.expect("v2 calculator provided"), - targets, - ), - ), - }; - - let ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - } = proof_result_sender; + let now = Instant::now(); + let proof_elapsed = now.duration_since(proof_start); + let total_elapsed = 
now.duration_since(start); + let proof_cursor_metrics = tracker.cursor_metrics; + proof_cursor_metrics.record_spans(); - let proof_elapsed = proof_start.elapsed(); - let total_elapsed = start.elapsed(); + let stats = tracker.finish(); + let result = result.map(|proof| ProofResult { proof, stats }); *account_proofs_processed += 1; // Send result to MultiProofTask @@ -1422,8 +1275,6 @@ where ); } - proof_cursor_metrics.record_spans(); - trace!( target: "trie::proof_task", proof_time_us = proof_elapsed.as_micros(), @@ -1504,7 +1355,6 @@ fn build_account_multiproof_with_storage_roots

( provider: &P, ctx: AccountMultiproofParams<'_>, tracker: &mut ParallelTrieTracker, - proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, ) -> Result where P: TrieCursorFactory + HashedCursorFactory, @@ -1512,12 +1362,15 @@ where let accounts_added_removed_keys = ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); + // Create local metrics caches for account cursors. We can't directly use the metrics caches in + // the tracker due to the call to `inc_missed_leaves` which occurs on it. + let mut account_trie_cursor_metrics = TrieCursorMetricsCache::default(); + let mut account_hashed_cursor_metrics = HashedCursorMetricsCache::default(); + // Wrap account trie cursor with instrumented cursor let account_trie_cursor = provider.account_trie_cursor().map_err(ProviderError::Database)?; - let account_trie_cursor = InstrumentedTrieCursor::new( - account_trie_cursor, - &mut proof_cursor_metrics.account_trie_cursor, - ); + let account_trie_cursor = + InstrumentedTrieCursor::new(account_trie_cursor, &mut account_trie_cursor_metrics); // Create the walker. 
let walker = TrieWalker::<_>::state_trie(account_trie_cursor, ctx.prefix_set) @@ -1544,10 +1397,8 @@ where // Wrap account hashed cursor with instrumented cursor let account_hashed_cursor = provider.hashed_account_cursor().map_err(ProviderError::Database)?; - let account_hashed_cursor = InstrumentedHashedCursor::new( - account_hashed_cursor, - &mut proof_cursor_metrics.account_hashed_cursor, - ); + let account_hashed_cursor = + InstrumentedHashedCursor::new(account_hashed_cursor, &mut account_hashed_cursor_metrics); let mut account_node_iter = TrieNodeIter::state_trie(walker, account_hashed_cursor); @@ -1611,10 +1462,10 @@ where StorageProof::new_hashed(provider, provider, hashed_address) .with_prefix_set_mut(Default::default()) .with_trie_cursor_metrics( - &mut proof_cursor_metrics.storage_trie_cursor, + &mut tracker.cursor_metrics.storage_trie_cursor, ) .with_hashed_cursor_metrics( - &mut proof_cursor_metrics.storage_hashed_cursor, + &mut tracker.cursor_metrics.storage_hashed_cursor, ) .storage_multiproof( ctx.targets @@ -1665,6 +1516,21 @@ where BranchNodeMasksMap::default() }; + // Extend tracker with accumulated metrics from account cursors + tracker.cursor_metrics.account_trie_cursor.extend(&account_trie_cursor_metrics); + tracker.cursor_metrics.account_hashed_cursor.extend(&account_hashed_cursor_metrics); + + // Consume remaining storage proof receivers for accounts not encountered during trie walk. + // Done last to allow storage workers more time to complete while we finalize the account trie.
+ for (hashed_address, receiver) in storage_proof_receivers { + if let Ok(proof_msg) = receiver.recv() { + let proof_result = proof_msg.result?; + let proof = Into::>::into(proof_result) + .expect("Partial proofs are not yet supported"); + collected_decoded_storages.insert(hashed_address, proof); + } + } + Ok(DecodedMultiProof { account_subtree: decoded_account_subtree, branch_node_masks, @@ -1684,6 +1550,7 @@ fn dispatch_storage_proofs( storage_prefix_sets: &mut B256Map, with_branch_node_masks: bool, multi_added_removed_keys: Option<&Arc>, + use_v2_proofs: bool, ) -> Result>, ParallelStateRootError> { let mut storage_proof_receivers = B256Map::with_capacity_and_hasher(targets.len(), Default::default()); @@ -1697,14 +1564,20 @@ fn dispatch_storage_proofs( let (result_tx, result_rx) = crossbeam_channel::unbounded(); // Create computation input based on V2 flag - let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); - let input = StorageProofInput::legacy( - *hashed_address, - prefix_set, - target_slots.clone(), - with_branch_node_masks, - multi_added_removed_keys.cloned(), - ); + let input = if use_v2_proofs { + // Convert target slots to V2 targets + let v2_targets = target_slots.iter().copied().map(Into::into).collect(); + StorageProofInput::new(*hashed_address, v2_targets) + } else { + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + StorageProofInput::legacy( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ) + }; // Always dispatch a storage proof so we obtain the storage root even when no slots are // requested. @@ -1722,64 +1595,6 @@ fn dispatch_storage_proofs( Ok(storage_proof_receivers) } - -/// Queues V2 storage proofs for all accounts in the targets and returns receivers. 
-/// -/// This function queues all storage proof tasks to the worker pool but returns immediately -/// with receivers, allowing the account trie walk to proceed in parallel with storage proof -/// computation. This enables interleaved parallelism for better performance. -/// -/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. -fn dispatch_v2_storage_proofs( - storage_work_tx: &CrossbeamSender, - account_targets: &Vec, - storage_targets: B256Map>, -) -> Result>, ParallelStateRootError> { - let mut storage_proof_receivers = - B256Map::with_capacity_and_hasher(account_targets.len(), Default::default()); - - // Dispatch all proofs for targeted storage slots - for (hashed_address, targets) in storage_targets { - // Create channel for receiving StorageProofResultMessage - let (result_tx, result_rx) = crossbeam_channel::unbounded(); - let input = StorageProofInput::new(hashed_address, targets); - - storage_work_tx - .send(StorageWorkerJob::StorageProof { input, proof_result_sender: result_tx }) - .map_err(|_| { - ParallelStateRootError::Other(format!( - "Failed to queue storage proof for {hashed_address:?}: storage worker pool unavailable", - )) - })?; - - storage_proof_receivers.insert(hashed_address, result_rx); - } - - // If there are any targeted accounts which did not have storage targets then we generate a - // single proof target for them so that we get their root. 
- for target in account_targets { - let hashed_address = target.key(); - if storage_proof_receivers.contains_key(&hashed_address) { - continue - } - - let (result_tx, result_rx) = crossbeam_channel::unbounded(); - let input = StorageProofInput::new(hashed_address, vec![proof_v2::Target::new(B256::ZERO)]); - - storage_work_tx - .send(StorageWorkerJob::StorageProof { input, proof_result_sender: result_tx }) - .map_err(|_| { - ParallelStateRootError::Other(format!( - "Failed to queue storage proof for {hashed_address:?}: storage worker pool unavailable", - )) - })?; - - storage_proof_receivers.insert(hashed_address, result_rx); - } - - Ok(storage_proof_receivers) -} - /// Input parameters for storage proof computation. #[derive(Debug)] pub enum StorageProofInput { @@ -1824,7 +1639,7 @@ impl StorageProofInput { } } - /// Creates a new [`StorageProofInput`] with the given hashed address and target slots. + /// Creates a new [`StorageProofInput`] with the given hashed address and target slots. pub const fn new(hashed_address: B256, targets: Vec) -> Self { Self::V2 { hashed_address, targets } } @@ -1840,39 +1655,20 @@ impl StorageProofInput { } /// Input parameters for account multiproof computation. -#[derive(Debug)] -pub enum AccountMultiproofInput { - /// Legacy account multiproof proof variant - Legacy { - /// The targets for which to compute the multiproof. - targets: MultiProofTargets, - /// The prefix sets for the proof calculation. - prefix_sets: TriePrefixSets, - /// Whether or not to collect branch node masks. - collect_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - multi_added_removed_keys: Option>, - /// Context for sending the proof result. - proof_result_sender: ProofResultContext, - }, - /// V2 account multiproof variant - V2 { - /// The targets for which to compute the multiproof. - targets: MultiProofTargetsV2, - /// Context for sending the proof result. 
- proof_result_sender: ProofResultContext, - }, -} - -impl AccountMultiproofInput { - /// Returns the [`ProofResultContext`] for this input, consuming the input. - fn into_proof_result_sender(self) -> ProofResultContext { - match self { - Self::Legacy { proof_result_sender, .. } | Self::V2 { proof_result_sender, .. } => { - proof_result_sender - } - } - } +#[derive(Debug, Clone)] +pub struct AccountMultiproofInput { + /// The targets for which to compute the multiproof. + pub targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + pub prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + pub collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + pub multi_added_removed_keys: Option>, + /// Context for sending the proof result. + pub proof_result_sender: ProofResultContext, + /// Whether to use V2 storage proofs. + pub v2_proofs_enabled: bool, } /// Parameters for building an account multiproof with pre-computed storage roots. 
diff --git a/crates/trie/parallel/src/stats.rs b/crates/trie/parallel/src/stats.rs index de5b0a628ef..088b95c9708 100644 --- a/crates/trie/parallel/src/stats.rs +++ b/crates/trie/parallel/src/stats.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "metrics")] +use crate::proof_task_metrics::ProofTaskCursorMetricsCache; use derive_more::Deref; use reth_trie::stats::{TrieStats, TrieTracker}; @@ -34,6 +36,9 @@ pub struct ParallelTrieTracker { trie: TrieTracker, precomputed_storage_roots: u64, missed_leaves: u64, + #[cfg(feature = "metrics")] + /// Local tracking of cursor-related metrics + pub cursor_metrics: ProofTaskCursorMetricsCache, } impl ParallelTrieTracker { diff --git a/crates/trie/parallel/src/value_encoder.rs b/crates/trie/parallel/src/value_encoder.rs index 7b08d3e1b5e..13c611922db 100644 --- a/crates/trie/parallel/src/value_encoder.rs +++ b/crates/trie/parallel/src/value_encoder.rs @@ -86,6 +86,7 @@ pub(crate) struct AsyncAccountValueEncoder { impl AsyncAccountValueEncoder { /// Initializes a [`Self`] using a `ProofWorkerHandle` which will be used to calculate storage /// roots asynchronously. + #[expect(dead_code)] pub(crate) fn new( storage_work_tx: CrossbeamSender, dispatched: B256Map>, @@ -105,6 +106,7 @@ impl AsyncAccountValueEncoder { /// /// This method panics if any deferred encoders produced by [`Self::deferred_encoder`] have not /// been dropped. 
+ #[expect(dead_code)] pub(crate) fn into_storage_proofs( self, ) -> Result>, StateProofError> { From 72e1467ba3a6ea2396c49902793e7de3845faf41 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 21 Jan 2026 21:21:53 +0000 Subject: [PATCH 127/267] fix(prune): avoid panic in tx lookup (#21275) --- .../prune/prune/src/segments/user/transaction_lookup.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index 74e0e29647c..bfdcd61a0c7 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -84,7 +84,14 @@ where .into_inner(); let tx_range = start..= Some(end) - .min(input.limiter.deleted_entries_limit_left().map(|left| start + left as u64 - 1)) + .min( + input + .limiter + .deleted_entries_limit_left() + // Use saturating addition here to avoid panicking on + // `deleted_entries_limit == usize::MAX` + .map(|left| start.saturating_add(left as u64) - 1), + ) .unwrap(); let tx_range_end = *tx_range.end(); From eb55c3c3da8d54c5ad15a1384c9ae707f7739a55 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 21 Jan 2026 22:09:42 +0000 Subject: [PATCH 128/267] feat(grafana): add RocksDB metrics dashboard (#21243) Co-authored-by: Amp --- crates/node/builder/src/launch/common.rs | 4 + crates/node/metrics/src/server.rs | 21 + crates/storage/provider/Cargo.toml | 2 +- .../src/providers/rocksdb/provider.rs | 38 + .../provider/src/providers/rocksdb_stub.rs | 9 +- etc/grafana/dashboards/overview.json | 1171 +++++++++++++---- 6 files changed, 1005 insertions(+), 240 deletions(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index d97ecab876b..c9237ac63b5 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -1277,6 
+1277,10 @@ pub fn metrics_hooks(provider_factory: &ProviderFactory) }) } }) + .with_hook({ + let rocksdb = provider_factory.rocksdb_provider(); + move || throttle!(Duration::from_secs(5 * 60), || rocksdb.report_metrics()) + }) .build() } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 4777e26853d..ea24e6572ee 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -106,6 +106,7 @@ impl MetricServer { // Describe metrics after recorder installation describe_db_metrics(); describe_static_file_metrics(); + describe_rocksdb_metrics(); Collector::default().describe(); describe_memory_stats(); describe_io_stats(); @@ -238,6 +239,26 @@ fn describe_static_file_metrics() { ); } +fn describe_rocksdb_metrics() { + describe_gauge!( + "rocksdb.table_size", + Unit::Bytes, + "The estimated size of a RocksDB table (SST + memtable)" + ); + describe_gauge!("rocksdb.table_entries", "The estimated number of keys in a RocksDB table"); + describe_gauge!( + "rocksdb.pending_compaction_bytes", + Unit::Bytes, + "Bytes pending compaction for a RocksDB table" + ); + describe_gauge!("rocksdb.sst_size", Unit::Bytes, "The size of SST files for a RocksDB table"); + describe_gauge!( + "rocksdb.memtable_size", + Unit::Bytes, + "The size of memtables for a RocksDB table" + ); +} + #[cfg(all(feature = "jemalloc", unix))] fn describe_memory_stats() { describe_gauge!( diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 2aa30ab1b9c..677c1c642bd 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -85,7 +85,7 @@ rand.workspace = true tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } [features] -edge = ["reth-storage-api/edge"] +edge = ["reth-storage-api/edge", "rocksdb"] rocksdb = ["dep:rocksdb"] test-utils = [ "reth-db/test-utils", diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs 
b/crates/storage/provider/src/providers/rocksdb/provider.rs index 7824059086c..d866f59cd95 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -3,9 +3,11 @@ use crate::providers::{compute_history_rank, needs_prev_shard_check, HistoryInfo use alloy_consensus::transaction::TxHashRef; use alloy_primitives::{Address, BlockNumber, TxNumber, B256}; use itertools::Itertools; +use metrics::Label; use parking_lot::Mutex; use reth_chain_state::ExecutedBlock; use reth_db_api::{ + database_metrics::DatabaseMetrics, models::{ sharded_key::NUM_OF_INDICES_IN_SHARD, storage_sharded_key::StorageShardedKey, ShardedKey, StorageSettings, @@ -529,6 +531,42 @@ impl Clone for RocksDBProvider { } } +impl DatabaseMetrics for RocksDBProvider { + fn gauge_metrics(&self) -> Vec<(&'static str, f64, Vec

::Provider: BlockReader + StageCheckpointReader + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, C: ConfigureEvm + 'static, T: PayloadTypes>, diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index b5f1272b67e..4664c1906c9 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1323,7 +1323,7 @@ mod tests { use reth_provider::{ providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, LatestStateProvider, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StorageChangeSetReader, }; use reth_trie::MultiProof; use reth_trie_db::ChangesetCache; @@ -1350,6 +1350,7 @@ mod tests { + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, > + Clone + Send diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 138fad5b168..2372256cabf 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -39,7 +39,7 @@ use reth_provider::{ providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, DatabaseProviderROFactory, HashedPostStateProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider, - StateProviderFactory, StateReader, + StateProviderFactory, StateReader, StorageChangeSetReader, }; use reth_revm::db::{states::bundle_state::BundleRetention, State}; use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot}; @@ -144,6 +144,7 @@ where + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + 
BlockNumReader, > + BlockReader

+ ChangeSetReader @@ -1336,6 +1337,7 @@ where + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, > + BlockReader
+ StateProviderFactory diff --git a/crates/node/core/src/args/static_files.rs b/crates/node/core/src/args/static_files.rs index d0048022cfc..ac710d0c9ee 100644 --- a/crates/node/core/src/args/static_files.rs +++ b/crates/node/core/src/args/static_files.rs @@ -2,6 +2,7 @@ use clap::Args; use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig}; +use reth_storage_api::StorageSettings; /// Blocks per static file when running in `--minimal` node. /// @@ -40,6 +41,10 @@ pub struct StaticFilesArgs { #[arg(long = "static-files.blocks-per-file.account-change-sets")] pub blocks_per_file_account_change_sets: Option, + /// Number of blocks per file for the storage changesets segment. + #[arg(long = "static-files.blocks-per-file.storage-change-sets")] + pub blocks_per_file_storage_change_sets: Option, + /// Store receipts in static files instead of the database. /// /// When enabled, receipts will be written to static files on disk instead of the database. @@ -68,6 +73,16 @@ pub struct StaticFilesArgs { /// the node has been initialized, changing this flag requires re-syncing from scratch. #[arg(long = "static-files.account-change-sets", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)] pub account_changesets: bool, + + /// Store storage changesets in static files. + /// + /// When enabled, storage changesets will be written to static files on disk instead of the + /// database. + /// + /// Note: This setting can only be configured at genesis initialization. Once + /// the node has been initialized, changing this flag requires re-syncing from scratch. 
+ #[arg(long = "static-files.storage-change-sets", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)] + pub storage_changesets: bool, } impl StaticFilesArgs { @@ -98,9 +113,25 @@ impl StaticFilesArgs { account_change_sets: self .blocks_per_file_account_change_sets .or(config.blocks_per_file.account_change_sets), + storage_change_sets: self + .blocks_per_file_storage_change_sets + .or(config.blocks_per_file.storage_change_sets), }, } } + + /// Converts the static files arguments into [`StorageSettings`]. + pub const fn to_settings(&self) -> StorageSettings { + #[cfg(feature = "edge")] + let base = StorageSettings::edge(); + #[cfg(not(feature = "edge"))] + let base = StorageSettings::legacy(); + + base.with_receipts_in_static_files(self.receipts) + .with_transaction_senders_in_static_files(self.transaction_senders) + .with_account_changesets_in_static_files(self.account_changesets) + .with_storage_changesets_in_static_files(self.storage_changesets) + } } impl Default for StaticFilesArgs { @@ -111,9 +142,11 @@ impl Default for StaticFilesArgs { blocks_per_file_receipts: None, blocks_per_file_transaction_senders: None, blocks_per_file_account_change_sets: None, + blocks_per_file_storage_change_sets: None, receipts: default_static_file_flag(), transaction_senders: default_static_file_flag(), account_changesets: default_static_file_flag(), + storage_changesets: default_static_file_flag(), } } } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 98502fdd115..225c957c1cf 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -363,6 +363,7 @@ impl NodeConfig { .with_receipts_in_static_files(self.static_files.receipts) .with_transaction_senders_in_static_files(self.static_files.transaction_senders) .with_account_changesets_in_static_files(self.static_files.account_changesets) + .with_storage_changesets_in_static_files(self.static_files.storage_changesets) 
.with_transaction_hash_numbers_in_rocksdb(self.rocksdb.all || self.rocksdb.tx_hash) .with_storages_history_in_rocksdb(self.rocksdb.all || self.rocksdb.storages_history) .with_account_history_in_rocksdb(self.rocksdb.all || self.rocksdb.account_history) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index e37dbaa4411..36f2f6ede6b 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -1,4 +1,4 @@ -use super::collect_history_indices; +use super::{collect_history_indices, collect_storage_history_indices}; use crate::{stages::utils::load_storage_history, StageCheckpoint, StageId}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db_api::{ @@ -8,7 +8,8 @@ use reth_db_api::{ }; use reth_provider::{ DBProvider, EitherWriter, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, - RocksDBProviderFactory, StorageSettingsCache, + RocksDBProviderFactory, StaticFileProviderFactory, StorageChangeSetReader, + StorageSettingsCache, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; @@ -54,6 +55,8 @@ where + PruneCheckpointWriter + StorageSettingsCache + RocksDBProviderFactory + + StorageChangeSetReader + + StaticFileProviderFactory + reth_provider::NodePrimitivesProvider, { /// Return the id of the stage @@ -121,7 +124,9 @@ where } info!(target: "sync::stages::index_storage_history::exec", ?first_sync, ?use_rocksdb, "Collecting indices"); - let collector = + let collector = if provider.cached_storage_settings().storage_changesets_in_static_files { + collect_storage_history_indices(provider, range.clone(), &self.etl_config)? 
+ } else { collect_history_indices::<_, tables::StorageChangeSets, tables::StoragesHistory, _>( provider, BlockNumberAddress::range(range.clone()), @@ -130,7 +135,8 @@ where }, |(key, value)| (key.block_number(), AddressStorageKey((key.address(), value.key))), &self.etl_config, - )?; + )? + }; info!(target: "sync::stages::index_storage_history::exec", "Loading indices into database"); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index c7ab0f7f012..cd9d5ebd43c 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -9,7 +9,7 @@ use reth_db_api::{ use reth_primitives_traits::{GotExpected, SealedHeader}; use reth_provider::{ ChangeSetReader, DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, - StageCheckpointWriter, StatsReader, TrieWriter, + StageCheckpointWriter, StatsReader, StorageChangeSetReader, TrieWriter, }; use reth_stages_api::{ BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, @@ -159,6 +159,7 @@ where + StatsReader + HeaderProvider + ChangeSetReader + + StorageChangeSetReader + StageCheckpointReader + StageCheckpointWriter, { diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs index e81f8f18564..c4345fedb99 100644 --- a/crates/stages/stages/src/stages/merkle_changesets.rs +++ b/crates/stages/stages/src/stages/merkle_changesets.rs @@ -6,7 +6,7 @@ use reth_primitives_traits::{GotExpected, SealedHeader}; use reth_provider::{ BlockNumReader, ChainStateBlockReader, ChangeSetReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader, - StageCheckpointWriter, TrieWriter, + StageCheckpointWriter, StorageChangeSetReader, TrieWriter, }; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneSegment, MERKLE_CHANGESETS_RETENTION_BLOCKS, @@ -167,7 +167,8 @@ impl MerkleChangeSets { + 
HeaderProvider + ChainStateBlockReader + BlockNumReader - + ChangeSetReader, + + ChangeSetReader + + StorageChangeSetReader, { let target_start = target_range.start; let target_end = target_range.end; @@ -308,6 +309,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, { fn id(&self) -> StageId { diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index c5a8dee347c..cd447ba9b43 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -5,7 +5,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, models::{ sharded_key::NUM_OF_INDICES_IN_SHARD, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, ShardedKey, + AccountBeforeTx, AddressStorageKey, BlockNumberAddress, ShardedKey, }, table::{Decode, Decompress, Table}, transaction::DbTx, @@ -19,7 +19,7 @@ use reth_provider::{ }; use reth_stages_api::StageError; use reth_static_file_types::StaticFileSegment; -use reth_storage_api::ChangeSetReader; +use reth_storage_api::{ChangeSetReader, StorageChangeSetReader}; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; @@ -102,15 +102,15 @@ where } /// Allows collecting indices from a cache with a custom insert fn -fn collect_indices( - cache: impl Iterator)>, +fn collect_indices( + cache: impl Iterator)>, mut insert_fn: F, ) -> Result<(), StageError> where - F: FnMut(Address, Vec) -> Result<(), StageError>, + F: FnMut(K, Vec) -> Result<(), StageError>, { - for (address, indices) in cache { - insert_fn(address, indices)? + for (key, indices) in cache { + insert_fn(key, indices)? } Ok(()) } @@ -174,6 +174,62 @@ where Ok(collector) } +/// Collects storage history indices using a provider that implements `StorageChangeSetReader`. 
+pub(crate) fn collect_storage_history_indices( + provider: &Provider, + range: impl RangeBounds, + etl_config: &EtlConfig, +) -> Result, StageError> +where + Provider: DBProvider + StorageChangeSetReader + StaticFileProviderFactory, +{ + let mut collector = Collector::new(etl_config.file_size, etl_config.dir.clone()); + let mut cache: HashMap> = HashMap::default(); + + let mut insert_fn = |key: AddressStorageKey, indices: Vec| { + let last = indices.last().expect("qed"); + collector.insert( + StorageShardedKey::new(key.0 .0, key.0 .1, *last), + BlockNumberList::new_pre_sorted(indices.into_iter()), + )?; + Ok::<(), StageError>(()) + }; + + let range = to_range(range); + let static_file_provider = provider.static_file_provider(); + + let total_changesets = static_file_provider.storage_changeset_count()?; + let interval = (total_changesets / 1000).max(1); + + let walker = static_file_provider.walk_storage_changeset_range(range); + + let mut flush_counter = 0; + let mut current_block_number = u64::MAX; + + for (idx, changeset_result) in walker.enumerate() { + let (BlockNumberAddress((block_number, address)), storage) = changeset_result?; + cache.entry(AddressStorageKey((address, storage.key))).or_default().push(block_number); + + if idx > 0 && idx % interval == 0 && total_changesets > 1000 { + info!(target: "sync::stages::index_history", progress = %format!("{:.4}%", (idx as f64 / total_changesets as f64) * 100.0), "Collecting indices"); + } + + if block_number != current_block_number { + current_block_number = block_number; + flush_counter += 1; + } + + if flush_counter > DEFAULT_CACHE_THRESHOLD { + collect_indices(cache.drain(), &mut insert_fn)?; + flush_counter = 0; + } + } + + collect_indices(cache.into_iter(), insert_fn)?; + + Ok(collector) +} + /// Loads account history indices into the database via `EitherWriter`. /// /// Works with [`EitherWriter`] to support both MDBX and `RocksDB` backends. 
diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index 9a3e5e35c89..791172be70c 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -55,6 +55,11 @@ pub enum StaticFileSegment { /// * address 0xbb, account info /// * address 0xcc, account info AccountChangeSets, + /// Static File segment responsible for the `StorageChangeSets` table. + /// + /// Storage changeset static files append block-by-block changesets sorted by address and + /// storage slot. + StorageChangeSets, } impl StaticFileSegment { @@ -71,6 +76,7 @@ impl StaticFileSegment { Self::Receipts => "receipts", Self::TransactionSenders => "transaction-senders", Self::AccountChangeSets => "account-change-sets", + Self::StorageChangeSets => "storage-change-sets", } } @@ -83,6 +89,7 @@ impl StaticFileSegment { Self::Receipts, Self::TransactionSenders, Self::AccountChangeSets, + Self::StorageChangeSets, ] .into_iter() } @@ -99,7 +106,8 @@ impl StaticFileSegment { Self::Transactions | Self::Receipts | Self::TransactionSenders | - Self::AccountChangeSets => 1, + Self::AccountChangeSets | + Self::StorageChangeSets => 1, } } @@ -161,14 +169,14 @@ impl StaticFileSegment { pub const fn is_tx_based(&self) -> bool { match self { Self::Receipts | Self::Transactions | Self::TransactionSenders => true, - Self::Headers | Self::AccountChangeSets => false, + Self::Headers | Self::AccountChangeSets | Self::StorageChangeSets => false, } } - /// Returns `true` if the segment is [`StaticFileSegment::AccountChangeSets`] + /// Returns `true` if the segment is change-based. 
pub const fn is_change_based(&self) -> bool { match self { - Self::AccountChangeSets => true, + Self::AccountChangeSets | Self::StorageChangeSets => true, Self::Receipts | Self::Transactions | Self::Headers | Self::TransactionSenders => false, } } @@ -180,7 +188,8 @@ impl StaticFileSegment { Self::Receipts | Self::Transactions | Self::TransactionSenders | - Self::AccountChangeSets => false, + Self::AccountChangeSets | + Self::StorageChangeSets => false, } } @@ -259,10 +268,10 @@ impl<'de> Visitor<'de> for SegmentHeaderVisitor { let tx_range = seq.next_element()?.ok_or_else(|| serde::de::Error::invalid_length(2, &self))?; - let segment = + let segment: StaticFileSegment = seq.next_element()?.ok_or_else(|| serde::de::Error::invalid_length(3, &self))?; - let changeset_offsets = if segment == StaticFileSegment::AccountChangeSets { + let changeset_offsets = if segment.is_change_based() { // Try to read the 5th field (changeset_offsets) // If it doesn't exist (old format), this will return None match seq.next_element()? 
{ @@ -309,8 +318,8 @@ impl Serialize for SegmentHeader { where S: Serializer, { - // We serialize an extra field, the changeset offsets, for account changesets - let len = if self.segment.is_account_change_sets() { 5 } else { 4 }; + // We serialize an extra field, the changeset offsets, for change-based segments + let len = if self.segment.is_change_based() { 5 } else { 4 }; let mut state = serializer.serialize_struct("SegmentHeader", len)?; state.serialize_field("expected_block_range", &self.expected_block_range)?; @@ -318,7 +327,7 @@ impl Serialize for SegmentHeader { state.serialize_field("tx_range", &self.tx_range)?; state.serialize_field("segment", &self.segment)?; - if self.segment.is_account_change_sets() { + if self.segment.is_change_based() { state.serialize_field("changeset_offsets", &self.changeset_offsets)?; } @@ -672,6 +681,12 @@ mod tests { "static_file_account-change-sets_1123233_11223233", None, ), + ( + StaticFileSegment::StorageChangeSets, + 1_123_233..=11_223_233, + "static_file_storage-change-sets_1123233_11223233", + None, + ), ( StaticFileSegment::Headers, 2..=30, @@ -755,6 +770,13 @@ mod tests { segment: StaticFileSegment::AccountChangeSets, changeset_offsets: Some(vec![ChangesetOffset { offset: 1, num_changes: 1 }; 100]), }, + SegmentHeader { + expected_block_range: SegmentRangeInclusive::new(0, 200), + block_range: Some(SegmentRangeInclusive::new(0, 100)), + tx_range: None, + segment: StaticFileSegment::StorageChangeSets, + changeset_offsets: Some(vec![ChangesetOffset { offset: 1, num_changes: 1 }; 100]), + }, ]; // Check that we test all segments assert_eq!( @@ -788,6 +810,7 @@ mod tests { StaticFileSegment::Receipts => "receipts", StaticFileSegment::TransactionSenders => "transaction-senders", StaticFileSegment::AccountChangeSets => "account-change-sets", + StaticFileSegment::StorageChangeSets => "storage-change-sets", }; assert_eq!(static_str, expected_str); } @@ -806,6 +829,7 @@ mod tests { StaticFileSegment::Receipts => "Receipts", 
StaticFileSegment::TransactionSenders => "TransactionSenders", StaticFileSegment::AccountChangeSets => "AccountChangeSets", + StaticFileSegment::StorageChangeSets => "StorageChangeSets", }; assert_eq!(ser, format!("\"{expected_str}\"")); } diff --git a/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__StorageChangeSets.snap b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__StorageChangeSets.snap new file mode 100644 index 00000000000..c1b94903bda --- /dev/null +++ b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__StorageChangeSets.snap @@ -0,0 +1,5 @@ +--- +source: crates/static-file/types/src/segment.rs +expression: "Bytes::from(serialized)" +--- +0x01000000000000000000000000000000c80000000000000001000000000000000064000000000000000005000000016400000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100
00000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100
00000000000001000000000000000100000000000000010000000000000001000000000000000000000000000000000000000000000000 diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs index a12e9b6dab6..b17dccabfba 100644 --- a/crates/storage/db-api/src/models/metadata.rs +++ b/crates/storage/db-api/src/models/metadata.rs @@ -31,6 +31,9 @@ pub struct StorageSettings { /// Whether this node should read and write account changesets from static files. #[serde(default)] pub account_changesets_in_static_files: bool, + /// Whether this node should read and write storage changesets from static files. + #[serde(default)] + pub storage_changesets_in_static_files: bool, } impl StorageSettings { @@ -59,6 +62,7 @@ impl StorageSettings { receipts_in_static_files: true, transaction_senders_in_static_files: true, account_changesets_in_static_files: true, + storage_changesets_in_static_files: true, storages_history_in_rocksdb: false, transaction_hash_numbers_in_rocksdb: true, account_history_in_rocksdb: false, @@ -78,6 +82,7 @@ impl StorageSettings { transaction_hash_numbers_in_rocksdb: false, account_history_in_rocksdb: false, account_changesets_in_static_files: false, + storage_changesets_in_static_files: false, } } @@ -117,6 +122,12 @@ impl StorageSettings { self } + /// Sets the `storage_changesets_in_static_files` flag to the provided value. + pub const fn with_storage_changesets_in_static_files(mut self, value: bool) -> Self { + self.storage_changesets_in_static_files = value; + self + } + /// Returns `true` if any tables are configured to be stored in `RocksDB`. 
pub const fn any_in_rocksdb(&self) -> bool { self.transaction_hash_numbers_in_rocksdb || diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 8d2e31e875e..67bc3ea0d14 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -29,8 +29,8 @@ pub use blocks::*; pub use integer_list::IntegerList; pub use metadata::*; pub use reth_db_models::{ - AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices, - StoredBlockWithdrawals, + AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StorageBeforeTx, + StoredBlockBodyIndices, StoredBlockWithdrawals, }; pub use sharded_key::ShardedKey; @@ -230,6 +230,7 @@ impl_compression_for_compact!( StaticFileBlockWithdrawals, Bytecode, AccountBeforeTx, + StorageBeforeTx, TransactionSigned, CompactU256, StageCheckpoint, diff --git a/crates/storage/db-models/src/lib.rs b/crates/storage/db-models/src/lib.rs index db1c99b5e16..49bcfe7a3bc 100644 --- a/crates/storage/db-models/src/lib.rs +++ b/crates/storage/db-models/src/lib.rs @@ -19,6 +19,10 @@ pub use accounts::AccountBeforeTx; pub mod blocks; pub use blocks::{StaticFileBlockWithdrawals, StoredBlockBodyIndices, StoredBlockWithdrawals}; +/// Storage +pub mod storage; +pub use storage::StorageBeforeTx; + /// Client Version pub mod client_version; pub use client_version::ClientVersion; diff --git a/crates/storage/db-models/src/storage.rs b/crates/storage/db-models/src/storage.rs new file mode 100644 index 00000000000..4de05901aaa --- /dev/null +++ b/crates/storage/db-models/src/storage.rs @@ -0,0 +1,48 @@ +use alloy_primitives::{Address, B256, U256}; +use reth_primitives_traits::ValueWithSubKey; + +/// Storage entry as it is saved in the static files. +/// +/// [`B256`] is the subkey. 
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +pub struct StorageBeforeTx { + /// Address for the storage entry. Acts as `DupSort::SubKey` in static files. + pub address: Address, + /// Storage key. + pub key: B256, + /// Value on storage key. + pub value: U256, +} + +impl ValueWithSubKey for StorageBeforeTx { + type SubKey = B256; + + fn get_subkey(&self) -> Self::SubKey { + self.key + } +} + +// NOTE: Removing reth_codec and manually encode subkey +// and compress second part of the value. If we have compression +// over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageBeforeTx { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + buf.put_slice(self.address.as_slice()); + buf.put_slice(&self.key[..]); + self.value.to_compact(buf) + 52 + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let address = Address::from_slice(&buf[..20]); + let key = B256::from_slice(&buf[20..52]); + let (value, out) = U256::from_compact(&buf[52..], len - 52); + (Self { address, key, value }, out) + } +} diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index 4b0eae7f7ae..d6a01283dd7 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -4,7 +4,7 @@ use crate::{ HeaderTerminalDifficulties, }; use alloy_primitives::{Address, BlockHash}; -use reth_db_api::{table::Table, AccountChangeSets}; +use reth_db_api::{models::StorageBeforeTx, table::Table, AccountChangeSets}; // HEADER MASKS add_static_file_mask! { @@ -54,3 +54,9 @@ add_static_file_mask! 
{ #[doc = "Mask for selecting a single changeset from `AccountChangesets` static file segment"] AccountChangesetMask, ::Value, 0b1 } + +// STORAGE CHANGESET MASKS +add_static_file_mask! { + #[doc = "Mask for selecting a single changeset from `StorageChangesets` static file segment"] + StorageChangesetMask, StorageBeforeTx, 0b1 +} diff --git a/crates/storage/provider/src/changeset_walker.rs b/crates/storage/provider/src/changeset_walker.rs index f31eed8e8a5..5eb521e3a75 100644 --- a/crates/storage/provider/src/changeset_walker.rs +++ b/crates/storage/provider/src/changeset_walker.rs @@ -1,10 +1,12 @@ -//! Account changeset iteration support for walking through historical account state changes in +//! Account/storage changeset iteration support for walking through historical state changes in //! static files. use crate::ProviderResult; use alloy_primitives::BlockNumber; use reth_db::models::AccountBeforeTx; -use reth_storage_api::ChangeSetReader; +use reth_db_api::models::BlockNumberAddress; +use reth_primitives_traits::StorageEntry; +use reth_storage_api::{ChangeSetReader, StorageChangeSetReader}; use std::ops::{Bound, RangeBounds}; /// Iterator that walks account changesets from static files in a block range. @@ -97,3 +99,78 @@ where None } } + +/// Iterator that walks storage changesets from static files in a block range. +#[derive(Debug)] +pub struct StaticFileStorageChangesetWalker

{ + /// Static file provider + provider: P, + /// End block (exclusive). `None` means iterate until exhausted. + end_block: Option, + /// Current block being processed + current_block: BlockNumber, + /// Changesets for current block + current_changesets: Vec<(BlockNumberAddress, StorageEntry)>, + /// Index within current block's changesets + changeset_index: usize, +} + +impl

StaticFileStorageChangesetWalker

{ + /// Create a new static file storage changeset walker. + pub fn new(provider: P, range: impl RangeBounds) -> Self { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + 1, + Bound::Unbounded => 0, + }; + + let end_block = match range.end_bound() { + Bound::Included(&n) => Some(n + 1), + Bound::Excluded(&n) => Some(n), + Bound::Unbounded => None, + }; + + Self { + provider, + end_block, + current_block: start, + current_changesets: Vec::new(), + changeset_index: 0, + } + } +} + +impl

Iterator for StaticFileStorageChangesetWalker

+where + P: StorageChangeSetReader, +{ + type Item = ProviderResult<(BlockNumberAddress, StorageEntry)>; + + fn next(&mut self) -> Option { + if let Some(changeset) = self.current_changesets.get(self.changeset_index).copied() { + self.changeset_index += 1; + return Some(Ok(changeset)); + } + + if !self.current_changesets.is_empty() { + self.current_block += 1; + } + + while self.end_block.is_none_or(|end| self.current_block < end) { + match self.provider.storage_changeset(self.current_block) { + Ok(changesets) if !changesets.is_empty() => { + self.current_changesets = changesets; + self.changeset_index = 1; + return Some(Ok(self.current_changesets[0])); + } + Ok(_) => self.current_block += 1, + Err(e) => { + self.current_block += 1; + return Some(Err(e)); + } + } + } + + None + } +} diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs index c6ba79d0311..efa1032420a 100644 --- a/crates/storage/provider/src/either_writer.rs +++ b/crates/storage/provider/src/either_writer.rs @@ -17,20 +17,20 @@ use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber, B25 use rayon::slice::ParallelSliceMut; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRW}, - models::AccountBeforeTx, + models::{AccountBeforeTx, StorageBeforeTx}, static_file::TransactionSenderMask, table::Value, transaction::{CursorMutTy, CursorTy, DbTx, DbTxMut, DupCursorMutTy, DupCursorTy}, }; use reth_db_api::{ cursor::DbCursorRW, - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress, ShardedKey}, tables, tables::BlockNumberList, }; use reth_errors::ProviderError; use reth_node_types::NodePrimitives; -use reth_primitives_traits::ReceiptTy; +use reth_primitives_traits::{ReceiptTy, StorageEntry}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ChangeSetReader, DBProvider, NodePrimitivesProvider, StorageSettingsCache}; use 
reth_storage_errors::provider::ProviderResult; @@ -171,6 +171,27 @@ impl<'a> EitherWriter<'a, (), ()> { } } + /// Creates a new [`EitherWriter`] for storage changesets based on storage settings. + pub fn new_storage_changesets

( + provider: &'a P, + block_number: BlockNumber, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory, + P::Tx: DbTxMut, + { + if provider.cached_storage_settings().storage_changesets_in_static_files { + Ok(EitherWriter::StaticFile( + provider + .get_static_file_writer(block_number, StaticFileSegment::StorageChangeSets)?, + )) + } else { + Ok(EitherWriter::Database( + provider.tx_ref().cursor_dup_write::()?, + )) + } + } + /// Returns the destination for writing receipts. /// /// The rules are as follows: @@ -208,6 +229,19 @@ impl<'a> EitherWriter<'a, (), ()> { } } + /// Returns the destination for writing storage changesets. + /// + /// This determines the destination based solely on storage settings. + pub fn storage_changesets_destination( + provider: &P, + ) -> EitherWriterDestination { + if provider.cached_storage_settings().storage_changesets_in_static_files { + EitherWriterDestination::StaticFile + } else { + EitherWriterDestination::Database + } + } + /// Creates a new [`EitherWriter`] for storages history based on storage settings. pub fn new_storages_history

( provider: &P, @@ -651,6 +685,41 @@ where } } +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + CURSOR: DbDupCursorRW, +{ + /// Append storage changeset for a block. + /// + /// NOTE: This _sorts_ the changesets by address and storage key before appending. + pub fn append_storage_changeset( + &mut self, + block_number: BlockNumber, + mut changeset: Vec, + ) -> ProviderResult<()> { + changeset.par_sort_by_key(|change| (change.address, change.key)); + + match self { + Self::Database(cursor) => { + for change in changeset { + let storage_id = BlockNumberAddress((block_number, change.address)); + cursor.append_dup( + storage_id, + StorageEntry { key: change.key, value: change.value }, + )?; + } + } + Self::StaticFile(writer) => { + writer.append_storage_changeset(changeset, block_number)?; + } + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => return Err(ProviderError::UnsupportedProvider), + } + + Ok(()) + } +} + /// Represents a source for reading data, either from database, static files, or `RocksDB`. #[derive(Debug, Display)] pub enum EitherReader<'a, CURSOR, N> { @@ -987,6 +1056,19 @@ impl EitherWriterDestination { Self::Database } } + + /// Returns the destination for writing storage changesets based on storage settings. + pub fn storage_changesets

(provider: &P) -> Self + where + P: StorageSettingsCache, + { + // Write storage changesets to static files only if they're explicitly enabled + if provider.cached_storage_settings().storage_changesets_in_static_files { + Self::StaticFile + } else { + Self::Database + } + } } #[cfg(test)] diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index fecd87a0e8e..005b94915b4 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -711,6 +711,26 @@ impl StorageChangeSetReader for BlockchainProvider { ) -> ProviderResult> { self.consistent_provider()?.storage_changeset(block_number) } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + self.consistent_provider()?.get_storage_before_block(block_number, address, storage_key) + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.consistent_provider()?.storage_changesets_range(range) + } + + fn storage_changeset_count(&self) -> ProviderResult { + self.consistent_provider()?.storage_changeset_count() + } } impl ChangeSetReader for BlockchainProvider { diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index e0c503eae01..7fadea95dac 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1347,6 +1347,138 @@ impl StorageChangeSetReader for ConsistentProvider { self.storage_provider.storage_changeset(block_number) } } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changeset = state + 
.block_ref() + .execution_output + .state + .reverts + .clone() + .to_plain_state_reverts() + .storage + .into_iter() + .flatten() + .find_map(|revert: PlainStorageRevert| { + if revert.address != address { + return None + } + revert.storage_revert.into_iter().find_map(|(key, value)| { + let key = key.into(); + (key == storage_key) + .then(|| StorageEntry { key, value: value.to_previous_value() }) + }) + }); + Ok(changeset) + } else { + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? + .and_then(|checkpoint| { + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.get_storage_before_block(block_number, address, storage_key) + } + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + let range = to_range(range); + let mut changesets = Vec::new(); + let database_start = range.start; + let mut database_end = range.end; + + if let Some(head_block) = &self.head_block { + database_end = head_block.anchor().number; + + let chain = head_block.chain().collect::>(); + for state in chain { + let block_changesets = state + .block_ref() + .execution_output + .state + .reverts + .clone() + .to_plain_state_reverts() + .storage + .into_iter() + .flatten() + .flat_map(|revert: PlainStorageRevert| { + revert.storage_revert.into_iter().map(move |(key, value)| { + ( + BlockNumberAddress((state.number(), revert.address)), + StorageEntry { key: key.into(), value: value.to_previous_value() }, + ) + }) + }); + + changesets.extend(block_changesets); + } + } + + if database_start < database_end { + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? 
+ .and_then(|checkpoint| { + checkpoint.block_number.map(|checkpoint| database_start > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(database_start)) + } + + let db_changesets = self + .storage_provider + .storage_changesets_range(database_start..=database_end - 1)?; + changesets.extend(db_changesets); + } + + changesets.sort_by_key(|(block_address, _)| block_address.block_number()); + + Ok(changesets) + } + + fn storage_changeset_count(&self) -> ProviderResult { + let mut count = 0; + if let Some(head_block) = &self.head_block { + for state in head_block.chain() { + count += state + .block_ref() + .execution_output + .state + .reverts + .clone() + .to_plain_state_reverts() + .storage + .into_iter() + .flatten() + .map(|revert: PlainStorageRevert| revert.storage_revert.len()) + .sum::(); + } + } + + count += self.storage_provider.storage_changeset_count()?; + + Ok(count) + } } impl ChangeSetReader for ConsistentProvider { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e9e4e82c68c..39f1e35473d 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -40,7 +40,8 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - BlockNumberHashedAddress, ShardedKey, StorageSettings, StoredBlockBodyIndices, + BlockNumberHashedAddress, ShardedKey, StorageBeforeTx, StorageSettings, + StoredBlockBodyIndices, }, table::Table, tables, @@ -463,6 +464,8 @@ impl DatabaseProvider StorageChangeSetReader for DatabaseProvider &self, block_number: BlockNumber, ) -> ProviderResult> { - let range = block_number..=block_number; - let storage_range = BlockNumberAddress::range(range); - self.tx - .cursor_dup_read::()? - .walk_range(storage_range)? 
- .map(|result| -> ProviderResult<_> { Ok(result?) }) - .collect() + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.storage_changeset(block_number) + } else { + let range = block_number..=block_number; + let storage_range = BlockNumberAddress::range(range); + self.tx + .cursor_dup_read::()? + .walk_range(storage_range)? + .map(|result| -> ProviderResult<_> { Ok(result?) }) + .collect() + } + } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.get_storage_before_block(block_number, address, storage_key) + } else { + self.tx + .cursor_dup_read::()? + .seek_by_key_subkey(BlockNumberAddress((block_number, address)), storage_key)? + .filter(|entry| entry.key == storage_key) + .map(Ok) + .transpose() + } + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.storage_changesets_range(range) + } else { + self.tx + .cursor_dup_read::()? + .walk_range(BlockNumberAddress::range(range))? + .map(|result| -> ProviderResult<_> { Ok(result?) }) + .collect() + } + } + + fn storage_changeset_count(&self) -> ProviderResult { + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.storage_changeset_count() + } else { + Ok(self.tx.entries::()?) + } } } @@ -2072,38 +2120,67 @@ impl StorageReader for DatabaseProvider &self, range: RangeInclusive, ) -> ProviderResult>> { - self.tx - .cursor_read::()? - .walk_range(BlockNumberAddress::range(range))? - // fold all storages and save its old state so we can remove it from HashedStorage - // it is needed as it is dup table. 
- .try_fold(BTreeMap::new(), |mut accounts: BTreeMap>, entry| { - let (BlockNumberAddress((_, address)), storage_entry) = entry?; - accounts.entry(address).or_default().insert(storage_entry.key); - Ok(accounts) - }) + if self.cached_storage_settings().storage_changesets_in_static_files { + self.storage_changesets_range(range)?.into_iter().try_fold( + BTreeMap::new(), + |mut accounts: BTreeMap>, entry| { + let (BlockNumberAddress((_, address)), storage_entry) = entry; + accounts.entry(address).or_default().insert(storage_entry.key); + Ok(accounts) + }, + ) + } else { + self.tx + .cursor_read::()? + .walk_range(BlockNumberAddress::range(range))? + // fold all storages and save its old state so we can remove it from HashedStorage + // it is needed as it is dup table. + .try_fold( + BTreeMap::new(), + |mut accounts: BTreeMap>, entry| { + let (BlockNumberAddress((_, address)), storage_entry) = entry?; + accounts.entry(address).or_default().insert(storage_entry.key); + Ok(accounts) + }, + ) + } } fn changed_storages_and_blocks_with_range( &self, range: RangeInclusive, ) -> ProviderResult>> { - let mut changeset_cursor = self.tx.cursor_read::()?; - - let storage_changeset_lists = - changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold( + if self.cached_storage_settings().storage_changesets_in_static_files { + self.storage_changesets_range(range)?.into_iter().try_fold( BTreeMap::new(), - |mut storages: BTreeMap<(Address, B256), Vec>, entry| -> ProviderResult<_> { - let (index, storage) = entry?; + |mut storages: BTreeMap<(Address, B256), Vec>, (index, storage)| { storages .entry((index.address(), storage.key)) .or_default() .push(index.block_number()); Ok(storages) }, - )?; + ) + } else { + let mut changeset_cursor = self.tx.cursor_read::()?; + + let storage_changeset_lists = + changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold( + BTreeMap::new(), + |mut storages: BTreeMap<(Address, B256), Vec>, + entry| + -> ProviderResult<_> { + let 
(index, storage) = entry?; + storages + .entry((index.address(), storage.key)) + .or_default() + .push(index.block_number()); + Ok(storages) + }, + )?; - Ok(storage_changeset_lists) + Ok(storage_changeset_lists) + } } } @@ -2226,17 +2303,16 @@ impl StateWriter // Write storage changes tracing::trace!("Writing storage changes"); let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; - let mut storage_changeset_cursor = - self.tx_ref().cursor_dup_write::()?; for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; tracing::trace!(block_number, "Writing block change"); // sort changes by address. storage_changes.par_sort_unstable_by_key(|a| a.address); + let total_changes = + storage_changes.iter().map(|change| change.storage_revert.len()).sum(); + let mut changeset = Vec::with_capacity(total_changes); for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { - let storage_id = BlockNumberAddress((block_number, address)); - let mut storage = storage_revert .into_iter() .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) @@ -2264,9 +2340,13 @@ impl StateWriter tracing::trace!(?address, ?storage, "Writing storage reverts"); for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { - storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?; + changeset.push(StorageBeforeTx { address, key, value }); } } + + let mut storage_changesets_writer = + EitherWriter::new_storage_changesets(self, block_number)?; + storage_changesets_writer.append_storage_changeset(block_number, changeset)?; } if !config.write_account_changesets { @@ -2427,8 +2507,19 @@ impl StateWriter block_bodies.first().expect("already checked if there are blocks").first_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.take::(storage_range)?; + let storage_changeset = if let Some(_highest_block) = self + 
.static_file_provider + .get_highest_static_file_block(StaticFileSegment::StorageChangeSets) && + self.cached_storage_settings().storage_changesets_in_static_files + { + let changesets = self.storage_changesets_range(range.clone())?; + let mut changeset_writer = + self.static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?; + changeset_writer.prune_storage_changesets(block)?; + changesets + } else { + self.take::(storage_range)? + }; let account_changeset = self.take::(range)?; // This is not working for blocks that are not at tip. as plain state is not the last @@ -2523,8 +2614,19 @@ impl StateWriter block_bodies.last().expect("already checked if there are blocks").last_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.take::(storage_range)?; + let storage_changeset = if let Some(highest_block) = self + .static_file_provider + .get_highest_static_file_block(StaticFileSegment::StorageChangeSets) && + self.cached_storage_settings().storage_changesets_in_static_files + { + let changesets = self.storage_changesets_range(block + 1..=highest_block)?; + let mut changeset_writer = + self.static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?; + changeset_writer.prune_storage_changesets(block)?; + changesets + } else { + self.take::(storage_range)? + }; // This is not working for blocks that are not at tip. as plain state is not the last // state of end range. 
We should rename the functions or add support to access diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 359fcdd7aec..3d42feedb08 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -14,7 +14,7 @@ use reth_db_api::{ use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ BlockNumReader, BytecodeReader, DBProvider, NodePrimitivesProvider, StateProofProvider, - StorageRootProvider, StorageSettingsCache, + StorageChangeSetReader, StorageRootProvider, StorageSettingsCache, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -26,8 +26,8 @@ use reth_trie::{ TrieInputSorted, }; use reth_trie_db::{ - DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, - DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, + hashed_storage_from_reverts_with_provider, DatabaseHashedPostState, DatabaseProof, + DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, }; use std::fmt::Debug; @@ -109,7 +109,7 @@ pub struct HistoricalStateProviderRef<'b, Provider> { lowest_available_blocks: LowestAvailableBlocks, } -impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> +impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader> HistoricalStateProviderRef<'b, Provider> { /// Create new `StateProvider` for historical block number @@ -210,7 +210,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> ); } - Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?) + hashed_storage_from_reverts_with_provider(self.provider, address, self.block_number) } /// Set the lowest block number at which the account history is available. 
@@ -242,6 +242,7 @@ impl< Provider: DBProvider + BlockNumReader + ChangeSetReader + + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, @@ -285,8 +286,8 @@ impl BlockHashReader } } -impl StateRootProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StateRootProvider for HistoricalStateProviderRef<'_, Provider> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; @@ -322,8 +323,8 @@ impl StateRootProvider } } -impl StorageRootProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StorageRootProvider for HistoricalStateProviderRef<'_, Provider> { fn storage_root( &self, @@ -361,8 +362,8 @@ impl StorageRootProvide } } -impl StateProofProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StateProofProvider for HistoricalStateProviderRef<'_, Provider> { /// Get account and storage proofs. fn proof( @@ -405,6 +406,7 @@ impl< + BlockNumReader + BlockHashReader + ChangeSetReader + + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, @@ -418,18 +420,16 @@ impl< ) -> ProviderResult> { match self.storage_history_lookup(address, storage_key)? { HistoryInfo::NotYetWritten => Ok(None), - HistoryInfo::InChangeset(changeset_block_number) => Ok(Some( - self.tx() - .cursor_dup_read::()? - .seek_by_key_subkey((changeset_block_number, address).into(), storage_key)? - .filter(|entry| entry.key == storage_key) - .ok_or_else(|| ProviderError::StorageChangesetNotFound { - block_number: changeset_block_number, - address, - storage_key: Box::new(storage_key), - })? - .value, - )), + HistoryInfo::InChangeset(changeset_block_number) => self + .provider + .get_storage_before_block(changeset_block_number, address, storage_key)? 
+ .ok_or_else(|| ProviderError::StorageChangesetNotFound { + block_number: changeset_block_number, + address, + storage_key: Box::new(storage_key), + }) + .map(|entry| entry.value) + .map(Some), HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self .tx() .cursor_dup_read::()? @@ -462,7 +462,9 @@ pub struct HistoricalStateProvider { lowest_available_blocks: LowestAvailableBlocks, } -impl HistoricalStateProvider { +impl + HistoricalStateProvider +{ /// Create new `StateProvider` for historical block number pub fn new(provider: Provider, block_number: BlockNumber) -> Self { Self { provider, block_number, lowest_available_blocks: Default::default() } @@ -498,7 +500,7 @@ impl HistoricalStatePro } // Delegates all provider impls to [HistoricalStateProviderRef] -reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider]); +reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
@@ -631,7 +633,7 @@ mod tests { use reth_primitives_traits::{Account, StorageEntry}; use reth_storage_api::{ BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, - NodePrimitivesProvider, StorageSettingsCache, + NodePrimitivesProvider, StorageChangeSetReader, StorageSettingsCache, }; use reth_storage_errors::provider::ProviderError; @@ -647,6 +649,7 @@ mod tests { + BlockNumReader + BlockHashReader + ChangeSetReader + + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 97baab150e5..8f7919f7f3b 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -10,6 +10,7 @@ use reth_stages_types::StageId; use reth_storage_api::{ BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, PruneCheckpointReader, StageCheckpointReader, + StorageChangeSetReader, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -196,6 +197,7 @@ where F::Provider: StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + DBProvider + BlockNumReader, { @@ -446,7 +448,11 @@ where impl DatabaseProviderROFactory for OverlayStateProviderFactory where F: DatabaseProviderFactory, - F::Provider: StageCheckpointReader + PruneCheckpointReader + BlockNumReader + ChangeSetReader, + F::Provider: StageCheckpointReader + + PruneCheckpointReader + + BlockNumReader + + ChangeSetReader + + StorageChangeSetReader, { type Provider = OverlayStateProvider; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 8c7b5fb50a0..dff1b6d303d 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ 
b/crates/storage/provider/src/providers/static_file/manager.rs @@ -3,10 +3,10 @@ use super::{ StaticFileJarProvider, StaticFileProviderRW, StaticFileProviderRWRefMut, }; use crate::{ - changeset_walker::StaticFileAccountChangesetWalker, to_range, BlockHashReader, BlockNumReader, - BlockReader, BlockSource, EitherWriter, EitherWriterDestination, HeaderProvider, - ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, + changeset_walker::{StaticFileAccountChangesetWalker, StaticFileStorageChangesetWalker}, + to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, EitherWriter, + EitherWriterDestination, HeaderProvider, ReceiptProvider, StageCheckpointReader, StatsReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, }; use alloy_consensus::{transaction::TransactionMeta, Header}; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; @@ -20,12 +20,12 @@ use reth_db::{ lockfile::StorageLock, static_file::{ iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, - StaticFileCursor, TransactionMask, TransactionSenderMask, + StaticFileCursor, StorageChangesetMask, TransactionMask, TransactionSenderMask, }, }; use reth_db_api::{ cursor::DbCursorRO, - models::{AccountBeforeTx, StoredBlockBodyIndices}, + models::{AccountBeforeTx, BlockNumberAddress, StorageBeforeTx, StoredBlockBodyIndices}, table::{Decompress, Table, Value}, tables, transaction::DbTx, @@ -35,6 +35,7 @@ use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::NodePrimitives; use reth_primitives_traits::{ AlloyBlockHeader as _, BlockBody as _, RecoveredBlock, SealedHeader, SignedTransaction, + StorageEntry, }; use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ @@ -42,7 +43,8 @@ use reth_static_file_types::{ StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, }; use reth_storage_api::{ - BlockBodyIndicesProvider, 
ChangeSetReader, DBProvider, StorageSettingsCache, + BlockBodyIndicesProvider, ChangeSetReader, DBProvider, StorageChangeSetReader, + StorageSettingsCache, }; use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWriterError}; use std::{ @@ -92,6 +94,8 @@ pub struct StaticFileWriteCtx { pub write_receipts: bool, /// Whether account changesets should be written to static files. pub write_account_changesets: bool, + /// Whether storage changesets should be written to static files. + pub write_storage_changesets: bool, /// The current chain tip block number (for pruning). pub tip: BlockNumber, /// The prune mode for receipts, if any. @@ -622,6 +626,35 @@ impl StaticFileProvider { Ok(()) } + /// Writes storage changesets for all blocks to the static file segment. + #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_storage_changesets( + w: &mut StaticFileProviderRWRefMut<'_, N>, + blocks: &[ExecutedBlock], + ) -> ProviderResult<()> { + for block in blocks { + let block_number = block.recovered_block().number(); + let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts(); + + for storage_block_reverts in reverts.storage { + let changeset = storage_block_reverts + .into_iter() + .flat_map(|revert| { + revert.storage_revert.into_iter().map(move |(key, revert_to_slot)| { + StorageBeforeTx { + address: revert.address, + key: B256::new(key.to_be_bytes()), + value: revert_to_slot.to_previous_value(), + } + }) + }) + .collect::>(); + w.append_storage_changeset(changeset, block_number)?; + } + } + Ok(()) + } + /// Spawns a scoped thread that writes to a static file segment using the provided closure. /// /// The closure receives a mutable reference to the segment writer. 
After the closure completes, @@ -697,6 +730,15 @@ impl StaticFileProvider { ) }); + let h_storage_changesets = ctx.write_storage_changesets.then(|| { + self.spawn_segment_writer( + s, + StaticFileSegment::StorageChangeSets, + first_block_number, + |w| Self::write_storage_changesets(w, blocks), + ) + }); + h_headers.join().map_err(|_| StaticFileWriterError::ThreadPanic("headers"))??; h_txs.join().map_err(|_| StaticFileWriterError::ThreadPanic("transactions"))??; if let Some(h) = h_senders { @@ -709,6 +751,10 @@ impl StaticFileProvider { h.join() .map_err(|_| StaticFileWriterError::ThreadPanic("account_changesets"))??; } + if let Some(h) = h_storage_changesets { + h.join() + .map_err(|_| StaticFileWriterError::ThreadPanic("storage_changesets"))??; + } Ok(()) }) } @@ -1381,6 +1427,13 @@ impl StaticFileProvider { highest_tx, highest_block, )?, + StaticFileSegment::StorageChangeSets => self + .ensure_changeset_invariants_by_block::<_, tables::StorageChangeSets, _>( + provider, + segment, + highest_block, + |key| key.block_number(), + )?, } { debug!(target: "reth::providers::static_file", ?segment, unwind_target=unwind, "Invariants check returned unwind target"); update_unwind_target(unwind); @@ -1462,6 +1515,13 @@ impl StaticFileProvider { } true } + StaticFileSegment::StorageChangeSets => { + if EitherWriter::storage_changesets_destination(provider).is_database() { + debug!(target: "reth::providers::static_file", ?segment, "Skipping storage changesets segment: changesets stored in database"); + return false + } + true + } } } @@ -1594,9 +1654,9 @@ impl StaticFileProvider { let stage_id = match segment { StaticFileSegment::Headers => StageId::Headers, StaticFileSegment::Transactions => StageId::Bodies, - StaticFileSegment::Receipts | StaticFileSegment::AccountChangeSets => { - StageId::Execution - } + StaticFileSegment::Receipts | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => StageId::Execution, StaticFileSegment::TransactionSenders 
=> StageId::SenderRecovery, }; let checkpoint_block_number = @@ -1651,7 +1711,9 @@ impl StaticFileProvider { StaticFileSegment::TransactionSenders => { writer.prune_transaction_senders(number, checkpoint_block_number)? } - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { unreachable!() } } @@ -1662,6 +1724,9 @@ impl StaticFileProvider { StaticFileSegment::AccountChangeSets => { writer.prune_account_changesets(checkpoint_block_number)?; } + StaticFileSegment::StorageChangeSets => { + writer.prune_storage_changesets(checkpoint_block_number)?; + } } debug!(target: "reth::providers::static_file", ?segment, "Committing writer after pruning"); writer.commit()?; @@ -1672,6 +1737,105 @@ impl StaticFileProvider { Ok(None) } + fn ensure_changeset_invariants_by_block( + &self, + provider: &Provider, + segment: StaticFileSegment, + highest_static_file_block: Option, + block_from_key: F, + ) -> ProviderResult> + where + Provider: DBProvider + BlockReader + StageCheckpointReader, + T: Table, + F: Fn(&T::Key) -> BlockNumber, + { + debug!( + target: "reth::providers::static_file", + ?segment, + ?highest_static_file_block, + "Ensuring changeset invariants" + ); + let mut db_cursor = provider.tx_ref().cursor_read::()?; + + if let Some((db_first_key, _)) = db_cursor.first()? { + let db_first_block = block_from_key(&db_first_key); + if let Some(highest_block) = highest_static_file_block && + !(db_first_block <= highest_block || highest_block + 1 == db_first_block) + { + info!( + target: "reth::providers::static_file", + ?db_first_block, + ?highest_block, + unwind_target = highest_block, + ?segment, + "Setting unwind target." + ); + return Ok(Some(highest_block)) + } + + if let Some((db_last_key, _)) = db_cursor.last()? 
&& + highest_static_file_block + .is_none_or(|highest_block| block_from_key(&db_last_key) > highest_block) + { + debug!( + target: "reth::providers::static_file", + ?segment, + "Database has entries beyond static files, no unwind needed" + ); + return Ok(None) + } + } else { + debug!(target: "reth::providers::static_file", ?segment, "No database entries found"); + } + + let highest_static_file_block = highest_static_file_block.unwrap_or_default(); + + let stage_id = match segment { + StaticFileSegment::Headers => StageId::Headers, + StaticFileSegment::Transactions => StageId::Bodies, + StaticFileSegment::Receipts | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => StageId::Execution, + StaticFileSegment::TransactionSenders => StageId::SenderRecovery, + }; + let checkpoint_block_number = + provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number; + + if checkpoint_block_number > highest_static_file_block { + info!( + target: "reth::providers::static_file", + checkpoint_block_number, + unwind_target = highest_static_file_block, + ?segment, + "Setting unwind target." + ); + return Ok(Some(highest_static_file_block)) + } + + if checkpoint_block_number < highest_static_file_block { + info!( + target: "reth::providers", + ?segment, + from = highest_static_file_block, + to = checkpoint_block_number, + "Unwinding static file segment." + ); + let mut writer = self.latest_writer(segment)?; + match segment { + StaticFileSegment::AccountChangeSets => { + writer.prune_account_changesets(checkpoint_block_number)?; + } + StaticFileSegment::StorageChangeSets => { + writer.prune_storage_changesets(checkpoint_block_number)?; + } + _ => unreachable!("invalid segment for changeset invariants"), + } + writer.commit()?; + } + + Ok(None) + } + /// Returns the earliest available block number that has not been expired and is still /// available. 
/// @@ -2212,6 +2376,124 @@ impl ChangeSetReader for StaticFileProvider { } } +impl StorageChangeSetReader for StaticFileProvider { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + let provider = match self.get_segment_provider_for_block( + StaticFileSegment::StorageChangeSets, + block_number, + None, + ) { + Ok(provider) => provider, + Err(ProviderError::MissingStaticFileBlock(_, _)) => return Ok(Vec::new()), + Err(err) => return Err(err), + }; + + if let Some(offset) = provider.user_header().changeset_offset(block_number) { + let mut cursor = provider.cursor()?; + let mut changeset = Vec::with_capacity(offset.num_changes() as usize); + + for i in offset.changeset_range() { + if let Some(change) = cursor.get_one::(i.into())? { + let block_address = BlockNumberAddress((block_number, change.address)); + let entry = StorageEntry { key: change.key, value: change.value }; + changeset.push((block_address, entry)); + } + } + Ok(changeset) + } else { + Ok(Vec::new()) + } + } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + let provider = match self.get_segment_provider_for_block( + StaticFileSegment::StorageChangeSets, + block_number, + None, + ) { + Ok(provider) => provider, + Err(ProviderError::MissingStaticFileBlock(_, _)) => return Ok(None), + Err(err) => return Err(err), + }; + + let user_header = provider.user_header(); + let Some(offset) = user_header.changeset_offset(block_number) else { + return Ok(None); + }; + + let mut cursor = provider.cursor()?; + let range = offset.changeset_range(); + let mut low = range.start; + let mut high = range.end; + + while low < high { + let mid = low + (high - low) / 2; + if let Some(change) = cursor.get_one::(mid.into())? 
{ + match (change.address, change.key).cmp(&(address, storage_key)) { + std::cmp::Ordering::Less => low = mid + 1, + _ => high = mid, + } + } else { + debug!( + target: "provider::static_file", + ?low, + ?mid, + ?high, + ?range, + ?block_number, + ?address, + ?storage_key, + "Cannot continue binary search for storage changeset fetch" + ); + low = range.end; + break; + } + } + + if low < range.end && + let Some(change) = cursor + .get_one::(low.into())? + .filter(|change| change.address == address && change.key == storage_key) + { + return Ok(Some(StorageEntry { key: change.key, value: change.value })); + } + + Ok(None) + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.walk_storage_changeset_range(range).collect() + } + + fn storage_changeset_count(&self) -> ProviderResult { + let mut count = 0; + + let static_files = iter_static_files(&self.path).map_err(ProviderError::other)?; + if let Some(changeset_segments) = static_files.get(StaticFileSegment::StorageChangeSets) { + for (_, header) in changeset_segments { + if let Some(changeset_offsets) = header.changeset_offsets() { + for offset in changeset_offsets { + count += offset.num_changes() as usize; + } + } + } + } + + Ok(count) + } +} + impl StaticFileProvider { /// Creates an iterator for walking through account changesets in the specified block range. /// @@ -2228,6 +2510,14 @@ impl StaticFileProvider { ) -> StaticFileAccountChangesetWalker { StaticFileAccountChangesetWalker::new(self.clone(), range) } + + /// Creates an iterator for walking through storage changesets in the specified block range. 
+ pub fn walk_storage_changeset_range( + &self, + range: impl RangeBounds, + ) -> StaticFileStorageChangesetWalker { + StaticFileStorageChangesetWalker::new(self.clone(), range) + } } impl> HeaderProvider for StaticFileProvider { diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index aa5b61171ac..9fdcc1aee18 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -69,14 +69,19 @@ mod tests { use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy}; use alloy_primitives::{Address, BlockHash, Signature, TxNumber, B256, U160, U256}; use rand::seq::SliceRandom; - use reth_db::{models::AccountBeforeTx, test_utils::create_test_static_files_dir}; + use reth_db::{ + models::{AccountBeforeTx, StorageBeforeTx}, + test_utils::create_test_static_files_dir, + }; use reth_db_api::{transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, Headers}; use reth_ethereum_primitives::{EthPrimitives, Receipt, TransactionSigned}; use reth_primitives_traits::Account; use reth_static_file_types::{ find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }; - use reth_storage_api::{ChangeSetReader, ReceiptProvider, TransactionsProvider}; + use reth_storage_api::{ + ChangeSetReader, ReceiptProvider, StorageChangeSetReader, TransactionsProvider, + }; use reth_testing_utils::generators::{self, random_header_range}; use std::{collections::BTreeMap, fmt::Debug, fs, ops::Range, path::Path}; @@ -321,7 +326,9 @@ mod tests { // Append transaction/receipt if there's still a transaction count to append if tx_count > 0 { match segment { - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { panic!("non tx based segment") } StaticFileSegment::Transactions => { @@ -438,7 +445,9 @@ mod 
tests { // Prune transactions or receipts based on the segment type match segment { - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { panic!("non tx based segment") } StaticFileSegment::Transactions => { @@ -463,7 +472,9 @@ mod tests { // cumulative_gas_used & nonce as ids. if let Some(id) = expected_tx_tip { match segment { - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { panic!("non tx based segment") } StaticFileSegment::Transactions => assert_eyre( @@ -1033,4 +1044,311 @@ mod tests { } } } + + #[test] + fn test_storage_changeset_static_files() { + let (static_dir, _) = create_test_static_files_dir(); + + let sf_rw = StaticFileProvider::::read_write(&static_dir) + .expect("Failed to create static file provider"); + + // Test writing and reading storage changesets + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + // Create test data for multiple blocks + let test_blocks = 10u64; + let entries_per_block = 5; + + for block_num in 0..test_blocks { + let changeset = (0..entries_per_block) + .map(|i| { + let mut addr = Address::ZERO; + addr.0[0] = block_num as u8; + addr.0[1] = i as u8; + StorageBeforeTx { + address: addr, + key: B256::with_last_byte(i as u8), + value: U256::from(block_num * 1000 + i as u64), + } + }) + .collect::>(); + + writer.append_storage_changeset(changeset, block_num).unwrap(); + } + + writer.commit().unwrap(); + } + + // Verify data can be read back correctly + { + let provider = sf_rw + .get_segment_provider_for_block(StaticFileSegment::StorageChangeSets, 5, None) + .unwrap(); + + // Check that the segment header has changeset offsets + assert!(provider.user_header().changeset_offsets().is_some()); + let offsets = 
provider.user_header().changeset_offsets().unwrap(); + assert_eq!(offsets.len(), 10); // Should have 10 blocks worth of offsets + + // Verify each block has the expected number of changes + for (i, offset) in offsets.iter().enumerate() { + assert_eq!(offset.num_changes(), 5, "Block {} should have 5 changes", i); + } + } + } + + #[test] + fn test_get_storage_before_block() { + let (static_dir, _) = create_test_static_files_dir(); + + let sf_rw = StaticFileProvider::::read_write(&static_dir) + .expect("Failed to create static file provider"); + + let test_address = Address::from([1u8; 20]); + let other_address = Address::from([2u8; 20]); + let missing_address = Address::from([3u8; 20]); + let test_key = B256::with_last_byte(1); + let other_key = B256::with_last_byte(2); + + // Write changesets for multiple blocks + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + // Block 0: test_address and other_address change + writer + .append_storage_changeset( + vec![ + StorageBeforeTx { address: test_address, key: test_key, value: U256::ZERO }, + StorageBeforeTx { + address: other_address, + key: other_key, + value: U256::from(5), + }, + ], + 0, + ) + .unwrap(); + + // Block 1: only other_address changes + writer + .append_storage_changeset( + vec![StorageBeforeTx { + address: other_address, + key: other_key, + value: U256::from(7), + }], + 1, + ) + .unwrap(); + + // Block 2: test_address changes again + writer + .append_storage_changeset( + vec![StorageBeforeTx { + address: test_address, + key: test_key, + value: U256::from(9), + }], + 2, + ) + .unwrap(); + + writer.commit().unwrap(); + } + + // Test get_storage_before_block + { + let result = sf_rw.get_storage_before_block(0, test_address, test_key).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, test_key); + assert_eq!(entry.value, U256::ZERO); + + let result = sf_rw.get_storage_before_block(2, test_address, test_key).unwrap(); + 
assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, test_key); + assert_eq!(entry.value, U256::from(9)); + + let result = sf_rw.get_storage_before_block(1, test_address, test_key).unwrap(); + assert!(result.is_none()); + + let result = sf_rw.get_storage_before_block(2, missing_address, test_key).unwrap(); + assert!(result.is_none()); + + let result = sf_rw.get_storage_before_block(1, other_address, other_key).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, other_key); + } + } + + #[test] + fn test_storage_changeset_truncation() { + let (static_dir, _) = create_test_static_files_dir(); + + let blocks_per_file = 10; + let files_per_range = 3; + let file_set_count = 3; + let initial_file_count = files_per_range * file_set_count; + let tip = blocks_per_file * file_set_count - 1; + + // Setup: Create storage changesets for multiple blocks + { + let sf_rw: StaticFileProvider = + StaticFileProviderBuilder::read_write(&static_dir) + .with_blocks_per_file(blocks_per_file) + .build() + .expect("failed to create static file provider"); + + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + for block_num in 0..=tip { + let num_changes = ((block_num % 5) + 1) as usize; + let mut changeset = Vec::with_capacity(num_changes); + + for i in 0..num_changes { + let mut address = Address::ZERO; + address.0[0] = block_num as u8; + address.0[1] = i as u8; + + changeset.push(StorageBeforeTx { + address, + key: B256::with_last_byte(i as u8), + value: U256::from(block_num * 1000 + i as u64), + }); + } + + writer.append_storage_changeset(changeset, block_num).unwrap(); + } + + writer.commit().unwrap(); + } + + fn validate_truncation( + sf_rw: &StaticFileProvider, + static_dir: impl AsRef, + expected_tip: Option, + expected_file_count: u64, + ) -> eyre::Result<()> { + let highest_block = + sf_rw.get_highest_static_file_block(StaticFileSegment::StorageChangeSets); + 
assert_eyre(highest_block, expected_tip, "block tip mismatch")?; + + assert_eyre( + count_files_without_lockfile(static_dir)?, + expected_file_count as usize, + "file count mismatch", + )?; + + if let Some(tip) = expected_tip { + let provider = sf_rw.get_segment_provider_for_block( + StaticFileSegment::StorageChangeSets, + tip, + None, + )?; + let offsets = provider.user_header().changeset_offsets(); + assert!(offsets.is_some(), "Should have changeset offsets"); + } + + Ok(()) + } + + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .with_blocks_per_file(blocks_per_file) + .build() + .expect("failed to create static file provider"); + + sf_rw.initialize_index().expect("Failed to initialize index"); + + // Case 1: Truncate to block 20 + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + writer.prune_storage_changesets(20).unwrap(); + writer.commit().unwrap(); + + validate_truncation(&sf_rw, &static_dir, Some(20), initial_file_count) + .expect("Truncation validation failed"); + } + + // Case 2: Truncate to block 9 + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + writer.prune_storage_changesets(9).unwrap(); + writer.commit().unwrap(); + + validate_truncation(&sf_rw, &static_dir, Some(9), files_per_range) + .expect("Truncation validation failed"); + } + + // Case 3: Truncate all (should keep block 0) + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + writer.prune_storage_changesets(0).unwrap(); + writer.commit().unwrap(); + + validate_truncation(&sf_rw, &static_dir, Some(0), files_per_range) + .expect("Truncation validation failed"); + } + } + + #[test] + fn test_storage_changeset_binary_search() { + let (static_dir, _) = create_test_static_files_dir(); + + let sf_rw = StaticFileProvider::::read_write(&static_dir) + .expect("Failed to create static file provider"); + + let block_num = 0u64; + let num_slots = 100; + let address = 
Address::from([4u8; 20]); + + let mut keys: Vec = Vec::with_capacity(num_slots); + for i in 0..num_slots { + keys.push(B256::with_last_byte(i as u8)); + } + + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + let changeset = keys + .iter() + .enumerate() + .map(|(i, key)| StorageBeforeTx { address, key: *key, value: U256::from(i as u64) }) + .collect::>(); + + writer.append_storage_changeset(changeset, block_num).unwrap(); + writer.commit().unwrap(); + } + + { + let result = sf_rw.get_storage_before_block(block_num, address, keys[0]).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, keys[0]); + assert_eq!(entry.value, U256::from(0)); + + let result = + sf_rw.get_storage_before_block(block_num, address, keys[num_slots - 1]).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, keys[num_slots - 1]); + + let mid = num_slots / 2; + let result = sf_rw.get_storage_before_block(block_num, address, keys[mid]).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, keys[mid]); + + let missing_key = B256::with_last_byte(255); + let result = sf_rw.get_storage_before_block(block_num, address, missing_key).unwrap(); + assert!(result.is_none()); + + for i in (0..num_slots).step_by(10) { + let result = sf_rw.get_storage_before_block(block_num, address, keys[i]).unwrap(); + assert!(result.is_some()); + assert_eq!(result.unwrap().key, keys[i]); + } + } + } } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 869554cc793..6c4eb97c4ca 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -6,7 +6,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, 
RawRwLock, RwLock}; use reth_codecs::Compact; -use reth_db::models::AccountBeforeTx; +use reth_db::models::{AccountBeforeTx, StorageBeforeTx}; use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_node_types::NodePrimitives; @@ -56,6 +56,11 @@ enum PruneStrategy { /// The target block number to prune to. last_block: BlockNumber, }, + /// Prune storage changesets to a target block number. + StorageChangeSets { + /// The target block number to prune to. + last_block: BlockNumber, + }, } /// Static file writers for every known [`StaticFileSegment`]. @@ -69,6 +74,7 @@ pub(crate) struct StaticFileWriters { receipts: RwLock>>, transaction_senders: RwLock>>, account_change_sets: RwLock>>, + storage_change_sets: RwLock>>, } impl Default for StaticFileWriters { @@ -79,6 +85,7 @@ impl Default for StaticFileWriters { receipts: Default::default(), transaction_senders: Default::default(), account_change_sets: Default::default(), + storage_change_sets: Default::default(), } } } @@ -95,6 +102,7 @@ impl StaticFileWriters { StaticFileSegment::Receipts => self.receipts.write(), StaticFileSegment::TransactionSenders => self.transaction_senders.write(), StaticFileSegment::AccountChangeSets => self.account_change_sets.write(), + StaticFileSegment::StorageChangeSets => self.storage_change_sets.write(), }; if write_guard.is_none() { @@ -113,6 +121,7 @@ impl StaticFileWriters { &self.receipts, &self.transaction_senders, &self.account_change_sets, + &self.storage_change_sets, ] { let mut writer = writer_lock.write(); if let Some(writer) = writer.as_mut() { @@ -131,6 +140,7 @@ impl StaticFileWriters { &self.receipts, &self.transaction_senders, &self.account_change_sets, + &self.storage_change_sets, ] { let writer = writer_lock.read(); if let Some(writer) = writer.as_ref() && @@ -155,6 +165,7 @@ impl StaticFileWriters { &self.receipts, &self.transaction_senders, &self.account_change_sets, + &self.storage_change_sets, ] { let mut writer 
= writer_lock.write(); if let Some(writer) = writer.as_mut() { @@ -388,6 +399,9 @@ impl StaticFileProviderRW { PruneStrategy::AccountChangeSets { last_block } => { self.prune_account_changeset_data(last_block)? } + PruneStrategy::StorageChangeSets { last_block } => { + self.prune_storage_changeset_data(last_block)? + } } } @@ -596,7 +610,7 @@ impl StaticFileProviderRW { /// Commits to the configuration file at the end fn truncate_changesets(&mut self, last_block: u64) -> ProviderResult<()> { let segment = self.writer.user_header().segment(); - debug_assert_eq!(segment, StaticFileSegment::AccountChangeSets); + debug_assert!(segment.is_change_based()); // Get the current block range let current_block_end = self @@ -1076,6 +1090,41 @@ impl StaticFileProviderRW { Ok(()) } + /// Appends a block storage changeset to the static file. + /// + /// It **CALLS** `increment_block()`. + pub fn append_storage_changeset( + &mut self, + mut changeset: Vec, + block_number: u64, + ) -> ProviderResult<()> { + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::StorageChangeSets); + let start = Instant::now(); + + self.increment_block(block_number)?; + self.ensure_no_queued_prune()?; + + // sort by address + storage key + changeset.sort_by_key(|change| (change.address, change.key)); + + let mut count: u64 = 0; + for change in changeset { + self.append_change(&change)?; + count += 1; + } + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operations( + StaticFileSegment::StorageChangeSets, + StaticFileProviderOperation::Append, + count, + Some(start.elapsed()), + ); + } + + Ok(()) + } + /// Adds an instruction to prune `to_delete` transactions during commit. /// /// Note: `last_block` refers to the block the unwinds ends at. @@ -1127,6 +1176,12 @@ impl StaticFileProviderRW { self.queue_prune(PruneStrategy::AccountChangeSets { last_block }) } + /// Adds an instruction to prune storage changesets until the given block. 
+ pub fn prune_storage_changesets(&mut self, last_block: u64) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::StorageChangeSets); + self.queue_prune(PruneStrategy::StorageChangeSets { last_block }) + } + /// Adds an instruction to prune elements during commit using the specified strategy. fn queue_prune(&mut self, strategy: PruneStrategy) -> ProviderResult<()> { self.ensure_no_queued_prune()?; @@ -1186,6 +1241,25 @@ impl StaticFileProviderRW { Ok(()) } + /// Prunes the last storage changesets from the data file. + fn prune_storage_changeset_data(&mut self, last_block: BlockNumber) -> ProviderResult<()> { + let start = Instant::now(); + + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::StorageChangeSets); + + self.truncate_changesets(last_block)?; + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operation( + StaticFileSegment::StorageChangeSets, + StaticFileProviderOperation::Prune, + Some(start.elapsed()), + ); + } + + Ok(()) + } + /// Prunes the last `to_delete` receipts from the data file. 
fn prune_receipt_data( &mut self, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index fba585cc793..f9a2f980ef2 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -27,14 +27,14 @@ use reth_ethereum_primitives::EthPrimitives; use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::{ Account, Block, BlockBody, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, - SignerRecoverable, + SignerRecoverable, StorageEntry, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider, - StorageRootProvider, + StorageChangeSetReader, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ @@ -989,6 +989,37 @@ impl ChangeSetReader for MockEthProvi } } +impl StorageChangeSetReader + for MockEthProvider +{ + fn storage_changeset( + &self, + _block_number: BlockNumber, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn get_storage_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + _storage_key: B256, + ) -> ProviderResult> { + Ok(None) + } + + fn storage_changesets_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn storage_changeset_count(&self) -> ProviderResult { + Ok(0) + } +} + impl StateReader for MockEthProvider { type Receipt = T::Receipt; diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 67c633559c9..8fb9c387069 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -10,14 +10,18 @@ use reth_chain_state::{ 
CanonStateSubscriptions, ForkChoiceSubscriptions, PersistedBlockSubscriptions, }; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; -use reth_storage_api::NodePrimitivesProvider; +use reth_storage_api::{NodePrimitivesProvider, StorageChangeSetReader}; use std::fmt::Debug; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: DatabaseProviderFactory< DB = N::DB, - Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader, + Provider: BlockReader + + StageCheckpointReader + + PruneCheckpointReader + + ChangeSetReader + + StorageChangeSetReader, > + NodePrimitivesProvider + StaticFileProviderFactory + RocksDBProviderFactory @@ -32,6 +36,7 @@ pub trait FullProvider: + HashedPostStateProvider + ChainSpecProvider + ChangeSetReader + + StorageChangeSetReader + CanonStateSubscriptions + ForkChoiceSubscriptions

> + PersistedBlockSubscriptions @@ -46,7 +51,11 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory< DB = N::DB, - Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader, + Provider: BlockReader + + StageCheckpointReader + + PruneCheckpointReader + + ChangeSetReader + + StorageChangeSetReader, > + NodePrimitivesProvider + StaticFileProviderFactory + RocksDBProviderFactory @@ -61,6 +70,7 @@ impl FullProvider for T where + HashedPostStateProvider + ChainSpecProvider + ChangeSetReader + + StorageChangeSetReader + CanonStateSubscriptions + ForkChoiceSubscriptions
> + PersistedBlockSubscriptions diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index beb9d23165b..a12a6cfcc95 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -10,7 +10,7 @@ use crate::{ }; #[cfg(feature = "db-api")] -use crate::{DBProvider, DatabaseProviderFactory}; +use crate::{DBProvider, DatabaseProviderFactory, StorageChangeSetReader}; use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; @@ -28,7 +28,9 @@ use reth_db_api::mock::{DatabaseMock, TxMock}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_ethereum_primitives::EthPrimitives; use reth_execution_types::ExecutionOutcome; -use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader}; +use reth_primitives_traits::{ + Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader, StorageEntry, +}; #[cfg(feature = "db-api")] use reth_prune_types::PruneModes; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -408,6 +410,36 @@ impl ChangeSetReader for NoopProvider { } } +#[cfg(feature = "db-api")] +impl StorageChangeSetReader for NoopProvider { + fn storage_changeset( + &self, + _block_number: BlockNumber, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn get_storage_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + _storage_key: B256, + ) -> ProviderResult> { + Ok(None) + } + + fn storage_changesets_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn storage_changeset_count(&self) -> ProviderResult { + Ok(0) + } +} + impl StateRootProvider for NoopProvider { fn state_root(&self, _state: HashedPostState) -> ProviderResult { Ok(B256::default()) diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index 
51a9c5e5e53..ecd47ff50db 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -4,6 +4,7 @@ use alloc::{ }; use alloy_primitives::{Address, BlockNumber, B256}; use core::ops::RangeInclusive; +use reth_db_models::StorageBeforeTx; use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; @@ -41,4 +42,44 @@ pub trait StorageChangeSetReader: Send { &self, block_number: BlockNumber, ) -> ProviderResult>; + + /// Search the block's changesets for the given address and storage key, and return the result. + /// + /// Returns `None` if the storage slot was not changed in this block. + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult>; + + /// Get all storage changesets in a range of blocks. + /// + /// NOTE: Get inclusive range of blocks. + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>; + + /// Get the total count of all storage changes. + fn storage_changeset_count(&self) -> ProviderResult; + + /// Get storage changesets for a block as static-file rows. + /// + /// Default implementation uses `storage_changeset` and maps to `StorageBeforeTx`. 
+ fn storage_block_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + self.storage_changeset(block_number).map(|changesets| { + changesets + .into_iter() + .map(|(block_address, entry)| StorageBeforeTx { + address: block_address.address(), + key: entry.key, + value: entry.value, + }) + .collect() + }) + } } diff --git a/crates/trie/db/src/changesets.rs b/crates/trie/db/src/changesets.rs index fe9558e3bc5..deccb1df456 100644 --- a/crates/trie/db/src/changesets.rs +++ b/crates/trie/db/src/changesets.rs @@ -10,7 +10,9 @@ use crate::{DatabaseHashedPostState, DatabaseStateRoot, DatabaseTrieCursorFactory}; use alloy_primitives::{map::B256Map, BlockNumber, B256}; use parking_lot::RwLock; -use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader}; +use reth_storage_api::{ + BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader, +}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ changesets::compute_trie_changesets, @@ -65,7 +67,11 @@ pub fn compute_block_trie_changesets( block_number: BlockNumber, ) -> Result where - Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + Provider: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + BlockNumReader, { debug!( target: "trie::changeset_cache", @@ -175,7 +181,11 @@ pub fn compute_block_trie_updates( block_number: BlockNumber, ) -> ProviderResult where - Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + Provider: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + BlockNumReader, { let tx = provider.tx_ref(); @@ -323,7 +333,11 @@ impl ChangesetCache { provider: &P, ) -> ProviderResult> where - P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + P: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + 
BlockNumReader, { // Try cache first (with read lock) { @@ -408,7 +422,11 @@ impl ChangesetCache { range: RangeInclusive, ) -> ProviderResult where - P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + P: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + BlockNumReader, { // Get the database tip block number let db_tip_block = provider diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index f509702c2e0..ce49539fd45 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -18,7 +18,9 @@ pub use hashed_cursor::{ pub use prefix_set::{load_prefix_sets_with_provider, PrefixSetLoader}; pub use proof::{DatabaseProof, DatabaseStorageProof}; pub use state::{DatabaseHashedPostState, DatabaseStateRoot}; -pub use storage::{DatabaseHashedStorage, DatabaseStorageRoot}; +pub use storage::{ + hashed_storage_from_reverts_with_provider, DatabaseHashedStorage, DatabaseStorageRoot, +}; pub use trie_cursor::{ DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory, }; diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index b92afa42011..eec1bad362f 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -14,7 +14,7 @@ use reth_db_api::{ DatabaseError, }; use reth_primitives_traits::StorageEntry; -use reth_storage_api::{ChangeSetReader, DBProvider}; +use reth_storage_api::{ChangeSetReader, DBProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderError; use reth_trie::{ prefix_set::{PrefixSetMut, TriePrefixSets}, @@ -93,7 +93,7 @@ pub fn load_prefix_sets_with_provider( range: RangeInclusive, ) -> Result where - Provider: ChangeSetReader + DBProvider, + Provider: ChangeSetReader + StorageChangeSetReader + DBProvider, KH: KeyHasher, { let tx = provider.tx_ref(); @@ -118,12 +118,9 @@ where } } - // Walk storage changeset and insert storage prefixes - // Note: Storage changesets don't have 
static files yet, so we still use direct cursor - let mut storage_cursor = tx.cursor_dup_read::()?; - let storage_range = BlockNumberAddress::range(range); - for storage_entry in storage_cursor.walk_range(storage_range)? { - let (BlockNumberAddress((_, address)), StorageEntry { key, .. }) = storage_entry?; + // Walk storage changesets using the provider (handles static files + database) + let storage_changesets = provider.storage_changesets_range(range)?; + for (BlockNumberAddress((_, address)), StorageEntry { key, .. }) in storage_changesets { let hashed_address = KH::hash_key(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); storage_prefix_sets diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index a4f76402560..3be7464a92c 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -3,13 +3,11 @@ use crate::{ }; use alloy_primitives::{map::B256Map, BlockNumber, B256}; use reth_db_api::{ - cursor::DbCursorRO, - models::{AccountBeforeTx, BlockNumberAddress, BlockNumberAddressRange}, - tables, + models::{AccountBeforeTx, BlockNumberAddress}, transaction::DbTx, }; use reth_execution_errors::StateRootError; -use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider}; +use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderError; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, @@ -34,7 +32,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// An instance of state root calculator with account and storage prefixes loaded. fn incremental_root_calculator( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result; @@ -45,7 +43,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// The updated state root. 
fn incremental_root( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result; @@ -58,7 +56,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// The updated state root and the trie updates. fn incremental_root_with_updates( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result<(B256, TrieUpdates), StateRootError>; @@ -69,7 +67,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// The intermediate progress of state root computation. fn incremental_root_with_progress( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result; @@ -133,7 +131,7 @@ pub trait DatabaseHashedPostState: Sized { /// Initializes [`HashedPostStateSorted`] from reverts. Iterates over state reverts in the /// specified range and aggregates them into sorted hashed state. 
fn from_reverts( - provider: &(impl ChangeSetReader + BlockNumReader + DBProvider), + provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider), range: impl RangeBounds, ) -> Result; } @@ -146,7 +144,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root_calculator( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result { let loaded_prefix_sets = @@ -155,7 +153,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result { debug!(target: "trie::loader", ?range, "incremental state root"); @@ -163,7 +161,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root_with_updates( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result<(B256, TrieUpdates), StateRootError> { debug!(target: "trie::loader", ?range, "incremental state root"); @@ -171,7 +169,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root_with_progress( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result { debug!(target: "trie::loader", ?range, "incremental state root with progress"); @@ -248,11 +246,9 @@ impl DatabaseHashedPostState for HashedPostStateSorted { /// - Hashes keys and returns them already ordered for trie iteration. 
#[instrument(target = "trie::db", skip(provider), fields(range))] fn from_reverts( - provider: &(impl ChangeSetReader + BlockNumReader + DBProvider), + provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider), range: impl RangeBounds, ) -> Result { - let tx = provider.tx_ref(); - // Extract concrete start/end values to use for both account and storage changesets. let start = match range.start_bound() { Bound::Included(&n) => n, @@ -266,9 +262,6 @@ impl DatabaseHashedPostState for HashedPostStateSorted { Bound::Unbounded => BlockNumber::MAX, }; - // Convert to BlockNumberAddressRange for storage changesets. - let storage_range: BlockNumberAddressRange = (start..end).into(); - // Iterate over account changesets and record value before first occurring account change let mut accounts = Vec::new(); let mut seen_accounts = HashSet::new(); @@ -280,20 +273,23 @@ impl DatabaseHashedPostState for HashedPostStateSorted { } accounts.sort_unstable_by_key(|(hash, _)| *hash); - // Read storages directly into B256Map> with HashSet to track seen keys. + // Read storages into B256Map> with HashSet to track seen keys. // Only keep the first (oldest) occurrence of each (address, slot) pair. let mut storages = B256Map::>::default(); let mut seen_storage_keys = HashSet::new(); - let mut storage_changesets_cursor = tx.cursor_read::()?; - - for entry in storage_changesets_cursor.walk_range(storage_range)? { - let (BlockNumberAddress((_, address)), storage) = entry?; - if seen_storage_keys.insert((address, storage.key)) { - let hashed_address = KH::hash_key(address); - storages - .entry(hashed_address) - .or_default() - .push((KH::hash_key(storage.key), storage.value)); + + if start < end { + let end_inclusive = end.saturating_sub(1); + for (BlockNumberAddress((_, address)), storage) in + provider.storage_changesets_range(start..=end_inclusive)? 
+ { + if seen_storage_keys.insert((address, storage.key)) { + let hashed_address = KH::hash_key(address); + storages + .entry(hashed_address) + .or_default() + .push((KH::hash_key(storage.key), storage.value)); + } } } diff --git a/crates/trie/db/src/storage.rs b/crates/trie/db/src/storage.rs index 42d0d464c77..b4614eb15b3 100644 --- a/crates/trie/db/src/storage.rs +++ b/crates/trie/db/src/storage.rs @@ -4,6 +4,8 @@ use reth_db_api::{ cursor::DbCursorRO, models::BlockNumberAddress, tables, transaction::DbTx, DatabaseError, }; use reth_execution_errors::StorageRootError; +use reth_storage_api::{BlockNumReader, StorageChangeSetReader}; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StorageRoot, }; @@ -34,6 +36,36 @@ pub trait DatabaseHashedStorage: Sized { fn from_reverts(tx: &TX, address: Address, from: BlockNumber) -> Result; } +/// Initializes [`HashedStorage`] from reverts using a provider. +pub fn hashed_storage_from_reverts_with_provider

( + provider: &P, + address: Address, + from: BlockNumber, +) -> ProviderResult +where + P: StorageChangeSetReader + BlockNumReader, +{ + let mut storage = HashedStorage::new(false); + let tip = provider.last_block_number()?; + + if from > tip { + return Ok(storage) + } + + for (BlockNumberAddress((_, storage_address)), storage_change) in + provider.storage_changesets_range(from..=tip)? + { + if storage_address == address { + let hashed_slot = keccak256(storage_change.key); + if let hash_map::Entry::Vacant(entry) = storage.storage.entry(hashed_slot) { + entry.insert(storage_change.value); + } + } + } + + Ok(storage) +} + impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX> for StorageRoot, DatabaseHashedCursorFactory<&'a TX>> { diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 89a390f3f72..ac193f4d95d 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -35,6 +35,7 @@ - [`reth db settings set storages_history`](./reth/db/settings/set/storages_history.mdx) - [`reth db settings set transaction_hash_numbers`](./reth/db/settings/set/transaction_hash_numbers.mdx) - [`reth db settings set account_history`](./reth/db/settings/set/account_history.mdx) + - [`reth db settings set storage_changesets`](./reth/db/settings/set/storage_changesets.mdx) - [`reth db account-storage`](./reth/db/account-storage.mdx) - [`reth download`](./reth/download.mdx) - [`reth stage`](./reth/stage.mdx) @@ -93,6 +94,7 @@ - [`op-reth db settings set storages_history`](./op-reth/db/settings/set/storages_history.mdx) - [`op-reth db settings set transaction_hash_numbers`](./op-reth/db/settings/set/transaction_hash_numbers.mdx) - [`op-reth db settings set account_history`](./op-reth/db/settings/set/account_history.mdx) + - [`op-reth db settings set storage_changesets`](./op-reth/db/settings/set/storage_changesets.mdx) - [`op-reth db account-storage`](./op-reth/db/account-storage.mdx) - [`op-reth 
stage`](./op-reth/stage.mdx) - [`op-reth stage run`](./op-reth/stage/run.mdx) diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx index 9d5fd0032a4..d8a816e23ae 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -124,6 +124,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -154,6 +157,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx index 0515b998834..a9939730d1a 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: --start-block diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx index 5ee09d1b050..a0ceb8ee50c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: -h, --help diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx index cd979cffde2..af64e2e00a2 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment 
responsible for the `StorageChangeSets` table The key to get content for diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx index c804080a0d6..8aa2ae3cdca 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx @@ -15,6 +15,7 @@ Commands: storages_history Store storage history in rocksdb instead of MDBX transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX account_history Store account history in rocksdb instead of MDBX + storage_changesets Store storage changesets in static files instead of the database help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storage_changesets.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storage_changesets.mdx new file mode 100644 index 00000000000..d84b848a6b6 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storage_changesets.mdx @@ -0,0 +1,170 @@ +# op-reth db settings set storage_changesets + +Store storage changesets in static files instead of the database + +```bash +$ op-reth db settings set storage_changesets --help +``` +```txt +Usage: op-reth db settings set storage_changesets [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx index a95bbcfbca4..bdf56a9804d 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Block number to query diff --git a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx index 95ef59d63e6..c5affadf9f5 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx index 499017a379f..398086f9dc6 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx index 9637e30cd6e..3e3e1ba019e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. 
+ + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/init.mdx b/docs/vocs/docs/pages/cli/op-reth/init.mdx index 01cd9d866a2..9a0930b4fe3 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 3493c9ac4b4..98205ad008e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -1036,6 +1036,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -1066,6 +1069,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + Rollup: --rollup.sequencer Endpoint for the sequencer mempool (can be both HTTP and WS) diff --git a/docs/vocs/docs/pages/cli/op-reth/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/prune.mdx index 1409abf05f4..603af5d99e4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/prune.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx index 484805486e1..c185b91027d 100644 --- a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx index effc3dfe6bd..d5034f0d4b8 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx index 9843a022569..9150154c31f 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx @@ -115,6 +115,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -145,6 +148,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. 
+ + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx index 3130a06819f..75b39f76c77 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx index 496417aeb4c..37852456cfd 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx @@ -113,6 +113,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -143,6 +146,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 5fd0ef4199b..11f25e69730 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -124,6 +124,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -154,6 +157,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx index 04bd067b27b..274d060dab6 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: --start-block diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index e9a7792cf09..1fb9e442d46 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: -h, --help diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 2a794310144..0e0d6e95c5d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment 
responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table The key to get content for diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx index 53a1b8aea06..ecc8163de17 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx @@ -15,6 +15,7 @@ Commands: storages_history Store storage history in rocksdb instead of MDBX transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX account_history Store account history in rocksdb instead of MDBX + storage_changesets Store storage changesets in static files instead of the database help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/storage_changesets.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/storage_changesets.mdx new file mode 100644 index 00000000000..d160b895188 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/storage_changesets.mdx @@ -0,0 +1,170 @@ +# reth db settings set storage_changesets + +Store storage changesets in static files instead of the database + +```bash +$ reth db settings set storage_changesets --help +``` +```txt +Usage: reth db settings set storage_changesets [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx index dfa69b1281d..54808d46f48 100644 --- a/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Block number to query diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index dfc81c0bf57..02ff7298c7f 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets 
+ Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 64bc8038242..9275a12059f 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 38218816626..aa13fd5f56a 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index ad81cc3d187..50ed891bcf9 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index b1d05e4b5ff..cbaf086f7c8 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index b6c5ee05399..bc4fe2c30c8 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index b076f3eee47..75ee10f7c17 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -1036,6 +1036,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -1066,6 +1069,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + Ress: --ress.enable Enable support for `ress` subprotocol diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 8e33c025044..a40d116b5f1 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index aa0615070c7..30f2f8fc213 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. 
@@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 15318efa4cd..98352862663 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index f78ed561f99..9aefa355425 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -115,6 +115,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -145,6 +148,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 8752c5e526a..c06c786879b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. 
+ + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 8c4f9ef9f7a..af442b243d6 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -113,6 +113,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -143,6 +146,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/sidebar-cli-op-reth.ts b/docs/vocs/sidebar-cli-op-reth.ts index 58d814f0e31..7a15d764376 100644 --- a/docs/vocs/sidebar-cli-op-reth.ts +++ b/docs/vocs/sidebar-cli-op-reth.ts @@ -159,6 +159,10 @@ export const opRethCliSidebar: SidebarItem = { { text: "op-reth db settings set account_history", link: "/cli/op-reth/db/settings/set/account_history" + }, + { + text: "op-reth db settings set storage_changesets", + link: "/cli/op-reth/db/settings/set/storage_changesets" } ] } diff --git a/docs/vocs/sidebar-cli-reth.ts b/docs/vocs/sidebar-cli-reth.ts index 91c39eabb6b..5b5a74c2a69 100644 --- a/docs/vocs/sidebar-cli-reth.ts +++ b/docs/vocs/sidebar-cli-reth.ts @@ -163,6 +163,10 @@ export const rethCliSidebar: SidebarItem = { { text: "reth db settings set account_history", link: "/cli/reth/db/settings/set/account_history" + }, + { + text: "reth db settings set storage_changesets", + link: "/cli/reth/db/settings/set/storage_changesets" } ] } From 965705ff889de08d9ea403b46566239d65a72f61 Mon Sep 17 00:00:00 2001 From: andrewshab <152420261+andrewshab3@users.noreply.github.com> Date: Thu, 22 Jan 2026 16:24:51 +0100 Subject: [PATCH 147/267] fix: remove collect (#21318) Co-authored-by: Matthias Seitz --- .../provider/src/providers/blockchain_provider.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 005b94915b4..1644219428b 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1697,14 +1697,11 @@ mod tests { database_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) }), - database_changesets - .iter() - .map(|block_changesets| { - block_changesets.iter().map(|(address, account, _)| { - (*address, Some(Some((*account).into())), []) - }) + 
database_changesets.iter().map(|block_changesets| { + block_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), []) }) - .collect::>(), + }), Vec::new(), ), first_block: first_database_block, From 2ac7d719f377ffe8ef09b8f1c7462e03c47dd803 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 22 Jan 2026 16:46:01 +0100 Subject: [PATCH 148/267] feat(trie): add V2 account proof computation and refactor proof types (reapply) (#21316) Co-authored-by: Matthias Seitz --- crates/engine/primitives/src/config.rs | 16 + .../tree/src/tree/payload_processor/mod.rs | 19 +- .../src/tree/payload_processor/multiproof.rs | 507 ++++++++++++++---- .../src/tree/payload_processor/prewarm.rs | 89 ++- .../src/tree/payload_processor/sparse_trie.rs | 15 +- crates/trie/parallel/Cargo.toml | 2 +- crates/trie/parallel/src/proof.rs | 9 +- crates/trie/parallel/src/proof_task.rs | 433 +++++++++++---- crates/trie/parallel/src/stats.rs | 5 - crates/trie/parallel/src/value_encoder.rs | 2 - 10 files changed, 839 insertions(+), 258 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 2870d3dccc4..0b72e1d6243 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -34,6 +34,11 @@ fn default_account_worker_count() -> usize { /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 60; +/// The size of proof targets chunk to spawn in one multiproof calculation when V2 proofs are +/// enabled. This is 4x the default chunk size to take advantage of more efficient V2 proof +/// computation. +pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2: usize = DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE * 4; + /// Default number of reserved CPU cores for non-reth processes. /// /// This will be deducted from the thread count of main reth global threadpool. 
@@ -267,6 +272,17 @@ impl TreeConfig { self.multiproof_chunk_size } + /// Return the multiproof task chunk size, using the V2 default if V2 proofs are enabled + /// and the chunk size is at the default value. + pub const fn effective_multiproof_chunk_size(&self) -> usize { + if self.enable_proof_v2 && self.multiproof_chunk_size == DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE + { + DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2 + } else { + self.multiproof_chunk_size + } + } + /// Return the number of reserved CPU cores for non-reth processes pub const fn reserved_cpu_cores(&self) -> usize { self.reserved_cpu_cores diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index f606fb1091c..6d61578f636 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -247,6 +247,9 @@ where let (to_sparse_trie, sparse_trie_rx) = channel(); let (to_multi_proof, from_multi_proof) = crossbeam_channel::unbounded(); + // Extract V2 proofs flag early so we can pass it to prewarm + let v2_proofs_enabled = config.enable_proof_v2(); + // Handle BAL-based optimization if available let prewarm_handle = if let Some(bal) = bal { // When BAL is present, use BAL prewarming and send BAL to multiproof @@ -263,6 +266,7 @@ where provider_builder.clone(), None, // Don't send proof targets when BAL is present Some(bal), + v2_proofs_enabled, ) } else { // Normal path: spawn with transaction prewarming @@ -273,6 +277,7 @@ where provider_builder.clone(), Some(to_multi_proof.clone()), None, + v2_proofs_enabled, ) }; @@ -280,7 +285,6 @@ where let task_ctx = ProofTaskCtx::new(multiproof_provider_factory); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); - let v2_proofs_enabled = config.enable_proof_v2(); let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), task_ctx, @@ -292,10 +296,13 @@ where let 
multi_proof_task = MultiProofTask::new( proof_handle.clone(), to_sparse_trie, - config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), + config + .multiproof_chunking_enabled() + .then_some(config.effective_multiproof_chunk_size()), to_multi_proof.clone(), from_multi_proof, - ); + ) + .with_v2_proofs_enabled(v2_proofs_enabled); // spawn multi-proof task let parent_span = span.clone(); @@ -344,8 +351,9 @@ where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, { let (prewarm_rx, execution_rx, size_hint) = self.spawn_tx_iterator(transactions); + // This path doesn't use multiproof, so V2 proofs flag doesn't matter let prewarm_handle = - self.spawn_caching_with(env, prewarm_rx, size_hint, provider_builder, None, bal); + self.spawn_caching_with(env, prewarm_rx, size_hint, provider_builder, None, bal, false); PayloadHandle { to_multi_proof: None, prewarm_handle, @@ -412,6 +420,7 @@ where } /// Spawn prewarming optionally wired to the multiproof task for target updates. + #[expect(clippy::too_many_arguments)] fn spawn_caching_with

( &self, env: ExecutionEnv, @@ -420,6 +429,7 @@ where provider_builder: StateProviderBuilder, to_multi_proof: Option>, bal: Option>, + v2_proofs_enabled: bool, ) -> CacheTaskHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, @@ -442,6 +452,7 @@ where terminate_execution: Arc::new(AtomicBool::new(false)), precompile_cache_disabled: self.precompile_cache_disabled, precompile_cache_map: self.precompile_cache_map.clone(), + v2_proofs_enabled, }; let (prewarm_task, to_prewarm_task) = PrewarmCacheTask::new( diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 4664c1906c9..12514e2dc9a 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -11,14 +11,18 @@ use reth_metrics::Metrics; use reth_provider::AccountReader; use reth_revm::state::EvmState; use reth_trie::{ - added_removed_keys::MultiAddedRemovedKeys, DecodedMultiProof, HashedPostState, HashedStorage, + added_removed_keys::MultiAddedRemovedKeys, proof_v2, HashedPostState, HashedStorage, MultiProofTargets, }; +#[cfg(test)] +use reth_trie_parallel::stats::ParallelTrieTracker; use reth_trie_parallel::{ proof::ParallelProof, proof_task::{ - AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, + AccountMultiproofInput, ProofResult, ProofResultContext, ProofResultMessage, + ProofWorkerHandle, }, + targets_v2::{ChunkedMultiProofTargetsV2, MultiProofTargetsV2}, }; use revm_primitives::map::{hash_map, B256Map}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -63,12 +67,12 @@ const DEFAULT_MAX_TARGETS_FOR_CHUNKING: usize = 300; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. 
-#[derive(Default, Debug)] +#[derive(Debug)] pub struct SparseTrieUpdate { /// The state update that was used to calculate the proof pub(crate) state: HashedPostState, /// The calculated multiproof - pub(crate) multiproof: DecodedMultiProof, + pub(crate) multiproof: ProofResult, } impl SparseTrieUpdate { @@ -80,7 +84,11 @@ impl SparseTrieUpdate { /// Construct update from multiproof. #[cfg(test)] pub(super) fn from_multiproof(multiproof: reth_trie::MultiProof) -> alloy_rlp::Result { - Ok(Self { multiproof: multiproof.try_into()?, ..Default::default() }) + let stats = ParallelTrieTracker::default().finish(); + Ok(Self { + state: HashedPostState::default(), + multiproof: ProofResult::Legacy(multiproof.try_into()?, stats), + }) } /// Extend update with contents of the other. @@ -94,7 +102,7 @@ impl SparseTrieUpdate { #[derive(Debug)] pub(super) enum MultiProofMessage { /// Prefetch proof targets - PrefetchProofs(MultiProofTargets), + PrefetchProofs(VersionedMultiProofTargets), /// New state update from transaction execution with its source StateUpdate(Source, EvmState), /// State update that can be applied to the sparse trie without any new proofs. @@ -223,12 +231,155 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat hashed_state } +/// Extends a `MultiProofTargets` with the contents of a `VersionedMultiProofTargets`, +/// regardless of which variant the latter is. 
+fn extend_multiproof_targets(dest: &mut MultiProofTargets, src: &VersionedMultiProofTargets) { + match src { + VersionedMultiProofTargets::Legacy(targets) => { + dest.extend_ref(targets); + } + VersionedMultiProofTargets::V2(targets) => { + // Add all account targets + for target in &targets.account_targets { + dest.entry(target.key()).or_default(); + } + + // Add all storage targets + for (hashed_address, slots) in &targets.storage_targets { + let slot_set = dest.entry(*hashed_address).or_default(); + for slot in slots { + slot_set.insert(slot.key()); + } + } + } + } +} + +/// A set of multiproof targets which can be either in the legacy or V2 representations. +#[derive(Debug)] +pub(super) enum VersionedMultiProofTargets { + /// Legacy targets + Legacy(MultiProofTargets), + /// V2 targets + V2(MultiProofTargetsV2), +} + +impl VersionedMultiProofTargets { + /// Returns true if there are no account or storage targets. + fn is_empty(&self) -> bool { + match self { + Self::Legacy(targets) => targets.is_empty(), + Self::V2(targets) => targets.is_empty(), + } + } + + /// Returns the number of account targets in the multiproof target + fn account_targets_len(&self) -> usize { + match self { + Self::Legacy(targets) => targets.len(), + Self::V2(targets) => targets.account_targets.len(), + } + } + + /// Returns the number of storage targets in the multiproof target + fn storage_targets_len(&self) -> usize { + match self { + Self::Legacy(targets) => targets.values().map(|slots| slots.len()).sum::(), + Self::V2(targets) => { + targets.storage_targets.values().map(|slots| slots.len()).sum::() + } + } + } + + /// Returns the number of accounts in the multiproof targets. + fn len(&self) -> usize { + match self { + Self::Legacy(targets) => targets.len(), + Self::V2(targets) => targets.account_targets.len(), + } + } + + /// Returns the total storage slot count across all accounts. 
+ fn storage_count(&self) -> usize { + match self { + Self::Legacy(targets) => targets.values().map(|slots| slots.len()).sum(), + Self::V2(targets) => targets.storage_targets.values().map(|slots| slots.len()).sum(), + } + } + + /// Returns the number of items that will be considered during chunking. + fn chunking_length(&self) -> usize { + match self { + Self::Legacy(targets) => targets.chunking_length(), + Self::V2(targets) => { + // For V2, count accounts + storage slots + targets.account_targets.len() + + targets.storage_targets.values().map(|slots| slots.len()).sum::() + } + } + } + + /// Retains the targets representing the difference with another `MultiProofTargets`. + /// Removes all targets that are already present in `other`. + fn retain_difference(&mut self, other: &MultiProofTargets) { + match self { + Self::Legacy(targets) => { + targets.retain_difference(other); + } + Self::V2(targets) => { + // Remove account targets that exist in other + targets.account_targets.retain(|target| !other.contains_key(&target.key())); + + // For each account in storage_targets, remove slots that exist in other + targets.storage_targets.retain(|hashed_address, slots| { + if let Some(other_slots) = other.get(hashed_address) { + slots.retain(|slot| !other_slots.contains(&slot.key())); + !slots.is_empty() + } else { + true + } + }); + } + } + } + + /// Extends this `VersionedMultiProofTargets` with the contents of another. + /// + /// Panics if the variants do not match. 
+ fn extend(&mut self, other: Self) { + match (self, other) { + (Self::Legacy(dest), Self::Legacy(src)) => { + dest.extend(src); + } + (Self::V2(dest), Self::V2(src)) => { + dest.account_targets.extend(src.account_targets); + for (addr, slots) in src.storage_targets { + dest.storage_targets.entry(addr).or_default().extend(slots); + } + } + _ => panic!("Cannot extend VersionedMultiProofTargets with mismatched variants"), + } + } + + /// Chunks this `VersionedMultiProofTargets` into smaller chunks of the given size. + fn chunks(self, chunk_size: usize) -> Box> { + match self { + Self::Legacy(targets) => { + Box::new(MultiProofTargets::chunks(targets, chunk_size).map(Self::Legacy)) + } + Self::V2(targets) => { + Box::new(ChunkedMultiProofTargetsV2::new(targets, chunk_size).map(Self::V2)) + } + } + } +} + /// Input parameters for dispatching a multiproof calculation. #[derive(Debug)] struct MultiproofInput { source: Option, hashed_state_update: HashedPostState, - proof_targets: MultiProofTargets, + proof_targets: VersionedMultiProofTargets, proof_sequence_number: u64, state_root_message_sender: CrossbeamSender, multi_added_removed_keys: Option>, @@ -263,8 +414,6 @@ pub struct MultiproofManager { proof_result_tx: CrossbeamSender, /// Metrics metrics: MultiProofTaskMetrics, - /// Whether to use V2 storage proofs - v2_proofs_enabled: bool, } impl MultiproofManager { @@ -278,9 +427,7 @@ impl MultiproofManager { metrics.max_storage_workers.set(proof_worker_handle.total_storage_workers() as f64); metrics.max_account_workers.set(proof_worker_handle.total_account_workers() as f64); - let v2_proofs_enabled = proof_worker_handle.v2_proofs_enabled(); - - Self { metrics, proof_worker_handle, proof_result_tx, v2_proofs_enabled } + Self { metrics, proof_worker_handle, proof_result_tx } } /// Dispatches a new multiproof calculation to worker pools. 
@@ -325,41 +472,48 @@ impl MultiproofManager { multi_added_removed_keys, } = multiproof_input; - let account_targets = proof_targets.len(); - let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); - trace!( target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?proof_targets, - account_targets, - storage_targets, + account_targets = proof_targets.account_targets_len(), + storage_targets = proof_targets.storage_targets_len(), ?source, "Dispatching multiproof to workers" ); let start = Instant::now(); - // Extend prefix sets with targets - let frozen_prefix_sets = - ParallelProof::extend_prefix_sets_with_targets(&Default::default(), &proof_targets); + // Workers will send ProofResultMessage directly to proof_result_rx + let proof_result_sender = ProofResultContext::new( + self.proof_result_tx.clone(), + proof_sequence_number, + hashed_state_update, + start, + ); - // Dispatch account multiproof to worker pool with result sender - let input = AccountMultiproofInput { - targets: proof_targets, - prefix_sets: frozen_prefix_sets, - collect_branch_node_masks: true, - multi_added_removed_keys, - // Workers will send ProofResultMessage directly to proof_result_rx - proof_result_sender: ProofResultContext::new( - self.proof_result_tx.clone(), - proof_sequence_number, - hashed_state_update, - start, - ), - v2_proofs_enabled: self.v2_proofs_enabled, + let input = match proof_targets { + VersionedMultiProofTargets::Legacy(proof_targets) => { + // Extend prefix sets with targets + let frozen_prefix_sets = ParallelProof::extend_prefix_sets_with_targets( + &Default::default(), + &proof_targets, + ); + + AccountMultiproofInput::Legacy { + targets: proof_targets, + prefix_sets: frozen_prefix_sets, + collect_branch_node_masks: true, + multi_added_removed_keys, + proof_result_sender, + } + } + VersionedMultiProofTargets::V2(proof_targets) => { + AccountMultiproofInput::V2 { targets: proof_targets, proof_result_sender } + } }; + // 
Dispatch account multiproof to worker pool with result sender if let Err(e) = self.proof_worker_handle.dispatch_account_multiproof(input) { error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch account multiproof"); return; @@ -561,6 +715,9 @@ pub(super) struct MultiProofTask { /// there are any active workers and force chunking across workers. This is to prevent tasks /// which are very long from hitting a single worker. max_targets_for_chunking: usize, + /// Whether or not V2 proof calculation is enabled. If enabled then [`MultiProofTargetsV2`] + /// will be produced by state updates. + v2_proofs_enabled: bool, } impl MultiProofTask { @@ -592,9 +749,16 @@ impl MultiProofTask { ), metrics, max_targets_for_chunking: DEFAULT_MAX_TARGETS_FOR_CHUNKING, + v2_proofs_enabled: false, } } + /// Enables V2 proof target generation on state updates. + pub(super) const fn with_v2_proofs_enabled(mut self, v2_proofs_enabled: bool) -> Self { + self.v2_proofs_enabled = v2_proofs_enabled; + self + } + /// Handles request for proof prefetch. /// /// Returns how many multiproof tasks were dispatched for the prefetch request. @@ -602,37 +766,29 @@ impl MultiProofTask { level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, - fields(accounts = targets.len(), chunks = 0) + fields(accounts = targets.account_targets_len(), chunks = 0) )] - fn on_prefetch_proof(&mut self, mut targets: MultiProofTargets) -> u64 { + fn on_prefetch_proof(&mut self, mut targets: VersionedMultiProofTargets) -> u64 { // Remove already fetched proof targets to avoid redundant work. targets.retain_difference(&self.fetched_proof_targets); - self.fetched_proof_targets.extend_ref(&targets); + extend_multiproof_targets(&mut self.fetched_proof_targets, &targets); - // Make sure all target accounts have an `AddedRemovedKeySet` in the + // For Legacy multiproofs, make sure all target accounts have an `AddedRemovedKeySet` in the // [`MultiAddedRemovedKeys`]. 
Even if there are not any known removed keys for the account, // we still want to optimistically fetch extension children for the leaf addition case. - self.multi_added_removed_keys.touch_accounts(targets.keys().copied()); - - // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks - let multi_added_removed_keys = Arc::new(MultiAddedRemovedKeys { - account: self.multi_added_removed_keys.account.clone(), - storages: targets - .keys() - .filter_map(|account| { - self.multi_added_removed_keys - .storages - .get(account) - .cloned() - .map(|keys| (*account, keys)) - }) - .collect(), - }); + // V2 multiproofs don't need this. + let multi_added_removed_keys = + if let VersionedMultiProofTargets::Legacy(legacy_targets) = &targets { + self.multi_added_removed_keys.touch_accounts(legacy_targets.keys().copied()); + Some(Arc::new(self.multi_added_removed_keys.clone())) + } else { + None + }; self.metrics.prefetch_proof_targets_accounts_histogram.record(targets.len() as f64); self.metrics .prefetch_proof_targets_storages_histogram - .record(targets.values().map(|slots| slots.len()).sum::() as f64); + .record(targets.storage_count() as f64); let chunking_len = targets.chunking_length(); let available_account_workers = @@ -646,7 +802,7 @@ impl MultiProofTask { self.max_targets_for_chunking, available_account_workers, available_storage_workers, - MultiProofTargets::chunks, + VersionedMultiProofTargets::chunks, |proof_targets| { self.multiproof_manager.dispatch(MultiproofInput { source: None, @@ -654,7 +810,7 @@ impl MultiProofTask { proof_targets, proof_sequence_number: self.proof_sequencer.next_sequence(), state_root_message_sender: self.tx.clone(), - multi_added_removed_keys: Some(multi_added_removed_keys.clone()), + multi_added_removed_keys: multi_added_removed_keys.clone(), }); }, ); @@ -757,6 +913,7 @@ impl MultiProofTask { self.multiproof_manager.proof_worker_handle.available_account_workers(); let available_storage_workers = 
self.multiproof_manager.proof_worker_handle.available_storage_workers(); + let num_chunks = dispatch_with_chunking( not_fetched_state_update, chunking_len, @@ -770,8 +927,9 @@ impl MultiProofTask { &hashed_state_update, &self.fetched_proof_targets, &multi_added_removed_keys, + self.v2_proofs_enabled, ); - spawned_proof_targets.extend_ref(&proof_targets); + extend_multiproof_targets(&mut spawned_proof_targets, &proof_targets); self.multiproof_manager.dispatch(MultiproofInput { source: Some(source), @@ -871,7 +1029,10 @@ impl MultiProofTask { batch_metrics.proofs_processed += 1; if let Some(combined_update) = self.on_proof( sequence_number, - SparseTrieUpdate { state, multiproof: Default::default() }, + SparseTrieUpdate { + state, + multiproof: ProofResult::empty(self.v2_proofs_enabled), + }, ) { let _ = self.to_sparse_trie.send(combined_update); } @@ -898,8 +1059,7 @@ impl MultiProofTask { } let account_targets = merged_targets.len(); - let storage_targets = - merged_targets.values().map(|slots| slots.len()).sum::(); + let storage_targets = merged_targets.storage_count(); batch_metrics.prefetch_proofs_requested += self.on_prefetch_proof(merged_targets); trace!( target: "engine::tree::payload_processor::multiproof", @@ -1003,7 +1163,10 @@ impl MultiProofTask { if let Some(combined_update) = self.on_proof( sequence_number, - SparseTrieUpdate { state, multiproof: Default::default() }, + SparseTrieUpdate { + state, + multiproof: ProofResult::empty(self.v2_proofs_enabled), + }, ) { let _ = self.to_sparse_trie.send(combined_update); } @@ -1106,7 +1269,7 @@ impl MultiProofTask { let update = SparseTrieUpdate { state: proof_result.state, - multiproof: proof_result_data.proof, + multiproof: proof_result_data, }; if let Some(combined_update) = @@ -1196,7 +1359,7 @@ struct MultiproofBatchCtx { /// received. updates_finished_time: Option, /// Reusable buffer for accumulating prefetch targets during batching. 
- accumulated_prefetch_targets: Vec, + accumulated_prefetch_targets: Vec, } impl MultiproofBatchCtx { @@ -1242,40 +1405,77 @@ fn get_proof_targets( state_update: &HashedPostState, fetched_proof_targets: &MultiProofTargets, multi_added_removed_keys: &MultiAddedRemovedKeys, -) -> MultiProofTargets { - let mut targets = MultiProofTargets::default(); + v2_enabled: bool, +) -> VersionedMultiProofTargets { + if v2_enabled { + let mut targets = MultiProofTargetsV2::default(); + + // first collect all new accounts (not previously fetched) + for &hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(&hashed_address) { + targets.account_targets.push(hashed_address.into()); + } + } + + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + + // If the storage is wiped, we still need to fetch the account proof. + if storage.wiped && fetched.is_none() { + targets.account_targets.push(Into::::into(*hashed_address)); + continue + } + + let changed_slots = storage + .storage + .keys() + .filter(|slot| !fetched.is_some_and(|f| f.contains(*slot))) + .map(|slot| Into::::into(*slot)) + .collect::>(); - // first collect all new accounts (not previously fetched) - for hashed_address in state_update.accounts.keys() { - if !fetched_proof_targets.contains_key(hashed_address) { - targets.insert(*hashed_address, HashSet::default()); + if !changed_slots.is_empty() { + targets.account_targets.push((*hashed_address).into()); + targets.storage_targets.insert(*hashed_address, changed_slots); + } } - } - // then process storage slots for all accounts in the state update - for (hashed_address, storage) in &state_update.storages { - let fetched = fetched_proof_targets.get(hashed_address); - let storage_added_removed_keys = multi_added_removed_keys.get_storage(hashed_address); - let mut changed_slots = storage - .storage - .keys() - 
.filter(|slot| { - !fetched.is_some_and(|f| f.contains(*slot)) || - storage_added_removed_keys.is_some_and(|k| k.is_removed(slot)) - }) - .peekable(); + VersionedMultiProofTargets::V2(targets) + } else { + let mut targets = MultiProofTargets::default(); - // If the storage is wiped, we still need to fetch the account proof. - if storage.wiped && fetched.is_none() { - targets.entry(*hashed_address).or_default(); + // first collect all new accounts (not previously fetched) + for hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(hashed_address) { + targets.insert(*hashed_address, HashSet::default()); + } } - if changed_slots.peek().is_some() { - targets.entry(*hashed_address).or_default().extend(changed_slots); + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + let storage_added_removed_keys = multi_added_removed_keys.get_storage(hashed_address); + let mut changed_slots = storage + .storage + .keys() + .filter(|slot| { + !fetched.is_some_and(|f| f.contains(*slot)) || + storage_added_removed_keys.is_some_and(|k| k.is_removed(slot)) + }) + .peekable(); + + // If the storage is wiped, we still need to fetch the account proof. 
+ if storage.wiped && fetched.is_none() { + targets.entry(*hashed_address).or_default(); + } + + if changed_slots.peek().is_some() { + targets.entry(*hashed_address).or_default().extend(changed_slots); + } } - } - targets + VersionedMultiProofTargets::Legacy(targets) + } } /// Dispatches work items as a single unit or in chunks based on target size and worker @@ -1482,12 +1682,24 @@ mod tests { state } + fn unwrap_legacy_targets(targets: VersionedMultiProofTargets) -> MultiProofTargets { + match targets { + VersionedMultiProofTargets::Legacy(targets) => targets, + VersionedMultiProofTargets::V2(_) => panic!("Expected Legacy targets"), + } + } + #[test] fn test_get_proof_targets_new_account_targets() { let state = create_get_proof_targets_state(); let fetched = MultiProofTargets::default(); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // should return all accounts as targets since nothing was fetched before assert_eq!(targets.len(), state.accounts.len()); @@ -1501,7 +1713,12 @@ mod tests { let state = create_get_proof_targets_state(); let fetched = MultiProofTargets::default(); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // verify storage slots are included for accounts with storage for (addr, storage) in &state.storages { @@ -1529,7 +1746,12 @@ mod tests { // mark the account as already fetched fetched.insert(*fetched_addr, HashSet::default()); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // should not include the already fetched account since it has no storage updates 
assert!(!targets.contains_key(fetched_addr)); @@ -1549,7 +1771,12 @@ mod tests { fetched_slots.insert(fetched_slot); fetched.insert(*addr, fetched_slots); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // should not include the already fetched storage slot let target_slots = &targets[addr]; @@ -1562,7 +1789,12 @@ mod tests { let state = HashedPostState::default(); let fetched = MultiProofTargets::default(); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); assert!(targets.is_empty()); } @@ -1589,7 +1821,12 @@ mod tests { fetched_slots.insert(slot1); fetched.insert(addr1, fetched_slots); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); assert!(targets.contains_key(&addr2)); assert!(!targets[&addr1].contains(&slot1)); @@ -1615,7 +1852,12 @@ mod tests { assert!(!state.accounts.contains_key(&addr)); assert!(!fetched.contains_key(&addr)); - let targets = get_proof_targets(&state, &fetched, &MultiAddedRemovedKeys::new()); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &MultiAddedRemovedKeys::new(), + false, + )); // verify that we still get the storage slots for the unmodified account assert!(targets.contains_key(&addr)); @@ -1657,7 +1899,12 @@ mod tests { removed_state.storages.insert(addr, removed_storage); multi_added_removed_keys.update_with_state(&removed_state); - let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &multi_added_removed_keys, + 
false, + )); // slot1 should be included despite being fetched, because it's marked as removed assert!(targets.contains_key(&addr)); @@ -1684,7 +1931,12 @@ mod tests { storage.storage.insert(slot1, U256::from(100)); state.storages.insert(addr, storage); - let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &multi_added_removed_keys, + false, + )); // account should be included because storage is wiped and account wasn't fetched assert!(targets.contains_key(&addr)); @@ -1727,7 +1979,12 @@ mod tests { removed_state.storages.insert(addr, removed_storage); multi_added_removed_keys.update_with_state(&removed_state); - let targets = get_proof_targets(&state, &fetched, &multi_added_removed_keys); + let targets = unwrap_legacy_targets(get_proof_targets( + &state, + &fetched, + &multi_added_removed_keys, + false, + )); // only slots in the state update can be included, so slot3 should not appear assert!(!targets.contains_key(&addr)); @@ -1754,9 +2011,12 @@ mod tests { targets3.insert(addr3, HashSet::default()); let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets3)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets1))) + .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets2))) + .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets3))) + .unwrap(); let proofs_requested = if let Ok(MultiProofMessage::PrefetchProofs(targets)) = task.rx.recv() { @@ -1770,11 +2030,12 @@ mod tests { assert_eq!(num_batched, 3); assert_eq!(merged_targets.len(), 3); - assert!(merged_targets.contains_key(&addr1)); - assert!(merged_targets.contains_key(&addr2)); - assert!(merged_targets.contains_key(&addr3)); 
+ let legacy_targets = unwrap_legacy_targets(merged_targets); + assert!(legacy_targets.contains_key(&addr1)); + assert!(legacy_targets.contains_key(&addr2)); + assert!(legacy_targets.contains_key(&addr3)); - task.on_prefetch_proof(merged_targets) + task.on_prefetch_proof(VersionedMultiProofTargets::Legacy(legacy_targets)) } else { panic!("Expected PrefetchProofs message"); }; @@ -1849,11 +2110,16 @@ mod tests { // Queue: [PrefetchProofs1, PrefetchProofs2, StateUpdate1, StateUpdate2, PrefetchProofs3] let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets1))) + .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(targets2))) + .unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update1)).unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update2)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(targets3.clone())).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy( + targets3.clone(), + ))) + .unwrap(); // Step 1: Receive and batch PrefetchProofs (should get targets1 + targets2) let mut pending_msg: Option = None; @@ -1879,9 +2145,10 @@ mod tests { // Should have batched exactly 2 PrefetchProofs (not 3!) 
assert_eq!(num_batched, 2, "Should batch only until different message type"); assert_eq!(merged_targets.len(), 2); - assert!(merged_targets.contains_key(&addr1)); - assert!(merged_targets.contains_key(&addr2)); - assert!(!merged_targets.contains_key(&addr3), "addr3 should NOT be in first batch"); + let legacy_targets = unwrap_legacy_targets(merged_targets); + assert!(legacy_targets.contains_key(&addr1)); + assert!(legacy_targets.contains_key(&addr2)); + assert!(!legacy_targets.contains_key(&addr3), "addr3 should NOT be in first batch"); } else { panic!("Expected PrefetchProofs message"); } @@ -1906,7 +2173,8 @@ mod tests { match task.rx.try_recv() { Ok(MultiProofMessage::PrefetchProofs(targets)) => { assert_eq!(targets.len(), 1); - assert!(targets.contains_key(&addr3)); + let legacy_targets = unwrap_legacy_targets(targets); + assert!(legacy_targets.contains_key(&addr3)); } _ => panic!("PrefetchProofs3 was lost!"), } @@ -1952,9 +2220,13 @@ mod tests { let source = StateChangeSource::Transaction(99); let tx = task.tx.clone(); - tx.send(MultiProofMessage::PrefetchProofs(prefetch1)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy(prefetch1))) + .unwrap(); tx.send(MultiProofMessage::StateUpdate(source.into(), state_update)).unwrap(); - tx.send(MultiProofMessage::PrefetchProofs(prefetch2.clone())).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(VersionedMultiProofTargets::Legacy( + prefetch2.clone(), + ))) + .unwrap(); let mut ctx = MultiproofBatchCtx::new(Instant::now()); let mut batch_metrics = MultiproofBatchMetrics::default(); @@ -1987,7 +2259,8 @@ mod tests { match task.rx.try_recv() { Ok(MultiProofMessage::PrefetchProofs(targets)) => { assert_eq!(targets.len(), 1); - assert!(targets.contains_key(&prefetch_addr2)); + let legacy_targets = unwrap_legacy_targets(targets); + assert!(legacy_targets.contains_key(&prefetch_addr2)); } other => panic!("Expected PrefetchProofs2 in channel, got {:?}", other), } diff --git 
a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index e9b7e1e07b8..4f044e98fad 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -16,7 +16,7 @@ use crate::tree::{ payload_processor::{ bal::{total_slots, BALSlotIter}, executor::WorkloadExecutor, - multiproof::MultiProofMessage, + multiproof::{MultiProofMessage, VersionedMultiProofTargets}, ExecutionCache as PayloadExecutionCache, }, precompile_cache::{CachedPrecompile, PrecompileCacheMap}, @@ -237,7 +237,7 @@ where } /// If configured and the tx returned proof targets, emit the targets the transaction produced - fn send_multi_proof_targets(&self, targets: Option) { + fn send_multi_proof_targets(&self, targets: Option) { if self.is_execution_terminated() { // if execution is already terminated then we dont need to send more proof fetch // messages @@ -484,6 +484,8 @@ where pub(super) terminate_execution: Arc, pub(super) precompile_cache_disabled: bool, pub(super) precompile_cache_map: PrecompileCacheMap>, + /// Whether V2 proof calculation is enabled. + pub(super) v2_proofs_enabled: bool, } impl PrewarmContext @@ -492,10 +494,12 @@ where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, Evm: ConfigureEvm + 'static, { - /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating - /// execution. + /// Splits this context into an evm, an evm config, metrics, the atomic bool for terminating + /// execution, and whether V2 proofs are enabled. 
#[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] - fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { + fn evm_for_ctx( + self, + ) -> Option<(EvmFor, PrewarmMetrics, Arc, bool)> { let Self { env, evm_config, @@ -505,6 +509,7 @@ where terminate_execution, precompile_cache_disabled, precompile_cache_map, + v2_proofs_enabled, } = self; let mut state_provider = match provider.build() { @@ -554,7 +559,7 @@ where }); } - Some((evm, metrics, terminate_execution)) + Some((evm, metrics, terminate_execution, v2_proofs_enabled)) } /// Accepts an [`mpsc::Receiver`] of transactions and a handle to prewarm task. Executes @@ -575,7 +580,10 @@ where ) where Tx: ExecutableTxFor, { - let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; + let Some((mut evm, metrics, terminate_execution, v2_proofs_enabled)) = self.evm_for_ctx() + else { + return + }; while let Ok(IndexedTransaction { index, tx }) = { let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") @@ -638,7 +646,8 @@ where let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) .entered(); - let (targets, storage_targets) = multiproof_targets_from_state(res.state); + let (targets, storage_targets) = + multiproof_targets_from_state(res.state, v2_proofs_enabled); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); drop(_enter); @@ -783,9 +792,22 @@ where } } -/// Returns a set of [`MultiProofTargets`] and the total amount of storage targets, based on the +/// Returns a set of [`VersionedMultiProofTargets`] and the total amount of storage targets, based +/// on the given state. 
+fn multiproof_targets_from_state( + state: EvmState, + v2_enabled: bool, +) -> (VersionedMultiProofTargets, usize) { + if v2_enabled { + multiproof_targets_v2_from_state(state) + } else { + multiproof_targets_legacy_from_state(state) + } +} + +/// Returns legacy [`MultiProofTargets`] and the total amount of storage targets, based on the /// given state. -fn multiproof_targets_from_state(state: EvmState) -> (MultiProofTargets, usize) { +fn multiproof_targets_legacy_from_state(state: EvmState) -> (VersionedMultiProofTargets, usize) { let mut targets = MultiProofTargets::with_capacity(state.len()); let mut storage_targets = 0; for (addr, account) in state { @@ -815,7 +837,50 @@ fn multiproof_targets_from_state(state: EvmState) -> (MultiProofTargets, usize) targets.insert(keccak256(addr), storage_set); } - (targets, storage_targets) + (VersionedMultiProofTargets::Legacy(targets), storage_targets) +} + +/// Returns V2 [`reth_trie_parallel::targets_v2::MultiProofTargetsV2`] and the total amount of +/// storage targets, based on the given state. 
+fn multiproof_targets_v2_from_state(state: EvmState) -> (VersionedMultiProofTargets, usize) { + use reth_trie::proof_v2; + use reth_trie_parallel::targets_v2::MultiProofTargetsV2; + + let mut targets = MultiProofTargetsV2::default(); + let mut storage_target_count = 0; + for (addr, account) in state { + // if the account was not touched, or if the account was selfdestructed, do not + // fetch proofs for it + // + // Since selfdestruct can only happen in the same transaction, we can skip + // prefetching proofs for selfdestructed accounts + // + // See: https://eips.ethereum.org/EIPS/eip-6780 + if !account.is_touched() || account.is_selfdestructed() { + continue + } + + let hashed_address = keccak256(addr); + targets.account_targets.push(hashed_address.into()); + + let mut storage_slots = Vec::with_capacity(account.storage.len()); + for (key, slot) in account.storage { + // do nothing if unchanged + if !slot.is_changed() { + continue + } + + let hashed_slot = keccak256(B256::new(key.to_be_bytes())); + storage_slots.push(proof_v2::Target::from(hashed_slot)); + } + + storage_target_count += storage_slots.len(); + if !storage_slots.is_empty() { + targets.storage_targets.insert(hashed_address, storage_slots); + } + } + + (VersionedMultiProofTargets::V2(targets), storage_target_count) } /// The events the pre-warm task can handle. 
@@ -840,7 +905,7 @@ pub(super) enum PrewarmTaskEvent { /// The outcome of a pre-warm task Outcome { /// The prepared proof targets based on the evm state outcome - proof_targets: Option, + proof_targets: Option, }, /// Finished executing all transactions FinishedTxExecution { diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index b4c150cfa9a..052fd8672b2 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -4,7 +4,7 @@ use crate::tree::payload_processor::multiproof::{MultiProofTaskMetrics, SparseTr use alloy_primitives::B256; use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_trie::{updates::TrieUpdates, Nibbles}; -use reth_trie_parallel::root::ParallelStateRootError; +use reth_trie_parallel::{proof_task::ProofResult, root::ParallelStateRootError}; use reth_trie_sparse::{ errors::{SparseStateTrieResult, SparseTrieErrorKind}, provider::{TrieNodeProvider, TrieNodeProviderFactory}, @@ -97,8 +97,8 @@ where debug!( target: "engine::root", num_updates, - account_proofs = update.multiproof.account_subtree.len(), - storage_proofs = update.multiproof.storages.len(), + account_proofs = update.multiproof.account_proofs_len(), + storage_proofs = update.multiproof.storage_proofs_len(), "Updating sparse trie" ); @@ -157,7 +157,14 @@ where let started_at = Instant::now(); // Reveal new accounts and storage slots. 
- trie.reveal_decoded_multiproof(multiproof)?; + match multiproof { + ProofResult::Legacy(decoded, _) => { + trie.reveal_decoded_multiproof(decoded)?; + } + ProofResult::V2(decoded_v2) => { + trie.reveal_decoded_multiproof_v2(decoded_v2)?; + } + } let reveal_multiproof_elapsed = started_at.elapsed(); trace!( target: "engine::root::sparse", diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index d64f2dfb519..812dd2b85b1 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth -reth-execution-errors.workspace = true reth-primitives-traits.workspace = true +reth-execution-errors.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 7bf936bad3a..d42534c2713 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -197,7 +197,7 @@ impl ParallelProof { let (result_tx, result_rx) = crossbeam_unbounded(); let account_multiproof_start_time = Instant::now(); - let input = AccountMultiproofInput { + let input = AccountMultiproofInput::Legacy { targets, prefix_sets, collect_branch_node_masks: self.collect_branch_node_masks, @@ -208,7 +208,6 @@ impl ParallelProof { HashedPostState::default(), account_multiproof_start_time, ), - v2_proofs_enabled: self.v2_proofs_enabled, }; self.proof_worker_handle @@ -222,7 +221,9 @@ impl ParallelProof { ) })?; - let ProofResult { proof: multiproof, stats } = proof_result_msg.result?; + let ProofResult::Legacy(multiproof, stats) = proof_result_msg.result? 
else { + panic!("AccountMultiproofInput::Legacy was submitted, expected legacy result") + }; #[cfg(feature = "metrics")] self.metrics.record(stats); @@ -235,7 +236,7 @@ impl ParallelProof { leaves_added = stats.leaves_added(), missed_leaves = stats.missed_leaves(), precomputed_storage_roots = stats.precomputed_storage_roots(), - "Calculated decoded proof" + "Calculated decoded proof", ); Ok(multiproof) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index eb6f8923469..d8073d1acf5 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -32,6 +32,8 @@ use crate::{ root::ParallelStateRootError, stats::{ParallelTrieStats, ParallelTrieTracker}, + targets_v2::MultiProofTargetsV2, + value_encoder::AsyncAccountValueEncoder, StorageRootTargets, }; use alloy_primitives::{ @@ -49,11 +51,11 @@ use reth_trie::{ node_iter::{TrieElement, TrieNodeIter}, prefix_set::TriePrefixSets, proof::{ProofBlindedAccountProvider, ProofBlindedStorageProvider, StorageProof}, - proof_v2::{self, StorageProofCalculator}, + proof_v2, trie_cursor::{InstrumentedTrieCursor, TrieCursorFactory, TrieCursorMetricsCache}, walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, MultiProofTargets, - Nibbles, ProofTrieNode, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedMultiProofV2, DecodedStorageMultiProof, HashBuilder, HashedPostState, + MultiProofTargets, Nibbles, ProofTrieNode, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, @@ -220,7 +222,8 @@ impl ProofWorkerHandle { metrics, #[cfg(feature = "metrics")] cursor_metrics, - ); + ) + .with_v2_proofs(v2_proofs_enabled); if let Err(error) = worker.run() { error!( target: "trie::proof_task", @@ -333,16 +336,12 @@ impl ProofWorkerHandle { ProviderError::other(std::io::Error::other("account workers unavailable")); if let AccountWorkerJob::AccountMultiproof { input } = err.0 { - 
let AccountMultiproofInput { - proof_result_sender: - ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - }, - .. - } = *input; + let ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + } = input.into_proof_result_sender(); let _ = result_tx.send(ProofResultMessage { sequence_number: seq, @@ -605,11 +604,65 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { /// Result of a multiproof calculation. #[derive(Debug)] -pub struct ProofResult { - /// The account multiproof - pub proof: DecodedMultiProof, - /// Statistics collected during proof computation - pub stats: ParallelTrieStats, +pub enum ProofResult { + /// Legacy multiproof calculation result. + Legacy(DecodedMultiProof, ParallelTrieStats), + /// V2 multiproof calculation result. + V2(DecodedMultiProofV2), +} + +impl ProofResult { + /// Creates an empty [`ProofResult`] of the appropriate variant based on `v2_enabled`. + /// + /// Use this when constructing empty proofs (e.g., for state updates where all targets + /// were already fetched) to ensure consistency with the proof version being used. + pub fn empty(v2_enabled: bool) -> Self { + if v2_enabled { + Self::V2(DecodedMultiProofV2::default()) + } else { + let stats = ParallelTrieTracker::default().finish(); + Self::Legacy(DecodedMultiProof::default(), stats) + } + } + + /// Returns true if the result contains no proofs + pub fn is_empty(&self) -> bool { + match self { + Self::Legacy(proof, _) => proof.is_empty(), + Self::V2(proof) => proof.is_empty(), + } + } + + /// Extends the receiver with the value of the given results. + /// + /// # Panics + /// + /// This method panics if the two [`ProofResult`]s are not the same variant. 
+ pub fn extend(&mut self, other: Self) { + match (self, other) { + (Self::Legacy(proof, _), Self::Legacy(other, _)) => proof.extend(other), + (Self::V2(proof), Self::V2(other)) => proof.extend(other), + _ => panic!("mismatched ProofResults, cannot extend one with the other"), + } + } + + /// Returns the number of account proofs. + pub fn account_proofs_len(&self) -> usize { + match self { + Self::Legacy(proof, _) => proof.account_subtree.len(), + Self::V2(proof) => proof.account_proofs.len(), + } + } + + /// Returns the total number of storage proofs + pub fn storage_proofs_len(&self) -> usize { + match self { + Self::Legacy(proof, _) => { + proof.storages.values().map(|p| p.subtree.len()).sum::() + } + Self::V2(proof) => proof.storage_proofs.values().map(|p| p.len()).sum::(), + } + } } /// Channel used by worker threads to deliver `ProofResultMessage` items back to @@ -889,7 +942,7 @@ where &self, proof_tx: &ProofTaskTx, v2_calculator: Option< - &mut StorageProofCalculator< + &mut proof_v2::StorageProofCalculator< ::StorageTrieCursor<'_>, ::StorageCursor<'_>, >, @@ -1053,6 +1106,8 @@ struct AccountProofWorker { /// Cursor metrics for this worker #[cfg(feature = "metrics")] cursor_metrics: ProofTaskCursorMetrics, + /// Set to true if V2 proofs are enabled. + v2_enabled: bool, } impl AccountProofWorker @@ -1082,9 +1137,16 @@ where metrics, #[cfg(feature = "metrics")] cursor_metrics, + v2_enabled: false, } } + /// Changes whether or not V2 proofs are enabled. + const fn with_v2_proofs(mut self, v2_enabled: bool) -> Self { + self.v2_enabled = v2_enabled; + self + } + /// Runs the worker loop, processing jobs until the channel closes. 
/// /// # Lifecycle @@ -1117,6 +1179,17 @@ where let mut account_nodes_processed = 0u64; let mut cursor_metrics_cache = ProofTaskCursorMetricsCache::default(); + let mut v2_calculator = if self.v2_enabled { + let trie_cursor = proof_tx.provider.account_trie_cursor()?; + let hashed_cursor = proof_tx.provider.hashed_account_cursor()?; + Some(proof_v2::ProofCalculator::<_, _, AsyncAccountValueEncoder>::new( + trie_cursor, + hashed_cursor, + )) + } else { + None + }; + // Count this worker as available only after successful initialization. self.available_workers.fetch_add(1, Ordering::Relaxed); @@ -1128,6 +1201,7 @@ where AccountWorkerJob::AccountMultiproof { input } => { self.process_account_multiproof( &proof_tx, + v2_calculator.as_mut(), *input, &mut account_proofs_processed, &mut cursor_metrics_cache, @@ -1166,26 +1240,18 @@ where Ok(()) } - /// Processes an account multiproof request. - fn process_account_multiproof( + fn compute_legacy_account_multiproof( &self, proof_tx: &ProofTaskTx, - input: AccountMultiproofInput, - account_proofs_processed: &mut u64, - cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, - ) where + targets: MultiProofTargets, + mut prefix_sets: TriePrefixSets, + collect_branch_node_masks: bool, + multi_added_removed_keys: Option>, + proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, + ) -> Result + where Provider: TrieCursorFactory + HashedCursorFactory, { - let AccountMultiproofInput { - targets, - mut prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys, - proof_result_sender: - ProofResultContext { sender: result_tx, sequence_number: seq, state, start_time: start }, - v2_proofs_enabled, - } = input; - let span = debug_span!( target: "trie::proof_task", "Account multiproof calculation", @@ -1199,8 +1265,6 @@ where "Processing account multiproof" ); - let proof_start = Instant::now(); - let mut tracker = ParallelTrieTracker::default(); let mut storage_prefix_sets = std::mem::take(&mut 
prefix_sets.storage_prefix_sets); @@ -1210,29 +1274,14 @@ where tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - let storage_proof_receivers = match dispatch_storage_proofs( + let storage_proof_receivers = dispatch_storage_proofs( &self.storage_work_tx, &targets, &mut storage_prefix_sets, collect_branch_node_masks, multi_added_removed_keys.as_ref(), - v2_proofs_enabled, - ) { - Ok(receivers) => receivers, - Err(error) => { - // Send error through result channel - error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); - let _ = result_tx.send(ProofResultMessage { - sequence_number: seq, - result: Err(error), - elapsed: start.elapsed(), - state, - }); - return; - } - }; + )?; - // Use the missed leaves cache passed from the multiproof manager let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); let ctx = AccountMultiproofParams { @@ -1244,17 +1293,115 @@ where cached_storage_roots: &self.cached_storage_roots, }; - let result = - build_account_multiproof_with_storage_roots(&proof_tx.provider, ctx, &mut tracker); - - let now = Instant::now(); - let proof_elapsed = now.duration_since(proof_start); - let total_elapsed = now.duration_since(start); - let proof_cursor_metrics = tracker.cursor_metrics; - proof_cursor_metrics.record_spans(); + let result = build_account_multiproof_with_storage_roots( + &proof_tx.provider, + ctx, + &mut tracker, + proof_cursor_metrics, + ); let stats = tracker.finish(); - let result = result.map(|proof| ProofResult { proof, stats }); + result.map(|proof| ProofResult::Legacy(proof, stats)) + } + + fn compute_v2_account_multiproof( + &self, + v2_calculator: &mut proof_v2::ProofCalculator< + ::AccountTrieCursor<'_>, + ::AccountCursor<'_>, + AsyncAccountValueEncoder, + >, + targets: MultiProofTargetsV2, + ) -> Result + where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let MultiProofTargetsV2 { mut account_targets, storage_targets } = targets; + + let span = 
debug_span!( + target: "trie::proof_task", + "Account V2 multiproof calculation", + account_targets = account_targets.len(), + storage_targets = storage_targets.values().map(|t| t.len()).sum::(), + worker_id = self.worker_id, + ); + let _span_guard = span.enter(); + + trace!(target: "trie::proof_task", "Processing V2 account multiproof"); + + let storage_proof_receivers = + dispatch_v2_storage_proofs(&self.storage_work_tx, &account_targets, storage_targets)?; + + let mut value_encoder = AsyncAccountValueEncoder::new( + self.storage_work_tx.clone(), + storage_proof_receivers, + self.cached_storage_roots.clone(), + ); + + let proof = DecodedMultiProofV2 { + account_proofs: v2_calculator.proof(&mut value_encoder, &mut account_targets)?, + storage_proofs: value_encoder.into_storage_proofs()?, + }; + + Ok(ProofResult::V2(proof)) + } + + /// Processes an account multiproof request. + fn process_account_multiproof( + &self, + proof_tx: &ProofTaskTx, + v2_calculator: Option< + &mut proof_v2::ProofCalculator< + ::AccountTrieCursor<'_>, + ::AccountCursor<'_>, + AsyncAccountValueEncoder, + >, + >, + input: AccountMultiproofInput, + account_proofs_processed: &mut u64, + cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let mut proof_cursor_metrics = ProofTaskCursorMetricsCache::default(); + let proof_start = Instant::now(); + + let (proof_result_sender, result) = match input { + AccountMultiproofInput::Legacy { + targets, + prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + proof_result_sender, + } => ( + proof_result_sender, + self.compute_legacy_account_multiproof( + proof_tx, + targets, + prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + &mut proof_cursor_metrics, + ), + ), + AccountMultiproofInput::V2 { targets, proof_result_sender } => ( + proof_result_sender, + self.compute_v2_account_multiproof::( + v2_calculator.expect("v2 calculator provided"), + 
targets, + ), + ), + }; + + let ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + } = proof_result_sender; + + let proof_elapsed = proof_start.elapsed(); + let total_elapsed = start.elapsed(); *account_proofs_processed += 1; // Send result to MultiProofTask @@ -1275,6 +1422,8 @@ where ); } + proof_cursor_metrics.record_spans(); + trace!( target: "trie::proof_task", proof_time_us = proof_elapsed.as_micros(), @@ -1355,6 +1504,7 @@ fn build_account_multiproof_with_storage_roots

( provider: &P, ctx: AccountMultiproofParams<'_>, tracker: &mut ParallelTrieTracker, + proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, ) -> Result where P: TrieCursorFactory + HashedCursorFactory, @@ -1362,15 +1512,12 @@ where let accounts_added_removed_keys = ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); - // Create local metrics caches for account cursors. We can't directly use the metrics caches in - // the tracker due to the call to `inc_missed_leaves` which occurs on it. - let mut account_trie_cursor_metrics = TrieCursorMetricsCache::default(); - let mut account_hashed_cursor_metrics = HashedCursorMetricsCache::default(); - // Wrap account trie cursor with instrumented cursor let account_trie_cursor = provider.account_trie_cursor().map_err(ProviderError::Database)?; - let account_trie_cursor = - InstrumentedTrieCursor::new(account_trie_cursor, &mut account_trie_cursor_metrics); + let account_trie_cursor = InstrumentedTrieCursor::new( + account_trie_cursor, + &mut proof_cursor_metrics.account_trie_cursor, + ); // Create the walker. 
let walker = TrieWalker::<_>::state_trie(account_trie_cursor, ctx.prefix_set) @@ -1397,8 +1544,10 @@ where // Wrap account hashed cursor with instrumented cursor let account_hashed_cursor = provider.hashed_account_cursor().map_err(ProviderError::Database)?; - let account_hashed_cursor = - InstrumentedHashedCursor::new(account_hashed_cursor, &mut account_hashed_cursor_metrics); + let account_hashed_cursor = InstrumentedHashedCursor::new( + account_hashed_cursor, + &mut proof_cursor_metrics.account_hashed_cursor, + ); let mut account_node_iter = TrieNodeIter::state_trie(walker, account_hashed_cursor); @@ -1462,10 +1611,10 @@ where StorageProof::new_hashed(provider, provider, hashed_address) .with_prefix_set_mut(Default::default()) .with_trie_cursor_metrics( - &mut tracker.cursor_metrics.storage_trie_cursor, + &mut proof_cursor_metrics.storage_trie_cursor, ) .with_hashed_cursor_metrics( - &mut tracker.cursor_metrics.storage_hashed_cursor, + &mut proof_cursor_metrics.storage_hashed_cursor, ) .storage_multiproof( ctx.targets @@ -1516,10 +1665,6 @@ where BranchNodeMasksMap::default() }; - // Extend tracker with accumulated metrics from account cursors - tracker.cursor_metrics.account_trie_cursor.extend(&account_trie_cursor_metrics); - tracker.cursor_metrics.account_hashed_cursor.extend(&account_hashed_cursor_metrics); - // Consume remaining storage proof receivers for accounts not encountered during trie walk. // Done last to allow storage workers more time to complete while we finalized the account trie. 
for (hashed_address, receiver) in storage_proof_receivers { @@ -1550,7 +1695,6 @@ fn dispatch_storage_proofs( storage_prefix_sets: &mut B256Map, with_branch_node_masks: bool, multi_added_removed_keys: Option<&Arc>, - use_v2_proofs: bool, ) -> Result>, ParallelStateRootError> { let mut storage_proof_receivers = B256Map::with_capacity_and_hasher(targets.len(), Default::default()); @@ -1564,20 +1708,14 @@ fn dispatch_storage_proofs( let (result_tx, result_rx) = crossbeam_channel::unbounded(); // Create computation input based on V2 flag - let input = if use_v2_proofs { - // Convert target slots to V2 targets - let v2_targets = target_slots.iter().copied().map(Into::into).collect(); - StorageProofInput::new(*hashed_address, v2_targets) - } else { - let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); - StorageProofInput::legacy( - *hashed_address, - prefix_set, - target_slots.clone(), - with_branch_node_masks, - multi_added_removed_keys.cloned(), - ) - }; + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + let input = StorageProofInput::legacy( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); // Always dispatch a storage proof so we obtain the storage root even when no slots are // requested. @@ -1595,6 +1733,64 @@ fn dispatch_storage_proofs( Ok(storage_proof_receivers) } + +/// Queues V2 storage proofs for all accounts in the targets and returns receivers. +/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. 
+fn dispatch_v2_storage_proofs( + storage_work_tx: &CrossbeamSender, + account_targets: &Vec, + storage_targets: B256Map>, +) -> Result>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(account_targets.len(), Default::default()); + + // Dispatch all proofs for targeted storage slots + for (hashed_address, targets) in storage_targets { + // Create channel for receiving StorageProofResultMessage + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let input = StorageProofInput::new(hashed_address, targets); + + storage_work_tx + .send(StorageWorkerJob::StorageProof { input, proof_result_sender: result_tx }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {hashed_address:?}: storage worker pool unavailable", + )) + })?; + + storage_proof_receivers.insert(hashed_address, result_rx); + } + + // If there are any targeted accounts which did not have storage targets then we generate a + // single proof target for them so that we get their root. + for target in account_targets { + let hashed_address = target.key(); + if storage_proof_receivers.contains_key(&hashed_address) { + continue + } + + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let input = StorageProofInput::new(hashed_address, vec![proof_v2::Target::new(B256::ZERO)]); + + storage_work_tx + .send(StorageWorkerJob::StorageProof { input, proof_result_sender: result_tx }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {hashed_address:?}: storage worker pool unavailable", + )) + })?; + + storage_proof_receivers.insert(hashed_address, result_rx); + } + + Ok(storage_proof_receivers) +} + /// Input parameters for storage proof computation. #[derive(Debug)] pub enum StorageProofInput { @@ -1639,7 +1835,7 @@ impl StorageProofInput { } } - /// Creates a new [`StorageProofInput`] with the given hashed address and target slots. 
+ /// Creates a new [`StorageProofInput`] with the given hashed address and target slots. pub const fn new(hashed_address: B256, targets: Vec) -> Self { Self::V2 { hashed_address, targets } } @@ -1655,20 +1851,39 @@ impl StorageProofInput { } /// Input parameters for account multiproof computation. -#[derive(Debug, Clone)] -pub struct AccountMultiproofInput { - /// The targets for which to compute the multiproof. - pub targets: MultiProofTargets, - /// The prefix sets for the proof calculation. - pub prefix_sets: TriePrefixSets, - /// Whether or not to collect branch node masks. - pub collect_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - pub multi_added_removed_keys: Option>, - /// Context for sending the proof result. - pub proof_result_sender: ProofResultContext, - /// Whether to use V2 storage proofs. - pub v2_proofs_enabled: bool, +#[derive(Debug)] +pub enum AccountMultiproofInput { + /// Legacy account multiproof proof variant + Legacy { + /// The targets for which to compute the multiproof. + targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option>, + /// Context for sending the proof result. + proof_result_sender: ProofResultContext, + }, + /// V2 account multiproof variant + V2 { + /// The targets for which to compute the multiproof. + targets: MultiProofTargetsV2, + /// Context for sending the proof result. + proof_result_sender: ProofResultContext, + }, +} + +impl AccountMultiproofInput { + /// Returns the [`ProofResultContext`] for this input, consuming the input. + fn into_proof_result_sender(self) -> ProofResultContext { + match self { + Self::Legacy { proof_result_sender, .. } | Self::V2 { proof_result_sender, .. 
} => { + proof_result_sender + } + } + } } /// Parameters for building an account multiproof with pre-computed storage roots. diff --git a/crates/trie/parallel/src/stats.rs b/crates/trie/parallel/src/stats.rs index 088b95c9708..de5b0a628ef 100644 --- a/crates/trie/parallel/src/stats.rs +++ b/crates/trie/parallel/src/stats.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "metrics")] -use crate::proof_task_metrics::ProofTaskCursorMetricsCache; use derive_more::Deref; use reth_trie::stats::{TrieStats, TrieTracker}; @@ -36,9 +34,6 @@ pub struct ParallelTrieTracker { trie: TrieTracker, precomputed_storage_roots: u64, missed_leaves: u64, - #[cfg(feature = "metrics")] - /// Local tracking of cursor-related metrics - pub cursor_metrics: ProofTaskCursorMetricsCache, } impl ParallelTrieTracker { diff --git a/crates/trie/parallel/src/value_encoder.rs b/crates/trie/parallel/src/value_encoder.rs index 13c611922db..7b08d3e1b5e 100644 --- a/crates/trie/parallel/src/value_encoder.rs +++ b/crates/trie/parallel/src/value_encoder.rs @@ -86,7 +86,6 @@ pub(crate) struct AsyncAccountValueEncoder { impl AsyncAccountValueEncoder { /// Initializes a [`Self`] using a `ProofWorkerHandle` which will be used to calculate storage /// roots asynchronously. - #[expect(dead_code)] pub(crate) fn new( storage_work_tx: CrossbeamSender, dispatched: B256Map>, @@ -106,7 +105,6 @@ impl AsyncAccountValueEncoder { /// /// This method panics if any deferred encoders produced by [`Self::deferred_encoder`] have not /// been dropped. 
- #[expect(dead_code)] pub(crate) fn into_storage_proofs( self, ) -> Result>, StateProofError> { From 3e55c6ca6e95eb850daa782555b3328e163a5edb Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 22 Jan 2026 19:47:50 +0400 Subject: [PATCH 149/267] fix: always check upper subtrie for keys (#21276) Co-authored-by: Brian Picciano --- crates/trie/sparse-parallel/src/trie.rs | 153 ++++++++++++++++++++++-- 1 file changed, 146 insertions(+), 7 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index ea0c93b76b0..af24f510b3d 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -768,7 +768,17 @@ impl SparseTrieInterface for ParallelSparseTrie { } fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { - self.subtrie_for_path(full_path).and_then(|subtrie| subtrie.inner.values.get(full_path)) + // `subtrie_for_path` is intended for a node path, but here we are using a full key path. 
So + // we need to check if the subtrie that the key might belong to has any nodes; if not then + // the key's portion of the trie doesn't have enough depth to reach into the subtrie, and + // the key will be in the upper subtrie + if let Some(subtrie) = self.subtrie_for_path(full_path) && + !subtrie.is_empty() + { + return subtrie.inner.values.get(full_path); + } + + self.upper_subtrie.inner.values.get(full_path) } fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { @@ -2276,14 +2286,24 @@ impl SparseSubtrieInner { if let Some((hash, store_in_db_trie)) = hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) { + let rlp_node = RlpNode::word_rlp(&hash); + let node_type = + SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie) }; + + trace!( + target: "trie::parallel_sparse", + ?path, + ?node_type, + ?rlp_node, + "Adding node to RLP node stack (cached branch)" + ); + // If the node hash is already computed, and the node path is not in // the prefix set, return the pre-computed hash self.buffers.rlp_node_stack.push(RlpNodeStackItem { path, - rlp_node: RlpNode::word_rlp(&hash), - node_type: SparseNodeType::Branch { - store_in_db_trie: Some(store_in_db_trie), - }, + rlp_node, + node_type, }); return } @@ -2447,13 +2467,14 @@ impl SparseSubtrieInner { } }; - self.buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); trace!( target: "trie::parallel_sparse", ?path, ?node_type, - "Added node to RLP node stack" + ?rlp_node, + "Adding node to RLP node stack" ); + self.buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); } /// Clears the subtrie, keeping the data structures allocated. @@ -6968,4 +6989,122 @@ mod tests { assert_eq!(branch_0x3_update, &expected_branch); } + + #[test] + fn test_get_leaf_value_lower_subtrie() { + // This test demonstrates that get_leaf_value must look in the correct subtrie, + // not always in upper_subtrie. 
+ + let mut trie = ParallelSparseTrie::default(); + + // Create a leaf node with path >= 2 nibbles (will go to lower subtrie) + let leaf_path = Nibbles::from_nibbles([0x1, 0x2]); + let leaf_key = Nibbles::from_nibbles([0x3, 0x4]); + let leaf_node = create_leaf_node(leaf_key.to_vec(), 42); + + // Reveal the leaf node + trie.reveal_nodes(vec![ProofTrieNode { path: leaf_path, node: leaf_node, masks: None }]) + .unwrap(); + + // The full path is leaf_path + leaf_key + let full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + + // Verify the value is stored in the lower subtrie, not upper + let idx = path_subtrie_index_unchecked(&leaf_path); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); + assert!( + lower_subtrie.inner.values.contains_key(&full_path), + "value should be in lower subtrie" + ); + assert!( + !trie.upper_subtrie.inner.values.contains_key(&full_path), + "value should NOT be in upper subtrie" + ); + + // get_leaf_value should find the value + assert!( + trie.get_leaf_value(&full_path).is_some(), + "get_leaf_value should find the value in lower subtrie" + ); + } + + /// Test that `get_leaf_value` correctly returns values stored via `update_leaf` + /// when the leaf node ends up in the upper subtrie (depth < 2). + /// + /// This can happen when the trie is sparse and the leaf is inserted at the root level. + /// Previously, `get_leaf_value` only checked the lower subtrie based on the full path, + /// missing values stored in `upper_subtrie.inner.values`. 
+ #[test] + fn test_get_leaf_value_upper_subtrie_via_update_leaf() { + let provider = MockTrieNodeProvider::new(); + + // Create an empty trie with an empty root + let mut trie = ParallelSparseTrie::default() + .with_root(TrieNode::EmptyRoot, None, false) + .expect("root revealed"); + + // Create a full 64-nibble path (like a real account hash) + let full_path = pad_nibbles_right(Nibbles::from_nibbles([0x0, 0xA, 0xB, 0xC])); + let value = encode_account_value(42); + + // Insert the leaf - since the trie is empty, the leaf node will be created + // at the root level (depth 0), which is in the upper subtrie + trie.update_leaf(full_path, value.clone(), &provider).unwrap(); + + // Verify the value is stored in upper_subtrie (where update_leaf puts it) + assert!( + trie.upper_subtrie.inner.values.contains_key(&full_path), + "value should be in upper subtrie after update_leaf" + ); + + // Verify the value can be retrieved via get_leaf_value + // Before the fix, this would return None because get_leaf_value only + // checked the lower subtrie based on the path length + let retrieved = trie.get_leaf_value(&full_path); + assert_eq!(retrieved, Some(&value)); + } + + /// Test that `get_leaf_value` works for values in both upper and lower subtries. 
+ #[test] + fn test_get_leaf_value_upper_and_lower_subtries() { + let provider = MockTrieNodeProvider::new(); + + // Create an empty trie + let mut trie = ParallelSparseTrie::default() + .with_root(TrieNode::EmptyRoot, None, false) + .expect("root revealed"); + + // Insert first leaf - will be at root level (upper subtrie) + let path1 = pad_nibbles_right(Nibbles::from_nibbles([0x0, 0xA])); + let value1 = encode_account_value(1); + trie.update_leaf(path1, value1.clone(), &provider).unwrap(); + + // Insert second leaf with different prefix - creates a branch + let path2 = pad_nibbles_right(Nibbles::from_nibbles([0x1, 0xB])); + let value2 = encode_account_value(2); + trie.update_leaf(path2, value2.clone(), &provider).unwrap(); + + // Both values should be retrievable + assert_eq!(trie.get_leaf_value(&path1), Some(&value1)); + assert_eq!(trie.get_leaf_value(&path2), Some(&value2)); + } + + /// Test that `get_leaf_value` works for storage tries which are often very sparse. + #[test] + fn test_get_leaf_value_sparse_storage_trie() { + let provider = MockTrieNodeProvider::new(); + + // Simulate a storage trie with a single slot + let mut trie = ParallelSparseTrie::default() + .with_root(TrieNode::EmptyRoot, None, false) + .expect("root revealed"); + + // Single storage slot - leaf will be at root (depth 0) + let slot_path = pad_nibbles_right(Nibbles::from_nibbles([0x2, 0x9])); + let slot_value = alloy_rlp::encode(U256::from(12345)); + trie.update_leaf(slot_path, slot_value.clone(), &provider).unwrap(); + + // Value should be retrievable + assert_eq!(trie.get_leaf_value(&slot_path), Some(&slot_value)); + } } From a01ecce73f712729d3a10bd3f8178cd2eb8bb44f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 22 Jan 2026 16:55:36 +0100 Subject: [PATCH 150/267] test: add e2e tests for invalid payload handling via Engine API (#21288) --- .../node/tests/e2e/invalid_payload.rs | 357 ++++++++++++++++++ crates/ethereum/node/tests/e2e/main.rs | 1 + 2 files changed, 358 
insertions(+) create mode 100644 crates/ethereum/node/tests/e2e/invalid_payload.rs diff --git a/crates/ethereum/node/tests/e2e/invalid_payload.rs b/crates/ethereum/node/tests/e2e/invalid_payload.rs new file mode 100644 index 00000000000..03269e53b7b --- /dev/null +++ b/crates/ethereum/node/tests/e2e/invalid_payload.rs @@ -0,0 +1,357 @@ +//! Tests for handling invalid payloads via Engine API. +//! +//! This module tests the scenario where a node receives invalid payloads (e.g., with modified +//! state roots) before receiving valid ones, ensuring the node can recover and continue. + +use crate::utils::eth_payload_attributes; +use alloy_primitives::B256; +use alloy_rpc_types_engine::{ExecutionPayloadV3, PayloadStatusEnum}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::{setup_engine, transaction::TransactionTestContext}; +use reth_node_ethereum::EthereumNode; + +use reth_rpc_api::EngineApiClient; +use std::sync::Arc; + +/// Tests that a node can handle receiving an invalid payload (with wrong state root) +/// followed by the correct payload, and continue operating normally. 
+/// +/// Setup: +/// - Node 1: Produces valid payloads and advances the chain +/// - Node 2: Receives payloads from node 1, but we also inject modified payloads with invalid state +/// roots in between to verify error handling +#[tokio::test] +async fn can_handle_invalid_payload_then_valid() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::rng().random(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {seed:?}"); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = setup_engine::( + 2, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; + + let mut producer = nodes.pop().unwrap(); + let receiver = nodes.pop().unwrap(); + + // Get engine API client for the receiver node + let receiver_engine = receiver.auth_server_handle().http_client(); + + // Inject a transaction to allow block building (advance_block waits for transactions) + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; + producer.rpc.inject_tx(raw_tx).await?; + + // Build a valid payload on the producer + let payload = producer.advance_block().await?; + let valid_block = payload.block().clone(); + + // Create valid payload first, then corrupt the state root + let mut invalid_payload = ExecutionPayloadV3::from_block_unchecked( + valid_block.hash(), + &valid_block.clone().into_block(), + ); + let original_state_root = invalid_payload.payload_inner.payload_inner.state_root; + invalid_payload.payload_inner.payload_inner.state_root = B256::random_with(&mut rng); + + // Send the invalid payload to the receiver - should be rejected + let invalid_result = EngineApiClient::::new_payload_v3( + &receiver_engine, + invalid_payload.clone(), + vec![], + 
valid_block.header().parent_beacon_block_root.unwrap_or_default(), + ) + .await?; + + println!( + "Invalid payload response: {:?} (state_root changed from {original_state_root} to {})", + invalid_result.status, invalid_payload.payload_inner.payload_inner.state_root + ); + + // The invalid payload should be rejected + assert!( + matches!( + invalid_result.status, + PayloadStatusEnum::Invalid { .. } | PayloadStatusEnum::Syncing + ), + "Expected INVALID or SYNCING status for invalid payload, got {:?}", + invalid_result.status + ); + + // Now send the valid payload - should be accepted + let valid_payload = ExecutionPayloadV3::from_block_unchecked( + valid_block.hash(), + &valid_block.clone().into_block(), + ); + + let valid_result = EngineApiClient::::new_payload_v3( + &receiver_engine, + valid_payload, + vec![], + valid_block.header().parent_beacon_block_root.unwrap_or_default(), + ) + .await?; + + println!("Valid payload response: {:?}", valid_result.status); + + // The valid payload should be accepted + assert!( + matches!( + valid_result.status, + PayloadStatusEnum::Valid | PayloadStatusEnum::Syncing | PayloadStatusEnum::Accepted + ), + "Expected VALID/SYNCING/ACCEPTED status for valid payload, got {:?}", + valid_result.status + ); + + // Update forkchoice on receiver to the valid block + receiver.update_forkchoice(valid_block.hash(), valid_block.hash()).await?; + + // Verify the receiver node is at the expected block + let receiver_head = receiver.block_hash(1); + let producer_head = producer.block_hash(1); + assert_eq!( + receiver_head, producer_head, + "Receiver should have synced to the same chain as producer" + ); + + println!( + "Test passed: Receiver successfully handled invalid payloads and synced to valid chain" + ); + + Ok(()) +} + +/// Tests that a node can handle multiple consecutive invalid payloads +/// before receiving a valid one. 
+#[tokio::test] +async fn can_handle_multiple_invalid_payloads() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::rng().random(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {seed:?}"); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = setup_engine::( + 2, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; + + let mut producer = nodes.pop().unwrap(); + let receiver = nodes.pop().unwrap(); + + let receiver_engine = receiver.auth_server_handle().http_client(); + + // Inject a transaction to allow block building + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; + producer.rpc.inject_tx(raw_tx).await?; + + // Produce a valid block + let payload = producer.advance_block().await?; + let valid_block = payload.block().clone(); + + // Send multiple invalid payloads with different corruptions + for i in 0..3 { + // Create valid payload first, then corrupt the state root + let mut invalid_payload = ExecutionPayloadV3::from_block_unchecked( + valid_block.hash(), + &valid_block.clone().into_block(), + ); + invalid_payload.payload_inner.payload_inner.state_root = B256::random_with(&mut rng); + + let result = EngineApiClient::::new_payload_v3( + &receiver_engine, + invalid_payload, + vec![], + valid_block.header().parent_beacon_block_root.unwrap_or_default(), + ) + .await?; + + println!("Invalid payload {i}: status = {:?}", result.status); + + assert!( + matches!(result.status, PayloadStatusEnum::Invalid { .. 
} | PayloadStatusEnum::Syncing), + "Expected INVALID or SYNCING for invalid payload {i}, got {:?}", + result.status + ); + } + + // Now send the valid payload + let valid_payload = ExecutionPayloadV3::from_block_unchecked( + valid_block.hash(), + &valid_block.clone().into_block(), + ); + + let valid_result = EngineApiClient::::new_payload_v3( + &receiver_engine, + valid_payload, + vec![], + valid_block.header().parent_beacon_block_root.unwrap_or_default(), + ) + .await?; + + println!("Valid payload: status = {:?}", valid_result.status); + + assert!( + matches!( + valid_result.status, + PayloadStatusEnum::Valid | PayloadStatusEnum::Syncing | PayloadStatusEnum::Accepted + ), + "Expected valid status for correct payload, got {:?}", + valid_result.status + ); + + // Finalize the valid block + receiver.update_forkchoice(valid_block.hash(), valid_block.hash()).await?; + + println!("Test passed: Receiver handled multiple invalid payloads and accepted valid one"); + + Ok(()) +} + +/// Tests invalid payload handling with blocks that contain transactions. +/// +/// This test sends real transactions to node 1, produces blocks with those transactions, +/// then sends invalid (corrupted state root) and valid payloads to node 2. 
+#[tokio::test] +async fn can_handle_invalid_payload_with_transactions() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::rng().random(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {seed:?}"); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = setup_engine::( + 2, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; + + let mut producer = nodes.pop().unwrap(); + let receiver = nodes.pop().unwrap(); + + let receiver_engine = receiver.auth_server_handle().http_client(); + + // Create and send a transaction to the producer node + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; + let tx_hash = producer.rpc.inject_tx(raw_tx).await?; + println!("Injected transaction {tx_hash}"); + + // Build a block containing the transaction + let payload = producer.advance_block().await?; + let valid_block = payload.block().clone(); + + // Verify the block contains a transaction + let tx_count = valid_block.body().transactions().count(); + println!("Block contains {tx_count} transaction(s)"); + assert!(tx_count > 0, "Block should contain at least one transaction"); + + // Create invalid payload by corrupting the state root + let mut invalid_payload = ExecutionPayloadV3::from_block_unchecked( + valid_block.hash(), + &valid_block.clone().into_block(), + ); + let original_state_root = invalid_payload.payload_inner.payload_inner.state_root; + invalid_payload.payload_inner.payload_inner.state_root = B256::random_with(&mut rng); + + // Send invalid payload - should be rejected + let invalid_result = EngineApiClient::::new_payload_v3( + &receiver_engine, + invalid_payload.clone(), + vec![], + valid_block.header().parent_beacon_block_root.unwrap_or_default(), + ) + .await?; + + 
println!( + "Invalid payload (with tx) response: {:?} (state_root changed from {original_state_root} to {})", + invalid_result.status, + invalid_payload.payload_inner.payload_inner.state_root + ); + + assert!( + matches!( + invalid_result.status, + PayloadStatusEnum::Invalid { .. } | PayloadStatusEnum::Syncing + ), + "Expected INVALID or SYNCING for invalid payload with transactions, got {:?}", + invalid_result.status + ); + + // Send valid payload - should be accepted + let valid_payload = ExecutionPayloadV3::from_block_unchecked( + valid_block.hash(), + &valid_block.clone().into_block(), + ); + + let valid_result = EngineApiClient::::new_payload_v3( + &receiver_engine, + valid_payload, + vec![], + valid_block.header().parent_beacon_block_root.unwrap_or_default(), + ) + .await?; + + println!("Valid payload (with tx) response: {:?}", valid_result.status); + + assert!( + matches!( + valid_result.status, + PayloadStatusEnum::Valid | PayloadStatusEnum::Syncing | PayloadStatusEnum::Accepted + ), + "Expected valid status for correct payload with transactions, got {:?}", + valid_result.status + ); + + // Update forkchoice + receiver.update_forkchoice(valid_block.hash(), valid_block.hash()).await?; + + // Verify both nodes are at the same head + let receiver_head = receiver.block_hash(1); + let producer_head = producer.block_hash(1); + assert_eq!( + receiver_head, producer_head, + "Receiver should have synced to the same chain as producer" + ); + + println!("Test passed: Receiver handled invalid payloads with transactions correctly"); + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 9ed9c5b9a63..5960cd9c6f1 100644 --- a/crates/ethereum/node/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -4,6 +4,7 @@ mod blobs; mod custom_genesis; mod dev; mod eth; +mod invalid_payload; mod p2p; mod pool; mod prestate; From bf43ebaa2937a83db7a30066c366f4eda7f9db4e Mon Sep 17 00:00:00 2001 From: iPLAY888 
<133153661+letmehateu@users.noreply.github.com> Date: Thu, 22 Jan 2026 19:18:36 +0300 Subject: [PATCH 151/267] fix(cli): handle invalid hex in `db list --search` (#21315) --- crates/cli/commands/src/db/list.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 452fcf0a789..8e3db03fb72 100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -61,19 +61,21 @@ impl Command { } /// Generate [`ListFilter`] from command. - pub fn list_filter(&self) -> ListFilter { - let search = self - .search - .as_ref() - .map(|search| { + pub fn list_filter(&self) -> eyre::Result { + let search = match self.search.as_deref() { + Some(search) => { if let Some(search) = search.strip_prefix("0x") { - return hex::decode(search).unwrap() + hex::decode(search).wrap_err( + "Invalid hex content after 0x prefix in --search (expected valid hex like 0xdeadbeef).", + )? 
+ } else { + search.as_bytes().to_vec() } - search.as_bytes().to_vec() - }) - .unwrap_or_default(); + } + None => Vec::new(), + }; - ListFilter { + Ok(ListFilter { skip: self.skip, len: self.len, search, @@ -82,7 +84,7 @@ impl Command { min_value_size: self.min_value_size, reverse: self.reverse, only_count: self.count, - } + }) } } @@ -115,7 +117,7 @@ impl TableViewer<()> for ListTableViewer<'_, N> { } - let list_filter = self.args.list_filter(); + let list_filter = self.args.list_filter()?; if self.args.json || self.args.count { let (list, count) = self.tool.list::(&list_filter)?; From 247ce3c4e941ab986cb8d7180b517e1f0302ee60 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 22 Jan 2026 08:40:10 -0800 Subject: [PATCH 152/267] feat(storage): warn storage settings diff at startup (#21320) Co-authored-by: YK --- crates/storage/db-common/src/init.rs | 51 ++++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index c82025970b7..1af87fcb361 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -15,9 +15,10 @@ use reth_primitives_traits::{ use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, - HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown, - ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriteConfig, - StateWriter, StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter, + HashingWriter, HeaderProvider, HistoryWriter, MetadataProvider, MetadataWriter, + OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, + StateWriteConfig, StateWriter, StaticFileProviderFactory, StorageSettings, + StorageSettingsCache, TrieWriter, }; use 
reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -28,7 +29,7 @@ use reth_trie::{ use reth_trie_db::DatabaseStateRoot; use serde::{Deserialize, Serialize}; use std::io::BufRead; -use tracing::{debug, error, info, trace}; +use tracing::{debug, error, info, trace, warn}; /// Default soft limit for number of bytes to read from state dump file, before inserting into /// database. @@ -90,7 +91,8 @@ where + StaticFileProviderFactory> + ChainSpecProvider + StageCheckpointReader - + BlockHashReader + + BlockNumReader + + MetadataProvider + StorageSettingsCache, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter @@ -124,7 +126,8 @@ where + StaticFileProviderFactory> + ChainSpecProvider + StageCheckpointReader - + BlockHashReader + + BlockNumReader + + MetadataProvider + StorageSettingsCache, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter @@ -159,6 +162,16 @@ where return Err(InitStorageError::UninitializedDatabase) } + let stored = factory.storage_settings()?.unwrap_or(StorageSettings::legacy()); + if stored != genesis_storage_settings { + warn!( + target: "reth::storage", + ?stored, + requested = ?genesis_storage_settings, + "Storage settings mismatch detected" + ); + } + debug!("Genesis already written, skipping."); return Ok(hash) } @@ -897,4 +910,30 @@ mod tests { )], ); } + + #[test] + fn warn_storage_settings_mismatch() { + let factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + init_genesis_with_settings(&factory, StorageSettings::legacy()).unwrap(); + + // Request different settings - should warn but succeed + let result = init_genesis_with_settings( + &factory, + StorageSettings::legacy().with_receipts_in_static_files(true), + ); + + // Should succeed (warning is logged, not an error) + assert!(result.is_ok()); + } + + #[test] + fn allow_same_storage_settings() { + let factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + let settings = 
StorageSettings::legacy().with_receipts_in_static_files(true); + init_genesis_with_settings(&factory, settings).unwrap(); + + let result = init_genesis_with_settings(&factory, settings); + + assert!(result.is_ok()); + } } From da12451c9cb1aa246932fd0b15c991cf7349e0f7 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 22 Jan 2026 17:57:46 +0100 Subject: [PATCH 153/267] chore(trie): Cleanup unused trie changesets code (#21323) --- .../src/segments/user/merkle_change_sets.rs | 119 --- .../stages/src/stages/merkle_changesets.rs | 449 --------- crates/stages/types/src/id.rs | 6 + crates/storage/db-api/src/models/accounts.rs | 70 +- crates/storage/db-api/src/models/mod.rs | 5 +- crates/storage/db-api/src/tables/mod.rs | 22 +- .../storage/db/src/implementation/mdbx/mod.rs | 69 ++ crates/storage/db/src/mdbx.rs | 26 + .../src/providers/database/provider.rs | 944 +----------------- crates/storage/storage-api/src/trie.rs | 49 +- crates/trie/common/src/lib.rs | 2 +- crates/trie/common/src/storage.rs | 178 ---- docs/crates/db.md | 2 - docs/crates/stages.md | 7 - docs/vocs/docs/pages/run/configuration.mdx | 5 - 15 files changed, 114 insertions(+), 1839 deletions(-) delete mode 100644 crates/prune/prune/src/segments/user/merkle_change_sets.rs delete mode 100644 crates/stages/stages/src/stages/merkle_changesets.rs diff --git a/crates/prune/prune/src/segments/user/merkle_change_sets.rs b/crates/prune/prune/src/segments/user/merkle_change_sets.rs deleted file mode 100644 index c02d752fcda..00000000000 --- a/crates/prune/prune/src/segments/user/merkle_change_sets.rs +++ /dev/null @@ -1,119 +0,0 @@ -use crate::{ - db_ext::DbTxPruneExt, - segments::{PruneInput, Segment}, - PrunerError, -}; -use alloy_primitives::B256; -use reth_db_api::{models::BlockNumberHashedAddress, table::Value, tables, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - errors::provider::ProviderResult, BlockReader, ChainStateBlockReader, DBProvider, - 
NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, -}; -use reth_prune_types::{ - PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, -}; -use reth_stages_types::StageId; -use tracing::{instrument, trace}; - -#[derive(Debug)] -pub struct MerkleChangeSets { - mode: PruneMode, -} - -impl MerkleChangeSets { - pub const fn new(mode: PruneMode) -> Self { - Self { mode } - } -} - -impl Segment for MerkleChangeSets -where - Provider: DBProvider - + PruneCheckpointWriter - + TransactionsProvider - + BlockReader - + ChainStateBlockReader - + NodePrimitivesProvider>, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::MerkleChangeSets - } - - fn mode(&self) -> Option { - Some(self.mode) - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::User - } - - fn required_stage(&self) -> Option { - Some(StageId::MerkleChangeSets) - } - - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - let Some(block_range) = input.get_next_block_range() else { - trace!(target: "pruner", "No change sets to prune"); - return Ok(SegmentOutput::done()) - }; - - let block_range_end = *block_range.end(); - let mut limiter = input.limiter; - - // Create range for StoragesTrieChangeSets which uses BlockNumberHashedAddress as key - let storage_range_start: BlockNumberHashedAddress = - (*block_range.start(), B256::ZERO).into(); - let storage_range_end: BlockNumberHashedAddress = - (*block_range.end() + 1, B256::ZERO).into(); - let storage_range = storage_range_start..storage_range_end; - - let mut last_storages_pruned_block = None; - let (storages_pruned, done) = - provider.tx_ref().prune_dupsort_table_with_range::( - storage_range, - &mut limiter, - |(BlockNumberHashedAddress((block_number, _)), _)| { - last_storages_pruned_block = Some(block_number); - }, - )?; - - trace!(target: "pruner", %storages_pruned, %done, "Pruned storages 
change sets"); - - let mut last_accounts_pruned_block = block_range_end; - let last_storages_pruned_block = last_storages_pruned_block - // If there's more storage changesets to prune, set the checkpoint block number to - // previous, so we could finish pruning its storage changesets on the next run. - .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) - .unwrap_or(block_range_end); - - let (accounts_pruned, done) = - provider.tx_ref().prune_dupsort_table_with_range::( - block_range, - &mut limiter, - |row| last_accounts_pruned_block = row.0, - )?; - - trace!(target: "pruner", %accounts_pruned, %done, "Pruned accounts change sets"); - - let progress = limiter.progress(done); - - Ok(SegmentOutput { - progress, - pruned: accounts_pruned + storages_pruned, - checkpoint: Some(SegmentOutputCheckpoint { - block_number: Some(last_storages_pruned_block.min(last_accounts_pruned_block)), - tx_number: None, - }), - }) - } - - fn save_checkpoint( - &self, - provider: &Provider, - checkpoint: PruneCheckpoint, - ) -> ProviderResult<()> { - provider.save_prune_checkpoint(PruneSegment::MerkleChangeSets, checkpoint) - } -} diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs deleted file mode 100644 index c4345fedb99..00000000000 --- a/crates/stages/stages/src/stages/merkle_changesets.rs +++ /dev/null @@ -1,449 +0,0 @@ -use crate::stages::merkle::INVALID_STATE_ROOT_ERROR_MESSAGE; -use alloy_consensus::BlockHeader; -use alloy_primitives::BlockNumber; -use reth_consensus::ConsensusError; -use reth_primitives_traits::{GotExpected, SealedHeader}; -use reth_provider::{ - BlockNumReader, ChainStateBlockReader, ChangeSetReader, DBProvider, HeaderProvider, - ProviderError, PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader, - StageCheckpointWriter, StorageChangeSetReader, TrieWriter, -}; -use reth_prune_types::{ - PruneCheckpoint, PruneMode, PruneSegment, 
MERKLE_CHANGESETS_RETENTION_BLOCKS, -}; -use reth_stages_api::{ - BlockErrorKind, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, - UnwindInput, UnwindOutput, -}; -use reth_trie::{ - updates::TrieUpdates, HashedPostStateSorted, KeccakKeyHasher, StateRoot, TrieInputSorted, -}; -use reth_trie_db::{DatabaseHashedPostState, DatabaseStateRoot}; -use std::{ops::Range, sync::Arc}; -use tracing::{debug, error}; - -/// The `MerkleChangeSets` stage. -/// -/// This stage processes and maintains trie changesets from the finalized block to the latest block. -#[derive(Debug, Clone)] -pub struct MerkleChangeSets { - /// The number of blocks to retain changesets for, used as a fallback when the finalized block - /// is not found. Defaults to [`MERKLE_CHANGESETS_RETENTION_BLOCKS`] (2 epochs in beacon - /// chain). - retention_blocks: u64, -} - -impl MerkleChangeSets { - /// Creates a new `MerkleChangeSets` stage with the default retention blocks. - pub const fn new() -> Self { - Self { retention_blocks: MERKLE_CHANGESETS_RETENTION_BLOCKS } - } - - /// Creates a new `MerkleChangeSets` stage with a custom finalized block height. - pub const fn with_retention_blocks(retention_blocks: u64) -> Self { - Self { retention_blocks } - } - - /// Returns the range of blocks which are already computed. Will return an empty range if none - /// have been computed. - fn computed_range( - provider: &Provider, - checkpoint: Option, - ) -> Result, StageError> - where - Provider: PruneCheckpointReader, - { - let to = checkpoint.map(|chk| chk.block_number).unwrap_or_default(); - - // Get the prune checkpoint for MerkleChangeSets to use as the lower bound. If there's no - // prune checkpoint or if the pruned block number is None, return empty range - let Some(from) = provider - .get_prune_checkpoint(PruneSegment::MerkleChangeSets)? 
- .and_then(|chk| chk.block_number) - // prune checkpoint indicates the last block pruned, so the block after is the start of - // the computed data - .map(|block_number| block_number + 1) - else { - return Ok(0..0) - }; - - Ok(from..to + 1) - } - - /// Determines the target range for changeset computation based on the checkpoint and provider - /// state. - /// - /// Returns the target range (exclusive end) to compute changesets for. - fn determine_target_range( - &self, - provider: &Provider, - ) -> Result, StageError> - where - Provider: StageCheckpointReader + ChainStateBlockReader, - { - // Get merkle checkpoint which represents our target end block - let merkle_checkpoint = provider - .get_stage_checkpoint(StageId::MerkleExecute)? - .map(|checkpoint| checkpoint.block_number) - .unwrap_or(0); - - let target_end = merkle_checkpoint + 1; // exclusive - - // Calculate the target range based on the finalized block and the target block. - // We maintain changesets from the finalized block to the latest block. - let finalized_block = provider.last_finalized_block_number()?; - - // Calculate the fallback start position based on retention blocks - let retention_based_start = merkle_checkpoint.saturating_sub(self.retention_blocks); - - // If the finalized block was way in the past then we don't want to generate changesets for - // all of those past blocks; we only care about the recent history. - // - // Use maximum of finalized_block and retention_based_start if finalized_block exists, - // otherwise just use retention_based_start. - let mut target_start = finalized_block - .map(|finalized| finalized.saturating_add(1).max(retention_based_start)) - .unwrap_or(retention_based_start); - - // We cannot revert the genesis block; target_start must be >0 - target_start = target_start.max(1); - - Ok(target_start..target_end) - } - - /// Calculates the trie updates given a [`TrieInputSorted`], asserting that the resulting state - /// root matches the expected one for the block. 
- fn calculate_block_trie_updates( - provider: &Provider, - block_number: BlockNumber, - input: TrieInputSorted, - ) -> Result { - let (root, trie_updates) = - StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input).map_err( - |e| { - error!( - target: "sync::stages::merkle_changesets", - %e, - ?block_number, - "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); - StageError::Fatal(Box::new(e)) - }, - )?; - - let block = provider - .header_by_number(block_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; - - let (got, expected) = (root, block.state_root()); - if got != expected { - // Only seal the header when we need it for the error - let header = SealedHeader::seal_slow(block); - error!( - target: "sync::stages::merkle_changesets", - ?block_number, - ?got, - ?expected, - "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}", - ); - return Err(StageError::Block { - error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( - GotExpected { got, expected }.into(), - )), - block: Box::new(header.block_with_parent()), - }) - } - - Ok(trie_updates) - } - - fn populate_range( - provider: &Provider, - target_range: Range, - ) -> Result<(), StageError> - where - Provider: StageCheckpointReader - + TrieWriter - + DBProvider - + HeaderProvider - + ChainStateBlockReader - + BlockNumReader - + ChangeSetReader - + StorageChangeSetReader, - { - let target_start = target_range.start; - let target_end = target_range.end; - debug!( - target: "sync::stages::merkle_changesets", - ?target_range, - "Starting trie changeset computation", - ); - - // We need to distinguish a cumulative revert and a per-block revert. A cumulative revert - // reverts changes starting at db tip all the way to a block. A per-block revert only - // reverts a block's changes. - // - // We need to calculate the cumulative HashedPostState reverts for every block in the - // target range. 
The cumulative HashedPostState revert for block N can be calculated as: - // - // - // ``` - // // where `extend` overwrites any shared keys - // cumulative_state_revert(N) = cumulative_state_revert(N + 1).extend(get_block_state_revert(N)) - // ``` - // - // We need per-block reverts to calculate the prefix set for each individual block. By - // using the per-block reverts to calculate cumulative reverts on-the-fly we can save a - // bunch of memory. - debug!( - target: "sync::stages::merkle_changesets", - ?target_range, - "Computing per-block state reverts", - ); - let range_len = target_end - target_start; - let mut per_block_state_reverts = Vec::with_capacity(range_len as usize); - for block_number in target_range.clone() { - per_block_state_reverts.push(HashedPostStateSorted::from_reverts::( - provider, - block_number..=block_number, - )?); - } - - // Helper to retrieve state revert data for a specific block from the pre-computed array - let get_block_state_revert = |block_number: BlockNumber| -> &HashedPostStateSorted { - let index = (block_number - target_start) as usize; - &per_block_state_reverts[index] - }; - - // Helper to accumulate state reverts from a given block to the target end - let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostStateSorted { - let mut cumulative_revert = HashedPostStateSorted::default(); - for n in (block_number..target_end).rev() { - cumulative_revert.extend_ref_and_sort(get_block_state_revert(n)) - } - cumulative_revert - }; - - // To calculate the changeset for a block, we first need the TrieUpdates which are - // generated as a result of processing the block. To get these we need: - // 1) The TrieUpdates which revert the db's trie to _prior_ to the block - // 2) The HashedPostStateSorted to revert the db's state to _after_ the block - // - // To get (1) for `target_start` we need to do a big state root calculation which takes - // into account all changes between that block and db tip. 
For each block after the - // `target_start` we can update (1) using the TrieUpdates which were output by the previous - // block, only targeting the state changes of that block. - debug!( - target: "sync::stages::merkle_changesets", - ?target_start, - "Computing trie state at starting block", - ); - let initial_state = compute_cumulative_state_revert(target_start); - let initial_prefix_sets = initial_state.construct_prefix_sets(); - let initial_input = - TrieInputSorted::new(Arc::default(), Arc::new(initial_state), initial_prefix_sets); - // target_start will be >= 1, see `determine_target_range`. - let mut nodes = Arc::new( - Self::calculate_block_trie_updates(provider, target_start - 1, initial_input)? - .into_sorted(), - ); - - for block_number in target_range { - debug!( - target: "sync::stages::merkle_changesets", - ?block_number, - "Computing trie updates for block", - ); - // Revert the state so that this block has been just processed, meaning we take the - // cumulative revert of the subsequent block. - let state = Arc::new(compute_cumulative_state_revert(block_number + 1)); - - // Construct prefix sets from only this block's `HashedPostStateSorted`, because we only - // care about trie updates which occurred as a result of this block being processed. - let prefix_sets = get_block_state_revert(block_number).construct_prefix_sets(); - - let input = TrieInputSorted::new(Arc::clone(&nodes), state, prefix_sets); - - // Calculate the trie updates for this block, then apply those updates to the reverts. - // We calculate the overlay which will be passed into the next step using the trie - // reverts prior to them being updated. 
- let this_trie_updates = - Self::calculate_block_trie_updates(provider, block_number, input)?.into_sorted(); - - let trie_overlay = Arc::clone(&nodes); - let mut nodes_mut = Arc::unwrap_or_clone(nodes); - nodes_mut.extend_ref_and_sort(&this_trie_updates); - nodes = Arc::new(nodes_mut); - - // Write the changesets to the DB using the trie updates produced by the block, and the - // trie reverts as the overlay. - debug!( - target: "sync::stages::merkle_changesets", - ?block_number, - "Writing trie changesets for block", - ); - provider.write_trie_changesets( - block_number, - &this_trie_updates, - Some(&trie_overlay), - )?; - } - - Ok(()) - } -} - -impl Default for MerkleChangeSets { - fn default() -> Self { - Self::new() - } -} - -impl Stage for MerkleChangeSets -where - Provider: StageCheckpointReader - + TrieWriter - + DBProvider - + HeaderProvider - + ChainStateBlockReader - + StageCheckpointWriter - + PruneCheckpointReader - + PruneCheckpointWriter - + ChangeSetReader - + StorageChangeSetReader - + BlockNumReader, -{ - fn id(&self) -> StageId { - StageId::MerkleChangeSets - } - - fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { - // Get merkle checkpoint and assert that the target is the same. - let merkle_checkpoint = provider - .get_stage_checkpoint(StageId::MerkleExecute)? - .map(|checkpoint| checkpoint.block_number) - .unwrap_or(0); - - if input.target.is_none_or(|target| merkle_checkpoint != target) { - return Err(StageError::Fatal(eyre::eyre!("Cannot sync stage to block {:?} when MerkleExecute is at block {merkle_checkpoint:?}", input.target).into())) - } - - let mut target_range = self.determine_target_range(provider)?; - - // Get the previously computed range. This will be updated to reflect the populating of the - // target range. 
- let mut computed_range = Self::computed_range(provider, input.checkpoint)?; - debug!( - target: "sync::stages::merkle_changesets", - ?computed_range, - ?target_range, - "Got computed and target ranges", - ); - - // We want the target range to not include any data already computed previously, if - // possible, so we start the target range from the end of the computed range if that is - // greater. - // - // ------------------------------> Block # - // |------computed-----| - // |-----target-----| - // |--actual--| - // - // However, if the target start is less than the previously computed start, we don't want to - // do this, as it would leave a gap of data at `target_range.start..=computed_range.start`. - // - // ------------------------------> Block # - // |---computed---| - // |-------target-------| - // |-------actual-------| - // - if target_range.start >= computed_range.start { - target_range.start = target_range.start.max(computed_range.end); - } - - // If target range is empty (target_start >= target_end), stage is already successfully - // executed. - if target_range.start >= target_range.end { - return Ok(ExecOutput::done(StageCheckpoint::new(target_range.end.saturating_sub(1)))); - } - - // If our target range is a continuation of the already computed range then we can keep the - // already computed data. - if target_range.start == computed_range.end { - // Clear from target_start onwards to ensure no stale data exists - provider.clear_trie_changesets_from(target_range.start)?; - computed_range.end = target_range.end; - } else { - // If our target range is not a continuation of the already computed range then we - // simply clear the computed data, to make sure there's no gaps or conflicts. 
- provider.clear_trie_changesets()?; - computed_range = target_range.clone(); - } - - // Populate the target range with changesets - Self::populate_range(provider, target_range)?; - - // Update the prune checkpoint to reflect that all data before `computed_range.start` - // is not available. - provider.save_prune_checkpoint( - PruneSegment::MerkleChangeSets, - PruneCheckpoint { - block_number: Some(computed_range.start.saturating_sub(1)), - tx_number: None, - prune_mode: PruneMode::Before(computed_range.start), - }, - )?; - - // `computed_range.end` is exclusive. - let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); - - Ok(ExecOutput::done(checkpoint)) - } - - fn unwind( - &mut self, - provider: &Provider, - input: UnwindInput, - ) -> Result { - // Unwinding is trivial; just clear everything after the target block. - provider.clear_trie_changesets_from(input.unwind_to + 1)?; - - let mut computed_range = Self::computed_range(provider, Some(input.checkpoint))?; - computed_range.end = input.unwind_to + 1; - if computed_range.start > computed_range.end { - computed_range.start = computed_range.end; - } - - // If we've unwound so far that there are no longer enough trie changesets available then - // simply clear them and the checkpoints, so that on next pipeline startup they will be - // regenerated. - // - // We don't do this check if the target block is not greater than the retention threshold - // (which happens near genesis), as in that case would could still have all possible - // changesets even if the total count doesn't meet the threshold. 
- debug!( - target: "sync::stages::merkle_changesets", - ?computed_range, - retention_blocks=?self.retention_blocks, - "Checking if computed range is over retention threshold", - ); - if input.unwind_to > self.retention_blocks && - computed_range.end - computed_range.start < self.retention_blocks - { - debug!( - target: "sync::stages::merkle_changesets", - ?computed_range, - retention_blocks=?self.retention_blocks, - "Clearing checkpoints completely", - ); - provider.clear_trie_changesets()?; - provider - .save_stage_checkpoint(StageId::MerkleChangeSets, StageCheckpoint::default())?; - return Ok(UnwindOutput { checkpoint: StageCheckpoint::default() }) - } - - // `computed_range.end` is exclusive - let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); - - Ok(UnwindOutput { checkpoint }) - } -} diff --git a/crates/stages/types/src/id.rs b/crates/stages/types/src/id.rs index 78d7e0ec1b6..40f0eb066b7 100644 --- a/crates/stages/types/src/id.rs +++ b/crates/stages/types/src/id.rs @@ -12,6 +12,10 @@ pub enum StageId { note = "Static Files are generated outside of the pipeline and do not require a separate stage" )] StaticFile, + #[deprecated( + note = "MerkleChangeSets stage has been removed; kept for DB checkpoint compatibility" + )] + MerkleChangeSets, Era, Headers, Bodies, @@ -75,6 +79,8 @@ impl StageId { match self { #[expect(deprecated)] Self::StaticFile => "StaticFile", + #[expect(deprecated)] + Self::MerkleChangeSets => "MerkleChangeSets", Self::Era => "Era", Self::Headers => "Headers", Self::Bodies => "Bodies", diff --git a/crates/storage/db-api/src/models/accounts.rs b/crates/storage/db-api/src/models/accounts.rs index 41a11e1c7e5..0d0a9ee8a85 100644 --- a/crates/storage/db-api/src/models/accounts.rs +++ b/crates/storage/db-api/src/models/accounts.rs @@ -5,7 +5,7 @@ use crate::{ table::{Decode, Encode}, DatabaseError, }; -use alloy_primitives::{Address, BlockNumber, StorageKey, B256}; +use alloy_primitives::{Address, BlockNumber, 
StorageKey}; use serde::{Deserialize, Serialize}; use std::ops::{Bound, Range, RangeBounds, RangeInclusive}; @@ -108,43 +108,6 @@ impl> From for BlockNumberAddressRange { } } -/// [`BlockNumber`] concatenated with [`B256`] (hashed address). -/// -/// Since it's used as a key, it isn't compressed when encoding it. -#[derive( - Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Ord, PartialOrd, Hash, -)] -pub struct BlockNumberHashedAddress(pub (BlockNumber, B256)); - -impl From<(BlockNumber, B256)> for BlockNumberHashedAddress { - fn from(tpl: (BlockNumber, B256)) -> Self { - Self(tpl) - } -} - -impl Encode for BlockNumberHashedAddress { - type Encoded = [u8; 40]; - - fn encode(self) -> Self::Encoded { - let block_number = self.0 .0; - let hashed_address = self.0 .1; - - let mut buf = [0u8; 40]; - - buf[..8].copy_from_slice(&block_number.to_be_bytes()); - buf[8..].copy_from_slice(hashed_address.as_slice()); - buf - } -} - -impl Decode for BlockNumberHashedAddress { - fn decode(value: &[u8]) -> Result { - let num = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?); - let hash = B256::from_slice(&value[8..]); - Ok(Self((num, hash))) - } -} - /// [`Address`] concatenated with [`StorageKey`]. Used by `reth_etl` and history stages. /// /// Since it's used as a key, it isn't compressed when encoding it. 
@@ -176,11 +139,7 @@ impl Decode for AddressStorageKey { } } -impl_fixed_arbitrary!( - (BlockNumberAddress, 28), - (BlockNumberHashedAddress, 40), - (AddressStorageKey, 52) -); +impl_fixed_arbitrary!((BlockNumberAddress, 28), (AddressStorageKey, 52)); #[cfg(test)] mod tests { @@ -213,31 +172,6 @@ mod tests { assert_eq!(bytes, Encode::encode(key)); } - #[test] - fn test_block_number_hashed_address() { - let num = 1u64; - let hash = B256::from_slice(&[0xba; 32]); - let key = BlockNumberHashedAddress((num, hash)); - - let mut bytes = [0u8; 40]; - bytes[..8].copy_from_slice(&num.to_be_bytes()); - bytes[8..].copy_from_slice(hash.as_slice()); - - let encoded = Encode::encode(key); - assert_eq!(encoded, bytes); - - let decoded: BlockNumberHashedAddress = Decode::decode(&encoded).unwrap(); - assert_eq!(decoded, key); - } - - #[test] - fn test_block_number_hashed_address_rand() { - let mut bytes = [0u8; 40]; - rng().fill(bytes.as_mut_slice()); - let key = BlockNumberHashedAddress::arbitrary(&mut Unstructured::new(&bytes)).unwrap(); - assert_eq!(bytes, Encode::encode(key)); - } - #[test] fn test_address_storage_key() { let storage_key = StorageKey::random(); diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 67bc3ea0d14..0b6a12f011c 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -12,9 +12,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType}; use reth_primitives_traits::{Account, Bytecode, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_common::{ - StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, *, -}; +use reth_trie_common::{StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, *}; use serde::{Deserialize, Serialize}; pub mod accounts; @@ -220,7 +218,6 @@ impl_compression_for_compact!( TxType, StorageEntry, BranchNodeCompact, - TrieChangeSetsEntry, 
StoredNibbles, StoredNibblesSubKey, StorageTrieEntry, diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index 903d4ca7620..3fcf5014570 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -21,8 +21,8 @@ use crate::{ accounts::BlockNumberAddress, blocks::{HeaderHash, StoredBlockOmmers}, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, BlockNumberHashedAddress, ClientVersion, CompactU256, IntegerList, - ShardedKey, StoredBlockBodyIndices, StoredBlockWithdrawals, + AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey, + StoredBlockBodyIndices, StoredBlockWithdrawals, }, table::{Decode, DupSort, Encode, Table, TableInfo}, }; @@ -32,9 +32,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_primitives_traits::{Account, Bytecode, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_common::{ - BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, -}; +use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}; use serde::{Deserialize, Serialize}; use std::fmt; @@ -492,20 +490,6 @@ tables! { type SubKey = StoredNibblesSubKey; } - /// Stores the state of a node in the accounts trie prior to a particular block being executed. - table AccountsTrieChangeSets { - type Key = BlockNumber; - type Value = TrieChangeSetsEntry; - type SubKey = StoredNibblesSubKey; - } - - /// Stores the state of a node in a storage trie prior to a particular block being executed. - table StoragesTrieChangeSets { - type Key = BlockNumberHashedAddress; - type Value = TrieChangeSetsEntry; - type SubKey = StoredNibblesSubKey; - } - /// Stores the transaction sender for each canonical transaction. 
/// It is needed to speed up execution stage and allows fetching signer without doing /// transaction signed recovery diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 1f51e9b49d2..07b09b3ef50 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -560,6 +560,35 @@ impl DatabaseEnv { Ok(handles) } + /// Drops an orphaned table by name. + /// + /// This is used to clean up tables that are no longer defined in the schema but may still + /// exist on disk from previous versions. + /// + /// Returns `Ok(true)` if the table existed and was dropped, `Ok(false)` if the table was not + /// found. + /// + /// # Safety + /// This permanently deletes the table and all its data. Only use for tables that are + /// confirmed to be obsolete. + pub fn drop_orphan_table(&self, name: &str) -> Result { + let tx = self.inner.begin_rw_txn().map_err(|e| DatabaseError::InitTx(e.into()))?; + + match tx.open_db(Some(name)) { + Ok(db) => { + // SAFETY: We just opened the db handle and will commit immediately after dropping. + // No other cursors or handles exist for this table. + unsafe { + tx.drop_db(db.dbi()).map_err(|e| DatabaseError::Delete(e.into()))?; + } + tx.commit().map_err(|e| DatabaseError::Commit(e.into()))?; + Ok(true) + } + Err(reth_libmdbx::Error::NotFound) => Ok(false), + Err(e) => Err(DatabaseError::Open(e.into())), + } + } + /// Records version that accesses the database with write privileges. 
pub fn record_client_version(&self, version: ClientVersion) -> Result<(), DatabaseError> { if version.is_empty() { @@ -646,6 +675,46 @@ mod tests { create_test_db(DatabaseEnvKind::RW); } + #[test] + fn db_drop_orphan_table() { + let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).keep(); + let db = create_test_db_with_path(DatabaseEnvKind::RW, &path); + + // Create an orphan table by manually creating it + let orphan_table_name = "OrphanTestTable"; + { + let tx = db.inner.begin_rw_txn().expect(ERROR_INIT_TX); + tx.create_db(Some(orphan_table_name), DatabaseFlags::empty()) + .expect("Failed to create orphan table"); + tx.commit().expect(ERROR_COMMIT); + } + + // Verify the table exists by opening it + { + let tx = db.inner.begin_ro_txn().expect(ERROR_INIT_TX); + assert!(tx.open_db(Some(orphan_table_name)).is_ok(), "Orphan table should exist"); + } + + // Drop the orphan table + let result = db.drop_orphan_table(orphan_table_name); + assert!(result.is_ok(), "drop_orphan_table should succeed"); + assert!(result.unwrap(), "drop_orphan_table should return true for existing table"); + + // Verify the table no longer exists + { + let tx = db.inner.begin_ro_txn().expect(ERROR_INIT_TX); + assert!( + tx.open_db(Some(orphan_table_name)).is_err(), + "Orphan table should no longer exist" + ); + } + + // Dropping a non-existent table should return Ok(false) + let result = db.drop_orphan_table("NonExistentTable"); + assert!(result.is_ok(), "drop_orphan_table should succeed for non-existent table"); + assert!(!result.unwrap(), "drop_orphan_table should return false for non-existent table"); + } + #[test] fn db_manual_put_get() { let env = create_test_db(DatabaseEnvKind::RW); diff --git a/crates/storage/db/src/mdbx.rs b/crates/storage/db/src/mdbx.rs index fb0fd8501e3..db650a68e46 100644 --- a/crates/storage/db/src/mdbx.rs +++ b/crates/storage/db/src/mdbx.rs @@ -2,11 +2,16 @@ use crate::{is_database_empty, TableSet, Tables}; use eyre::Context; +use reth_tracing::tracing::info; 
use std::path::Path; pub use crate::implementation::mdbx::*; pub use reth_libmdbx::*; +/// Tables that have been removed from the schema but may still exist on disk from previous +/// versions. These will be dropped during database initialization. +const ORPHAN_TABLES: &[&str] = &["AccountsTrieChangeSets", "StoragesTrieChangeSets"]; + /// Creates a new database at the specified path if it doesn't exist. Does NOT create tables. Check /// [`init_db`]. pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Result { @@ -44,9 +49,30 @@ pub fn init_db_for, TS: TableSet>( let mut db = create_db(path, args)?; db.create_and_track_tables_for::()?; db.record_client_version(client_version)?; + drop_orphan_tables(&db); Ok(db) } +/// Drops orphaned tables that are no longer part of the schema. +fn drop_orphan_tables(db: &DatabaseEnv) { + for table_name in ORPHAN_TABLES { + match db.drop_orphan_table(table_name) { + Ok(true) => { + info!(target: "reth::db", table = %table_name, "Dropped orphaned database table"); + } + Ok(false) => {} + Err(e) => { + reth_tracing::tracing::warn!( + target: "reth::db", + table = %table_name, + %e, + "Failed to drop orphaned database table" + ); + } + } + } +} + /// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing. 
pub fn open_db_read_only( path: impl AsRef, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 39f1e35473d..940424da847 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -40,8 +40,7 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - BlockNumberHashedAddress, ShardedKey, StorageBeforeTx, StorageSettings, - StoredBlockBodyIndices, + ShardedKey, StorageBeforeTx, StorageSettings, StoredBlockBodyIndices, }, table::Table, tables, @@ -65,12 +64,10 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::{ProviderResult, StaticFileWriterError}; use reth_trie::{ - changesets::storage_trie_wiped_changeset_iter, - trie_cursor::{InMemoryTrieCursor, TrieCursor, TrieCursorIter, TrieStorageCursor}, updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, - HashedPostStateSorted, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, + HashedPostStateSorted, StoredNibbles, }; -use reth_trie_db::{ChangesetCache, DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}; +use reth_trie_db::{ChangesetCache, DatabaseStorageTrieCursor}; use revm_database::states::{ PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset, }; @@ -78,7 +75,7 @@ use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt::Debug, - ops::{Deref, DerefMut, Range, RangeBounds, RangeFrom, RangeInclusive}, + ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::Arc, thread, time::Instant, @@ -792,9 +789,6 @@ impl DatabaseProvider TrieWriter for DatabaseProvider Ok(num_entries) } - - /// Records the current values of all trie nodes which will be updated using the `TrieUpdates` - /// into the trie changesets tables. 
- /// - /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with - /// the same `TrieUpdates`. - /// - /// Returns the number of keys written. - #[instrument(level = "debug", target = "providers::db", skip_all)] - fn write_trie_changesets( - &self, - block_number: BlockNumber, - trie_updates: &TrieUpdatesSorted, - updates_overlay: Option<&TrieUpdatesSorted>, - ) -> ProviderResult { - let mut num_entries = 0; - - let mut changeset_cursor = - self.tx_ref().cursor_dup_write::()?; - let curr_values_cursor = self.tx_ref().cursor_read::()?; - - // Wrap the cursor in DatabaseAccountTrieCursor - let mut db_account_cursor = DatabaseAccountTrieCursor::new(curr_values_cursor); - - // Create empty TrieUpdatesSorted for when updates_overlay is None - let empty_updates = TrieUpdatesSorted::default(); - let overlay = updates_overlay.unwrap_or(&empty_updates); - - // Wrap the cursor in InMemoryTrieCursor with the overlay - let mut in_memory_account_cursor = - InMemoryTrieCursor::new_account(&mut db_account_cursor, overlay); - - for (path, _) in trie_updates.account_nodes_ref() { - num_entries += 1; - let node = in_memory_account_cursor.seek_exact(*path)?.map(|(_, node)| node); - changeset_cursor.append_dup( - block_number, - TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(*path), node }, - )?; - } - - let mut storage_updates = trie_updates.storage_tries_ref().iter().collect::>(); - storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); - - num_entries += self.write_storage_trie_changesets( - block_number, - storage_updates.into_iter(), - updates_overlay, - )?; - - Ok(num_entries) - } - - fn clear_trie_changesets(&self) -> ProviderResult<()> { - let tx = self.tx_ref(); - tx.clear::()?; - tx.clear::()?; - Ok(()) - } - - fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()> { - let tx = self.tx_ref(); - { - let range = from..; - let mut cursor = tx.cursor_dup_write::()?; - let mut walker = cursor.walk_range(range)?; - 
- while walker.next().transpose()?.is_some() { - walker.delete_current()?; - } - } - - { - let range: RangeFrom = (from, B256::ZERO).into()..; - let mut cursor = tx.cursor_dup_write::()?; - let mut walker = cursor.walk_range(range)?; - - while walker.next().transpose()?.is_some() { - walker.delete_current()?; - } - } - - Ok(()) - } } impl StorageTrieWriter for DatabaseProvider { @@ -2893,75 +2803,6 @@ impl StorageTrieWriter for DatabaseP Ok(num_entries) } - - /// Records the current values of all trie nodes which will be updated using the - /// `StorageTrieUpdates` into the storage trie changesets table. - /// - /// The intended usage of this method is to call it _prior_ to calling - /// `write_storage_trie_updates` with the same set of `StorageTrieUpdates`. - /// - /// Returns the number of keys written. - fn write_storage_trie_changesets<'a>( - &self, - block_number: BlockNumber, - storage_tries: impl Iterator, - updates_overlay: Option<&TrieUpdatesSorted>, - ) -> ProviderResult { - let mut num_written = 0; - - let mut changeset_cursor = - self.tx_ref().cursor_dup_write::()?; - let curr_values_cursor = self.tx_ref().cursor_dup_read::()?; - - // Wrap the cursor in DatabaseStorageTrieCursor - let mut db_storage_cursor = DatabaseStorageTrieCursor::new( - curr_values_cursor, - B256::default(), // Will be set per iteration - ); - - // Create empty TrieUpdatesSorted for when updates_overlay is None - let empty_updates = TrieUpdatesSorted::default(); - - for (hashed_address, storage_trie_updates) in storage_tries { - let changeset_key = BlockNumberHashedAddress((block_number, *hashed_address)); - - // Update the hashed address for the cursor - db_storage_cursor.set_hashed_address(*hashed_address); - - // Get the overlay updates, or use empty updates - let overlay = updates_overlay.unwrap_or(&empty_updates); - - // Wrap the cursor in InMemoryTrieCursor with the overlay - let mut in_memory_storage_cursor = - InMemoryTrieCursor::new_storage(&mut db_storage_cursor, overlay, 
*hashed_address); - - let changed_paths = storage_trie_updates.storage_nodes.iter().map(|e| e.0); - - if storage_trie_updates.is_deleted() { - let all_nodes = TrieCursorIter::new(&mut in_memory_storage_cursor); - - for wiped in storage_trie_wiped_changeset_iter(changed_paths, all_nodes)? { - let (path, node) = wiped?; - num_written += 1; - changeset_cursor.append_dup( - changeset_key, - TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, - )?; - } - } else { - for path in changed_paths { - let node = in_memory_storage_cursor.seek_exact(path)?.map(|(_, node)| node); - num_written += 1; - changeset_cursor.append_dup( - changeset_key, - TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, - )?; - } - } - } - - Ok(num_written) - } } impl HashingWriter for DatabaseProvider { @@ -3732,7 +3573,7 @@ mod tests { use alloy_primitives::map::B256Map; use reth_ethereum_primitives::Receipt; use reth_testing_utils::generators::{self, random_block, BlockParams}; - use reth_trie::Nibbles; + use reth_trie::{Nibbles, StoredNibblesSubKey}; #[test] fn test_receipts_by_block_range_empty_range() { @@ -3976,781 +3817,6 @@ mod tests { assert_eq!(range_result, individual_results); } - #[test] - fn test_write_trie_changesets() { - use reth_db_api::models::BlockNumberHashedAddress; - use reth_trie::{BranchNodeCompact, StorageTrieEntry}; - - let factory = create_test_provider_factory(); - let provider_rw = factory.provider_rw().unwrap(); - - let block_number = 1u64; - - // Create some test nibbles and nodes - let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); - let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); - - let node1 = BranchNodeCompact::new( - 0b1111_1111_1111_1111, // state_mask - 0b0000_0000_0000_0000, // tree_mask - 0b0000_0000_0000_0000, // hash_mask - vec![], // hashes - None, // root hash - ); - - // Pre-populate AccountsTrie with a node that will be updated (for account_nibbles1) - { - let mut cursor = 
provider_rw.tx_ref().cursor_write::().unwrap(); - cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap(); - } - - // Create account trie updates: one Some (update) and one None (removal) - let account_nodes = vec![ - (account_nibbles1, Some(node1.clone())), // This will update existing node - (account_nibbles2, None), // This will be a removal (no existing node) - ]; - - // Create storage trie updates - let storage_address1 = B256::from([1u8; 32]); // Normal storage trie - let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie - - let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); - let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); - let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]); - - let storage_node1 = BranchNodeCompact::new( - 0b1111_0000_0000_0000, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - let storage_node2 = BranchNodeCompact::new( - 0b0000_1111_0000_0000, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - // Create an old version of storage_node1 to prepopulate - let storage_node1_old = BranchNodeCompact::new( - 0b1010_0000_0000_0000, // Different mask to show it's an old value - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - // Pre-populate StoragesTrie for normal storage (storage_address1) - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - // Add node that will be updated (storage_nibbles1) with old value - let entry = StorageTrieEntry { - nibbles: StoredNibblesSubKey(storage_nibbles1), - node: storage_node1_old.clone(), - }; - cursor.upsert(storage_address1, &entry).unwrap(); - } - - // Pre-populate StoragesTrie for wiped storage (storage_address2) - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - // Add node that will be updated (storage_nibbles1) - let entry1 = StorageTrieEntry { - nibbles: StoredNibblesSubKey(storage_nibbles1), - node: storage_node1.clone(), - }; 
- cursor.upsert(storage_address2, &entry1).unwrap(); - // Add node that won't be updated but exists (storage_nibbles3) - let entry3 = StorageTrieEntry { - nibbles: StoredNibblesSubKey(storage_nibbles3), - node: storage_node2.clone(), - }; - cursor.upsert(storage_address2, &entry3).unwrap(); - } - - // Normal storage trie: one Some (update) and one None (new) - let storage_trie1 = StorageTrieUpdatesSorted { - is_deleted: false, - storage_nodes: vec![ - (storage_nibbles1, Some(storage_node1.clone())), // This will update existing node - (storage_nibbles2, None), // This is a new node - ], - }; - - // Wiped storage trie - let storage_trie2 = StorageTrieUpdatesSorted { - is_deleted: true, - storage_nodes: vec![ - (storage_nibbles1, Some(storage_node1.clone())), // Updated node already in db - (storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in db - * storage_nibbles3 is in db - * but not updated */ - ], - }; - - let mut storage_tries = B256Map::default(); - storage_tries.insert(storage_address1, storage_trie1); - storage_tries.insert(storage_address2, storage_trie2); - - let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); - - // Write the changesets - let num_written = - provider_rw.write_trie_changesets(block_number, &trie_updates, None).unwrap(); - - // Verify number of entries written - // Account changesets: 2 (one update, one removal) - // Storage changesets: - // - Normal storage: 2 (one update, one removal) - // - Wiped storage: 3 (two updated, one existing not updated) - // Total: 2 + 2 + 3 = 7 - assert_eq!(num_written, 7); - - // Verify account changesets were written correctly - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_read::().unwrap(); - - // Get all entries for this block to see what was written - let all_entries = cursor - .walk_dup(Some(block_number), None) - .unwrap() - .collect::, _>>() - .unwrap(); - - // Assert the full value of all_entries in a single assert_eq - assert_eq!( - all_entries, - 
vec![ - ( - block_number, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(account_nibbles1), - node: Some(node1), - } - ), - ( - block_number, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(account_nibbles2), - node: None, - } - ), - ] - ); - } - - // Verify storage changesets were written correctly - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_read::().unwrap(); - - // Check normal storage trie changesets - let key1 = BlockNumberHashedAddress((block_number, storage_address1)); - let entries1 = - cursor.walk_dup(Some(key1), None).unwrap().collect::, _>>().unwrap(); - - assert_eq!( - entries1, - vec![ - ( - key1, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles1), - node: Some(storage_node1_old), // Old value that was prepopulated - } - ), - ( - key1, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles2), - node: None, // New node, no previous value - } - ), - ] - ); - - // Check wiped storage trie changesets - let key2 = BlockNumberHashedAddress((block_number, storage_address2)); - let entries2 = - cursor.walk_dup(Some(key2), None).unwrap().collect::, _>>().unwrap(); - - assert_eq!( - entries2, - vec![ - ( - key2, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles1), - node: Some(storage_node1), // Was in db, so has old value - } - ), - ( - key2, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles2), - node: None, // Was not in db - } - ), - ( - key2, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles3), - node: Some(storage_node2), // Existing node in wiped storage - } - ), - ] - ); - } - - provider_rw.commit().unwrap(); - } - - #[test] - fn test_write_trie_changesets_with_overlay() { - use reth_db_api::models::BlockNumberHashedAddress; - use reth_trie::BranchNodeCompact; - - let factory = create_test_provider_factory(); - let provider_rw = factory.provider_rw().unwrap(); - - let block_number = 1u64; - - // Create some test nibbles and 
nodes - let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); - let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); - - let node1 = BranchNodeCompact::new( - 0b1111_1111_1111_1111, // state_mask - 0b0000_0000_0000_0000, // tree_mask - 0b0000_0000_0000_0000, // hash_mask - vec![], // hashes - None, // root hash - ); - - // NOTE: Unlike the previous test, we're NOT pre-populating the database - // All node values will come from the overlay - - // Create the overlay with existing values that would normally be in the DB - let node1_old = BranchNodeCompact::new( - 0b1010_1010_1010_1010, // Different mask to show it's the overlay "existing" value - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - // Create overlay account nodes - let overlay_account_nodes = vec![ - (account_nibbles1, Some(node1_old.clone())), // This simulates existing node in overlay - ]; - - // Create account trie updates: one Some (update) and one None (removal) - let account_nodes = vec![ - (account_nibbles1, Some(node1)), // This will update overlay node - (account_nibbles2, None), // This will be a removal (no existing node) - ]; - - // Create storage trie updates - let storage_address1 = B256::from([1u8; 32]); // Normal storage trie - let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie - - let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); - let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); - let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]); - - let storage_node1 = BranchNodeCompact::new( - 0b1111_0000_0000_0000, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - let storage_node2 = BranchNodeCompact::new( - 0b0000_1111_0000_0000, - 0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - // Create old versions for overlay - let storage_node1_old = BranchNodeCompact::new( - 0b1010_0000_0000_0000, // Different mask to show it's an old value - 
0b0000_0000_0000_0000, - 0b0000_0000_0000_0000, - vec![], - None, - ); - - // Create overlay storage nodes - let mut overlay_storage_tries = B256Map::default(); - - // Overlay for normal storage (storage_address1) - let overlay_storage_trie1 = StorageTrieUpdatesSorted { - is_deleted: false, - storage_nodes: vec![ - (storage_nibbles1, Some(storage_node1_old.clone())), /* Simulates existing in - * overlay */ - ], - }; - - // Overlay for wiped storage (storage_address2) - let overlay_storage_trie2 = StorageTrieUpdatesSorted { - is_deleted: false, - storage_nodes: vec![ - (storage_nibbles1, Some(storage_node1.clone())), // Existing in overlay - (storage_nibbles3, Some(storage_node2.clone())), // Also existing in overlay - ], - }; - - overlay_storage_tries.insert(storage_address1, overlay_storage_trie1); - overlay_storage_tries.insert(storage_address2, overlay_storage_trie2); - - let overlay = TrieUpdatesSorted::new(overlay_account_nodes, overlay_storage_tries); - - // Normal storage trie: one Some (update) and one None (new) - let storage_trie1 = StorageTrieUpdatesSorted { - is_deleted: false, - storage_nodes: vec![ - (storage_nibbles1, Some(storage_node1.clone())), // This will update overlay node - (storage_nibbles2, None), // This is a new node - ], - }; - - // Wiped storage trie - let storage_trie2 = StorageTrieUpdatesSorted { - is_deleted: true, - storage_nodes: vec![ - (storage_nibbles1, Some(storage_node1.clone())), // Updated node from overlay - (storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in overlay - * storage_nibbles3 is in - * overlay - * but not updated */ - ], - }; - - let mut storage_tries = B256Map::default(); - storage_tries.insert(storage_address1, storage_trie1); - storage_tries.insert(storage_address2, storage_trie2); - - let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); - - // Write the changesets WITH OVERLAY - let num_written = - provider_rw.write_trie_changesets(block_number, &trie_updates, 
Some(&overlay)).unwrap(); - - // Verify number of entries written - // Account changesets: 2 (one update from overlay, one removal) - // Storage changesets: - // - Normal storage: 2 (one update from overlay, one new) - // - Wiped storage: 3 (two updated, one existing from overlay not updated) - // Total: 2 + 2 + 3 = 7 - assert_eq!(num_written, 7); - - // Verify account changesets were written correctly - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_read::().unwrap(); - - // Get all entries for this block to see what was written - let all_entries = cursor - .walk_dup(Some(block_number), None) - .unwrap() - .collect::, _>>() - .unwrap(); - - // Assert the full value of all_entries in a single assert_eq - assert_eq!( - all_entries, - vec![ - ( - block_number, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(account_nibbles1), - node: Some(node1_old), // Value from overlay, not DB - } - ), - ( - block_number, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(account_nibbles2), - node: None, - } - ), - ] - ); - } - - // Verify storage changesets were written correctly - { - let mut cursor = - provider_rw.tx_ref().cursor_dup_read::().unwrap(); - - // Check normal storage trie changesets - let key1 = BlockNumberHashedAddress((block_number, storage_address1)); - let entries1 = - cursor.walk_dup(Some(key1), None).unwrap().collect::, _>>().unwrap(); - - assert_eq!( - entries1, - vec![ - ( - key1, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles1), - node: Some(storage_node1_old), // Old value from overlay - } - ), - ( - key1, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles2), - node: None, // New node, no previous value - } - ), - ] - ); - - // Check wiped storage trie changesets - let key2 = BlockNumberHashedAddress((block_number, storage_address2)); - let entries2 = - cursor.walk_dup(Some(key2), None).unwrap().collect::, _>>().unwrap(); - - assert_eq!( - entries2, - vec![ - ( - key2, - TrieChangeSetsEntry { 
- nibbles: StoredNibblesSubKey(storage_nibbles1), - node: Some(storage_node1), // Value from overlay - } - ), - ( - key2, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles2), - node: None, // Was not in overlay - } - ), - ( - key2, - TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey(storage_nibbles3), - node: Some(storage_node2), /* Existing node from overlay in wiped - * storage */ - } - ), - ] - ); - } - - provider_rw.commit().unwrap(); - } - - #[test] - fn test_clear_trie_changesets_from() { - use alloy_primitives::hex_literal::hex; - use reth_db_api::models::BlockNumberHashedAddress; - use reth_trie::{BranchNodeCompact, StoredNibblesSubKey, TrieChangeSetsEntry}; - - let factory = create_test_provider_factory(); - - // Create some test data for different block numbers - let block1 = 100u64; - let block2 = 101u64; - let block3 = 102u64; - let block4 = 103u64; - let block5 = 104u64; - - // Create test addresses for storage changesets - let storage_address1 = - B256::from(hex!("1111111111111111111111111111111111111111111111111111111111111111")); - let storage_address2 = - B256::from(hex!("2222222222222222222222222222222222222222222222222222222222222222")); - - // Create test nibbles - let nibbles1 = StoredNibblesSubKey(Nibbles::from_nibbles([0x1, 0x2, 0x3])); - let nibbles2 = StoredNibblesSubKey(Nibbles::from_nibbles([0x4, 0x5, 0x6])); - let nibbles3 = StoredNibblesSubKey(Nibbles::from_nibbles([0x7, 0x8, 0x9])); - - // Create test nodes - let node1 = BranchNodeCompact::new( - 0b1111_1111_1111_1111, - 0b1111_1111_1111_1111, - 0b0000_0000_0000_0001, - vec![B256::from(hex!( - "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" - ))], - None, - ); - let node2 = BranchNodeCompact::new( - 0b1111_1111_1111_1110, - 0b1111_1111_1111_1110, - 0b0000_0000_0000_0010, - vec![B256::from(hex!( - "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890" - ))], - Some(B256::from(hex!( - 
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" - ))), - ); - - // Populate AccountsTrieChangeSets with data across multiple blocks - { - let provider_rw = factory.provider_rw().unwrap(); - let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - - // Block 100: 2 entries (will be kept - before start block) - cursor - .upsert( - block1, - &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, - ) - .unwrap(); - cursor - .upsert(block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None }) - .unwrap(); - - // Block 101: 3 entries with duplicates (will be deleted - from this block onwards) - cursor - .upsert( - block2, - &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) }, - ) - .unwrap(); - cursor - .upsert( - block2, - &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, - ) - .unwrap(); // duplicate key - cursor - .upsert(block2, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) - .unwrap(); - - // Block 102: 2 entries (will be deleted - after start block) - cursor - .upsert( - block3, - &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) }, - ) - .unwrap(); - cursor - .upsert( - block3, - &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node2.clone()) }, - ) - .unwrap(); - - // Block 103: 1 entry (will be deleted - after start block) - cursor - .upsert(block4, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None }) - .unwrap(); - - // Block 104: 2 entries (will be deleted - after start block) - cursor - .upsert( - block5, - &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) }, - ) - .unwrap(); - cursor - .upsert(block5, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) - .unwrap(); - - provider_rw.commit().unwrap(); - } - - // Populate StoragesTrieChangeSets with data across multiple blocks - { - let provider_rw = factory.provider_rw().unwrap(); - 
let mut cursor = - provider_rw.tx_ref().cursor_dup_write::().unwrap(); - - // Block 100, address1: 2 entries (will be kept - before start block) - let key1_block1 = BlockNumberHashedAddress((block1, storage_address1)); - cursor - .upsert( - key1_block1, - &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, - ) - .unwrap(); - cursor - .upsert(key1_block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None }) - .unwrap(); - - // Block 101, address1: 3 entries with duplicates (will be deleted - from this block - // onwards) - let key1_block2 = BlockNumberHashedAddress((block2, storage_address1)); - cursor - .upsert( - key1_block2, - &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) }, - ) - .unwrap(); - cursor - .upsert(key1_block2, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None }) - .unwrap(); // duplicate key - cursor - .upsert( - key1_block2, - &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) }, - ) - .unwrap(); - - // Block 102, address2: 2 entries (will be deleted - after start block) - let key2_block3 = BlockNumberHashedAddress((block3, storage_address2)); - cursor - .upsert( - key2_block3, - &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) }, - ) - .unwrap(); - cursor - .upsert(key2_block3, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) - .unwrap(); - - // Block 103, address1: 2 entries with duplicate (will be deleted - after start block) - let key1_block4 = BlockNumberHashedAddress((block4, storage_address1)); - cursor - .upsert( - key1_block4, - &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node1) }, - ) - .unwrap(); - cursor - .upsert( - key1_block4, - &TrieChangeSetsEntry { nibbles: nibbles3, node: Some(node2.clone()) }, - ) - .unwrap(); // duplicate key - - // Block 104, address2: 2 entries (will be deleted - after start block) - let key2_block5 = BlockNumberHashedAddress((block5, 
storage_address2)); - cursor - .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles1, node: None }) - .unwrap(); - cursor - .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles2, node: Some(node2) }) - .unwrap(); - - provider_rw.commit().unwrap(); - } - - // Clear all changesets from block 101 onwards - { - let provider_rw = factory.provider_rw().unwrap(); - provider_rw.clear_trie_changesets_from(block2).unwrap(); - provider_rw.commit().unwrap(); - } - - // Verify AccountsTrieChangeSets after clearing - { - let provider = factory.provider().unwrap(); - let mut cursor = - provider.tx_ref().cursor_dup_read::().unwrap(); - - // Block 100 should still exist (before range) - let block1_entries = cursor - .walk_dup(Some(block1), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert_eq!(block1_entries.len(), 2, "Block 100 entries should be preserved"); - assert_eq!(block1_entries[0].0, block1); - assert_eq!(block1_entries[1].0, block1); - - // Blocks 101-104 should be deleted - let block2_entries = cursor - .walk_dup(Some(block2), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block2_entries.is_empty(), "Block 101 entries should be deleted"); - - let block3_entries = cursor - .walk_dup(Some(block3), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block3_entries.is_empty(), "Block 102 entries should be deleted"); - - let block4_entries = cursor - .walk_dup(Some(block4), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block4_entries.is_empty(), "Block 103 entries should be deleted"); - - // Block 104 should also be deleted - let block5_entries = cursor - .walk_dup(Some(block5), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block5_entries.is_empty(), "Block 104 entries should be deleted"); - } - - // Verify StoragesTrieChangeSets after clearing - { - let provider = factory.provider().unwrap(); - let mut cursor = - provider.tx_ref().cursor_dup_read::().unwrap(); - - // Block 100 entries should still 
exist (before range) - let key1_block1 = BlockNumberHashedAddress((block1, storage_address1)); - let block1_entries = cursor - .walk_dup(Some(key1_block1), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert_eq!(block1_entries.len(), 2, "Block 100 storage entries should be preserved"); - - // Blocks 101-104 entries should be deleted - let key1_block2 = BlockNumberHashedAddress((block2, storage_address1)); - let block2_entries = cursor - .walk_dup(Some(key1_block2), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block2_entries.is_empty(), "Block 101 storage entries should be deleted"); - - let key2_block3 = BlockNumberHashedAddress((block3, storage_address2)); - let block3_entries = cursor - .walk_dup(Some(key2_block3), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block3_entries.is_empty(), "Block 102 storage entries should be deleted"); - - let key1_block4 = BlockNumberHashedAddress((block4, storage_address1)); - let block4_entries = cursor - .walk_dup(Some(key1_block4), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block4_entries.is_empty(), "Block 103 storage entries should be deleted"); - - // Block 104 entries should also be deleted - let key2_block5 = BlockNumberHashedAddress((block5, storage_address2)); - let block5_entries = cursor - .walk_dup(Some(key2_block5), None) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(block5_entries.is_empty(), "Block 104 storage entries should be deleted"); - } - } - #[test] fn test_write_trie_updates_sorted() { use reth_trie::{ diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 50ea5a05670..3d6aa6a72cb 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,5 +1,5 @@ use alloc::vec::Vec; -use alloy_primitives::{Address, BlockNumber, Bytes, B256}; +use alloy_primitives::{Address, Bytes, B256}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::{ 
updates::{StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted}, @@ -103,32 +103,6 @@ pub trait TrieWriter: Send { /// /// Returns the number of entries modified. fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult; - - /// Records the current values of all trie nodes which will be updated using the [`TrieUpdates`] - /// into the trie changesets tables. - /// - /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with - /// the same [`TrieUpdates`]. - /// - /// The `updates_overlay` parameter allows providing additional in-memory trie updates that - /// should be considered when looking up current node values. When provided, these overlay - /// updates are applied on top of the database state, allowing the method to see a view that - /// includes both committed database values and pending in-memory changes. This is useful - /// when writing changesets for updates that depend on previous uncommitted trie changes. - /// - /// Returns the number of keys written. - fn write_trie_changesets( - &self, - block_number: BlockNumber, - trie_updates: &TrieUpdatesSorted, - updates_overlay: Option<&TrieUpdatesSorted>, - ) -> ProviderResult; - - /// Clears contents of trie changesets completely - fn clear_trie_changesets(&self) -> ProviderResult<()>; - - /// Clears contents of trie changesets starting from the given block number (inclusive) onwards. - fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()>; } /// Storage Trie Writer @@ -143,25 +117,4 @@ pub trait StorageTrieWriter: Send { &self, storage_tries: impl Iterator, ) -> ProviderResult; - - /// Records the current values of all trie nodes which will be updated using the - /// [`StorageTrieUpdatesSorted`] into the storage trie changesets table. - /// - /// The intended usage of this method is to call it _prior_ to calling - /// `write_storage_trie_updates` with the same set of [`StorageTrieUpdatesSorted`]. 
- /// - /// The `updates_overlay` parameter allows providing additional in-memory trie updates that - /// should be considered when looking up current node values. When provided, these overlay - /// updates are applied on top of the database state for each storage trie, allowing the - /// method to see a view that includes both committed database values and pending in-memory - /// changes. This is useful when writing changesets for storage updates that depend on - /// previous uncommitted trie changes. - /// - /// Returns the number of keys written. - fn write_storage_trie_changesets<'a>( - &self, - block_number: BlockNumber, - storage_tries: impl Iterator, - updates_overlay: Option<&TrieUpdatesSorted>, - ) -> ProviderResult; } diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index bc842768b8f..53ddc735137 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -40,7 +40,7 @@ mod nibbles; pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey}; mod storage; -pub use storage::{StorageTrieEntry, TrieChangeSetsEntry}; +pub use storage::StorageTrieEntry; mod subnode; pub use subnode::StoredSubNode; diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index 77d037ff2e7..b43fafed2b1 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -42,181 +42,3 @@ impl reth_codecs::Compact for StorageTrieEntry { (this, buf) } } - -/// Trie changeset entry representing the state of a trie node before a block. -/// -/// `nibbles` is the subkey when used as a value in the changeset tables. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] -pub struct TrieChangeSetsEntry { - /// The nibbles of the intermediate node - pub nibbles: StoredNibblesSubKey, - /// Node value prior to the block being processed, None indicating it didn't exist. 
- pub node: Option, -} - -impl ValueWithSubKey for TrieChangeSetsEntry { - type SubKey = StoredNibblesSubKey; - - fn get_subkey(&self) -> Self::SubKey { - self.nibbles.clone() - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TrieChangeSetsEntry { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let nibbles_len = self.nibbles.to_compact(buf); - let node_len = self.node.as_ref().map(|node| node.to_compact(buf)).unwrap_or(0); - nibbles_len + node_len - } - - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - if len == 0 { - // Return an empty entry without trying to parse anything - return ( - Self { nibbles: StoredNibblesSubKey::from(super::Nibbles::default()), node: None }, - buf, - ) - } - - let (nibbles, buf) = StoredNibblesSubKey::from_compact(buf, 65); - - if len <= 65 { - return (Self { nibbles, node: None }, buf) - } - - let (node, buf) = BranchNodeCompact::from_compact(buf, len - 65); - (Self { nibbles, node: Some(node) }, buf) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bytes::BytesMut; - use reth_codecs::Compact; - - #[test] - fn test_trie_changesets_entry_full_empty() { - // Test a fully empty entry (empty nibbles, None node) - let entry = TrieChangeSetsEntry { nibbles: StoredNibblesSubKey::from(vec![]), node: None }; - - let mut buf = BytesMut::new(); - let len = entry.to_compact(&mut buf); - - // Empty nibbles takes 65 bytes (64 for padding + 1 for length) - // None node adds 0 bytes - assert_eq!(len, 65); - assert_eq!(buf.len(), 65); - - // Deserialize and verify - let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); - assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); - assert_eq!(decoded.node, None); - assert_eq!(remaining.len(), 0); - } - - #[test] - fn test_trie_changesets_entry_none_node() { - // Test non-empty nibbles with None node - let nibbles_data = vec![0x01, 0x02, 0x03, 0x04]; - let entry = TrieChangeSetsEntry { - 
nibbles: StoredNibblesSubKey::from(nibbles_data.clone()), - node: None, - }; - - let mut buf = BytesMut::new(); - let len = entry.to_compact(&mut buf); - - // Nibbles takes 65 bytes regardless of content - assert_eq!(len, 65); - - // Deserialize and verify - let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); - assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data); - assert_eq!(decoded.node, None); - assert_eq!(remaining.len(), 0); - } - - #[test] - fn test_trie_changesets_entry_empty_path_with_node() { - // Test empty path with Some node - // Using the same signature as in the codebase: (state_mask, hash_mask, tree_mask, hashes, - // value) - let test_node = BranchNodeCompact::new( - 0b1111_1111_1111_1111, // state_mask: all children present - 0b1111_1111_1111_1111, // hash_mask: all have hashes - 0b0000_0000_0000_0000, // tree_mask: no embedded trees - vec![], // hashes - None, // value - ); - - let entry = TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey::from(vec![]), - node: Some(test_node.clone()), - }; - - let mut buf = BytesMut::new(); - let len = entry.to_compact(&mut buf); - - // Calculate expected length - let mut temp_buf = BytesMut::new(); - let node_len = test_node.to_compact(&mut temp_buf); - assert_eq!(len, 65 + node_len); - - // Deserialize and verify - let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); - assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); - assert_eq!(decoded.node, Some(test_node)); - assert_eq!(remaining.len(), 0); - } - - #[test] - fn test_trie_changesets_entry_normal() { - // Test normal case: non-empty path with Some node - let nibbles_data = vec![0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]; - // Using the same signature as in the codebase - let test_node = BranchNodeCompact::new( - 0b0000_0000_1111_0000, // state_mask: some children present - 0b0000_0000_0011_0000, // hash_mask: some have hashes - 0b0000_0000_0000_0000, // tree_mask: no embedded trees - vec![], // hashes (empty for this 
test) - None, // value - ); - - let entry = TrieChangeSetsEntry { - nibbles: StoredNibblesSubKey::from(nibbles_data.clone()), - node: Some(test_node.clone()), - }; - - let mut buf = BytesMut::new(); - let len = entry.to_compact(&mut buf); - - // Verify serialization length - let mut temp_buf = BytesMut::new(); - let node_len = test_node.to_compact(&mut temp_buf); - assert_eq!(len, 65 + node_len); - - // Deserialize and verify - let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); - assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data); - assert_eq!(decoded.node, Some(test_node)); - assert_eq!(remaining.len(), 0); - } - - #[test] - fn test_trie_changesets_entry_from_compact_zero_len() { - // Test from_compact with zero length - let buf = vec![0x01, 0x02, 0x03]; - let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, 0); - - // Should return empty nibbles and None node - assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); - assert_eq!(decoded.node, None); - assert_eq!(remaining, &buf[..]); // Buffer should be unchanged - } -} diff --git a/docs/crates/db.md b/docs/crates/db.md index f6460b6c121..76fa1ebad6f 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -58,8 +58,6 @@ There are many tables within the node, all used to store different types of data - HashedStorages - AccountsTrie - StoragesTrie -- AccountsTrieChangeSets -- StoragesTrieChangeSets - TransactionSenders - StageCheckpoints - StageCheckpointProgresses diff --git a/docs/crates/stages.md b/docs/crates/stages.md index fbc1641bd43..db02d3b8066 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -12,7 +12,6 @@ The `stages` lib plays a central role in syncing the node, maintaining state, up - AccountHashingStage - StorageHashingStage - MerkleStage (execute) -- MerkleChangeSets - TransactionLookupStage - IndexStorageHistoryStage - IndexAccountHistoryStage @@ -114,12 +113,6 @@ The `StorageHashingStage` is responsible for computing hashes of contract storag
-## MerkleChangeSets - -The `MerkleChangeSets` stage consolidates and finalizes Merkle-related change sets after the `MerkleStage` execution mode has run, ensuring consistent trie updates and checkpoints. - -
- ## TransactionLookupStage The `TransactionLookupStage` builds and maintains transaction lookup indices. These indices enable efficient querying of transactions by hash or block position. This stage is crucial for RPC functionality, allowing users to quickly retrieve transaction information without scanning the entire blockchain. diff --git a/docs/vocs/docs/pages/run/configuration.mdx b/docs/vocs/docs/pages/run/configuration.mdx index bab4d695ac4..64a67e0b82c 100644 --- a/docs/vocs/docs/pages/run/configuration.mdx +++ b/docs/vocs/docs/pages/run/configuration.mdx @@ -434,11 +434,6 @@ storage_history = { distance = 100_000 } # Prune all historical storage states b # Bodies History pruning configuration bodies_history = { distance = 100_000 } # Prune all historical block bodies before the block `head-100000` - -# Merkle Changesets pruning configuration -# Controls pruning of AccountsTrieChangeSets and StoragesTrieChangeSets. -# Default: { distance = 128 } - keeps the last 128 blocks of merkle changesets -merkle_changesets = { distance = 128 } ``` We can also prune receipts more granular, using the logs filtering: From 28a31cd5798b040a21b9cfcb48d87afcd6e07e22 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 22 Jan 2026 09:02:15 -0800 Subject: [PATCH 154/267] fix: use unwrap_or_else for lazy evaluation of StorageSettings::legacy (#21332) --- crates/storage/db-common/src/init.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 1af87fcb361..e5cc7da7558 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -162,7 +162,7 @@ where return Err(InitStorageError::UninitializedDatabase) } - let stored = factory.storage_settings()?.unwrap_or(StorageSettings::legacy()); + let stored = factory.storage_settings()?.unwrap_or_else(StorageSettings::legacy); if stored != genesis_storage_settings { warn!( target: "reth::storage", 
From 0c854b6f141f0ebfec577aebe2abde3a569ee361 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 22 Jan 2026 10:32:07 -0800 Subject: [PATCH 155/267] fix(net): limit pending pool imports for broadcast transactions (#21254) Co-authored-by: Arsenii Kulikov --- crates/net/network/src/metrics.rs | 2 ++ crates/net/network/src/transactions/mod.rs | 40 +++++++++++++++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index ba9efdff54b..6da8deced2d 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -131,6 +131,8 @@ pub struct TransactionsManagerMetrics { /// capacity. Note, this is not a limit to the number of inflight requests, but a health /// measure. pub(crate) capacity_pending_pool_imports: Counter, + /// Total number of transactions ignored because pending pool imports are at capacity. + pub(crate) skipped_transactions_pending_pool_imports_at_capacity: Counter, /// The time it took to prepare transactions for import. This is mostly sender recovery. pub(crate) pool_import_prepare_duration: Histogram, diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index a6cf3c4d096..5ed91e80f3c 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -429,11 +429,22 @@ impl TransactionsManager { /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns /// `false` if [`TransactionsManager`] is operating close to full capacity. fn has_capacity_for_fetching_pending_hashes(&self) -> bool { - self.pending_pool_imports_info - .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && + self.has_capacity_for_pending_pool_imports() && self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() } + /// Returns `true` if [`TransactionsManager`] has capacity for more pending pool imports. 
+ fn has_capacity_for_pending_pool_imports(&self) -> bool { + self.remaining_pool_import_capacity() > 0 + } + + /// Returns the remaining capacity for pending pool imports. + fn remaining_pool_import_capacity(&self) -> usize { + self.pending_pool_imports_info.max_pending_pool_imports.saturating_sub( + self.pending_pool_imports_info.pending_pool_imports.load(Ordering::Relaxed), + ) + } + fn report_peer_bad_transactions(&self, peer_id: PeerId) { self.report_peer(peer_id, ReputationChangeKind::BadTransactions); self.metrics.reported_bad_transactions.increment(1); @@ -1285,6 +1296,7 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), policy=?self.config.ingress_policy, "Ignoring full transactions from peer blocked by ingress policy"); return; } + // ensure we didn't receive any blob transactions as these are disallowed to be // broadcasted in full @@ -1335,7 +1347,13 @@ where return } + // Early return if we don't have capacity for any imports + if !self.has_capacity_for_pending_pool_imports() { + return + } + let Some(peer) = self.peers.get_mut(&peer_id) else { return }; + let client_version = peer.client_version.clone(); let mut transactions = transactions.0; let start = Instant::now(); @@ -1378,7 +1396,7 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hash=%tx.tx_hash(), - client_version=%peer.client_version, + %client_version, "received a known bad transaction from peer" ); has_bad_transactions = true; @@ -1387,6 +1405,18 @@ where true }); + // Truncate to remaining capacity before recovery to avoid wasting CPU on transactions + // that won't be imported anyway. 
+ let capacity = self.remaining_pool_import_capacity(); + if transactions.len() > capacity { + let skipped = transactions.len() - capacity; + transactions.truncate(capacity); + self.metrics + .skipped_transactions_pending_pool_imports_at_capacity + .increment(skipped as u64); + trace!(target: "net::tx", skipped, capacity, "Truncated transactions batch to capacity"); + } + let txs_len = transactions.len(); let new_txs = transactions @@ -1397,7 +1427,7 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hash=%badtx.tx_hash(), - client_version=%peer.client_version, + client_version=%client_version, "failed ecrecovery for transaction" ); None @@ -1448,7 +1478,7 @@ where self.metrics .occurrences_of_transaction_already_seen_by_peer .increment(num_already_seen_by_peer); - trace!(target: "net::tx", num_txs=%num_already_seen_by_peer, ?peer_id, client=?peer.client_version, "Peer sent already seen transactions"); + trace!(target: "net::tx", num_txs=%num_already_seen_by_peer, ?peer_id, client=%client_version, "Peer sent already seen transactions"); } if has_bad_transactions { From be5a4ac7a6d594bf1938787caa13f0268d0e97f0 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 22 Jan 2026 22:43:24 +0400 Subject: [PATCH 156/267] feat: bump alloy and alloy-evm (#21337) --- Cargo.lock | 130 +++++++++--------- Cargo.toml | 64 ++++----- .../tree/src/tree/payload_processor/mod.rs | 14 +- .../src/tree/payload_processor/prewarm.rs | 5 +- crates/ethereum/evm/src/receipt.rs | 10 +- crates/ethereum/evm/src/test_utils.rs | 47 +++---- crates/evm/evm/src/engine.rs | 15 +- crates/evm/evm/src/execute.rs | 71 +++++----- crates/optimism/evm/src/receipts.rs | 6 +- .../custom-beacon-withdrawals/src/main.rs | 17 +-- examples/custom-node/src/evm/executor.rs | 25 ++-- 11 files changed, 199 insertions(+), 205 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68308a1ee3a..0b59ce14029 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = 
"alloy-consensus" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c3a590d13de3944675987394715f37537b50b856e3b23a0e66e97d963edbf38" +checksum = "ed1958f0294ecc05ebe7b3c9a8662a3e221c2523b7f2bcd94c7a651efbd510bf" dependencies = [ "alloy-eips", "alloy-primitives", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f28f769d5ea999f0d8a105e434f483456a15b4e1fcb08edbbbe1650a497ff6d" +checksum = "f752e99497ddc39e22d547d7dfe516af10c979405a034ed90e69b914b7dddeae" dependencies = [ "alloy-consensus", "alloy-eips", @@ -164,9 +164,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "990fa65cd132a99d3c3795a82b9f93ec82b81c7de3bab0bf26ca5c73286f7186" +checksum = "f2140796bc79150b1b7375daeab99750f0ff5e27b1f8b0aa81ccde229c7f02a2" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -255,19 +255,21 @@ checksum = "6adac476434bf024279164dcdca299309f0c7d1e3557024eb7a83f8d9d01c6b5" dependencies = [ "alloy-primitives", "alloy-rlp", + "arbitrary", "borsh", "serde", ] [[package]] name = "alloy-eips" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09535cbc646b0e0c6fcc12b7597eaed12cf86dff4c4fba9507a61e71b94f30eb" +checksum = "813a67f87e56b38554d18b182616ee5006e8e2bf9df96a0df8bf29dff1d52e3f" dependencies = [ "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", + "alloy-eip7928", "alloy-primitives", "alloy-rlp", "alloy-serde", @@ -287,9 +289,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a96827207397445a919a8adc49289b53cc74e48e460411740bba31cec2fc307d" +checksum = 
"1582933a9fc27c0953220eb4f18f6492ff577822e9a8d848890ff59f6b4f5beb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -309,9 +311,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1005520ccf89fa3d755e46c1d992a9e795466c2e7921be2145ef1f749c5727de" +checksum = "05864eef929c4d28895ae4b4d8ac9c6753c4df66e873b9c8fafc8089b59c1502" dependencies = [ "alloy-eips", "alloy-primitives", @@ -350,9 +352,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b626409c98ba43aaaa558361bca21440c88fd30df7542c7484b9c7a1489cdb" +checksum = "d2dd146b3de349a6ffaa4e4e319ab3a90371fb159fb0bddeb1c7bbe8b1792eff" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -365,9 +367,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89924fdcfeee0e0fa42b1f10af42f92802b5d16be614a70897382565663bf7cf" +checksum = "8c12278ffbb8872dfba3b2f17d8ea5e8503c2df5155d9bc5ee342794bde505c3" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -391,9 +393,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0dbe56ff50065713ff8635d8712a0895db3ad7f209db9793ad8fcb6b1734aa" +checksum = "833037c04917bc2031541a60e8249e4ab5500e24c637c1c62e95e963a655d66f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -404,9 +406,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54dc5c46a92fc7267055a174d30efb34e2599a0047102a4d38a025ae521435ba" +checksum = 
"6f19214adae08ea95600c3ede76bcbf0c40b36a263534a8f441a4c732f60e868" dependencies = [ "alloy-consensus", "alloy-eips", @@ -467,9 +469,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b56f7a77513308a21a2ba0e9d57785a9d9d2d609e77f4e71a78a1192b83ff2d" +checksum = "eafa840b0afe01c889a3012bb2fde770a544f74eab2e2870303eb0a5fb869c48" dependencies = [ "alloy-chains", "alloy-consensus", @@ -512,9 +514,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94813abbd7baa30c700ea02e7f92319dbcb03bff77aeea92a3a9af7ba19c5c70" +checksum = "57b3a3b3e4efc9f4d30e3326b6bd6811231d16ef94837e18a802b44ca55119e6" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -556,9 +558,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff01723afc25ec4c5b04de399155bef7b6a96dfde2475492b1b7b4e7a4f46445" +checksum = "12768ae6303ec764905a8a7cd472aea9072f9f9c980d18151e26913da8ae0123" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -582,9 +584,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91bf006bb06b7d812591b6ac33395cb92f46c6a65cda11ee30b348338214f0f" +checksum = "0622d8bcac2f16727590aa33f4c3f05ea98130e7e4b4924bce8be85da5ad0dae" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -595,9 +597,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b934c3bcdc6617563b45deb36a40881c8230b94d0546ea739dff7edb3aa2f6fd" +checksum = 
"c38c5ac70457ecc74e87fe1a5a19f936419224ded0eb0636241452412ca92733" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -607,9 +609,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e82145856df8abb1fefabef58cdec0f7d9abf337d4abd50c1ed7e581634acdd" +checksum = "ae8eb0e5d6c48941b61ab76fabab4af66f7d88309a98aa14ad3dec7911c1eba3" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -619,9 +621,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ca1c1dab27f531d3858f8b1a2d6bfb2da664be0c1083971078eb7b71abe4b" +checksum = "a1cf5a093e437dfd62df48e480f24e1a3807632358aad6816d7a52875f1c04aa" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -630,9 +632,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d92a9b4b268fac505ef7fb1dac9bb129d4fd7de7753f22a5b6e9f666f7f7de6" +checksum = "e07949e912479ef3b848e1cf8db54b534bdd7bc58e6c23f28ea9488960990c8c" dependencies = [ "alloy-eips", "alloy-primitives", @@ -650,9 +652,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab1ebed118b701c497e6541d2d11dfa6f3c6ae31a3c52999daa802fcdcc16b7" +checksum = "925ff0f48c2169c050f0ae7a82769bdf3f45723d6742ebb6a5efb4ed2f491b26" dependencies = [ "alloy-primitives", "derive_more", @@ -662,9 +664,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "232f00fcbcd3ee3b9399b96223a8fc884d17742a70a44f9d7cef275f93e6e872" +checksum = 
"336ef381c7409f23c69f6e79bddc1917b6e832cff23e7a5cf84b9381d53582e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -683,9 +685,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5715d0bf7efbd360873518bd9f6595762136b5327a9b759a8c42ccd9b5e44945" +checksum = "28e97603095020543a019ab133e0e3dc38cd0819f19f19bdd70c642404a54751" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -705,9 +707,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7b61941d2add2ee64646612d3eda92cbbde8e6c933489760b6222c8898c79be" +checksum = "2805153975e25d38e37ee100880e642d5b24e421ed3014a7d2dae1d9be77562e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -720,9 +722,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9763cc931a28682bd4b9a68af90057b0fbe80e2538a82251afd69d7ae00bbebf" +checksum = "f1aec4e1c66505d067933ea1a949a4fb60a19c4cfc2f109aa65873ea99e62ea8" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -734,9 +736,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "359a8caaa98cb49eed62d03f5bc511dd6dd5dee292238e8627a6e5690156df0f" +checksum = "25b73c1d6e4f1737a20d246dad5a0abd6c1b76ec4c3d153684ef8c6f1b6bb4f4" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -746,9 +748,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ed8531cae8d21ee1c6571d0995f8c9f0652a6ef6452fde369283edea6ab7138" +checksum = 
"946a0d413dbb5cd9adba0de5f8a1a34d5b77deda9b69c1d7feed8fc875a1aa26" dependencies = [ "alloy-primitives", "arbitrary", @@ -758,9 +760,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb10ccd49d0248df51063fce6b716f68a315dd912d55b32178c883fd48b4021d" +checksum = "2f7481dc8316768f042495eaf305d450c32defbc9bce09d8bf28afcd956895bb" dependencies = [ "alloy-primitives", "async-trait", @@ -773,9 +775,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4d992d44e6c414ece580294abbadb50e74cfd4eaa69787350a4dfd4b20eaa1b" +checksum = "1259dac1f534a4c66c1d65237c89915d0010a2a91d6c3b0bada24dc5ee0fb917" dependencies = [ "alloy-consensus", "alloy-network", @@ -862,9 +864,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f50a9516736d22dd834cc2240e5bf264f338667cc1d9e514b55ec5a78b987ca" +checksum = "78f169b85eb9334871db986e7eaf59c58a03d86a30cc68b846573d47ed0656bb" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -885,9 +887,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a18b541a6197cf9a084481498a766fdf32fefda0c35ea6096df7d511025e9f1" +checksum = "019821102e70603e2c141954418255bec539ef64ac4117f8e84fb493769acf73" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -900,9 +902,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8075911680ebc537578cacf9453464fd394822a0f68614884a9c63f9fbaf5e89" +checksum = 
"e574ca2f490fb5961d2cdd78188897392c46615cd88b35c202d34bbc31571a81" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -920,9 +922,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921d37a57e2975e5215f7dd0f28873ed5407c7af630d4831a4b5c737de4b0b8b" +checksum = "b92dea6996269769f74ae56475570e3586910661e037b7b52d50c9641f76c68f" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -957,9 +959,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2289a842d02fe63f8c466db964168bb2c7a9fdfb7b24816dbb17d45520575fb" +checksum = "45ceac797eb8a56bdf5ab1fab353072c17d472eab87645ca847afe720db3246d" dependencies = [ "darling 0.21.3", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index e2d2bd3b841..bc5df15fb5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -485,10 +485,10 @@ revm-inspectors = "0.34.0" # eth alloy-chains = { version = "0.2.5", default-features = false } -alloy-dyn-abi = "1.4.3" +alloy-dyn-abi = "1.5.2" alloy-eip2124 = { version = "0.2.0", default-features = false } alloy-eip7928 = { version = "0.3.0", default-features = false } -alloy-evm = { version = "0.26.3", default-features = false } +alloy-evm = { version = "0.27.0", default-features = false } alloy-primitives = { version = "1.5.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.5.0" @@ -497,36 +497,36 @@ alloy-trie = { version = "0.9.1", default-features = false } alloy-hardforks = "0.4.5" -alloy-consensus = { version = "1.4.3", default-features = false } -alloy-contract = { version = "1.4.3", default-features = false } -alloy-eips = { version = "1.4.3", default-features = false } -alloy-genesis = { version = "1.4.3", default-features = false } 
-alloy-json-rpc = { version = "1.4.3", default-features = false } -alloy-network = { version = "1.4.3", default-features = false } -alloy-network-primitives = { version = "1.4.3", default-features = false } -alloy-provider = { version = "1.4.3", features = ["reqwest", "debug-api"], default-features = false } -alloy-pubsub = { version = "1.4.3", default-features = false } -alloy-rpc-client = { version = "1.4.3", default-features = false } -alloy-rpc-types = { version = "1.4.3", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.4.3", default-features = false } -alloy-rpc-types-anvil = { version = "1.4.3", default-features = false } -alloy-rpc-types-beacon = { version = "1.4.3", default-features = false } -alloy-rpc-types-debug = { version = "1.4.3", default-features = false } -alloy-rpc-types-engine = { version = "1.4.3", default-features = false } -alloy-rpc-types-eth = { version = "1.4.3", default-features = false } -alloy-rpc-types-mev = { version = "1.4.3", default-features = false } -alloy-rpc-types-trace = { version = "1.4.3", default-features = false } -alloy-rpc-types-txpool = { version = "1.4.3", default-features = false } -alloy-serde = { version = "1.4.3", default-features = false } -alloy-signer = { version = "1.4.3", default-features = false } -alloy-signer-local = { version = "1.4.3", default-features = false } -alloy-transport = { version = "1.4.3" } -alloy-transport-http = { version = "1.4.3", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.4.3", default-features = false } -alloy-transport-ws = { version = "1.4.3", default-features = false } +alloy-consensus = { version = "1.5.2", default-features = false } +alloy-contract = { version = "1.5.2", default-features = false } +alloy-eips = { version = "1.5.2", default-features = false } +alloy-genesis = { version = "1.5.2", default-features = false } +alloy-json-rpc = { version = "1.5.2", default-features = false } 
+alloy-network = { version = "1.5.2", default-features = false } +alloy-network-primitives = { version = "1.5.2", default-features = false } +alloy-provider = { version = "1.5.2", features = ["reqwest", "debug-api"], default-features = false } +alloy-pubsub = { version = "1.5.2", default-features = false } +alloy-rpc-client = { version = "1.5.2", default-features = false } +alloy-rpc-types = { version = "1.5.2", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.5.2", default-features = false } +alloy-rpc-types-anvil = { version = "1.5.2", default-features = false } +alloy-rpc-types-beacon = { version = "1.5.2", default-features = false } +alloy-rpc-types-debug = { version = "1.5.2", default-features = false } +alloy-rpc-types-engine = { version = "1.5.2", default-features = false } +alloy-rpc-types-eth = { version = "1.5.2", default-features = false } +alloy-rpc-types-mev = { version = "1.5.2", default-features = false } +alloy-rpc-types-trace = { version = "1.5.2", default-features = false } +alloy-rpc-types-txpool = { version = "1.5.2", default-features = false } +alloy-serde = { version = "1.5.2", default-features = false } +alloy-signer = { version = "1.5.2", default-features = false } +alloy-signer-local = { version = "1.5.2", default-features = false } +alloy-transport = { version = "1.5.2" } +alloy-transport-http = { version = "1.5.2", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.5.2", default-features = false } +alloy-transport-ws = { version = "1.5.2", default-features = false } # op -alloy-op-evm = { version = "0.26.3", default-features = false } +alloy-op-evm = { version = "0.27.0", default-features = false } alloy-op-hardforks = "0.4.4" op-alloy-rpc-types = { version = "0.23.1", default-features = false } op-alloy-rpc-types-engine = { version = "0.23.1", default-features = false } @@ -790,8 +790,8 @@ ipnet = "2.11" # jsonrpsee-http-client = { git = 
"https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-types = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } -# alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } -# alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } +# alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "df124c0" } +# alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "df124c0" } # revm-inspectors = { git = "https://github.com/paradigmxyz/revm-inspectors", rev = "3020ea8" } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 6d61578f636..23da7c23cc4 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -15,7 +15,7 @@ use crate::tree::{ }; use alloy_eip7928::BlockAccessList; use alloy_eips::eip1898::BlockWithParent; -use alloy_evm::{block::StateChangeSource, ToTxEnv}; +use alloy_evm::block::StateChangeSource; use alloy_primitives::B256; use crossbeam_channel::Sender as CrossbeamSender; use executor::WorkloadExecutor; @@ -25,6 +25,7 @@ use parking_lot::RwLock; use prewarm::PrewarmMetrics; use rayon::prelude::*; use reth_evm::{ + block::ExecutableTxParts, execute::{ExecutableTxFor, WithTxEnv}, ConfigureEvm, EvmEnvFor, ExecutableTxIterator, ExecutableTxTuple, OnStateHook, SpecFor, TxEnvFor, @@ -101,7 +102,7 @@ pub const SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY: usize = 1_000_000; /// Type alias for [`PayloadHandle`] returned by payload processor spawn methods. 
type IteratorPayloadHandle = PayloadHandle< - WithTxEnv, ::Tx>, + WithTxEnv, >::Recovered>, ::Error, ::Receipt, >; @@ -369,8 +370,8 @@ where &self, transactions: I, ) -> ( - mpsc::Receiver, I::Tx>>, - mpsc::Receiver, I::Tx>, I::Error>>, + mpsc::Receiver, I::Recovered>>, + mpsc::Receiver, I::Recovered>, I::Error>>, usize, ) { let (transactions, convert) = transactions.into(); @@ -385,7 +386,10 @@ where self.executor.spawn_blocking(move || { transactions.enumerate().for_each_with(ooo_tx, |ooo_tx, (idx, tx)| { let tx = convert(tx); - let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx: Arc::new(tx) }); + let tx = tx.map(|tx| { + let (tx_env, tx) = tx.into_parts(); + WithTxEnv { tx_env, tx: Arc::new(tx) } + }); // Only send Ok(_) variants to prewarming task. if let Ok(tx) = &tx { let _ = prewarm_tx.send(tx.clone()); diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 4f044e98fad..5c782ed1f50 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -29,7 +29,7 @@ use alloy_evm::Database; use alloy_primitives::{keccak256, map::B256Set, B256}; use crossbeam_channel::Sender as CrossbeamSender; use metrics::{Counter, Gauge, Histogram}; -use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, SpecFor}; +use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, RecoveredTx, SpecFor}; use reth_metrics::Metrics; use reth_primitives_traits::NodePrimitives; use reth_provider::{ @@ -609,7 +609,8 @@ where break } - let res = match evm.transact(&tx) { + let (tx_env, tx) = tx.into_parts(); + let res = match evm.transact(tx_env) { Ok(res) => res, Err(err) => { trace!( diff --git a/crates/ethereum/evm/src/receipt.rs b/crates/ethereum/evm/src/receipt.rs index ac8f5358c0e..932c6ca0dba 100644 --- a/crates/ethereum/evm/src/receipt.rs +++ b/crates/ethereum/evm/src/receipt.rs @@ -1,3 +1,4 @@ +use 
alloy_consensus::TxType; use alloy_evm::eth::receipt_builder::{ReceiptBuilder, ReceiptBuilderCtx}; use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_evm::Evm; @@ -12,13 +13,10 @@ impl ReceiptBuilder for RethReceiptBuilder { type Transaction = TransactionSigned; type Receipt = Receipt; - fn build_receipt( - &self, - ctx: ReceiptBuilderCtx<'_, Self::Transaction, E>, - ) -> Self::Receipt { - let ReceiptBuilderCtx { tx, result, cumulative_gas_used, .. } = ctx; + fn build_receipt(&self, ctx: ReceiptBuilderCtx<'_, TxType, E>) -> Self::Receipt { + let ReceiptBuilderCtx { tx_type, result, cumulative_gas_used, .. } = ctx; Receipt { - tx_type: tx.tx_type(), + tx_type, // Success flag was added in `EIP-658: Embedding transaction status code in // receipts`. success: result.is_success(), diff --git a/crates/ethereum/evm/src/test_utils.rs b/crates/ethereum/evm/src/test_utils.rs index cf32d9e6bd1..ed472c28a4d 100644 --- a/crates/ethereum/evm/src/test_utils.rs +++ b/crates/ethereum/evm/src/test_utils.rs @@ -1,6 +1,6 @@ use crate::EthEvmConfig; use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; -use alloy_consensus::Header; +use alloy_consensus::{Header, TxType}; use alloy_eips::eip7685::Requests; use alloy_evm::precompiles::PrecompilesMap; use alloy_primitives::Bytes; @@ -11,14 +11,14 @@ use reth_evm::{ block::{ BlockExecutionError, BlockExecutor, BlockExecutorFactory, BlockExecutorFor, ExecutableTx, }, - eth::{EthBlockExecutionCtx, EthEvmContext}, + eth::{EthBlockExecutionCtx, EthEvmContext, EthTxResult}, ConfigureEngineEvm, ConfigureEvm, Database, EthEvm, EthEvmFactory, Evm, EvmEnvFor, EvmFactory, - ExecutableTxIterator, ExecutionCtxFor, + ExecutableTxIterator, ExecutionCtxFor, RecoveredTx, }; use reth_execution_types::{BlockExecutionResult, ExecutionOutcome}; use reth_primitives_traits::{BlockTy, SealedBlock, SealedHeader}; use revm::{ - context::result::{ExecutionResult, Output, ResultAndState, SuccessReason}, + context::result::{ExecutionResult, 
HaltReason, Output, ResultAndState, SuccessReason}, database::State, Inspector, }; @@ -90,6 +90,7 @@ impl<'a, DB: Database, I: Inspector>>> BlockExec type Evm = EthEvm<&'a mut State, I, PrecompilesMap>; type Transaction = TransactionSigned; type Receipt = Receipt; + type Result = EthTxResult; fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { Ok(()) @@ -101,25 +102,25 @@ impl<'a, DB: Database, I: Inspector>>> BlockExec fn execute_transaction_without_commit( &mut self, - _tx: impl ExecutableTx, - ) -> Result::HaltReason>, BlockExecutionError> { - Ok(ResultAndState::new( - ExecutionResult::Success { - reason: SuccessReason::Return, - gas_used: 0, - gas_refunded: 0, - logs: vec![], - output: Output::Call(Bytes::from(vec![])), - }, - Default::default(), - )) - } - - fn commit_transaction( - &mut self, - _output: ResultAndState<::HaltReason>, - _tx: impl ExecutableTx, - ) -> Result { + tx: impl ExecutableTx, + ) -> Result { + Ok(EthTxResult { + result: ResultAndState::new( + ExecutionResult::Success { + reason: SuccessReason::Return, + gas_used: 0, + gas_refunded: 0, + logs: vec![], + output: Output::Call(Bytes::from(vec![])), + }, + Default::default(), + ), + tx_type: tx.into_parts().1.tx().tx_type(), + blob_gas_used: 0, + }) + } + + fn commit_transaction(&mut self, _output: Self::Result) -> Result { Ok(0) } diff --git a/crates/evm/evm/src/engine.rs b/crates/evm/evm/src/engine.rs index 5663745f456..13f802c27ab 100644 --- a/crates/evm/evm/src/engine.rs +++ b/crates/evm/evm/src/engine.rs @@ -1,5 +1,7 @@ -use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor}; +use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor, TxEnvFor}; +use alloy_evm::{block::ExecutableTxParts, RecoveredTx}; use rayon::prelude::*; +use reth_primitives_traits::TxTy; /// [`ConfigureEvm`] extension providing methods for executing payloads. 
pub trait ConfigureEngineEvm: ConfigureEvm { @@ -61,11 +63,16 @@ where /// Iterator over executable transactions. pub trait ExecutableTxIterator: - ExecutableTxTuple> + ExecutableTxTuple> { + /// HACK: for some reason, this duplicated AT is the only way to enforce the inner Recovered: + /// Send + Sync bound. Effectively alias for `Self::Tx::Recovered`. + type Recovered: RecoveredTx> + Send + Sync; } -impl ExecutableTxIterator for T where - T: ExecutableTxTuple> +impl ExecutableTxIterator for T +where + T: ExecutableTxTuple>, { + type Recovered = , TxTy>>::Recovered; } diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 866099a996d..540f86692d4 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -6,7 +6,7 @@ use alloy_consensus::{BlockHeader, Header}; use alloy_eips::eip2718::WithEncoded; pub use alloy_evm::block::{BlockExecutor, BlockExecutorFactory}; use alloy_evm::{ - block::{CommitChanges, ExecutableTx}, + block::{CommitChanges, ExecutableTxParts}, Evm, EvmEnv, EvmFactory, RecoveredTx, ToTxEnv, }; use alloy_primitives::{Address, B256}; @@ -401,49 +401,31 @@ where /// Conversions for executable transactions. pub trait ExecutorTx { - /// Converts the transaction into [`ExecutableTx`]. - fn as_executable(&self) -> impl ExecutableTx; - - /// Converts the transaction into [`Recovered`]. - fn into_recovered(self) -> Recovered; + /// Converts the transaction into a tuple of [`TxEnvFor`] and [`Recovered`]. 
+ fn into_parts(self) -> (::Tx, Recovered); } impl ExecutorTx for WithEncoded> { - fn as_executable(&self) -> impl ExecutableTx { - self - } - - fn into_recovered(self) -> Recovered { - self.1 + fn into_parts(self) -> (::Tx, Recovered) { + (self.to_tx_env(), self.1) } } impl ExecutorTx for Recovered { - fn as_executable(&self) -> impl ExecutableTx { - self - } - - fn into_recovered(self) -> Self { - self + fn into_parts(self) -> (::Tx, Self) { + (self.to_tx_env(), self) } } -impl ExecutorTx - for WithTxEnv<<::Evm as Evm>::Tx, T> +impl ExecutorTx + for WithTxEnv<::Tx, Recovered> where - T: ExecutorTx + Clone, - Executor: BlockExecutor, - <::Evm as Evm>::Tx: Clone, - Self: RecoveredTx, + Executor: BlockExecutor, { - fn as_executable(&self) -> impl ExecutableTx { - self - } - - fn into_recovered(self) -> Recovered { - Arc::unwrap_or_clone(self.tx).into_recovered() + fn into_parts(self) -> (::Tx, Recovered) { + (self.tx_env, Arc::unwrap_or_clone(self.tx)) } } @@ -479,10 +461,11 @@ where &ExecutionResult<<::Evm as Evm>::HaltReason>, ) -> CommitChanges, ) -> Result, BlockExecutionError> { + let (tx_env, tx) = tx.into_parts(); if let Some(gas_used) = - self.executor.execute_transaction_with_commit_condition(tx.as_executable(), f)? + self.executor.execute_transaction_with_commit_condition((tx_env, &tx), f)? { - self.transactions.push(tx.into_recovered()); + self.transactions.push(tx); Ok(Some(gas_used)) } else { Ok(None) @@ -609,20 +592,20 @@ where } } -/// A helper trait marking a 'static type that can be converted into an [`ExecutableTx`] for block -/// executor. +/// A helper trait marking a 'static type that can be converted into an [`ExecutableTxParts`] for +/// block executor. pub trait ExecutableTxFor: - ToTxEnv> + RecoveredTx> + ExecutableTxParts, TxTy> + RecoveredTx> { } impl ExecutableTxFor for T where - T: ToTxEnv> + RecoveredTx> + T: ExecutableTxParts, TxTy> + RecoveredTx> { } /// A container for a transaction and a transaction environment. 
-#[derive(Debug, Clone)] +#[derive(Debug)] pub struct WithTxEnv { /// The transaction environment for EVM. pub tx_env: TxEnv, @@ -630,6 +613,12 @@ pub struct WithTxEnv { pub tx: Arc, } +impl Clone for WithTxEnv { + fn clone(&self) -> Self { + Self { tx_env: self.tx_env.clone(), tx: self.tx.clone() } + } +} + impl> RecoveredTx for WithTxEnv { fn tx(&self) -> &Tx { self.tx.tx() @@ -640,9 +629,11 @@ impl> RecoveredTx for WithTxEnv { } } -impl ToTxEnv for WithTxEnv { - fn to_tx_env(&self) -> TxEnv { - self.tx_env.clone() +impl, Tx> ExecutableTxParts for WithTxEnv { + type Recovered = Arc; + + fn into_parts(self) -> (TxEnv, Self::Recovered) { + (self.tx_env, self.tx) } } diff --git a/crates/optimism/evm/src/receipts.rs b/crates/optimism/evm/src/receipts.rs index 50ca3679ccc..9b95cf3a489 100644 --- a/crates/optimism/evm/src/receipts.rs +++ b/crates/optimism/evm/src/receipts.rs @@ -17,9 +17,9 @@ impl OpReceiptBuilder for OpRethReceiptBuilder { fn build_receipt<'a, E: Evm>( &self, - ctx: ReceiptBuilderCtx<'a, OpTransactionSigned, E>, - ) -> Result> { - match ctx.tx.tx_type() { + ctx: ReceiptBuilderCtx<'a, OpTxType, E>, + ) -> Result> { + match ctx.tx_type { OpTxType::Deposit => Err(ctx), ty => { let receipt = Receipt { diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index d1e59384c5c..4ba63885fa4 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -6,9 +6,9 @@ use alloy_eips::eip4895::Withdrawal; use alloy_evm::{ block::{BlockExecutorFactory, BlockExecutorFor, ExecutableTx}, - eth::{EthBlockExecutionCtx, EthBlockExecutor}, + eth::{EthBlockExecutionCtx, EthBlockExecutor, EthTxResult}, precompiles::PrecompilesMap, - revm::context::{result::ResultAndState, Block as _}, + revm::context::Block as _, EthEvm, EthEvmFactory, }; use alloy_sol_macro::sol; @@ -39,7 +39,7 @@ use reth_ethereum::{ primitives::{Header, SealedBlock, SealedHeader}, 
provider::BlockExecutionResult, rpc::types::engine::ExecutionData, - Block, EthPrimitives, Receipt, TransactionSigned, + Block, EthPrimitives, Receipt, TransactionSigned, TxType, }; use std::{fmt::Display, sync::Arc}; @@ -196,6 +196,7 @@ where type Transaction = TransactionSigned; type Receipt = Receipt; type Evm = E; + type Result = EthTxResult; fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { self.inner.apply_pre_execution_changes() @@ -208,16 +209,12 @@ where fn execute_transaction_without_commit( &mut self, tx: impl ExecutableTx, - ) -> Result::HaltReason>, BlockExecutionError> { + ) -> Result { self.inner.execute_transaction_without_commit(tx) } - fn commit_transaction( - &mut self, - output: ResultAndState<::HaltReason>, - tx: impl ExecutableTx, - ) -> Result { - self.inner.commit_transaction(output, tx) + fn commit_transaction(&mut self, output: Self::Result) -> Result { + self.inner.commit_transaction(output) } fn finish(mut self) -> Result<(Self::Evm, BlockExecutionResult), BlockExecutionError> { diff --git a/examples/custom-node/src/evm/executor.rs b/examples/custom-node/src/evm/executor.rs index 575b4949c0a..3b935b4c64e 100644 --- a/examples/custom-node/src/evm/executor.rs +++ b/examples/custom-node/src/evm/executor.rs @@ -12,12 +12,12 @@ use alloy_evm::{ BlockExecutorFor, ExecutableTx, OnStateHook, }, precompiles::PrecompilesMap, - Database, Evm, + Database, Evm, RecoveredTx, }; -use alloy_op_evm::{OpBlockExecutionCtx, OpBlockExecutor}; +use alloy_op_evm::{block::OpTxResult, OpBlockExecutionCtx, OpBlockExecutor}; use reth_ethereum::evm::primitives::InspectorFor; -use reth_op::{chainspec::OpChainSpec, node::OpRethReceiptBuilder, OpReceipt}; -use revm::{context::result::ResultAndState, database::State}; +use reth_op::{chainspec::OpChainSpec, node::OpRethReceiptBuilder, OpReceipt, OpTxType}; +use revm::database::State; use std::sync::Arc; pub struct CustomBlockExecutor { @@ -32,6 +32,7 @@ where type Transaction = 
CustomTransaction; type Receipt = OpReceipt; type Evm = E; + type Result = OpTxResult; fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { self.inner.apply_pre_execution_changes() @@ -44,7 +45,8 @@ where fn execute_transaction_without_commit( &mut self, tx: impl ExecutableTx, - ) -> Result::HaltReason>, BlockExecutionError> { + ) -> Result { + let tx = tx.into_parts().1; match tx.tx() { CustomTransaction::Op(op_tx) => self .inner @@ -53,17 +55,8 @@ where } } - fn commit_transaction( - &mut self, - output: ResultAndState<::HaltReason>, - tx: impl ExecutableTx, - ) -> Result { - match tx.tx() { - CustomTransaction::Op(op_tx) => { - self.inner.commit_transaction(output, Recovered::new_unchecked(op_tx, *tx.signer())) - } - CustomTransaction::Payment(..) => todo!(), - } + fn commit_transaction(&mut self, output: Self::Result) -> Result { + self.inner.commit_transaction(output) } fn finish(self) -> Result<(Self::Evm, BlockExecutionResult), BlockExecutionError> { From a0df561117013eba987759ef52019fd00f117be2 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 22 Jan 2026 19:04:56 +0000 Subject: [PATCH 157/267] fix(rocksdb): periodic batch commits in stages to prevent OOM (#21334) Co-authored-by: Amp --- .../src/stages/index_account_history.rs | 2 +- .../src/stages/index_storage_history.rs | 2 +- crates/stages/stages/src/stages/mod.rs | 3 +- crates/stages/stages/src/stages/tx_lookup.rs | 9 +- .../src/providers/rocksdb/provider.rs | 103 ++++++++++++++++++ .../provider/src/traits/rocksdb_provider.rs | 26 +++++ 6 files changed, 134 insertions(+), 11 deletions(-) diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 92fa5f3244c..5a3ba750d52 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -136,7 +136,7 @@ where info!(target: 
"sync::stages::index_account_history::exec", "Loading indices into database"); - provider.with_rocksdb_batch(|rocksdb_batch| { + provider.with_rocksdb_batch_auto_commit(|rocksdb_batch| { let mut writer = EitherWriter::new_accounts_history(provider, rocksdb_batch)?; load_account_history(collector, first_sync, &mut writer) .map_err(|e| reth_provider::ProviderError::other(Box::new(e)))?; diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 36f2f6ede6b..08192c8871b 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -140,7 +140,7 @@ where info!(target: "sync::stages::index_storage_history::exec", "Loading indices into database"); - provider.with_rocksdb_batch(|rocksdb_batch| { + provider.with_rocksdb_batch_auto_commit(|rocksdb_batch| { let mut writer = EitherWriter::new_storages_history(provider, rocksdb_batch)?; load_storage_history(collector, first_sync, &mut writer) .map_err(|e| reth_provider::ProviderError::other(Box::new(e)))?; diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 8249d749147..1145af1b934 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -1,5 +1,6 @@ /// The bodies stage. mod bodies; +mod era; /// The execution stage that generates state diff. 
mod execution; /// The finish stage @@ -36,9 +37,7 @@ pub use prune::*; pub use sender_recovery::*; pub use tx_lookup::*; -mod era; mod utils; - use utils::*; #[cfg(test)] diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 087a040f795..bf056a655bf 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -158,15 +158,13 @@ where let append_only = provider.count_entries::()?.is_zero(); - // Create RocksDB batch if feature is enabled + // Auto-commits on threshold; consistency check heals any crash. #[cfg(all(unix, feature = "rocksdb"))] let rocksdb = provider.rocksdb_provider(); #[cfg(all(unix, feature = "rocksdb"))] - let rocksdb_batch = rocksdb.batch(); + let rocksdb_batch = rocksdb.batch_with_auto_commit(); #[cfg(not(all(unix, feature = "rocksdb")))] let rocksdb_batch = (); - - // Create writer that routes to either MDBX or RocksDB based on settings let mut writer = EitherWriter::new_transaction_hash_numbers(provider, rocksdb_batch)?; @@ -217,15 +215,12 @@ where ) -> Result { let (range, unwind_to, _) = input.unwind_block_range_with_threshold(self.chunk_size); - // Create RocksDB batch if feature is enabled #[cfg(all(unix, feature = "rocksdb"))] let rocksdb = provider.rocksdb_provider(); #[cfg(all(unix, feature = "rocksdb"))] let rocksdb_batch = rocksdb.batch(); #[cfg(not(all(unix, feature = "rocksdb")))] let rocksdb_batch = (); - - // Create writer that routes to either MDBX or RocksDB based on settings let mut writer = EitherWriter::new_transaction_hash_numbers(provider, rocksdb_batch)?; let static_file_provider = provider.static_file_provider(); diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index d9aba31e5a6..75b9e6fa5de 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -98,6 
+98,13 @@ const DEFAULT_BYTES_PER_SYNC: u64 = 1_048_576; /// reducing the first few reallocations without over-allocating. const DEFAULT_COMPRESS_BUF_CAPACITY: usize = 4096; +/// Default auto-commit threshold for batch writes (4 GiB). +/// +/// When a batch exceeds this size, it is automatically committed to prevent OOM +/// during large bulk writes. The consistency check on startup heals any crash +/// that occurs between auto-commits. +const DEFAULT_AUTO_COMMIT_THRESHOLD: usize = 4 * 1024 * 1024 * 1024; + /// Builder for [`RocksDBProvider`]. pub struct RocksDBBuilder { path: PathBuf, @@ -629,6 +636,21 @@ impl RocksDBProvider { provider: self, inner: WriteBatchWithTransaction::::default(), buf: Vec::with_capacity(DEFAULT_COMPRESS_BUF_CAPACITY), + auto_commit_threshold: None, + } + } + + /// Creates a new batch with auto-commit enabled. + /// + /// When the batch size exceeds the threshold (4 GiB), the batch is automatically + /// committed and reset. This prevents OOM during large bulk writes while maintaining + /// crash-safety via the consistency check on startup. + pub fn batch_with_auto_commit(&self) -> RocksDBBatch<'_> { + RocksDBBatch { + provider: self, + inner: WriteBatchWithTransaction::::default(), + buf: Vec::with_capacity(DEFAULT_COMPRESS_BUF_CAPACITY), + auto_commit_threshold: Some(DEFAULT_AUTO_COMMIT_THRESHOLD), } } @@ -1137,11 +1159,16 @@ impl RocksDBProvider { /// Unlike [`RocksTx`], this does NOT support read-your-writes. Use for write-only flows /// where you don't need to read back uncommitted data within the same operation /// (e.g., history index writes). +/// +/// When `auto_commit_threshold` is set, the batch will automatically commit and reset +/// when the batch size exceeds the threshold. This prevents OOM during large bulk writes. 
#[must_use = "batch must be committed"] pub struct RocksDBBatch<'a> { provider: &'a RocksDBProvider, inner: WriteBatchWithTransaction, buf: Vec, + /// If set, batch auto-commits when size exceeds this threshold (in bytes). + auto_commit_threshold: Option, } impl fmt::Debug for RocksDBBatch<'_> { @@ -1160,12 +1187,16 @@ impl fmt::Debug for RocksDBBatch<'_> { impl<'a> RocksDBBatch<'a> { /// Puts a value into the batch. + /// + /// If auto-commit is enabled and the batch exceeds the threshold, commits and resets. pub fn put(&mut self, key: T::Key, value: &T::Value) -> ProviderResult<()> { let encoded_key = key.encode(); self.put_encoded::(&encoded_key, value) } /// Puts a value into the batch using pre-encoded key. + /// + /// If auto-commit is enabled and the batch exceeds the threshold, commits and resets. pub fn put_encoded( &mut self, key: &::Encoded, @@ -1173,12 +1204,43 @@ impl<'a> RocksDBBatch<'a> { ) -> ProviderResult<()> { let value_bytes = compress_to_buf_or_ref!(self.buf, value).unwrap_or(&self.buf); self.inner.put_cf(self.provider.get_cf_handle::()?, key, value_bytes); + self.maybe_auto_commit()?; Ok(()) } /// Deletes a value from the batch. + /// + /// If auto-commit is enabled and the batch exceeds the threshold, commits and resets. pub fn delete(&mut self, key: T::Key) -> ProviderResult<()> { self.inner.delete_cf(self.provider.get_cf_handle::()?, key.encode().as_ref()); + self.maybe_auto_commit()?; + Ok(()) + } + + /// Commits and resets the batch if it exceeds the auto-commit threshold. + /// + /// This is called after each `put` or `delete` operation to prevent unbounded memory growth. + /// Returns immediately if auto-commit is disabled or threshold not reached. 
+ fn maybe_auto_commit(&mut self) -> ProviderResult<()> { + if let Some(threshold) = self.auto_commit_threshold && + self.inner.size_in_bytes() >= threshold + { + tracing::debug!( + target: "providers::rocksdb", + batch_size = self.inner.size_in_bytes(), + threshold, + "Auto-committing RocksDB batch" + ); + let old_batch = std::mem::take(&mut self.inner); + self.provider.0.db_rw().write_opt(old_batch, &WriteOptions::default()).map_err( + |e| { + ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + }, + )?; + } Ok(()) } @@ -1208,6 +1270,11 @@ impl<'a> RocksDBBatch<'a> { self.inner.is_empty() } + /// Returns the size of the batch in bytes. + pub fn size_in_bytes(&self) -> usize { + self.inner.size_in_bytes() + } + /// Returns a reference to the underlying `RocksDB` provider. pub const fn provider(&self) -> &RocksDBProvider { self.provider @@ -2767,4 +2834,40 @@ mod tests { assert_eq!(shards[1].0.highest_block_number, u64::MAX); assert_eq!(shards[1].1.iter().collect::>(), (51..=75).collect::>()); } + + #[test] + fn test_batch_auto_commit_on_threshold() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // Create batch with tiny threshold (1KB) to force auto-commits + let mut batch = RocksDBBatch { + provider: &provider, + inner: WriteBatchWithTransaction::::default(), + buf: Vec::new(), + auto_commit_threshold: Some(1024), // 1KB + }; + + // Write entries until we exceed threshold multiple times + // Each entry is ~20 bytes, so 100 entries = ~2KB = 2 auto-commits + for i in 0..100u64 { + let value = format!("value_{i:04}").into_bytes(); + batch.put::(i, &value).unwrap(); + } + + // Data should already be visible (auto-committed) even before final commit + // At least some entries should be readable + let first_visible = provider.get::(0).unwrap(); + assert!(first_visible.is_some(), "Auto-committed data should be 
visible"); + + // Final commit for remaining batch + batch.commit().unwrap(); + + // All entries should now be visible + for i in 0..100u64 { + let value = format!("value_{i:04}").into_bytes(); + assert_eq!(provider.get::(i).unwrap(), Some(value)); + } + } } diff --git a/crates/storage/provider/src/traits/rocksdb_provider.rs b/crates/storage/provider/src/traits/rocksdb_provider.rs index b4abacd86e5..07c9b478661 100644 --- a/crates/storage/provider/src/traits/rocksdb_provider.rs +++ b/crates/storage/provider/src/traits/rocksdb_provider.rs @@ -58,4 +58,30 @@ pub trait RocksDBProviderFactory { Ok(result) } } + + /// Executes a closure with a `RocksDB` batch that auto-commits on threshold. + /// + /// Unlike [`Self::with_rocksdb_batch`], this uses a batch that automatically commits + /// when it exceeds the size threshold, preventing OOM during large bulk writes. + /// The consistency check on startup heals any crash between auto-commits. + fn with_rocksdb_batch_auto_commit(&self, f: F) -> ProviderResult + where + F: FnOnce(RocksBatchArg<'_>) -> ProviderResult<(R, Option)>, + { + #[cfg(all(unix, feature = "rocksdb"))] + { + let rocksdb = self.rocksdb_provider(); + let batch = rocksdb.batch_with_auto_commit(); + let (result, raw_batch) = f(batch)?; + if let Some(b) = raw_batch { + self.set_pending_rocksdb_batch(b); + } + Ok(result) + } + #[cfg(not(all(unix, feature = "rocksdb")))] + { + let (result, _) = f(())?; + Ok(result) + } + } } From 937a7f226df94e11776cc582644e75ab9283740f Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 22 Jan 2026 11:14:58 -0800 Subject: [PATCH 158/267] fix(rpc): use Default for SimulateError to prepare for alloy breaking change (#21319) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-eth-types/src/simulate.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 7f122723fa3..98658a75b52 100644 --- 
a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -258,11 +258,13 @@ where let call = match result { ExecutionResult::Halt { reason, gas_used } => { let error = Err::from_evm_halt(reason, tx.gas_limit()); + #[allow(clippy::needless_update)] SimCallResult { return_data: Bytes::new(), error: Some(SimulateError { message: error.to_string(), code: error.into().code(), + ..SimulateError::invalid_params() }), gas_used, logs: Vec::new(), @@ -271,11 +273,13 @@ where } ExecutionResult::Revert { output, gas_used } => { let error = Err::from_revert(output.clone()); + #[allow(clippy::needless_update)] SimCallResult { return_data: output, error: Some(SimulateError { message: error.to_string(), code: error.into().code(), + ..SimulateError::invalid_params() }), gas_used, status: false, From a02508600c9a5a493c9c6673ee2ebcc78ce87cb1 Mon Sep 17 00:00:00 2001 From: Seola Oh Date: Fri, 23 Jan 2026 04:35:00 +0900 Subject: [PATCH 159/267] chore(txpool): explicitly deref RwLockReadGuard in PartialEq impl (#21336) --- crates/transaction-pool/src/blobstore/mem.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 1a3fef8cea0..d9d8a5c2808 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -56,7 +56,7 @@ struct InMemoryBlobStoreInner { impl PartialEq for InMemoryBlobStoreInner { fn eq(&self, other: &Self) -> bool { - self.store.read().eq(&other.store.read()) + self.store.read().eq(&*other.store.read()) } } From 653362a43609e854e3905f10415c1242a2aa8632 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 22 Jan 2026 20:48:53 +0100 Subject: [PATCH 160/267] ci: align check-alloy workflow with main clippy job (#21329) --- .github/workflows/check-alloy.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/check-alloy.yml 
b/.github/workflows/check-alloy.yml index cddfd12dd0b..44e7845ef60 100644 --- a/.github/workflows/check-alloy.yml +++ b/.github/workflows/check-alloy.yml @@ -60,7 +60,6 @@ jobs: tail -50 Cargo.toml - name: Check workspace - run: cargo check --workspace --all-features - - - name: Check Optimism - run: cargo check -p reth-optimism-node --all-features + run: cargo clippy --workspace --lib --examples --tests --benches --all-features --locked + env: + RUSTFLAGS: -D warnings From f643e93c353abed4cd36612a36d5d0b2e08467bd Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 22 Jan 2026 19:42:19 +0000 Subject: [PATCH 161/267] feat(reth-bench): send-invalid-payload command (#21335) --- bin/reth-bench/Cargo.toml | 2 +- bin/reth-bench/src/bench/mod.rs | 14 + .../send_invalid_payload/invalidation.rs | 219 +++++++++++ .../src/bench/send_invalid_payload/mod.rs | 367 ++++++++++++++++++ 4 files changed, 601 insertions(+), 1 deletion(-) create mode 100644 bin/reth-bench/src/bench/send_invalid_payload/invalidation.rs create mode 100644 bin/reth-bench/src/bench/send_invalid_payload/mod.rs diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 52bb9b036fc..45ec0cb53d4 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -32,7 +32,7 @@ alloy-eips.workspace = true alloy-json-rpc.workspace = true alloy-consensus.workspace = true alloy-network.workspace = true -alloy-primitives.workspace = true +alloy-primitives = { workspace = true, features = ["rand"] } alloy-provider = { workspace = true, features = ["engine-api", "pubsub", "reqwest-rustls-tls"], default-features = false } alloy-pubsub.workspace = true alloy-rpc-client = { workspace = true, features = ["pubsub"] } diff --git a/bin/reth-bench/src/bench/mod.rs b/bin/reth-bench/src/bench/mod.rs index fd1d0cccd34..5ccbc775467 100644 --- a/bin/reth-bench/src/bench/mod.rs +++ b/bin/reth-bench/src/bench/mod.rs @@ -16,6 +16,7 @@ mod new_payload_fcu; mod 
new_payload_only; mod output; mod replay_payloads; +mod send_invalid_payload; mod send_payload; /// `reth bench` command @@ -74,6 +75,18 @@ pub enum Subcommands { /// `reth-bench replay-payloads --payload-dir ./payloads --engine-rpc-url /// http://localhost:8551 --jwt-secret ~/.local/share/reth/mainnet/jwt.hex` ReplayPayloads(replay_payloads::Command), + + /// Generate and send an invalid `engine_newPayload` request for testing. + /// + /// Takes a valid block and modifies fields to make it invalid, allowing you to test + /// Engine API rejection behavior. Block hash is recalculated after modifications + /// unless `--invalid-block-hash` or `--skip-hash-recalc` is used. + /// + /// Example: + /// + /// `cast block latest --full --json | reth-bench send-invalid-payload --rpc-url localhost:5000 + /// --jwt-secret $(cat ~/.local/share/reth/mainnet/jwt.hex) --invalid-state-root` + SendInvalidPayload(Box), } impl BenchmarkCommand { @@ -89,6 +102,7 @@ impl BenchmarkCommand { Subcommands::SendPayload(command) => command.execute(ctx).await, Subcommands::GenerateBigBlock(command) => command.execute(ctx).await, Subcommands::ReplayPayloads(command) => command.execute(ctx).await, + Subcommands::SendInvalidPayload(command) => (*command).execute(ctx).await, } } diff --git a/bin/reth-bench/src/bench/send_invalid_payload/invalidation.rs b/bin/reth-bench/src/bench/send_invalid_payload/invalidation.rs new file mode 100644 index 00000000000..a9f1b606596 --- /dev/null +++ b/bin/reth-bench/src/bench/send_invalid_payload/invalidation.rs @@ -0,0 +1,219 @@ +use alloy_eips::eip4895::Withdrawal; +use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; +use alloy_rpc_types_engine::{ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}; + +/// Configuration for invalidating payload fields +#[derive(Debug, Default)] +pub(super) struct InvalidationConfig { + // Explicit value overrides (Option) + pub(super) parent_hash: Option, + pub(super) fee_recipient: Option

, + pub(super) state_root: Option, + pub(super) receipts_root: Option, + pub(super) logs_bloom: Option, + pub(super) prev_randao: Option, + pub(super) block_number: Option, + pub(super) gas_limit: Option, + pub(super) gas_used: Option, + pub(super) timestamp: Option, + pub(super) extra_data: Option, + pub(super) base_fee_per_gas: Option, + pub(super) block_hash: Option, + pub(super) blob_gas_used: Option, + pub(super) excess_blob_gas: Option, + + // Auto-invalidation flags + pub(super) invalidate_parent_hash: bool, + pub(super) invalidate_state_root: bool, + pub(super) invalidate_receipts_root: bool, + pub(super) invalidate_gas_used: bool, + pub(super) invalidate_block_number: bool, + pub(super) invalidate_timestamp: bool, + pub(super) invalidate_base_fee: bool, + pub(super) invalidate_transactions: bool, + pub(super) invalidate_block_hash: bool, + pub(super) invalidate_withdrawals: bool, + pub(super) invalidate_blob_gas_used: bool, + pub(super) invalidate_excess_blob_gas: bool, +} + +impl InvalidationConfig { + /// Returns true if `block_hash` is being explicitly set or auto-invalidated. + /// When true, the caller should skip recalculating the block hash since it will be overwritten. + pub(super) const fn should_skip_hash_recalc(&self) -> bool { + self.block_hash.is_some() || self.invalidate_block_hash + } + + /// Applies invalidations to a V1 payload, returns list of what was changed. 
+ pub(super) fn apply_to_payload_v1(&self, payload: &mut ExecutionPayloadV1) -> Vec { + let mut changes = Vec::new(); + + // Explicit value overrides + if let Some(parent_hash) = self.parent_hash { + payload.parent_hash = parent_hash; + changes.push(format!("parent_hash = {parent_hash}")); + } + + if let Some(fee_recipient) = self.fee_recipient { + payload.fee_recipient = fee_recipient; + changes.push(format!("fee_recipient = {fee_recipient}")); + } + + if let Some(state_root) = self.state_root { + payload.state_root = state_root; + changes.push(format!("state_root = {state_root}")); + } + + if let Some(receipts_root) = self.receipts_root { + payload.receipts_root = receipts_root; + changes.push(format!("receipts_root = {receipts_root}")); + } + + if let Some(logs_bloom) = self.logs_bloom { + payload.logs_bloom = logs_bloom; + changes.push("logs_bloom = ".to_string()); + } + + if let Some(prev_randao) = self.prev_randao { + payload.prev_randao = prev_randao; + changes.push(format!("prev_randao = {prev_randao}")); + } + + if let Some(block_number) = self.block_number { + payload.block_number = block_number; + changes.push(format!("block_number = {block_number}")); + } + + if let Some(gas_limit) = self.gas_limit { + payload.gas_limit = gas_limit; + changes.push(format!("gas_limit = {gas_limit}")); + } + + if let Some(gas_used) = self.gas_used { + payload.gas_used = gas_used; + changes.push(format!("gas_used = {gas_used}")); + } + + if let Some(timestamp) = self.timestamp { + payload.timestamp = timestamp; + changes.push(format!("timestamp = {timestamp}")); + } + + if let Some(ref extra_data) = self.extra_data { + payload.extra_data = extra_data.clone(); + changes.push(format!("extra_data = {} bytes", extra_data.len())); + } + + if let Some(base_fee_per_gas) = self.base_fee_per_gas { + payload.base_fee_per_gas = U256::from_limbs([base_fee_per_gas, 0, 0, 0]); + changes.push(format!("base_fee_per_gas = {base_fee_per_gas}")); + } + + if let Some(block_hash) = 
self.block_hash { + payload.block_hash = block_hash; + changes.push(format!("block_hash = {block_hash}")); + } + + // Auto-invalidation flags + if self.invalidate_parent_hash { + let random_hash = B256::random(); + payload.parent_hash = random_hash; + changes.push(format!("parent_hash = {random_hash} (auto-invalidated: random)")); + } + + if self.invalidate_state_root { + payload.state_root = B256::ZERO; + changes.push("state_root = ZERO (auto-invalidated: empty trie root)".to_string()); + } + + if self.invalidate_receipts_root { + payload.receipts_root = B256::ZERO; + changes.push("receipts_root = ZERO (auto-invalidated)".to_string()); + } + + if self.invalidate_gas_used { + let invalid_gas = payload.gas_limit + 1; + payload.gas_used = invalid_gas; + changes.push(format!("gas_used = {invalid_gas} (auto-invalidated: exceeds gas_limit)")); + } + + if self.invalidate_block_number { + let invalid_number = payload.block_number + 999; + payload.block_number = invalid_number; + changes.push(format!("block_number = {invalid_number} (auto-invalidated: huge gap)")); + } + + if self.invalidate_timestamp { + payload.timestamp = 0; + changes.push("timestamp = 0 (auto-invalidated: impossibly old)".to_string()); + } + + if self.invalidate_base_fee { + payload.base_fee_per_gas = U256::ZERO; + changes + .push("base_fee_per_gas = 0 (auto-invalidated: invalid post-London)".to_string()); + } + + if self.invalidate_transactions { + let invalid_tx = Bytes::from_static(&[0xff, 0xff, 0xff]); + payload.transactions.insert(0, invalid_tx); + changes.push("transactions = prepended invalid RLP (auto-invalidated)".to_string()); + } + + if self.invalidate_block_hash { + let random_hash = B256::random(); + payload.block_hash = random_hash; + changes.push(format!("block_hash = {random_hash} (auto-invalidated: random)")); + } + + changes + } + + /// Applies invalidations to a V2 payload, returns list of what was changed. 
+ pub(super) fn apply_to_payload_v2(&self, payload: &mut ExecutionPayloadV2) -> Vec { + let mut changes = self.apply_to_payload_v1(&mut payload.payload_inner); + + // Handle withdrawals invalidation (V2+) + if self.invalidate_withdrawals { + let fake_withdrawal = Withdrawal { + index: u64::MAX, + validator_index: u64::MAX, + address: Address::ZERO, + amount: u64::MAX, + }; + payload.withdrawals.push(fake_withdrawal); + changes.push("withdrawals = added fake withdrawal (auto-invalidated)".to_string()); + } + + changes + } + + /// Applies invalidations to a V3 payload, returns list of what was changed. + pub(super) fn apply_to_payload_v3(&self, payload: &mut ExecutionPayloadV3) -> Vec { + let mut changes = self.apply_to_payload_v2(&mut payload.payload_inner); + + // Explicit overrides for V3 fields + if let Some(blob_gas_used) = self.blob_gas_used { + payload.blob_gas_used = blob_gas_used; + changes.push(format!("blob_gas_used = {blob_gas_used}")); + } + + if let Some(excess_blob_gas) = self.excess_blob_gas { + payload.excess_blob_gas = excess_blob_gas; + changes.push(format!("excess_blob_gas = {excess_blob_gas}")); + } + + // Auto-invalidation for V3 fields + if self.invalidate_blob_gas_used { + payload.blob_gas_used = u64::MAX; + changes.push("blob_gas_used = MAX (auto-invalidated)".to_string()); + } + + if self.invalidate_excess_blob_gas { + payload.excess_blob_gas = u64::MAX; + changes.push("excess_blob_gas = MAX (auto-invalidated)".to_string()); + } + + changes + } +} diff --git a/bin/reth-bench/src/bench/send_invalid_payload/mod.rs b/bin/reth-bench/src/bench/send_invalid_payload/mod.rs new file mode 100644 index 00000000000..3fb2d9a71cd --- /dev/null +++ b/bin/reth-bench/src/bench/send_invalid_payload/mod.rs @@ -0,0 +1,367 @@ +//! Command for sending invalid payloads to test Engine API rejection. 
+ +mod invalidation; +use invalidation::InvalidationConfig; + +use alloy_primitives::{Address, B256}; +use alloy_provider::network::AnyRpcBlock; +use alloy_rpc_types_engine::ExecutionPayload; +use clap::Parser; +use eyre::{OptionExt, Result}; +use op_alloy_consensus::OpTxEnvelope; +use reth_cli_runner::CliContext; +use std::io::{BufReader, Read, Write}; + +/// Command for generating and sending an invalid `engine_newPayload` request. +/// +/// Takes a valid block and modifies fields to make it invalid for testing +/// Engine API rejection behavior. Block hash is recalculated after modifications +/// unless `--invalidate-block-hash` or `--skip-hash-recalc` is used. +#[derive(Debug, Parser)] +pub struct Command { + // ==================== Input Options ==================== + /// Path to the JSON file containing the block. If not specified, stdin will be used. + #[arg(short, long, help_heading = "Input Options")] + path: Option, + + /// The engine RPC URL to use. + #[arg( + short, + long, + help_heading = "Input Options", + required_if_eq_any([("mode", "execute"), ("mode", "cast")]), + required_unless_present("mode") + )] + rpc_url: Option, + + /// The JWT secret to use. Can be either a path to a file containing the secret or the secret + /// itself. + #[arg(short, long, help_heading = "Input Options")] + jwt_secret: Option, + + /// The newPayload version to use (3 or 4). + #[arg(long, default_value_t = 3, help_heading = "Input Options")] + new_payload_version: u8, + + /// The output mode to use. + #[arg(long, value_enum, default_value = "execute", help_heading = "Input Options")] + mode: Mode, + + // ==================== Explicit Value Overrides ==================== + /// Override the parent hash with a specific value. + #[arg(long, value_name = "HASH", help_heading = "Explicit Value Overrides")] + parent_hash: Option, + + /// Override the fee recipient (coinbase) with a specific address. 
+ #[arg(long, value_name = "ADDR", help_heading = "Explicit Value Overrides")] + fee_recipient: Option
, + + /// Override the state root with a specific value. + #[arg(long, value_name = "HASH", help_heading = "Explicit Value Overrides")] + state_root: Option, + + /// Override the receipts root with a specific value. + #[arg(long, value_name = "HASH", help_heading = "Explicit Value Overrides")] + receipts_root: Option, + + /// Override the block number with a specific value. + #[arg(long, value_name = "U64", help_heading = "Explicit Value Overrides")] + block_number: Option, + + /// Override the gas limit with a specific value. + #[arg(long, value_name = "U64", help_heading = "Explicit Value Overrides")] + gas_limit: Option, + + /// Override the gas used with a specific value. + #[arg(long, value_name = "U64", help_heading = "Explicit Value Overrides")] + gas_used: Option, + + /// Override the timestamp with a specific value. + #[arg(long, value_name = "U64", help_heading = "Explicit Value Overrides")] + timestamp: Option, + + /// Override the base fee per gas with a specific value. + #[arg(long, value_name = "U64", help_heading = "Explicit Value Overrides")] + base_fee_per_gas: Option, + + /// Override the block hash with a specific value (skips hash recalculation). + #[arg(long, value_name = "HASH", help_heading = "Explicit Value Overrides")] + block_hash: Option, + + /// Override the blob gas used with a specific value. + #[arg(long, value_name = "U64", help_heading = "Explicit Value Overrides")] + blob_gas_used: Option, + + /// Override the excess blob gas with a specific value. + #[arg(long, value_name = "U64", help_heading = "Explicit Value Overrides")] + excess_blob_gas: Option, + + /// Override the parent beacon block root with a specific value. + #[arg(long, value_name = "HASH", help_heading = "Explicit Value Overrides")] + parent_beacon_block_root: Option, + + /// Override the requests hash with a specific value (EIP-7685). 
+ #[arg(long, value_name = "HASH", help_heading = "Explicit Value Overrides")] + requests_hash: Option, + + // ==================== Auto-Invalidation Flags ==================== + /// Invalidate the parent hash by setting it to a random value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_parent_hash: bool, + + /// Invalidate the state root by setting it to a random value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_state_root: bool, + + /// Invalidate the receipts root by setting it to a random value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_receipts_root: bool, + + /// Invalidate the gas used by setting it to an incorrect value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_gas_used: bool, + + /// Invalidate the block number by setting it to an incorrect value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_block_number: bool, + + /// Invalidate the timestamp by setting it to an incorrect value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_timestamp: bool, + + /// Invalidate the base fee by setting it to an incorrect value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_base_fee: bool, + + /// Invalidate the transactions by modifying them. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_transactions: bool, + + /// Invalidate the block hash by not recalculating it after modifications. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_block_hash: bool, + + /// Invalidate the withdrawals by modifying them. 
+ #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_withdrawals: bool, + + /// Invalidate the blob gas used by setting it to an incorrect value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_blob_gas_used: bool, + + /// Invalidate the excess blob gas by setting it to an incorrect value. + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_excess_blob_gas: bool, + + /// Invalidate the requests hash by setting it to a random value (EIP-7685). + #[arg(long, default_value_t = false, help_heading = "Auto-Invalidation Flags")] + invalidate_requests_hash: bool, + + // ==================== Meta Flags ==================== + /// Skip block hash recalculation after modifications. + #[arg(long, default_value_t = false, help_heading = "Meta Flags")] + skip_hash_recalc: bool, + + /// Print what would be done without actually sending the payload. + #[arg(long, default_value_t = false, help_heading = "Meta Flags")] + dry_run: bool, +} + +#[derive(Debug, Clone, clap::ValueEnum)] +enum Mode { + /// Execute the `cast` command. This works with blocks of any size, because it pipes the + /// payload into the `cast` command. + Execute, + /// Print the `cast` command. Caution: this may not work with large blocks because of the + /// command length limit. + Cast, + /// Print the JSON payload. Can be piped into `cast` command if the block is small enough. 
+ Json, +} + +impl Command { + /// Read input from either a file or stdin + fn read_input(&self) -> Result { + Ok(match &self.path { + Some(path) => reth_fs_util::read_to_string(path)?, + None => String::from_utf8( + BufReader::new(std::io::stdin()).bytes().collect::, _>>()?, + )?, + }) + } + + /// Load JWT secret from either a file or use the provided string directly + fn load_jwt_secret(&self) -> Result> { + match &self.jwt_secret { + Some(secret) => match std::fs::read_to_string(secret) { + Ok(contents) => Ok(Some(contents.trim().to_string())), + Err(_) => Ok(Some(secret.clone())), + }, + None => Ok(None), + } + } + + /// Build `InvalidationConfig` from command flags + const fn build_invalidation_config(&self) -> InvalidationConfig { + InvalidationConfig { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: None, + prev_randao: None, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: None, + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + blob_gas_used: self.blob_gas_used, + excess_blob_gas: self.excess_blob_gas, + invalidate_parent_hash: self.invalidate_parent_hash, + invalidate_state_root: self.invalidate_state_root, + invalidate_receipts_root: self.invalidate_receipts_root, + invalidate_gas_used: self.invalidate_gas_used, + invalidate_block_number: self.invalidate_block_number, + invalidate_timestamp: self.invalidate_timestamp, + invalidate_base_fee: self.invalidate_base_fee, + invalidate_transactions: self.invalidate_transactions, + invalidate_block_hash: self.invalidate_block_hash, + invalidate_withdrawals: self.invalidate_withdrawals, + invalidate_blob_gas_used: self.invalidate_blob_gas_used, + invalidate_excess_blob_gas: self.invalidate_excess_blob_gas, + } + } + + /// Execute the command + pub async fn execute(self, _ctx: CliContext) -> Result<()> { 
+ let block_json = self.read_input()?; + let jwt_secret = self.load_jwt_secret()?; + + let block = serde_json::from_str::(&block_json)? + .into_inner() + .map_header(|header| header.map(|h| h.into_header_with_defaults())) + .try_map_transactions(|tx| tx.try_into_either::())? + .into_consensus(); + + let config = self.build_invalidation_config(); + + let parent_beacon_block_root = + self.parent_beacon_block_root.or(block.header.parent_beacon_block_root); + let blob_versioned_hashes = + block.body.blob_versioned_hashes_iter().copied().collect::>(); + let use_v4 = block.header.requests_hash.is_some(); + let requests_hash = self.requests_hash.or(block.header.requests_hash); + + let mut execution_payload = ExecutionPayload::from_block_slow(&block).0; + + let changes = match &mut execution_payload { + ExecutionPayload::V1(p) => config.apply_to_payload_v1(p), + ExecutionPayload::V2(p) => config.apply_to_payload_v2(p), + ExecutionPayload::V3(p) => config.apply_to_payload_v3(p), + }; + + let skip_recalc = self.skip_hash_recalc || config.should_skip_hash_recalc(); + if !skip_recalc { + let new_hash = match execution_payload.clone().into_block_raw() { + Ok(block) => block.header.hash_slow(), + Err(e) => { + eprintln!( + "Warning: Could not recalculate block hash: {e}. Using original hash." 
+ ); + match &execution_payload { + ExecutionPayload::V1(p) => p.block_hash, + ExecutionPayload::V2(p) => p.payload_inner.block_hash, + ExecutionPayload::V3(p) => p.payload_inner.payload_inner.block_hash, + } + } + }; + + match &mut execution_payload { + ExecutionPayload::V1(p) => p.block_hash = new_hash, + ExecutionPayload::V2(p) => p.payload_inner.block_hash = new_hash, + ExecutionPayload::V3(p) => p.payload_inner.payload_inner.block_hash = new_hash, + } + } + + if self.dry_run { + println!("=== Dry Run ==="); + println!("Changes that would be applied:"); + for change in &changes { + println!(" - {}", change); + } + if changes.is_empty() { + println!(" (no changes)"); + } + if skip_recalc { + println!(" - Block hash recalculation: SKIPPED"); + } else { + println!(" - Block hash recalculation: PERFORMED"); + } + println!("\nResulting payload JSON:"); + let json = serde_json::to_string_pretty(&execution_payload)?; + println!("{}", json); + return Ok(()); + } + + let json_request = if use_v4 { + serde_json::to_string(&( + execution_payload, + blob_versioned_hashes, + parent_beacon_block_root, + requests_hash.unwrap_or_default(), + ))? + } else { + serde_json::to_string(&( + execution_payload, + blob_versioned_hashes, + parent_beacon_block_root, + ))? + }; + + match self.mode { + Mode::Execute => { + let mut command = std::process::Command::new("cast"); + let method = if use_v4 { "engine_newPayloadV4" } else { "engine_newPayloadV3" }; + command.arg("rpc").arg(method).arg("--raw"); + if let Some(rpc_url) = self.rpc_url { + command.arg("--rpc-url").arg(rpc_url); + } + if let Some(secret) = &jwt_secret { + command.arg("--jwt-secret").arg(secret); + } + + let mut process = command.stdin(std::process::Stdio::piped()).spawn()?; + + process + .stdin + .take() + .ok_or_eyre("stdin not available")? 
+ .write_all(json_request.as_bytes())?; + + process.wait()?; + } + Mode::Cast => { + let mut cmd = format!( + "cast rpc engine_newPayloadV{} --raw '{}'", + self.new_payload_version, json_request + ); + + if let Some(rpc_url) = self.rpc_url { + cmd += &format!(" --rpc-url {rpc_url}"); + } + if let Some(secret) = &jwt_secret { + cmd += &format!(" --jwt-secret {secret}"); + } + + println!("{cmd}"); + } + Mode::Json => { + println!("{json_request}"); + } + } + + Ok(()) + } +} From f07629eac0eeead74cea6001f25c74f0b2dc2f0c Mon Sep 17 00:00:00 2001 From: YK Date: Thu, 22 Jan 2026 21:30:52 +0100 Subject: [PATCH 162/267] perf: avoid creating RocksDB transactions for legacy MDBX-only nodes (#21325) --- crates/storage/provider/src/either_writer.rs | 41 +++++-- .../provider/src/traits/rocksdb_provider.rs | 109 +++++++++++++++++- 2 files changed, 139 insertions(+), 11 deletions(-) diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs index efa1032420a..fcd3dea08a6 100644 --- a/crates/storage/provider/src/either_writer.rs +++ b/crates/storage/provider/src/either_writer.rs @@ -83,14 +83,17 @@ pub type RawRocksDBBatch = (); /// Helper type for `RocksDB` transaction reference argument in reader constructors. /// -/// When `rocksdb` feature is enabled, this is a reference to a `RocksDB` transaction. -/// Otherwise, it's `()` (unit type) to allow the same API without feature gates. +/// When `rocksdb` feature is enabled, this is an optional reference to a `RocksDB` transaction. +/// The `Option` allows callers to skip transaction creation when `RocksDB` isn't needed +/// (e.g., on legacy MDBX-only nodes). +/// When `rocksdb` feature is disabled, it's `()` (unit type) to allow the same API without +/// feature gates. 
#[cfg(all(unix, feature = "rocksdb"))] -pub type RocksTxRefArg<'a> = &'a crate::providers::rocksdb::RocksTx<'a>; +pub type RocksTxRefArg<'a> = Option<&'a crate::providers::rocksdb::RocksTx<'a>>; /// Helper type for `RocksDB` transaction reference argument in reader constructors. /// -/// When `rocksdb` feature is enabled, this is a reference to a `RocksDB` transaction. -/// Otherwise, it's `()` (unit type) to allow the same API without feature gates. +/// When `rocksdb` feature is disabled, it's `()` (unit type) to allow the same API without +/// feature gates. #[cfg(not(all(unix, feature = "rocksdb")))] pub type RocksTxRefArg<'a> = (); @@ -762,7 +765,9 @@ impl<'a> EitherReader<'a, (), ()> { { #[cfg(all(unix, feature = "rocksdb"))] if provider.cached_storage_settings().storages_history_in_rocksdb { - return Ok(EitherReader::RocksDB(_rocksdb_tx)); + return Ok(EitherReader::RocksDB( + _rocksdb_tx.expect("storages_history_in_rocksdb requires rocksdb tx"), + )); } Ok(EitherReader::Database( @@ -782,7 +787,9 @@ impl<'a> EitherReader<'a, (), ()> { { #[cfg(all(unix, feature = "rocksdb"))] if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb { - return Ok(EitherReader::RocksDB(_rocksdb_tx)); + return Ok(EitherReader::RocksDB( + _rocksdb_tx.expect("transaction_hash_numbers_in_rocksdb requires rocksdb tx"), + )); } Ok(EitherReader::Database( @@ -802,7 +809,9 @@ impl<'a> EitherReader<'a, (), ()> { { #[cfg(all(unix, feature = "rocksdb"))] if provider.cached_storage_settings().account_history_in_rocksdb { - return Ok(EitherReader::RocksDB(_rocksdb_tx)); + return Ok(EitherReader::RocksDB( + _rocksdb_tx.expect("account_history_in_rocksdb requires rocksdb tx"), + )); } Ok(EitherReader::Database( @@ -1814,4 +1823,20 @@ mod rocksdb_tests { "Data should be visible after provider.commit()" ); } + + /// Test that `EitherReader::new_accounts_history` panics when settings require + /// `RocksDB` but no tx is provided (`None`). 
This is an invariant violation that + /// indicates a bug - `with_rocksdb_tx` should always provide a tx when needed. + #[test] + #[should_panic(expected = "account_history_in_rocksdb requires rocksdb tx")] + fn test_settings_mismatch_panics() { + let factory = create_test_provider_factory(); + + factory.set_storage_settings_cache( + StorageSettings::legacy().with_account_history_in_rocksdb(true), + ); + + let provider = factory.database_provider_ro().unwrap(); + let _ = EitherReader::<(), ()>::new_accounts_history(&provider, None); + } } diff --git a/crates/storage/provider/src/traits/rocksdb_provider.rs b/crates/storage/provider/src/traits/rocksdb_provider.rs index 07c9b478661..06548d22752 100644 --- a/crates/storage/provider/src/traits/rocksdb_provider.rs +++ b/crates/storage/provider/src/traits/rocksdb_provider.rs @@ -2,6 +2,7 @@ use crate::{ either_writer::{RawRocksDBBatch, RocksBatchArg, RocksTxRefArg}, providers::RocksDBProvider, }; +use reth_storage_api::StorageSettingsCache; use reth_storage_errors::provider::ProviderResult; /// `RocksDB` provider factory. @@ -21,15 +22,21 @@ pub trait RocksDBProviderFactory { /// Executes a closure with a `RocksDB` transaction for reading. /// /// This helper encapsulates all the cfg-gated `RocksDB` transaction handling for reads. + /// On legacy MDBX-only nodes (where `any_in_rocksdb()` is false), this skips creating + /// the `RocksDB` transaction entirely, avoiding unnecessary overhead. 
fn with_rocksdb_tx(&self, f: F) -> ProviderResult where + Self: StorageSettingsCache, F: FnOnce(RocksTxRefArg<'_>) -> ProviderResult, { #[cfg(all(unix, feature = "rocksdb"))] { - let rocksdb = self.rocksdb_provider(); - let tx = rocksdb.tx(); - f(&tx) + if self.cached_storage_settings().any_in_rocksdb() { + let rocksdb = self.rocksdb_provider(); + let tx = rocksdb.tx(); + return f(Some(&tx)); + } + f(None) } #[cfg(not(all(unix, feature = "rocksdb")))] f(()) @@ -85,3 +92,99 @@ pub trait RocksDBProviderFactory { } } } + +#[cfg(all(test, unix, feature = "rocksdb"))] +mod tests { + use super::*; + use reth_db_api::models::StorageSettings; + use std::sync::atomic::{AtomicUsize, Ordering}; + + /// Mock `RocksDB` provider that tracks `tx()` calls. + struct MockRocksDBProvider { + tx_call_count: AtomicUsize, + } + + impl MockRocksDBProvider { + const fn new() -> Self { + Self { tx_call_count: AtomicUsize::new(0) } + } + + fn tx_call_count(&self) -> usize { + self.tx_call_count.load(Ordering::SeqCst) + } + + fn increment_tx_count(&self) { + self.tx_call_count.fetch_add(1, Ordering::SeqCst); + } + } + + /// Test provider that implements [`RocksDBProviderFactory`] + [`StorageSettingsCache`]. 
+ struct TestProvider { + settings: StorageSettings, + mock_rocksdb: MockRocksDBProvider, + temp_dir: tempfile::TempDir, + } + + impl TestProvider { + fn new(settings: StorageSettings) -> Self { + Self { + settings, + mock_rocksdb: MockRocksDBProvider::new(), + temp_dir: tempfile::TempDir::new().unwrap(), + } + } + + fn tx_call_count(&self) -> usize { + self.mock_rocksdb.tx_call_count() + } + } + + impl StorageSettingsCache for TestProvider { + fn cached_storage_settings(&self) -> StorageSettings { + self.settings + } + + fn set_storage_settings_cache(&self, _settings: StorageSettings) {} + } + + impl RocksDBProviderFactory for TestProvider { + fn rocksdb_provider(&self) -> RocksDBProvider { + self.mock_rocksdb.increment_tx_count(); + RocksDBProvider::new(self.temp_dir.path()).unwrap() + } + + fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) {} + } + + #[test] + fn test_legacy_settings_skip_rocksdb_tx_creation() { + let provider = TestProvider::new(StorageSettings::legacy()); + + let result = provider.with_rocksdb_tx(|tx| { + assert!(tx.is_none(), "legacy settings should pass None tx"); + Ok(42) + }); + + assert_eq!(result.unwrap(), 42); + assert_eq!(provider.tx_call_count(), 0, "should not create RocksDB tx for legacy settings"); + } + + #[test] + fn test_rocksdb_settings_create_tx() { + let settings = + StorageSettings { account_history_in_rocksdb: true, ..StorageSettings::legacy() }; + let provider = TestProvider::new(settings); + + let result = provider.with_rocksdb_tx(|tx| { + assert!(tx.is_some(), "rocksdb settings should pass Some tx"); + Ok(42) + }); + + assert_eq!(result.unwrap(), 42); + assert_eq!( + provider.tx_call_count(), + 1, + "should create RocksDB tx when any_in_rocksdb is true" + ); + } +} From 5a076df09afdb26082b54ac58271cdcfc882fa64 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 23 Jan 2026 00:40:26 +0400 Subject: [PATCH 163/267] feat: allow setting custom debug block provider (#21345) Co-authored-by: 
Karl --- crates/node/builder/src/launch/debug.rs | 79 +++++++++++++++++++++---- crates/node/builder/src/lib.rs | 2 +- 2 files changed, 69 insertions(+), 12 deletions(-) diff --git a/crates/node/builder/src/launch/debug.rs b/crates/node/builder/src/launch/debug.rs index a623a825ad8..896f56fb61d 100644 --- a/crates/node/builder/src/launch/debug.rs +++ b/crates/node/builder/src/launch/debug.rs @@ -4,10 +4,13 @@ use alloy_consensus::transaction::Either; use alloy_provider::network::AnyNetwork; use jsonrpsee::core::{DeserializeOwned, Serialize}; use reth_chainspec::EthChainSpec; -use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; +use reth_consensus_debug_client::{ + BlockProvider, DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider, +}; use reth_engine_local::LocalMiner; use reth_node_api::{ - BlockTy, FullNodeComponents, HeaderTy, PayloadAttrTy, PayloadAttributesBuilder, PayloadTypes, + BlockTy, FullNodeComponents, FullNodeTypes, HeaderTy, PayloadAttrTy, PayloadAttributesBuilder, + PayloadTypes, }; use std::{ future::{Future, IntoFuture}, @@ -109,9 +112,16 @@ impl DebugNodeLauncher { } } +/// Type alias for the default debug block provider. We use etherscan provider to satisfy the +/// bounds. +pub type DefaultDebugBlockProvider = EtherscanBlockProvider< + <::Types as DebugNode>::RpcBlock, + BlockTy<::Types>, +>; + /// Future for the [`DebugNodeLauncher`]. #[expect(missing_debug_implementations, clippy::type_complexity)] -pub struct DebugNodeLauncherFuture +pub struct DebugNodeLauncherFuture> where N: FullNodeComponents>, { @@ -121,14 +131,17 @@ where Option, HeaderTy>>>, map_attributes: Option) -> PayloadAttrTy + Send + Sync>>, + debug_block_provider: Option, } -impl DebugNodeLauncherFuture +impl DebugNodeLauncherFuture where N: FullNodeComponents>, AddOns: RethRpcAddOns, L: LaunchNode>, + B: BlockProvider> + Clone, { + /// Sets a custom payload attributes builder for local mining in dev mode. 
pub fn with_payload_attributes_builder( self, builder: impl PayloadAttributesBuilder, HeaderTy>, @@ -138,9 +151,11 @@ where target: self.target, local_payload_attributes_builder: Some(Box::new(builder)), map_attributes: None, + debug_block_provider: self.debug_block_provider, } } + /// Sets a function to map payload attributes before building. pub fn map_debug_payload_attributes( self, f: impl Fn(PayloadAttrTy) -> PayloadAttrTy + Send + Sync + 'static, @@ -150,16 +165,58 @@ where target: self.target, local_payload_attributes_builder: None, map_attributes: Some(Box::new(f)), + debug_block_provider: self.debug_block_provider, + } + } + + /// Sets a custom block provider for the debug consensus client. + /// + /// When set, this provider will be used instead of creating an `EtherscanBlockProvider` + /// or `RpcBlockProvider` from CLI arguments. + pub fn with_debug_block_provider( + self, + provider: B2, + ) -> DebugNodeLauncherFuture + where + B2: BlockProvider> + Clone, + { + DebugNodeLauncherFuture { + inner: self.inner, + target: self.target, + local_payload_attributes_builder: self.local_payload_attributes_builder, + map_attributes: self.map_attributes, + debug_block_provider: Some(provider), } } async fn launch_node(self) -> eyre::Result> { - let Self { inner, target, local_payload_attributes_builder, map_attributes } = self; + let Self { + inner, + target, + local_payload_attributes_builder, + map_attributes, + debug_block_provider, + } = self; let handle = inner.launch_node(target).await?; let config = &handle.node.config; - if let Some(url) = config.debug.rpc_consensus_url.clone() { + + if let Some(provider) = debug_block_provider { + info!(target: "reth::cli", "Using custom debug block provider"); + + let rpc_consensus_client = DebugConsensusClient::new( + handle.node.add_ons_handle.beacon_engine_handle.clone(), + Arc::new(provider), + ); + + handle + .node + .task_executor + .spawn_critical("custom debug block provider consensus client", async move { + 
rpc_consensus_client.run().await + }); + } else if let Some(url) = config.debug.rpc_consensus_url.clone() { info!(target: "reth::cli", "Using RPC consensus client: {}", url); let block_provider = @@ -180,14 +237,11 @@ where handle.node.task_executor.spawn_critical("rpc-ws consensus client", async move { rpc_consensus_client.run().await }); - } - - if let Some(maybe_custom_etherscan_url) = config.debug.etherscan.clone() { + } else if let Some(maybe_custom_etherscan_url) = config.debug.etherscan.clone() { info!(target: "reth::cli", "Using etherscan as consensus client"); let chain = config.chain.chain(); let etherscan_url = maybe_custom_etherscan_url.map(Ok).unwrap_or_else(|| { - // If URL isn't provided, use default Etherscan URL for the chain if it is known chain .etherscan_urls() .map(|urls| urls.0.to_string()) @@ -252,12 +306,13 @@ where } } -impl IntoFuture for DebugNodeLauncherFuture +impl IntoFuture for DebugNodeLauncherFuture where Target: Send + 'static, N: FullNodeComponents>, AddOns: RethRpcAddOns + 'static, L: LaunchNode> + 'static, + B: BlockProvider> + Clone + 'static, { type Output = eyre::Result>; type IntoFuture = Pin>> + Send>>; @@ -273,6 +328,7 @@ where N: FullNodeComponents>, AddOns: RethRpcAddOns + 'static, L: LaunchNode> + 'static, + DefaultDebugBlockProvider: BlockProvider> + Clone, { type Node = NodeHandle; type Future = DebugNodeLauncherFuture; @@ -283,6 +339,7 @@ where target, local_payload_attributes_builder: None, map_attributes: None, + debug_block_provider: None, } } } diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index 1218465e95e..7f1e71d5c17 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -31,7 +31,7 @@ pub use builder::{add_ons::AddOns, *}; mod launch; pub use launch::{ - debug::{DebugNode, DebugNodeLauncher}, + debug::{DebugNode, DebugNodeLauncher, DebugNodeLauncherFuture, DefaultDebugBlockProvider}, engine::EngineNodeLauncher, *, }; From 
6df249c1f110298ef0fb6d19fdbd9755d36684ef Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 22 Jan 2026 21:48:11 +0100 Subject: [PATCH 164/267] feat(engine): stub Amsterdam engine API endpoints (newPayloadV5, getPayloadV6, BALs) (#21344) Co-authored-by: Ishika Choudhury <117741714+Rimeeeeee@users.noreply.github.com> Co-authored-by: Soubhik Singha Mahapatra <160333583+Soubhik-10@users.noreply.github.com> --- crates/engine/primitives/src/lib.rs | 11 +++++- crates/ethereum/engine-primitives/src/lib.rs | 9 +++-- .../ethereum/engine-primitives/src/payload.rs | 19 +++++++++- crates/optimism/node/src/engine.rs | 1 + crates/rpc/rpc-api/src/engine.rs | 28 +++++++++++++- crates/rpc/rpc-engine-api/src/engine_api.rs | 37 ++++++++++++++++++- examples/custom-engine-types/src/main.rs | 5 ++- 7 files changed, 99 insertions(+), 11 deletions(-) diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index a16b5b1c451..e97cb3b1046 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -62,7 +62,8 @@ pub trait EngineTypes: + TryInto + TryInto + TryInto - + TryInto, + + TryInto + + TryInto, > + DeserializeOwned + Serialize { @@ -106,6 +107,14 @@ pub trait EngineTypes: + Send + Sync + 'static; + /// Execution Payload V6 envelope type. + type ExecutionPayloadEnvelopeV6: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; } /// Type that validates the payloads processed by the engine API. 
diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 95c317a8c0f..58aa7a17676 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -17,10 +17,11 @@ pub use payload::{payload_id, BlobSidecars, EthBuiltPayload, EthPayloadBuilderAt mod error; pub use error::*; -use alloy_rpc_types_engine::{ExecutionData, ExecutionPayload, ExecutionPayloadEnvelopeV5}; +use alloy_rpc_types_engine::{ExecutionData, ExecutionPayload}; pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, - ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, + ExecutionPayloadEnvelopeV5, ExecutionPayloadEnvelopeV6, ExecutionPayloadV1, + PayloadAttributes as EthPayloadAttributes, }; use reth_engine_primitives::EngineTypes; use reth_payload_primitives::{BuiltPayload, PayloadTypes}; @@ -66,13 +67,15 @@ where + TryInto + TryInto + TryInto - + TryInto, + + TryInto + + TryInto, { type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; type ExecutionPayloadEnvelopeV5 = ExecutionPayloadEnvelopeV5; + type ExecutionPayloadEnvelopeV6 = ExecutionPayloadEnvelopeV6; } /// A default payload type for [`EthEngineTypes`] diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 61b891e19d4..fb71371e718 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -11,8 +11,8 @@ use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ BlobsBundleV1, BlobsBundleV2, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, - ExecutionPayloadEnvelopeV4, ExecutionPayloadEnvelopeV5, 
ExecutionPayloadFieldV2, - ExecutionPayloadV1, ExecutionPayloadV3, PayloadAttributes, PayloadId, + ExecutionPayloadEnvelopeV4, ExecutionPayloadEnvelopeV5, ExecutionPayloadEnvelopeV6, + ExecutionPayloadFieldV2, ExecutionPayloadV1, ExecutionPayloadV3, PayloadAttributes, PayloadId, }; use core::convert::Infallible; use reth_ethereum_primitives::EthPrimitives; @@ -160,6 +160,13 @@ impl EthBuiltPayload { execution_requests: requests.unwrap_or_default(), }) } + + /// Try converting built payload into [`ExecutionPayloadEnvelopeV6`]. + /// + /// Note: Amsterdam fork is not yet implemented, so this conversion is not yet supported. + pub fn try_into_v6(self) -> Result { + unimplemented!("ExecutionPayloadEnvelopeV6 not yet supported") + } } impl BuiltPayload for EthBuiltPayload { @@ -227,6 +234,14 @@ impl TryFrom for ExecutionPayloadEnvelopeV5 { } } +impl TryFrom for ExecutionPayloadEnvelopeV6 { + type Error = BuiltPayloadConversionError; + + fn try_from(value: EthBuiltPayload) -> Result { + value.try_into_v6() + } +} + /// An enum representing blob transaction sidecars belonging to [`EthBuiltPayload`]. #[derive(Clone, Default, Debug)] pub enum BlobSidecars { diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 386026f234f..652bb44f473 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -62,6 +62,7 @@ where type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; type ExecutionPayloadEnvelopeV5 = OpExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV6 = OpExecutionPayloadEnvelopeV4; } /// Validator for Optimism engine API. 
diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index aca0af4e76e..520058f0bba 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -12,7 +12,8 @@ use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; use alloy_rpc_types_engine::{ ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, - ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, + PayloadStatus, }; use alloy_rpc_types_eth::{ state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, @@ -73,6 +74,18 @@ pub trait EngineApi { execution_requests: RequestsOrHash, ) -> RpcResult; + /// Post Amsterdam payload handler + /// + /// See also + #[method(name = "newPayloadV5")] + async fn new_payload_v5( + &self, + payload: ExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: RequestsOrHash, + ) -> RpcResult; + /// See also /// /// Caution: This should not accept the `withdrawals` field in the payload attributes. @@ -178,6 +191,19 @@ pub trait EngineApi { payload_id: PayloadId, ) -> RpcResult; + /// Post Amsterdam payload handler. + /// + /// See also + /// + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. Note: + /// > Provider software MAY stop the corresponding build process after serving this call. 
+ #[method(name = "getPayloadV6")] + async fn get_payload_v6( + &self, + payload_id: PayloadId, + ) -> RpcResult; + /// See also #[method(name = "getPayloadBodiesByHashV1")] async fn get_payload_bodies_by_hash_v1( diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 5a7b69dd9e1..4c0eeed026d 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -11,8 +11,8 @@ use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionData, ExecutionPayloadBodiesV1, ExecutionPayloadBodyV1, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, - ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, - PraguePayloadFields, + ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, + PayloadStatus, PraguePayloadFields, }; use async_trait::async_trait; use jsonrpsee_core::{server::RpcModule, RpcResult}; @@ -963,6 +963,24 @@ where Ok(self.new_payload_v4_metered(payload).await?) } + /// Handler for `engine_newPayloadV5` + /// + /// Post Amsterdam payload handler. Currently returns unsupported fork error. + /// + /// See also + async fn new_payload_v5( + &self, + _payload: ExecutionPayloadV4, + _versioned_hashes: Vec, + _parent_beacon_block_root: B256, + _execution_requests: RequestsOrHash, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_newPayloadV5"); + Err(EngineApiError::EngineObjectValidationError( + reth_payload_primitives::EngineObjectValidationError::UnsupportedFork, + ))? + } + /// Handler for `engine_forkchoiceUpdatedV1` /// See also /// @@ -1086,6 +1104,21 @@ where Ok(self.get_payload_v5_metered(payload_id).await?) } + /// Handler for `engine_getPayloadV6` + /// + /// Post Amsterdam payload handler. Currently returns unsupported fork error. 
+ /// + /// See also + async fn get_payload_v6( + &self, + _payload_id: PayloadId, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadV6"); + Err(EngineApiError::EngineObjectValidationError( + reth_payload_primitives::EngineObjectValidationError::UnsupportedFork, + ))? + } + /// Handler for `engine_getPayloadBodiesByHashV1` /// See also async fn get_payload_bodies_by_hash_v1( diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index a26ce1594a2..e799d89c71e 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -23,8 +23,8 @@ use alloy_primitives::{Address, B256}; use alloy_rpc_types::{ engine::{ ExecutionData, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, - ExecutionPayloadEnvelopeV4, ExecutionPayloadEnvelopeV5, ExecutionPayloadV1, - PayloadAttributes as EthPayloadAttributes, PayloadId, + ExecutionPayloadEnvelopeV4, ExecutionPayloadEnvelopeV5, ExecutionPayloadEnvelopeV6, + ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, PayloadId, }, Withdrawal, }; @@ -169,6 +169,7 @@ impl EngineTypes for CustomEngineTypes { type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; type ExecutionPayloadEnvelopeV5 = ExecutionPayloadEnvelopeV5; + type ExecutionPayloadEnvelopeV6 = ExecutionPayloadEnvelopeV6; } /// Custom engine validator From ad476e2b5cb6cbc63fcb707d18a6bc6f3b0d6e38 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 22 Jan 2026 23:18:18 -0800 Subject: [PATCH 165/267] chore: add yongkangc as codeowner for crates/storage/provider (#21349) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4c3ce10c7ab..aae3ca2c02a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -37,7 +37,7 @@ crates/storage/db/ @joshieDo crates/storage/errors/ @joshieDo 
crates/storage/libmdbx-rs/ @shekhirin crates/storage/nippy-jar/ @joshieDo @shekhirin -crates/storage/provider/ @joshieDo @shekhirin +crates/storage/provider/ @joshieDo @shekhirin @yongkangc crates/storage/storage-api/ @joshieDo crates/tasks/ @mattsse crates/tokio-util/ @mattsse From d99c0ffd624685cabcb3506d6325556f0bf83e28 Mon Sep 17 00:00:00 2001 From: Hwangjae Lee Date: Fri, 23 Jan 2026 19:59:53 +0900 Subject: [PATCH 166/267] chore(etc): update ethereum-metrics-exporter GitHub URL (#21348) Signed-off-by: Hwangjae Lee --- etc/grafana/dashboards/metrics-exporter.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/grafana/dashboards/metrics-exporter.json b/etc/grafana/dashboards/metrics-exporter.json index a626536eaa7..f6f49cc323d 100644 --- a/etc/grafana/dashboards/metrics-exporter.json +++ b/etc/grafana/dashboards/metrics-exporter.json @@ -21,7 +21,7 @@ } ] }, - "description": "Companion dashboard for https://github.com/samcm/ethereum-metrics-exporter", + "description": "Companion dashboard for https://github.com/ethpandaops/ethereum-metrics-exporter", "editable": true, "fiscalYearStartMonth": 0, "gnetId": 16277, From 22a68756c71a4a4e3408600c69ef5b6cf7169461 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 23 Jan 2026 12:26:57 +0100 Subject: [PATCH 167/267] fix(tree): evict changeset cache even when finalized block is unset (#21354) --- crates/engine/tree/src/tree/mod.rs | 40 ++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 1e04d19dba2..1e4a6315946 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -85,6 +85,12 @@ pub mod state; /// backfill this gap. pub(crate) const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; +/// The minimum number of blocks to retain in the changeset cache after eviction. 
+/// +/// This ensures that recent trie changesets are kept in memory for potential reorgs, +/// even when the finalized block is not set (e.g., on L2s like Optimism). +const CHANGESET_CACHE_RETENTION_BLOCKS: u64 = 64; + /// A builder for creating state providers that can be used across threads. #[derive(Clone, Debug)] pub struct StateProviderBuilder { @@ -1378,19 +1384,27 @@ where debug!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, elapsed=?start_time.elapsed(), "Finished persisting, calling finish"); self.persistence_state.finish(last_persisted_block_hash, last_persisted_block_number); - // Evict trie changesets for blocks below the finalized block, but keep at least 64 blocks - if let Some(finalized) = self.canonical_in_memory_state.get_finalized_num_hash() { - let min_threshold = last_persisted_block_number.saturating_sub(64); - let eviction_threshold = finalized.number.min(min_threshold); - debug!( - target: "engine::tree", - last_persisted = last_persisted_block_number, - finalized_number = finalized.number, - eviction_threshold, - "Evicting changesets below threshold" - ); - self.changeset_cache.evict(eviction_threshold); - } + // Evict trie changesets for blocks below the eviction threshold. + // Keep at least CHANGESET_CACHE_RETENTION_BLOCKS from the persisted tip, and also respect + // the finalized block if set. 
+ let min_threshold = + last_persisted_block_number.saturating_sub(CHANGESET_CACHE_RETENTION_BLOCKS); + let eviction_threshold = + if let Some(finalized) = self.canonical_in_memory_state.get_finalized_num_hash() { + // Use the minimum of finalized block and retention threshold to be conservative + finalized.number.min(min_threshold) + } else { + // When finalized is not set (e.g., on L2s), use the retention threshold + min_threshold + }; + debug!( + target: "engine::tree", + last_persisted = last_persisted_block_number, + finalized_number = ?self.canonical_in_memory_state.get_finalized_num_hash().map(|f| f.number), + eviction_threshold, + "Evicting changesets below threshold" + ); + self.changeset_cache.evict(eviction_threshold); self.on_new_persisted_block()?; Ok(()) From 1bd8fab887b798437cc4c5544e3b85455bb2436c Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 23 Jan 2026 17:16:05 +0400 Subject: [PATCH 168/267] feat(txpool): add Block associated type to TransactionValidator trait (#21359) --- crates/ethereum/node/src/node.rs | 9 +- crates/node/builder/src/components/pool.rs | 8 +- crates/optimism/node/src/node.rs | 4 +- crates/optimism/txpool/src/lib.rs | 5 +- crates/optimism/txpool/src/transaction.rs | 9 +- crates/optimism/txpool/src/validator.rs | 23 ++--- crates/transaction-pool/src/lib.rs | 18 ++-- crates/transaction-pool/src/maintain.rs | 12 ++- crates/transaction-pool/src/noop.rs | 1 + crates/transaction-pool/src/pool/mod.rs | 6 +- .../src/test_utils/okvalidator.rs | 2 + crates/transaction-pool/src/traits.rs | 7 +- crates/transaction-pool/src/validate/eth.rs | 95 +++++++++++-------- crates/transaction-pool/src/validate/mod.rs | 20 ++-- crates/transaction-pool/src/validate/task.rs | 11 +-- examples/custom-node-components/src/main.rs | 20 ++-- 16 files changed, 138 insertions(+), 112 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 95ff8072532..da93390154a 100644 --- a/crates/ethereum/node/src/node.rs 
+++ b/crates/ethereum/node/src/node.rs @@ -18,7 +18,7 @@ use reth_evm::{ }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, + AddOnsContext, BlockTy, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, PayloadAttributesBuilder, PrimitivesTy, TxTy, }; use reth_node_builder::{ @@ -53,8 +53,8 @@ use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, EthTransactionPool, PoolPooledTx, PoolTransaction, - TransactionPool, TransactionValidationTaskExecutor, + blobstore::DiskFileBlobStore, EthPooledTransaction, EthTransactionPool, PoolPooledTx, + PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use revm::context::TxEnv; use std::{marker::PhantomData, sync::Arc, time::SystemTime}; @@ -464,7 +464,8 @@ where >, Node: FullNodeTypes, { - type Pool = EthTransactionPool; + type Pool = + EthTransactionPool>; async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let pool_config = ctx.pool_config(); diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index bb88c54b9f5..9f32b279154 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -4,7 +4,7 @@ use crate::{BuilderContext, FullNodeTypes}; use alloy_primitives::Address; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::EthereumHardforks; -use reth_node_api::{NodeTypes, TxTy}; +use reth_node_api::{BlockTy, NodeTypes, TxTy}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, BlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, @@ -129,7 +129,7 @@ impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, 
Node, V> { impl<'a, Node, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> where Node: FullNodeTypes>, - V: TransactionValidator + 'static, + V: TransactionValidator> + 'static, V::Transaction: PoolTransaction> + reth_transaction_pool::EthPoolTransaction, { @@ -248,7 +248,7 @@ fn spawn_pool_maintenance_task( ) -> eyre::Result<()> where Node: FullNodeTypes>, - Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, + Pool: reth_transaction_pool::TransactionPoolExt> + Clone + 'static, Pool::Transaction: PoolTransaction>, { let chain_events = ctx.provider().canonical_state_stream(); @@ -280,7 +280,7 @@ pub fn spawn_maintenance_tasks( ) -> eyre::Result<()> where Node: FullNodeTypes>, - Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, + Pool: reth_transaction_pool::TransactionPoolExt> + Clone + 'static, Pool::Transaction: PoolTransaction>, { spawn_local_backup_task(ctx, pool.clone())?; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index dd68ab8a8e3..6ee024ace97 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -16,7 +16,7 @@ use reth_network::{ PeersInfo, }; use reth_node_api::{ - AddOnsContext, BuildNextEnv, EngineTypes, FullNodeComponents, HeaderTy, NodeAddOns, + AddOnsContext, BlockTy, BuildNextEnv, EngineTypes, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, PayloadAttributesBuilder, PayloadTypes, PrimitivesTy, TxTy, }; use reth_node_builder::{ @@ -962,7 +962,7 @@ where Node: FullNodeTypes>, T: EthPoolTransaction> + OpPooledTx, { - type Pool = OpTransactionPool; + type Pool = OpTransactionPool>; async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let Self { pool_config_overrides, .. 
} = self; diff --git a/crates/optimism/txpool/src/lib.rs b/crates/optimism/txpool/src/lib.rs index 43421ed3b30..b2c240abe14 100644 --- a/crates/optimism/txpool/src/lib.rs +++ b/crates/optimism/txpool/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg))] mod validator; +use op_alloy_consensus::OpBlock; pub use validator::{OpL1BlockInfo, OpTransactionValidator}; pub mod conditional; @@ -24,8 +25,8 @@ pub mod estimated_da_size; use reth_transaction_pool::{CoinbaseTipOrdering, Pool, TransactionValidationTaskExecutor}; /// Type alias for default optimism transaction pool -pub type OpTransactionPool = Pool< - TransactionValidationTaskExecutor>, +pub type OpTransactionPool = Pool< + TransactionValidationTaskExecutor>, CoinbaseTipOrdering, S, >; diff --git a/crates/optimism/txpool/src/transaction.rs b/crates/optimism/txpool/src/transaction.rs index 6cbc645fe51..fa2ec80e4d8 100644 --- a/crates/optimism/txpool/src/transaction.rs +++ b/crates/optimism/txpool/src/transaction.rs @@ -325,10 +325,11 @@ mod tests { #[tokio::test] async fn validate_optimism_transaction() { let client = MockEthProvider::default().with_chain_spec(OP_MAINNET.clone()); - let validator = EthTransactionValidatorBuilder::new(client) - .no_shanghai() - .no_cancun() - .build(InMemoryBlobStore::default()); + let validator = + EthTransactionValidatorBuilder::new(client) + .no_shanghai() + .no_cancun() + .build::<_, _, reth_optimism_primitives::OpBlock>(InMemoryBlobStore::default()); let validator = OpTransactionValidator::new(validator); let origin = TransactionOrigin::External; diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 8a715fc47c8..900a005d852 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -1,5 +1,6 @@ use crate::{supervisor::SupervisorClient, InvalidCrossTx, OpPooledTx}; use alloy_consensus::{BlockHeader, Transaction}; +use op_alloy_consensus::OpBlock; use op_revm::L1BlockInfo; use 
parking_lot::RwLock; use reth_chainspec::ChainSpecProvider; @@ -39,9 +40,9 @@ impl OpL1BlockInfo { /// Validator for Optimism transactions. #[derive(Debug, Clone)] -pub struct OpTransactionValidator { +pub struct OpTransactionValidator { /// The type that performs the actual validation. - inner: Arc>, + inner: Arc>, /// Additional block info required for validation. block_info: Arc, /// If true, ensure that the transaction's sender has enough balance to cover the L1 gas fee @@ -54,7 +55,7 @@ pub struct OpTransactionValidator { fork_tracker: Arc, } -impl OpTransactionValidator { +impl OpTransactionValidator { /// Returns the configured chain spec pub fn chain_spec(&self) -> Arc where @@ -86,14 +87,15 @@ impl OpTransactionValidator { } } -impl OpTransactionValidator +impl OpTransactionValidator where Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt + Sync, Tx: EthPoolTransaction + OpPooledTx, + B: Block, { /// Create a new [`OpTransactionValidator`]. - pub fn new(inner: EthTransactionValidator) -> Self { + pub fn new(inner: EthTransactionValidator) -> Self { let this = Self::with_block_info(inner, OpL1BlockInfo::default()); if let Ok(Some(block)) = this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) @@ -112,7 +114,7 @@ where /// Create a new [`OpTransactionValidator`] with the given [`OpL1BlockInfo`]. 
pub fn with_block_info( - inner: EthTransactionValidator, + inner: EthTransactionValidator, block_info: OpL1BlockInfo, ) -> Self { Self { @@ -288,13 +290,15 @@ where } } -impl TransactionValidator for OpTransactionValidator +impl TransactionValidator for OpTransactionValidator where Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt + Sync, Tx: EthPoolTransaction + OpPooledTx, + B: Block, { type Transaction = Tx; + type Block = B; async fn validate_transaction( &self, @@ -325,10 +329,7 @@ where .await } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) - where - B: Block, - { + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { self.inner.on_new_head_block(new_tip_block); self.update_l1_block_info( new_tip_block.header(), diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 1dca5c33d07..7fbdda5f291 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -303,7 +303,7 @@ use aquamarine as _; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives_traits::{Block, Recovered}; +use reth_primitives_traits::Recovered; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -328,8 +328,13 @@ mod traits; pub mod test_utils; /// Type alias for default ethereum transaction pool -pub type EthTransactionPool = Pool< - TransactionValidationTaskExecutor>, +pub type EthTransactionPool< + Client, + S, + T = EthPooledTransaction, + B = reth_ethereum_primitives::Block, +> = Pool< + TransactionValidationTaskExecutor>, CoinbaseTipOrdering, S, >; @@ -776,16 +781,15 @@ where T: TransactionOrdering::Transaction>, S: BlobStore, { + type Block = V::Block; + #[instrument(skip(self), target = "txpool")] fn set_block_info(&self, info: BlockInfo) { trace!(target: "txpool", "updating pool block info"); 
self.pool.set_block_info(info) } - fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, B>) - where - B: Block, - { + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, Self::Block>) { self.pool.on_canonical_state_change(update); } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index c58a5151da9..ca4546e7892 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -107,7 +107,8 @@ where + ChainSpecProvider + EthereumHardforks> + Clone + 'static, - P: TransactionPoolExt> + 'static, + P: TransactionPoolExt, Block = N::Block> + + 'static, St: Stream> + Send + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { @@ -133,7 +134,8 @@ pub async fn maintain_transaction_pool( + ChainSpecProvider + EthereumHardforks> + Clone + 'static, - P: TransactionPoolExt> + 'static, + P: TransactionPoolExt, Block = N::Block> + + 'static, St: Stream> + Send + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { @@ -855,7 +857,8 @@ mod tests { use super::*; use crate::{ blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, - CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionOrigin, + CoinbaseTipOrdering, EthPooledTransaction, EthTransactionValidator, Pool, + TransactionOrigin, }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; @@ -889,7 +892,8 @@ mod tests { let sender = hex!("1f9090aaE28b8a3dCeaDf281B0F12828e676c326").into(); provider.add_account(sender, ExtendedAccount::new(42, U256::MAX)); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); + let validator: EthTransactionValidator<_, _, reth_ethereum_primitives::Block> = + EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); let txpool = Pool::new( validator, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs 
index 8dabdbbbfad..ac37d896ffd 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -373,6 +373,7 @@ pub struct MockTransactionValidator { impl TransactionValidator for MockTransactionValidator { type Transaction = T; + type Block = reth_ethereum_primitives::Block; async fn validate_transaction( &self, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 27c63ed8acd..07f5085a005 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -114,7 +114,6 @@ pub use events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents, TransactionListenerKind}; pub use parked::{BasefeeOrd, ParkedOrd, ParkedPool, QueuedOrd}; pub use pending::PendingPool; -use reth_primitives_traits::Block; mod best; pub use best::BestTransactions; @@ -504,10 +503,7 @@ where } /// Updates the entire pool after a new block was executed. - pub fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, B>) - where - B: Block, - { + pub fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, V::Block>) { trace!(target: "txpool", ?update, "updating pool on canonical state change"); let block_info = update.block_info(); diff --git a/crates/transaction-pool/src/test_utils/okvalidator.rs b/crates/transaction-pool/src/test_utils/okvalidator.rs index fc15dce74ec..27959a30e46 100644 --- a/crates/transaction-pool/src/test_utils/okvalidator.rs +++ b/crates/transaction-pool/src/test_utils/okvalidator.rs @@ -4,6 +4,7 @@ use crate::{ validate::ValidTransaction, EthPooledTransaction, PoolTransaction, TransactionOrigin, TransactionValidationOutcome, TransactionValidator, }; +use reth_ethereum_primitives::Block; /// A transaction validator that determines all transactions to be valid. 
#[derive(Debug)] @@ -33,6 +34,7 @@ where T: PoolTransaction, { type Transaction = T; + type Block = Block; async fn validate_transaction( &self, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index f70e74f7837..24acfcac2dc 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -667,6 +667,9 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { /// Extension for [`TransactionPool`] trait that allows to set the current block info. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionPoolExt: TransactionPool { + /// The block type used for chain tip updates. + type Block: Block; + /// Sets the current block info for the pool. fn set_block_info(&self, info: BlockInfo); @@ -685,9 +688,7 @@ pub trait TransactionPoolExt: TransactionPool { /// sidecar must not be removed from the blob store. Only after a blob transaction is /// finalized, its sidecar is removed from the blob store. This ensures that in case of a reorg, /// the sidecar is still available. 
- fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, B>) - where - B: Block; + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, Self::Block>); /// Updates the accounts in the pool fn update_accounts(&self, accounts: Vec); diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index da3e8680e59..1775387c68f 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -28,7 +28,7 @@ use alloy_eips::{ use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_primitives_traits::{ constants::MAX_TX_GAS_LIMIT_OSAKA, transaction::error::InvalidTransactionError, Account, Block, - GotExpected, SealedBlock, + GotExpected, }; use reth_storage_api::{AccountInfoReader, BytecodeReader, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -58,7 +58,7 @@ use tokio::sync::Mutex; /// /// And adheres to the configured [`LocalTransactionConfig`]. #[derive(Debug)] -pub struct EthTransactionValidator { +pub struct EthTransactionValidator { /// This type fetches account info from the db client: Client, /// Blobstore used for fetching re-injected blob transactions. @@ -90,14 +90,14 @@ pub struct EthTransactionValidator { /// Disable balance checks during transaction validation disable_balance_check: bool, /// Marker for the transaction type - _marker: PhantomData, + _marker: PhantomData<(T, B)>, /// Metrics for tsx pool validation validation_metrics: TxPoolValidationMetrics, /// Bitmap of custom transaction types that are allowed. 
other_tx_types: U256, } -impl EthTransactionValidator { +impl EthTransactionValidator { /// Returns the configured chain spec pub fn chain_spec(&self) -> Arc where @@ -176,7 +176,7 @@ impl EthTransactionValidator { } } -impl EthTransactionValidator +impl EthTransactionValidator where Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, @@ -799,12 +799,14 @@ where } } -impl TransactionValidator for EthTransactionValidator +impl TransactionValidator for EthTransactionValidator where Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, + B: Block, { type Transaction = Tx; + type Block = B; async fn validate_transaction( &self, @@ -829,11 +831,8 @@ where self.validate_batch_with_origin(origin, transactions) } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) - where - B: Block, - { - self.on_new_head_block(new_tip_block.header()) + fn on_new_head_block(&self, new_tip_block: &reth_primitives_traits::SealedBlock) { + Self::on_new_head_block(self, new_tip_block.header()) } } @@ -1105,9 +1104,10 @@ impl EthTransactionValidatorBuilder { } /// Builds a the [`EthTransactionValidator`] without spawning validator tasks. - pub fn build(self, blob_store: S) -> EthTransactionValidator + pub fn build(self, blob_store: S) -> EthTransactionValidator where S: BlobStore, + B: Block, { let Self { client, @@ -1170,17 +1170,18 @@ impl EthTransactionValidatorBuilder { /// The validator will spawn `additional_tasks` additional tasks for validation. /// /// By default this will spawn 1 additional task. 
- pub fn build_with_tasks( + pub fn build_with_tasks( self, tasks: T, blob_store: S, - ) -> TransactionValidationTaskExecutor> + ) -> TransactionValidationTaskExecutor> where T: TaskSpawner, S: BlobStore, + B: Block, { let additional_tasks = self.additional_tasks; - let validator = self.build(blob_store); + let validator = self.build::(blob_store); let (tx, task) = ValidationTask::new(); @@ -1341,7 +1342,8 @@ mod tests { ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); @@ -1368,9 +1370,10 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider) - .set_block_gas_limit(1_000_000) // tx gas limit is 1_015_288 - .build(blob_store.clone()); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .set_block_gas_limit(1_000_000) // tx gas limit is 1_015_288 + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); @@ -1401,9 +1404,10 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider) - .set_tx_fee_cap(100) // 100 wei cap - .build(blob_store.clone()); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .set_tx_fee_cap(100) // 100 wei cap + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::Local, transaction.clone()); assert!(outcome.is_invalid()); @@ -1438,9 +1442,10 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider) 
- .set_tx_fee_cap(0) // no cap - .build(blob_store); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .set_tx_fee_cap(0) // no cap + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::Local, transaction); assert!(outcome.is_valid()); @@ -1456,9 +1461,10 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider) - .set_tx_fee_cap(2e18 as u128) // 2 ETH cap - .build(blob_store); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .set_tx_fee_cap(2e18 as u128) // 2 ETH cap + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::Local, transaction); assert!(outcome.is_valid()); @@ -1474,9 +1480,10 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider) - .with_max_tx_gas_limit(Some(500_000)) // Set limit lower than transaction gas limit (1_015_288) - .build(blob_store.clone()); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(Some(500_000)) // Set limit lower than transaction gas limit (1_015_288) + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); assert!(outcome.is_invalid()); @@ -1506,9 +1513,10 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - let validator = EthTransactionValidatorBuilder::new(provider) - .with_max_tx_gas_limit(None) // disabled - .build(blob_store); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(None) // disabled + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::External, transaction); assert!(outcome.is_valid()); @@ -1524,9 +1532,10 @@ mod tests { ); let blob_store = InMemoryBlobStore::default(); - let 
validator = EthTransactionValidatorBuilder::new(provider) - .with_max_tx_gas_limit(Some(2_000_000)) // Set limit higher than transaction gas limit (1_015_288) - .build(blob_store); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(Some(2_000_000)) // Set limit higher than transaction gas limit (1_015_288) + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::External, transaction); assert!(outcome.is_valid()); @@ -1727,8 +1736,9 @@ mod tests { ); // Validate with balance check enabled - let validator = EthTransactionValidatorBuilder::new(provider.clone()) - .build(InMemoryBlobStore::default()); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider.clone()) + .build(InMemoryBlobStore::default()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); let expected_cost = *transaction.cost(); @@ -1743,9 +1753,10 @@ mod tests { } // Validate with balance check disabled - let validator = EthTransactionValidatorBuilder::new(provider) - .disable_balance_check() // This should allow the transaction through despite zero balance - .build(InMemoryBlobStore::default()); + let validator: EthTransactionValidator<_, _> = + EthTransactionValidatorBuilder::new(provider) + .disable_balance_check() + .build(InMemoryBlobStore::default()); let outcome = validator.validate_one(TransactionOrigin::External, transaction); assert!(outcome.is_valid()); // Should be valid because balance check is disabled diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 59e187dd1c8..d3ec6b36160 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -9,7 +9,7 @@ use crate::{ use alloy_eips::{eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization}; use alloy_primitives::{Address, TxHash, B256, U256}; use 
futures_util::future::Either; -use reth_primitives_traits::{Recovered, SealedBlock}; +use reth_primitives_traits::{Block, Recovered, SealedBlock}; use std::{fmt, fmt::Debug, future::Future, time::Instant}; mod constants; @@ -24,7 +24,6 @@ pub use task::{TransactionValidationTaskExecutor, ValidationTask}; pub use constants::{ DEFAULT_MAX_TX_INPUT_BYTES, MAX_CODE_BYTE_SIZE, MAX_INIT_CODE_BYTE_SIZE, TX_SLOT_BYTE_SIZE, }; -use reth_primitives_traits::Block; /// A Result type returned after checking a transaction's validity. #[derive(Debug)] @@ -174,6 +173,9 @@ pub trait TransactionValidator: Debug + Send + Sync { /// The transaction type to validate. type Transaction: PoolTransaction; + /// The block type used for new head block notifications. + type Block: Block; + /// Validates the transaction and returns a [`TransactionValidationOutcome`] describing the /// validity of the given transaction. /// @@ -236,19 +238,16 @@ pub trait TransactionValidator: Debug + Send + Sync { /// Invoked when the head block changes. /// /// This can be used to update fork specific values (timestamp). 
- fn on_new_head_block(&self, _new_tip_block: &SealedBlock) - where - B: Block, - { - } + fn on_new_head_block(&self, _new_tip_block: &SealedBlock) {} } impl TransactionValidator for Either where A: TransactionValidator, - B: TransactionValidator, + B: TransactionValidator, { type Transaction = A::Transaction; + type Block = A::Block; async fn validate_transaction( &self, @@ -282,10 +281,7 @@ where } } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) - where - Bl: Block, - { + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { match self { Self::Left(v) => v.on_new_head_block(new_tip_block), Self::Right(v) => v.on_new_head_block(new_tip_block), diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 0959d5b3fdb..39ae41bc3de 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -8,7 +8,7 @@ use crate::{ TransactionValidator, }; use futures_util::{lock::Mutex, StreamExt}; -use reth_primitives_traits::{Block, SealedBlock}; +use reth_primitives_traits::SealedBlock; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::{ @@ -171,7 +171,7 @@ impl TransactionValidationTaskExecutor(tasks, blob_store) + .build_with_tasks(tasks, blob_store) } } @@ -197,6 +197,7 @@ where V: TransactionValidator + 'static, { type Transaction = ::Transaction; + type Block = V::Block; async fn validate_transaction( &self, @@ -284,10 +285,7 @@ where self.validate_transactions(transactions.into_iter().map(|tx| (origin, tx)).collect()).await } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) - where - B: Block, - { + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { self.validator.on_new_head_block(new_tip_block) } } @@ -307,6 +305,7 @@ mod tests { impl TransactionValidator for NoopValidator { type Transaction = MockTransaction; + type Block = reth_ethereum_primitives::Block; async fn validate_transaction( &self, diff 
--git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index b6b8fb3cdf2..2150118173e 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -6,14 +6,14 @@ use reth_ethereum::{ chainspec::ChainSpec, cli::interface::Cli, node::{ - api::{FullNodeTypes, NodeTypes}, + api::{BlockTy, FullNodeTypes, NodeTypes}, builder::{components::PoolBuilder, BuilderContext}, node::EthereumAddOns, EthereumNode, }, pool::{ - blobstore::InMemoryBlobStore, EthTransactionPool, PoolConfig, - TransactionValidationTaskExecutor, + blobstore::InMemoryBlobStore, CoinbaseTipOrdering, EthPooledTransaction, + EthTransactionPool, Pool, PoolConfig, TransactionValidationTaskExecutor, }, provider::CanonStateSubscriptions, EthPrimitives, @@ -53,7 +53,12 @@ impl PoolBuilder for CustomPoolBuilder where Node: FullNodeTypes>, { - type Pool = EthTransactionPool; + type Pool = EthTransactionPool< + Node::Provider, + InMemoryBlobStore, + EthPooledTransaction, + BlockTy, + >; async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.config().datadir(); @@ -62,10 +67,13 @@ where .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) 
.with_additional_tasks(ctx.config().txpool.additional_validation_tasks) - .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); + .build_with_tasks::<_, _, _, BlockTy>( + ctx.task_executor().clone(), + blob_store.clone(), + ); let transaction_pool = - reth_ethereum::pool::Pool::eth_pool(validator, blob_store, self.pool_config); + Pool::new(validator, CoinbaseTipOrdering::default(), blob_store, self.pool_config); info!(target: "reth::cli", "Transaction pool initialized"); let transactions_path = data_dir.txpool_transactions(); From a2237c534e99d59bd2d44d568082c193318e643c Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 23 Jan 2026 05:23:44 -0800 Subject: [PATCH 169/267] feat(p2p): add `reth p2p enode` command (#21357) Co-authored-by: Amp --- crates/cli/commands/src/p2p/enode.rs | 34 ++++ crates/cli/commands/src/p2p/mod.rs | 19 ++ docs/vocs/docs/pages/cli/SUMMARY.mdx | 2 + docs/vocs/docs/pages/cli/op-reth/p2p.mdx | 1 + .../vocs/docs/pages/cli/op-reth/p2p/enode.mdx | 165 ++++++++++++++++++ docs/vocs/docs/pages/cli/reth/p2p.mdx | 1 + docs/vocs/docs/pages/cli/reth/p2p/enode.mdx | 165 ++++++++++++++++++ docs/vocs/sidebar-cli-op-reth.ts | 4 + docs/vocs/sidebar-cli-reth.ts | 4 + 9 files changed, 395 insertions(+) create mode 100644 crates/cli/commands/src/p2p/enode.rs create mode 100644 docs/vocs/docs/pages/cli/op-reth/p2p/enode.mdx create mode 100644 docs/vocs/docs/pages/cli/reth/p2p/enode.mdx diff --git a/crates/cli/commands/src/p2p/enode.rs b/crates/cli/commands/src/p2p/enode.rs new file mode 100644 index 00000000000..eb58acb2905 --- /dev/null +++ b/crates/cli/commands/src/p2p/enode.rs @@ -0,0 +1,34 @@ +//! Enode identifier command + +use clap::Parser; +use reth_cli_util::get_secret_key; +use reth_network_peers::NodeRecord; +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::PathBuf, +}; + +/// Print the enode identifier for a given secret key. 
+#[derive(Parser, Debug)] +pub struct Command { + /// Path to the secret key file for discovery. + pub discovery_secret: PathBuf, + + /// Optional IP address to include in the enode URL. + /// + /// If not provided, defaults to 0.0.0.0. + #[arg(long)] + pub ip: Option, +} + +impl Command { + /// Execute the enode command. + pub fn execute(self) -> eyre::Result<()> { + let sk = get_secret_key(&self.discovery_secret)?; + let ip = self.ip.unwrap_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED)); + let addr = SocketAddr::new(ip, 30303); + let enr = NodeRecord::from_secret_key(addr, &sk); + println!("{enr}"); + Ok(()) + } +} diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 31d017ba92f..9634d95ba11 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -18,6 +18,7 @@ use reth_node_core::{ }; pub mod bootnode; +pub mod enode; pub mod rlpx; /// `reth p2p` command @@ -85,6 +86,9 @@ impl Subcommands::Bootnode(command) => { command.execute().await?; } + Subcommands::Enode(command) => { + command.execute()?; + } } Ok(()) @@ -99,6 +103,7 @@ impl Command { Subcommands::Body { args, .. 
} => Some(&args.chain), Subcommands::Rlpx(_) => None, Subcommands::Bootnode(_) => None, + Subcommands::Enode(_) => None, } } } @@ -126,6 +131,8 @@ pub enum Subcommands { Rlpx(rlpx::Command), /// Bootnode command Bootnode(bootnode::Command), + /// Print enode identifier + Enode(enode::Command), } #[derive(Debug, Clone, Parser)] @@ -225,4 +232,16 @@ mod tests { let _args: Command = Command::parse_from(["reth", "body", "--chain", "mainnet", "1000"]); } + + #[test] + fn parse_enode_cmd() { + let _args: Command = + Command::parse_from(["reth", "enode", "/tmp/secret"]); + } + + #[test] + fn parse_enode_cmd_with_ip() { + let _args: Command = + Command::parse_from(["reth", "enode", "/tmp/secret", "--ip", "192.168.1.1"]); + } } diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index ac193f4d95d..4012565f488 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -55,6 +55,7 @@ - [`reth p2p rlpx`](./reth/p2p/rlpx.mdx) - [`reth p2p rlpx ping`](./reth/p2p/rlpx/ping.mdx) - [`reth p2p bootnode`](./reth/p2p/bootnode.mdx) + - [`reth p2p enode`](./reth/p2p/enode.mdx) - [`reth config`](./reth/config.mdx) - [`reth prune`](./reth/prune.mdx) - [`reth re-execute`](./reth/re-execute.mdx) @@ -113,6 +114,7 @@ - [`op-reth p2p rlpx`](./op-reth/p2p/rlpx.mdx) - [`op-reth p2p rlpx ping`](./op-reth/p2p/rlpx/ping.mdx) - [`op-reth p2p bootnode`](./op-reth/p2p/bootnode.mdx) + - [`op-reth p2p enode`](./op-reth/p2p/enode.mdx) - [`op-reth config`](./op-reth/config.mdx) - [`op-reth prune`](./op-reth/prune.mdx) - [`op-reth re-execute`](./op-reth/re-execute.mdx) \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p.mdx index 3b4efdbd6f0..9be0964d76b 100644 --- a/docs/vocs/docs/pages/cli/op-reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p.mdx @@ -13,6 +13,7 @@ Commands: body Download block body rlpx RLPx commands bootnode Bootnode command + enode Print 
enode identifier help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/enode.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/enode.mdx new file mode 100644 index 00000000000..cdcd7ae1111 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/enode.mdx @@ -0,0 +1,165 @@ +# op-reth p2p enode + +Print enode identifier + +```bash +$ op-reth p2p enode --help +``` +```txt +Usage: op-reth p2p enode [OPTIONS] + +Arguments: + + Path to the secret key file for discovery + +Options: + --ip + Optional IP address to include in the enode URL. + + If not provided, defaults to 0.0.0.0. + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 11d9743c97c..d9427629998 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -13,6 +13,7 @@ Commands: body Download block body rlpx RLPx commands bootnode Bootnode command + enode Print enode identifier help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/reth/p2p/enode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/enode.mdx new file mode 100644 index 00000000000..93f69247872 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/p2p/enode.mdx @@ -0,0 +1,165 @@ +# reth p2p enode + +Print enode identifier + +```bash +$ reth p2p enode --help +``` +```txt +Usage: reth p2p enode [OPTIONS] + +Arguments: + + Path to the secret key file for discovery + +Options: + --ip + Optional IP address to include in the enode URL. + + If not provided, defaults to 0.0.0.0. + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/sidebar-cli-op-reth.ts b/docs/vocs/sidebar-cli-op-reth.ts index 7a15d764376..2c4eb8a61eb 100644 --- a/docs/vocs/sidebar-cli-op-reth.ts +++ b/docs/vocs/sidebar-cli-op-reth.ts @@ -254,6 +254,10 @@ export const opRethCliSidebar: SidebarItem = { { text: "op-reth p2p bootnode", link: "/cli/op-reth/p2p/bootnode" + }, + { + text: "op-reth p2p enode", + link: "/cli/op-reth/p2p/enode" } ] }, diff --git a/docs/vocs/sidebar-cli-reth.ts b/docs/vocs/sidebar-cli-reth.ts index 5b5a74c2a69..9cf94ca9806 100644 --- a/docs/vocs/sidebar-cli-reth.ts +++ b/docs/vocs/sidebar-cli-reth.ts @@ -262,6 +262,10 @@ export const rethCliSidebar: SidebarItem = { { text: "reth p2p bootnode", link: "/cli/reth/p2p/bootnode" + }, + { + text: "reth p2p enode", + link: "/cli/reth/p2p/enode" } ] }, From f77d7d59833018ab165b03e87f8fed2ba55b6e1c Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 23 Jan 2026 06:24:34 -0800 Subject: [PATCH 170/267] feat(reth-bench): support human-readable gas format in generate-big-block (#21361) --- bin/reth-bench/src/bench/gas_limit_ramp.rs | 72 +------------------ .../src/bench/generate_big_block.rs | 8 ++- bin/reth-bench/src/bench/helpers.rs | 70 ++++++++++++++++++ 3 files changed, 77 insertions(+), 73 deletions(-) diff --git a/bin/reth-bench/src/bench/gas_limit_ramp.rs b/bin/reth-bench/src/bench/gas_limit_ramp.rs index 3a969d17cb0..77126835838 100644 --- a/bin/reth-bench/src/bench/gas_limit_ramp.rs +++ b/bin/reth-bench/src/bench/gas_limit_ramp.rs @@ -3,7 +3,7 @@ use crate::{ authenticated_transport::AuthenticatedTransportConnect, bench::{ - helpers::{build_payload, prepare_payload_request, rpc_block_to_header}, + helpers::{build_payload, parse_gas_limit, prepare_payload_request, rpc_block_to_header}, output::GasRampPayloadFile, }, valid_payload::{call_forkchoice_updated, call_new_payload, payload_to_new_payload}, @@ -22,29 +22,6 @@ use 
reth_primitives_traits::constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIM use std::{path::PathBuf, time::Instant}; use tracing::info; -/// Parses a gas limit value with optional suffix: K for thousand, M for million, G for billion. -/// -/// Examples: "30000000", "30M", "1G", "2G" -fn parse_gas_limit(s: &str) -> eyre::Result { - let s = s.trim(); - if s.is_empty() { - return Err(eyre::eyre!("empty value")); - } - - let (num_str, multiplier) = if let Some(prefix) = s.strip_suffix(['G', 'g']) { - (prefix, 1_000_000_000u64) - } else if let Some(prefix) = s.strip_suffix(['M', 'm']) { - (prefix, 1_000_000u64) - } else if let Some(prefix) = s.strip_suffix(['K', 'k']) { - (prefix, 1_000u64) - } else { - (s, 1u64) - }; - - let base: u64 = num_str.trim().parse()?; - base.checked_mul(multiplier).ok_or_else(|| eyre::eyre!("value overflow")) -} - /// `reth benchmark gas-limit-ramp` command. #[derive(Debug, Parser)] pub struct Command { @@ -237,50 +214,3 @@ const fn should_stop(mode: RampMode, blocks_processed: u64, current_gas_limit: u RampMode::TargetGasLimit(target) => current_gas_limit >= target, } } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_gas_limit_plain_number() { - assert_eq!(parse_gas_limit("30000000").unwrap(), 30_000_000); - assert_eq!(parse_gas_limit("1").unwrap(), 1); - assert_eq!(parse_gas_limit("0").unwrap(), 0); - } - - #[test] - fn test_parse_gas_limit_k_suffix() { - assert_eq!(parse_gas_limit("1K").unwrap(), 1_000); - assert_eq!(parse_gas_limit("30k").unwrap(), 30_000); - assert_eq!(parse_gas_limit("100K").unwrap(), 100_000); - } - - #[test] - fn test_parse_gas_limit_m_suffix() { - assert_eq!(parse_gas_limit("1M").unwrap(), 1_000_000); - assert_eq!(parse_gas_limit("30m").unwrap(), 30_000_000); - assert_eq!(parse_gas_limit("100M").unwrap(), 100_000_000); - } - - #[test] - fn test_parse_gas_limit_g_suffix() { - assert_eq!(parse_gas_limit("1G").unwrap(), 1_000_000_000); - assert_eq!(parse_gas_limit("2g").unwrap(), 
2_000_000_000); - assert_eq!(parse_gas_limit("10G").unwrap(), 10_000_000_000); - } - - #[test] - fn test_parse_gas_limit_with_whitespace() { - assert_eq!(parse_gas_limit(" 1G ").unwrap(), 1_000_000_000); - assert_eq!(parse_gas_limit("2 M").unwrap(), 2_000_000); - } - - #[test] - fn test_parse_gas_limit_errors() { - assert!(parse_gas_limit("").is_err()); - assert!(parse_gas_limit("abc").is_err()); - assert!(parse_gas_limit("G").is_err()); - assert!(parse_gas_limit("-1G").is_err()); - } -} diff --git a/bin/reth-bench/src/bench/generate_big_block.rs b/bin/reth-bench/src/bench/generate_big_block.rs index 7ddab1125e6..f237bfd47aa 100644 --- a/bin/reth-bench/src/bench/generate_big_block.rs +++ b/bin/reth-bench/src/bench/generate_big_block.rs @@ -3,7 +3,9 @@ //! This command fetches transactions from existing blocks and packs them into a single //! large block using the `testing_buildBlockV1` RPC endpoint. -use crate::authenticated_transport::AuthenticatedTransportConnect; +use crate::{ + authenticated_transport::AuthenticatedTransportConnect, bench::helpers::parse_gas_limit, +}; use alloy_eips::{BlockNumberOrTag, Typed2718}; use alloy_primitives::{Bytes, B256}; use alloy_provider::{ext::EngineApi, network::AnyNetwork, Provider, RootProvider}; @@ -202,7 +204,9 @@ pub struct Command { jwt_secret: std::path::PathBuf, /// Target gas to pack into the block. - #[arg(long, value_name = "TARGET_GAS", default_value = "30000000")] + /// Accepts short notation: K for thousand, M for million, G for billion (e.g., 1G = 1 + /// billion). + #[arg(long, value_name = "TARGET_GAS", default_value = "30000000", value_parser = parse_gas_limit)] target_gas: u64, /// Starting block number to fetch transactions from. diff --git a/bin/reth-bench/src/bench/helpers.rs b/bin/reth-bench/src/bench/helpers.rs index f367fd69a1d..cb78d1c4e3a 100644 --- a/bin/reth-bench/src/bench/helpers.rs +++ b/bin/reth-bench/src/bench/helpers.rs @@ -1,6 +1,29 @@ //! Common helpers for reth-bench commands. 
use crate::valid_payload::call_forkchoice_updated; + +/// Parses a gas limit value with optional suffix: K for thousand, M for million, G for billion. +/// +/// Examples: "30000000", "30M", "1G", "2G" +pub(crate) fn parse_gas_limit(s: &str) -> eyre::Result { + let s = s.trim(); + if s.is_empty() { + return Err(eyre::eyre!("empty value")); + } + + let (num_str, multiplier) = if let Some(prefix) = s.strip_suffix(['G', 'g']) { + (prefix, 1_000_000_000u64) + } else if let Some(prefix) = s.strip_suffix(['M', 'm']) { + (prefix, 1_000_000u64) + } else if let Some(prefix) = s.strip_suffix(['K', 'k']) { + (prefix, 1_000u64) + } else { + (s, 1u64) + }; + + let base: u64 = num_str.trim().parse()?; + base.checked_mul(multiplier).ok_or_else(|| eyre::eyre!("value overflow")) +} use alloy_consensus::Header; use alloy_eips::eip4844::kzg_to_versioned_hash; use alloy_primitives::{Address, B256}; @@ -194,3 +217,50 @@ pub(crate) async fn get_payload_with_sidecar( _ => panic!("This tool does not support getPayload versions past v5"), } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_gas_limit_plain_number() { + assert_eq!(parse_gas_limit("30000000").unwrap(), 30_000_000); + assert_eq!(parse_gas_limit("1").unwrap(), 1); + assert_eq!(parse_gas_limit("0").unwrap(), 0); + } + + #[test] + fn test_parse_gas_limit_k_suffix() { + assert_eq!(parse_gas_limit("1K").unwrap(), 1_000); + assert_eq!(parse_gas_limit("30k").unwrap(), 30_000); + assert_eq!(parse_gas_limit("100K").unwrap(), 100_000); + } + + #[test] + fn test_parse_gas_limit_m_suffix() { + assert_eq!(parse_gas_limit("1M").unwrap(), 1_000_000); + assert_eq!(parse_gas_limit("30m").unwrap(), 30_000_000); + assert_eq!(parse_gas_limit("100M").unwrap(), 100_000_000); + } + + #[test] + fn test_parse_gas_limit_g_suffix() { + assert_eq!(parse_gas_limit("1G").unwrap(), 1_000_000_000); + assert_eq!(parse_gas_limit("2g").unwrap(), 2_000_000_000); + assert_eq!(parse_gas_limit("10G").unwrap(), 10_000_000_000); + } + + 
#[test] + fn test_parse_gas_limit_with_whitespace() { + assert_eq!(parse_gas_limit(" 1G ").unwrap(), 1_000_000_000); + assert_eq!(parse_gas_limit("2 M").unwrap(), 2_000_000); + } + + #[test] + fn test_parse_gas_limit_errors() { + assert!(parse_gas_limit("").is_err()); + assert!(parse_gas_limit("abc").is_err()); + assert!(parse_gas_limit("G").is_err()); + assert!(parse_gas_limit("-1G").is_err()); + } +} From 830cd5e3551427e8191d941c010a9f55a2dabc5f Mon Sep 17 00:00:00 2001 From: Gigi Date: Fri, 23 Jan 2026 15:57:46 +0100 Subject: [PATCH 171/267] chore: update snmalloc upstream repository link (#21360) --- bin/reth/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 9d47c679eeb..e41c9d27648 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -25,8 +25,8 @@ //! - `jemalloc-unprefixed`: Uses unprefixed jemalloc symbols. //! - `tracy-allocator`: Enables [Tracy](https://github.com/wolfpld/tracy) profiler allocator //! integration for memory profiling. -//! - `snmalloc`: Uses [snmalloc](https://github.com/snmalloc/snmalloc) as the global allocator. Use -//! `--no-default-features` when enabling this, as jemalloc takes precedence. +//! - `snmalloc`: Uses [snmalloc](https://github.com/microsoft/snmalloc) as the global allocator. +//! Use `--no-default-features` when enabling this, as jemalloc takes precedence. //! - `snmalloc-native`: Uses snmalloc with native CPU optimizations. Use `--no-default-features` //! when enabling this. //! 
From 0ddaf1b26c1ec5344afc03a233357d6e089a347d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 23 Jan 2026 16:17:33 +0100 Subject: [PATCH 172/267] feat(engine): add BAL metrics type for EIP-7928 (#21356) --- crates/engine/tree/src/tree/metrics.rs | 30 ++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index ea17ff23148..fb6508d99b3 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -22,6 +22,9 @@ pub(crate) struct EngineApiMetrics { pub(crate) block_validation: BlockValidationMetrics, /// Canonical chain and reorg related metrics pub tree: TreeMetrics, + /// Metrics for EIP-7928 Block-Level Access Lists (BAL). + #[allow(dead_code)] + pub(crate) bal: BalMetrics, } impl EngineApiMetrics { @@ -293,6 +296,33 @@ impl NewPayloadStatusMetrics { } } +/// Metrics for EIP-7928 Block-Level Access Lists (BAL). +/// +/// See also +#[allow(dead_code)] +#[derive(Metrics, Clone)] +#[metrics(scope = "execution.block_access_list")] +pub(crate) struct BalMetrics { + /// Size of the BAL in bytes for the current block. + pub(crate) size_bytes: Gauge, + /// Total number of blocks with valid BALs. + pub(crate) valid_total: Counter, + /// Total number of blocks with invalid BALs. + pub(crate) invalid_total: Counter, + /// Time taken to validate the BAL against actual execution. + pub(crate) validation_time_seconds: Histogram, + /// Number of account changes in the BAL. + pub(crate) account_changes: Gauge, + /// Number of storage changes in the BAL. + pub(crate) storage_changes: Gauge, + /// Number of balance changes in the BAL. + pub(crate) balance_changes: Gauge, + /// Number of nonce changes in the BAL. + pub(crate) nonce_changes: Gauge, + /// Number of code changes in the BAL. + pub(crate) code_changes: Gauge, +} + /// Metrics for non-execution related block validation. 
#[derive(Metrics, Clone)] #[metrics(scope = "sync.block_validation")] From 1f536cce65905c856bfa663dbbce24bf499bc3fb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 23 Jan 2026 15:41:08 +0000 Subject: [PATCH 173/267] test(e2e): selfdestruct pre- and post-Dencun (#21363) --- crates/ethereum/node/tests/e2e/main.rs | 1 + .../ethereum/node/tests/e2e/selfdestruct.rs | 529 ++++++++++++++++++ crates/ethereum/node/tests/e2e/utils.rs | 13 + 3 files changed, 543 insertions(+) create mode 100644 crates/ethereum/node/tests/e2e/selfdestruct.rs diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 5960cd9c6f1..2f2e4ee1298 100644 --- a/crates/ethereum/node/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -9,6 +9,7 @@ mod p2p; mod pool; mod prestate; mod rpc; +mod selfdestruct; mod utils; const fn main() {} diff --git a/crates/ethereum/node/tests/e2e/selfdestruct.rs b/crates/ethereum/node/tests/e2e/selfdestruct.rs new file mode 100644 index 00000000000..8ffd9169efb --- /dev/null +++ b/crates/ethereum/node/tests/e2e/selfdestruct.rs @@ -0,0 +1,529 @@ +//! E2E tests for SELFDESTRUCT behavior and output state verification. +//! +//! These tests verify that: +//! - Pre-Dencun: SELFDESTRUCT clears storage and code, output state reflects this +//! - Post-Dencun (EIP-6780): SELFDESTRUCT only works in same-tx creation, state persists +//! +//! We disable prewarming to ensure deterministic cache behavior and verify the execution +//! output state contains the expected account status after SELFDESTRUCT. 
+ +use crate::utils::{eth_payload_attributes, eth_payload_attributes_shanghai}; +use alloy_network::{EthereumWallet, TransactionBuilder}; +use alloy_primitives::{bytes, Address, Bytes, TxKind, U256}; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_rpc_types_eth::TransactionRequest; +use futures::StreamExt; +use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::setup_engine; +use reth_node_api::TreeConfig; +use reth_node_ethereum::EthereumNode; +use reth_revm::db::BundleAccount; +use std::sync::Arc; + +const MAX_FEE_PER_GAS: u128 = 20_000_000_000; +const MAX_PRIORITY_FEE_PER_GAS: u128 = 1_000_000_000; + +fn cancun_spec() -> Arc { + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ) +} + +fn shanghai_spec() -> Arc { + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .shanghai_activated() + .build(), + ) +} + +fn deploy_tx(from: Address, nonce: u64, init_code: Bytes) -> TransactionRequest { + TransactionRequest::default() + .with_from(from) + .with_nonce(nonce) + .with_gas_limit(500_000) + .with_max_fee_per_gas(MAX_FEE_PER_GAS) + .with_max_priority_fee_per_gas(MAX_PRIORITY_FEE_PER_GAS) + .with_input(init_code) + .with_kind(TxKind::Create) +} + +fn call_tx(from: Address, to: Address, nonce: u64) -> TransactionRequest { + TransactionRequest::default() + .with_from(from) + .with_to(to) + .with_nonce(nonce) + .with_gas_limit(100_000) + .with_max_fee_per_gas(MAX_FEE_PER_GAS) + .with_max_priority_fee_per_gas(MAX_PRIORITY_FEE_PER_GAS) +} + +fn transfer_tx(from: Address, to: Address, nonce: u64, value: U256) -> TransactionRequest { + TransactionRequest::default() + .with_from(from) + .with_to(to) + .with_nonce(nonce) + .with_value(value) + .with_gas_limit(21_000) + .with_max_fee_per_gas(MAX_FEE_PER_GAS) 
+ .with_max_priority_fee_per_gas(MAX_PRIORITY_FEE_PER_GAS) +} + +/// Creates init code for a contract that selfdestructs during deployment (same tx). +/// This tests the EIP-6780 exception where SELFDESTRUCT in same tx as creation still works. +/// +/// The contract: +/// 1. Stores 0x42 at slot 0 +/// 2. Immediately selfdestructs to beneficiary (during init, before returning runtime) +fn selfdestruct_in_constructor_init_code() -> Bytes { + // Init code that selfdestructs during deployment: + // PUSH1 0x42, PUSH1 0x00, SSTORE (store 0x42 at slot 0) + // PUSH20 , SELFDESTRUCT + let mut init = Vec::new(); + init.extend_from_slice(&[0x60, 0x42, 0x60, 0x00, 0x55]); // PUSH1 0x42, PUSH1 0x00, SSTORE + init.extend_from_slice(&[ + 0x73, // PUSH20 + 0xde, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, // beneficiary address + ]); + init.push(0xff); // SELFDESTRUCT + + Bytes::from(init) +} + +/// Creates init code for a simple contract that: +/// 1. Stores 0x42 at slot 0 during deployment +/// 2. On any call: selfdestructs to beneficiary +/// +/// This simpler contract avoids complex branching logic. 
+fn selfdestruct_contract_init_code() -> Bytes { + // Runtime: just selfdestruct on any call + // PUSH20 + // SELFDESTRUCT + let runtime = bytes!( + "73dead000000000000000000000000000000000001" // PUSH20 beneficiary + "ff" // SELFDESTRUCT + ); + + let runtime_len = runtime.len(); // 22 bytes + + // Init code: SSTORE(0, 0x42), CODECOPY, RETURN + // Total init code before runtime = 17 bytes + let init_len: u8 = 17; + + let mut init = Vec::new(); + init.extend_from_slice(&[0x60, 0x42, 0x60, 0x00, 0x55]); // PUSH1 0x42, PUSH1 0x00, SSTORE + init.extend_from_slice(&[0x60, runtime_len as u8, 0x60, init_len, 0x60, 0x00, 0x39]); // CODECOPY + init.extend_from_slice(&[0x60, runtime_len as u8, 0x60, 0x00, 0xf3]); // RETURN + init.extend_from_slice(&runtime); + + Bytes::from(init) +} + +/// Tests SELFDESTRUCT behavior post-Dencun (Cancun+). +/// +/// Post-Dencun (EIP-6780): +/// - SELFDESTRUCT only deletes contract if called in same tx as creation +/// - For existing contracts, SELFDESTRUCT only sends balance, code/storage persist +/// - The output state should NOT mark the account as destroyed +/// +/// This test verifies: +/// 1. Contract deploys with storage +/// 2. SELFDESTRUCT in later tx does NOT delete code/storage +/// 3. 
Output state shows account is NOT destroyed +#[tokio::test] +async fn test_selfdestruct_post_dencun() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let tree_config = TreeConfig::default().without_prewarming(true).without_state_cache(false); + let (mut nodes, _tasks, wallet) = + setup_engine::(1, cancun_spec(), false, tree_config, eth_payload_attributes) + .await?; + let mut node = nodes.pop().unwrap(); + let signer = wallet.inner.clone(); + let provider = ProviderBuilder::new() + .wallet(EthereumWallet::new(signer.clone())) + .connect_http(node.rpc_url()); + + // Deploy contract that stores 0x42 at slot 0 and selfdestructs on any call + let pending = provider + .send_transaction(deploy_tx(signer.address(), 0, selfdestruct_contract_init_code())) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "Contract deployment should succeed"); + + let contract_address = receipt.contract_address.expect("Should have contract address"); + + // Consume the canonical notification for deployment block + let _ = node.canonical_stream.next().await; + + // Trigger SELFDESTRUCT by calling the contract + let pending = provider.send_transaction(call_tx(signer.address(), contract_address, 1)).await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "Selfdestruct tx should succeed"); + + // Get the canonical notification for the selfdestruct block + let notification = node.canonical_stream.next().await.unwrap(); + let chain = notification.committed(); + let execution_outcome = chain.execution_outcome(); + + // Verify the output state: post-Dencun, account should NOT be destroyed + let account_state: Option<&BundleAccount> = execution_outcome.bundle.account(&contract_address); + assert!( + account_state.is_none() || !account_state.unwrap().was_destroyed(), + "Post-Dencun (EIP-6780): Account should NOT be destroyed when SELFDESTRUCT called on existing 
contract" + ); + + // Verify via RPC that code and storage persist + let code_after = provider.get_code_at(contract_address).await?; + assert!(!code_after.is_empty(), "Post-Dencun: Contract code should persist"); + + let slot0_after = provider.get_storage_at(contract_address, U256::ZERO).await?; + assert_eq!(slot0_after, U256::from(0x42), "Post-Dencun: Storage should persist"); + + // Send another transaction to the contract address in a new block. + // This tests cache behavior - if cache has stale data, execution would be incorrect. + // Post-Dencun: calling the contract should trigger SELFDESTRUCT again (but only transfer + // balance) + let pending = provider.send_transaction(call_tx(signer.address(), contract_address, 2)).await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "Second call to contract should succeed"); + + // Consume the canonical notification + let notification = node.canonical_stream.next().await.unwrap(); + let chain = notification.committed(); + let execution_outcome = chain.execution_outcome(); + + // Verify the output state still shows account NOT destroyed + let account_state: Option<&BundleAccount> = execution_outcome.bundle.account(&contract_address); + assert!( + account_state.is_none() || !account_state.unwrap().was_destroyed(), + "Post-Dencun: Account should still NOT be destroyed after second SELFDESTRUCT call" + ); + + // Verify code and storage still persist after the second call + let code_final = provider.get_code_at(contract_address).await?; + assert!(!code_final.is_empty(), "Post-Dencun: Contract code should still persist"); + + let slot0_final = provider.get_storage_at(contract_address, U256::ZERO).await?; + assert_eq!(slot0_final, U256::from(0x42), "Post-Dencun: Storage should still persist"); + + Ok(()) +} + +/// Tests SELFDESTRUCT in same transaction as creation (post-Dencun). 
+/// +/// Post-Dencun (EIP-6780): +/// - SELFDESTRUCT during the same transaction as creation DOES delete the contract +/// - This is the exception to the rule that SELFDESTRUCT no longer deletes contracts +/// +/// This test verifies: +/// 1. Contract selfdestructs during its constructor +/// 2. Contract is deleted (same-tx exception applies) +/// 3. No code or storage remains +/// 4. Since account never existed in DB before, bundle has no entry for it +#[tokio::test] +async fn test_selfdestruct_same_tx_post_dencun() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let tree_config = TreeConfig::default().without_prewarming(true).without_state_cache(false); + let (mut nodes, _tasks, wallet) = + setup_engine::(1, cancun_spec(), false, tree_config, eth_payload_attributes) + .await?; + let mut node = nodes.pop().unwrap(); + let signer = wallet.inner.clone(); + let provider = ProviderBuilder::new() + .wallet(EthereumWallet::new(signer.clone())) + .connect_http(node.rpc_url()); + + // Deploy contract that selfdestructs during its constructor + let pending = provider + .send_transaction(deploy_tx(signer.address(), 0, selfdestruct_in_constructor_init_code())) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "Contract deployment with selfdestruct should succeed"); + + // Calculate the contract address (CREATE uses sender + nonce) + let contract_address = signer.address().create(0); + + // Get the canonical notification for the deployment block + let notification = node.canonical_stream.next().await.unwrap(); + let chain = notification.committed(); + let execution_outcome = chain.execution_outcome(); + + // Verify the output state: same-tx SELFDESTRUCT should destroy the account + let account_state: Option<&BundleAccount> = execution_outcome.bundle.account(&contract_address); + assert!( + account_state.is_none(), + "Post-Dencun same-tx: Account was created and selfdestructed in the same 
transaction, no trace in bundle state" + ); + + // Verify via RPC that code and storage are cleared + let code = provider.get_code_at(contract_address).await?; + assert!(code.is_empty(), "Post-Dencun same-tx: Contract code should be deleted"); + + let slot0 = provider.get_storage_at(contract_address, U256::ZERO).await?; + assert_eq!(slot0, U256::ZERO, "Post-Dencun same-tx: Storage should be cleared"); + + // Send ETH to the destroyed address in a new block to test cache behavior + let pending = provider + .send_transaction(transfer_tx(signer.address(), contract_address, 1, U256::from(1000))) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "ETH transfer to destroyed address should succeed"); + + // Consume the canonical notification + let _ = node.canonical_stream.next().await; + + // Verify code is still empty and account received ETH + let code_final = provider.get_code_at(contract_address).await?; + assert!(code_final.is_empty(), "Post-Dencun same-tx: Contract code should remain deleted"); + + let balance = provider.get_balance(contract_address).await?; + assert_eq!(balance, U256::from(1000), "Post-Dencun same-tx: Account should have received ETH"); + + Ok(()) +} + +/// Tests SELFDESTRUCT behavior pre-Dencun (Shanghai). +/// +/// Pre-Dencun: +/// - SELFDESTRUCT deletes contract code and storage regardless of when contract was created +/// - The output state MUST mark the account as destroyed +/// +/// This test verifies: +/// 1. Contract deploys with storage +/// 2. SELFDESTRUCT deletes code and storage +/// 3. 
Output state shows account IS destroyed +#[tokio::test] +async fn test_selfdestruct_pre_dencun() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let tree_config = TreeConfig::default().without_prewarming(true).without_state_cache(false); + let (mut nodes, _tasks, wallet) = setup_engine::( + 1, + shanghai_spec(), + false, + tree_config, + eth_payload_attributes_shanghai, + ) + .await?; + let mut node = nodes.pop().unwrap(); + let signer = wallet.inner.clone(); + let provider = ProviderBuilder::new() + .wallet(EthereumWallet::new(signer.clone())) + .connect_http(node.rpc_url()); + + // Deploy contract that stores 0x42 at slot 0 and selfdestructs on any call + let pending = provider + .send_transaction(deploy_tx(signer.address(), 0, selfdestruct_contract_init_code())) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "Contract deployment should succeed"); + + let contract_address = receipt.contract_address.expect("Should have contract address"); + + // Consume the canonical notification for deployment block + let _ = node.canonical_stream.next().await; + + // Trigger SELFDESTRUCT by calling the contract + let pending = provider.send_transaction(call_tx(signer.address(), contract_address, 1)).await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "Selfdestruct tx should succeed"); + + // Get the canonical notification for the selfdestruct block + let notification = node.canonical_stream.next().await.unwrap(); + let chain = notification.committed(); + let execution_outcome = chain.execution_outcome(); + + // Verify the output state: pre-Dencun, account MUST be destroyed + let account_state: Option<&BundleAccount> = execution_outcome.bundle.account(&contract_address); + assert!( + account_state.is_some_and(|a: &BundleAccount| a.was_destroyed()), + "Pre-Dencun: Account MUST be marked as destroyed in output state" + ); + + // Verify via 
RPC that code and storage are cleared + let code_after = provider.get_code_at(contract_address).await?; + assert!(code_after.is_empty(), "Pre-Dencun: Contract code should be deleted"); + + let slot0_after = provider.get_storage_at(contract_address, U256::ZERO).await?; + assert_eq!(slot0_after, U256::ZERO, "Pre-Dencun: Storage should be cleared"); + + // Send ETH to the destroyed contract address in a new block. + // This tests cache behavior - the cache should correctly reflect the account was destroyed. + // Pre-Dencun: the contract no longer exists, so this is just a plain ETH transfer. + let pending = provider + .send_transaction(transfer_tx(signer.address(), contract_address, 2, U256::from(1000))) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "ETH transfer to destroyed contract address should succeed"); + + // Consume the canonical notification + let notification = node.canonical_stream.next().await.unwrap(); + let chain = notification.committed(); + let execution_outcome = chain.execution_outcome(); + + // Verify the output state shows the account exists (received ETH) but has no code + let account_state: Option<&BundleAccount> = execution_outcome.bundle.account(&contract_address); + // After receiving ETH, the account should exist with balance but no code + assert!( + account_state.is_some(), + "Pre-Dencun: Account should exist after receiving ETH (even though contract was destroyed)" + ); + + // Verify code is still empty (contract was destroyed, only ETH was received) + let code_final = provider.get_code_at(contract_address).await?; + assert!(code_final.is_empty(), "Pre-Dencun: Contract code should remain deleted"); + + // Verify storage is still cleared + let slot0_final = provider.get_storage_at(contract_address, U256::ZERO).await?; + assert_eq!(slot0_final, U256::ZERO, "Pre-Dencun: Storage should remain cleared"); + + // Verify the account now has the ETH balance we sent + let balance 
= provider.get_balance(contract_address).await?; + assert_eq!(balance, U256::from(1000), "Pre-Dencun: Account should have received ETH"); + + Ok(()) +} + +/// Tests SELFDESTRUCT in same transaction as creation, where account previously had ETH +/// (post-Dencun). +/// +/// Post-Dencun (EIP-6780): +/// - The same-tx exception applies when the CONTRACT is created in that transaction +/// - Even if the address previously had ETH (as an EOA), deploying a contract there and +/// selfdestructing in the same tx DOES delete the contract +/// - The "created in same tx" refers to contract creation, not account existence +/// +/// This test verifies: +/// 1. Send ETH to the future contract address (address has balance but no code) +/// 2. Deploy contract that selfdestructs during constructor to that address +/// 3. Contract is deleted (same-tx exception applies - contract was created this tx) +/// 4. Code and storage are cleared +/// 5. Since account existed in DB before (had ETH), bundle marks it as Destroyed +#[tokio::test] +async fn test_selfdestruct_same_tx_preexisting_account_post_dencun() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let tree_config = TreeConfig::default().without_prewarming(true).without_state_cache(false); + let (mut nodes, _tasks, wallet) = + setup_engine::(1, cancun_spec(), false, tree_config, eth_payload_attributes) + .await?; + let mut node = nodes.pop().unwrap(); + let signer = wallet.inner.clone(); + let provider = ProviderBuilder::new() + .wallet(EthereumWallet::new(signer.clone())) + .connect_http(node.rpc_url()); + + // Calculate where the contract will be deployed (CREATE uses sender + nonce) + // We'll use nonce 1 for deployment, so first send ETH with nonce 0 + let future_contract_address = signer.address().create(1); + + // Send ETH to the future contract address first (makes it a pre-existing account) + let pending = provider + .send_transaction(transfer_tx( + signer.address(), + future_contract_address, + 0, + 
U256::from(1000), + )) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "ETH transfer should succeed"); + + // Consume the canonical notification + let _ = node.canonical_stream.next().await; + + // Verify the account exists and has balance + let balance_before = provider.get_balance(future_contract_address).await?; + assert_eq!(balance_before, U256::from(1000), "Account should have ETH before deployment"); + + // Now deploy contract that selfdestructs during its constructor to the same address + let pending = provider + .send_transaction(deploy_tx(signer.address(), 1, selfdestruct_in_constructor_init_code())) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "Contract deployment with selfdestruct should succeed"); + + // Verify deployment went to the expected address + assert_eq!( + receipt.contract_address, + Some(future_contract_address), + "Contract should be deployed to pre-computed address" + ); + + // Get the canonical notification for the deployment block + let notification = node.canonical_stream.next().await.unwrap(); + let chain = notification.committed(); + let execution_outcome = chain.execution_outcome(); + + // Verify the output state: same-tx exception DOES apply because contract was created this tx + // The account should be marked as destroyed. Since it had prior state (ETH balance), + // the bundle will contain it with status Destroyed and original_info set. 
+ let account_state: Option<&BundleAccount> = + execution_outcome.bundle.account(&future_contract_address); + assert!( + account_state.is_some_and(|a| a.was_destroyed()), + "Post-Dencun same-tx with prior ETH: Account MUST be marked as destroyed" + ); + + // Verify via RPC that code and storage are cleared + let code = provider.get_code_at(future_contract_address).await?; + assert!(code.is_empty(), "Post-Dencun same-tx: Contract code should be deleted"); + + let slot0 = provider.get_storage_at(future_contract_address, U256::ZERO).await?; + assert_eq!(slot0, U256::ZERO, "Post-Dencun same-tx: Storage should be cleared"); + + // Balance should be zero (sent to beneficiary during SELFDESTRUCT) + let balance_after = provider.get_balance(future_contract_address).await?; + assert_eq!( + balance_after, + U256::ZERO, + "Post-Dencun same-tx: Balance should be zero (sent to beneficiary)" + ); + + // Send ETH to the destroyed address to verify cache behavior + let pending = provider + .send_transaction(transfer_tx( + signer.address(), + future_contract_address, + 2, + U256::from(2000), + )) + .await?; + node.advance_block().await?; + let receipt = pending.get_receipt().await?; + assert!(receipt.status(), "ETH transfer should succeed"); + + // Consume notification + let _ = node.canonical_stream.next().await; + + // Verify the account received ETH and has no code (it's now just an EOA) + let balance_final = provider.get_balance(future_contract_address).await?; + assert_eq!(balance_final, U256::from(2000), "Account should have received ETH"); + + let code_final = provider.get_code_at(future_contract_address).await?; + assert!(code_final.is_empty(), "Code should remain empty after ETH transfer"); + + let slot0_final = provider.get_storage_at(future_contract_address, U256::ZERO).await?; + assert_eq!(slot0_final, U256::ZERO, "Storage should remain cleared"); + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 
75f8ea9bac4..e4933169abf 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -29,6 +29,19 @@ pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttribu EthPayloadBuilderAttributes::new(B256::ZERO, attributes) } +/// Helper function to create pre-Cancun (Shanghai) payload attributes. +/// No `parent_beacon_block_root` field. +pub(crate) fn eth_payload_attributes_shanghai(timestamp: u64) -> EthPayloadBuilderAttributes { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: None, + }; + EthPayloadBuilderAttributes::new(B256::ZERO, attributes) +} + /// Advances node by producing blocks with random transactions. pub(crate) async fn advance_with_random_transactions( node: &mut NodeHelperType, From d3846d98a940b18d77f5593c5cf8d1c3ebcb5425 Mon Sep 17 00:00:00 2001 From: iPLAY888 <133153661+letmehateu@users.noreply.github.com> Date: Fri, 23 Jan 2026 18:56:09 +0300 Subject: [PATCH 174/267] refactor: refactor get_idle_peer_for to use Iterator::find (#21321) --- crates/net/network/src/transactions/fetcher.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 8237716a8b4..fcfe9e2693a 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -188,13 +188,7 @@ impl TransactionFetcher { let TxFetchMetadata { fallback_peers, .. } = self.hashes_fetch_inflight_and_pending_fetch.peek(&hash)?; - for peer_id in fallback_peers.iter() { - if self.is_idle(peer_id) { - return Some(peer_id) - } - } - - None + fallback_peers.iter().find(|peer_id| self.is_idle(peer_id)) } /// Returns any idle peer for any hash pending fetch. 
If one is found, the corresponding From fcef82261d3f1e3bc0864d73e079728be59a1837 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 23 Jan 2026 08:37:15 -0800 Subject: [PATCH 175/267] fix(libmdbx): handle errors gracefully in TransactionInner::drop (#21368) --- crates/storage/libmdbx-rs/src/transaction.rs | 41 +++++++++++--------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 20e315bb380..c3b454e0530 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -339,26 +339,31 @@ where fn drop(&mut self) { // To be able to abort a timed out transaction, we need to renew it first. // Hence the usage of `txn_execute_renew_on_timeout` here. - self.txn - .txn_execute_renew_on_timeout(|txn| { - if !self.has_committed() { - if K::IS_READ_ONLY { - #[cfg(feature = "read-tx-timeouts")] - self.env.txn_manager().remove_active_read_transaction(txn); - - unsafe { - ffi::mdbx_txn_abort(txn); - } - } else { - let (sender, rx) = sync_channel(0); - self.env - .txn_manager() - .send_message(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender }); - rx.recv().unwrap().unwrap(); + // + // We intentionally ignore errors here because Drop should never panic. + // MDBX can return errors (e.g., MDBX_PANIC) during abort if the environment + // is in a fatal state, but panicking in Drop can cause double-panics during + // unwinding which terminates the process. 
+ let _ = self.txn.txn_execute_renew_on_timeout(|txn| { + if !self.has_committed() { + if K::IS_READ_ONLY { + #[cfg(feature = "read-tx-timeouts")] + self.env.txn_manager().remove_active_read_transaction(txn); + + unsafe { + ffi::mdbx_txn_abort(txn); + } + } else { + let (sender, rx) = sync_channel(0); + self.env + .txn_manager() + .send_message(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender }); + if let Ok(Err(e)) = rx.recv() { + tracing::error!(target: "libmdbx", %e, "failed to abort transaction in drop"); } } - }) - .unwrap(); + } + }); } } From b814893221d55a252d37db59839e455c927e0de7 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 23 Jan 2026 17:02:53 +0000 Subject: [PATCH 176/267] feat(stages): flush RocksDB at end of history and tx_lookup stages (#21367) --- .../src/stages/index_account_history.rs | 7 +++- .../src/stages/index_storage_history.rs | 6 ++- crates/stages/stages/src/stages/tx_lookup.rs | 6 ++- crates/storage/errors/src/db.rs | 2 + .../src/providers/rocksdb/provider.rs | 39 +++++++++++++++++++ .../provider/src/providers/rocksdb_stub.rs | 7 ++++ 6 files changed, 63 insertions(+), 4 deletions(-) diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 5a3ba750d52..91334c10cfb 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -1,7 +1,7 @@ use super::collect_account_history_indices; use crate::stages::utils::{collect_history_indices, load_account_history}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; -use reth_db_api::{models::ShardedKey, tables, transaction::DbTxMut}; +use reth_db_api::{models::ShardedKey, table::Table, tables, transaction::DbTxMut}; use reth_provider::{ DBProvider, EitherWriter, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, RocksDBProviderFactory, StorageSettingsCache, @@ -111,7 +111,6 @@ where 
// but this is safe for first_sync because if we crash before commit, the // checkpoint stays at 0 and we'll just clear and rebuild again on restart. The // source data (changesets) is intact. - #[cfg(all(unix, feature = "rocksdb"))] provider.rocksdb_provider().clear::()?; } else { provider.tx_ref().clear::()?; @@ -143,6 +142,10 @@ where Ok(((), writer.into_raw_rocksdb_batch())) })?; + if use_rocksdb { + provider.rocksdb_provider().flush(&[tables::AccountsHistory::NAME])?; + } + Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true }) } diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 08192c8871b..0990575500a 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -3,6 +3,7 @@ use crate::{stages::utils::load_storage_history, StageCheckpoint, StageId}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db_api::{ models::{storage_sharded_key::StorageShardedKey, AddressStorageKey, BlockNumberAddress}, + table::Table, tables, transaction::DbTxMut, }; @@ -115,7 +116,6 @@ where // but this is safe for first_sync because if we crash before commit, the // checkpoint stays at 0 and we'll just clear and rebuild again on restart. The // source data (changesets) is intact. 
- #[cfg(all(unix, feature = "rocksdb"))] provider.rocksdb_provider().clear::()?; } else { provider.tx_ref().clear::()?; @@ -147,6 +147,10 @@ where Ok(((), writer.into_raw_rocksdb_batch())) })?; + if use_rocksdb { + provider.rocksdb_provider().flush(&[tables::StoragesHistory::NAME])?; + } + Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true }) } diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index bf056a655bf..404cecae56c 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -3,7 +3,7 @@ use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db_api::{ - table::{Decode, Decompress, Value}, + table::{Decode, Decompress, Table, Value}, tables, transaction::DbTxMut, }; @@ -200,6 +200,10 @@ where } } + if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb { + provider.rocksdb_provider().flush(&[tables::TransactionHashNumbers::NAME])?; + } + Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()) .with_entities_stage_checkpoint(stage_checkpoint(provider)?), diff --git a/crates/storage/errors/src/db.rs b/crates/storage/errors/src/db.rs index 300491ed8a1..5f6da8f347f 100644 --- a/crates/storage/errors/src/db.rs +++ b/crates/storage/errors/src/db.rs @@ -115,6 +115,8 @@ pub enum DatabaseWriteOperation { PutUpsert, /// Put append. PutAppend, + /// Flush to disk. + Flush, } /// Database log level. diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 75b9e6fa5de..7f322c74927 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -833,6 +833,45 @@ impl RocksDBProvider { self.0.table_stats() } + /// Flushes pending writes for the specified tables to disk. 
+ /// + /// This performs a flush of: + /// 1. The Write-Ahead Log (WAL) with sync + /// 2. The column family memtables for the specified table names to SST files + /// + /// After this call completes, all data for the specified tables is durably persisted to disk. + /// + /// # Panics + /// Panics if the provider is in read-only mode. + #[instrument(level = "debug", target = "providers::rocksdb", skip_all, fields(tables = ?tables))] + pub fn flush(&self, tables: &[&'static str]) -> ProviderResult<()> { + let db = self.0.db_rw(); + + db.flush_wal(true).map_err(|e| { + ProviderError::Database(DatabaseError::Write(Box::new(DatabaseWriteError { + info: DatabaseErrorInfo { message: e.to_string().into(), code: -1 }, + operation: DatabaseWriteOperation::Flush, + table_name: "WAL", + key: Vec::new(), + }))) + })?; + + for cf_name in tables { + if let Some(cf) = db.cf_handle(cf_name) { + db.flush_cf(&cf).map_err(|e| { + ProviderError::Database(DatabaseError::Write(Box::new(DatabaseWriteError { + info: DatabaseErrorInfo { message: e.to_string().into(), code: -1 }, + operation: DatabaseWriteOperation::Flush, + table_name: cf_name, + key: Vec::new(), + }))) + })?; + } + } + + Ok(()) + } + /// Creates a raw iterator over all entries in the specified table. /// /// Returns raw `(key_bytes, value_bytes)` pairs without decoding. diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs index 965877db2ed..31c38103e3d 100644 --- a/crates/storage/provider/src/providers/rocksdb_stub.rs +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -88,6 +88,13 @@ impl RocksDBProvider { pub const fn clear(&self) -> ProviderResult<()> { Ok(()) } + + /// Flushes all pending writes to disk (stub implementation). + /// + /// This is a no-op since there is no `RocksDB` when the feature is disabled. 
+ pub const fn flush(&self, _tables: &[&'static str]) -> ProviderResult<()> { + Ok(()) + } } impl DatabaseMetrics for RocksDBProvider { From a543752f7d80778a5bfd1c3487010e85576e6cae Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 23 Jan 2026 17:52:33 +0000 Subject: [PATCH 177/267] chore(reth-bench): make from-block a required flag (#21372) --- .../src/bench/generate_big_block.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/bin/reth-bench/src/bench/generate_big_block.rs b/bin/reth-bench/src/bench/generate_big_block.rs index f237bfd47aa..0352d0a39df 100644 --- a/bin/reth-bench/src/bench/generate_big_block.rs +++ b/bin/reth-bench/src/bench/generate_big_block.rs @@ -209,10 +209,21 @@ pub struct Command { #[arg(long, value_name = "TARGET_GAS", default_value = "30000000", value_parser = parse_gas_limit)] target_gas: u64, - /// Starting block number to fetch transactions from. - /// If not specified, starts from the engine's latest block. + /// Block number to start fetching transactions from (required). + /// + /// This must be the last canonical block BEFORE any gas limit ramping was performed. + /// The command collects transactions from historical blocks starting at this number + /// to pack into large blocks. + /// + /// How to determine this value: + /// - If starting from a fresh node (no gas limit ramp yet): use the current chain tip + /// - If gas limit ramping has already been performed: use the block number that was the chain + /// tip BEFORE ramping began (you must track this yourself) + /// + /// Using a block after ramping started will cause transaction collection to fail + /// because those blocks contain synthetic transactions that cannot be replayed. #[arg(long, value_name = "FROM_BLOCK")] - from_block: Option, + from_block: u64, /// Execute the payload (call newPayload + forkchoiceUpdated). /// If false, only builds the payload and prints it. 
@@ -288,7 +299,7 @@ impl Command { format!("Failed to create output directory: {:?}", self.output_dir) })?; - let start_block = self.from_block.unwrap_or(parent_number); + let start_block = self.from_block; // Use pipelined execution when generating multiple payloads if self.count > 1 { From c137ed836f1094ee0c44652e5a1626962c11782b Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 23 Jan 2026 17:57:42 +0000 Subject: [PATCH 178/267] perf(engine): fixed-cache for execution cache (#21128) Co-authored-by: Georgios Konstantopoulos Co-authored-by: Tempo AI --- Cargo.lock | 145 +-- Cargo.toml | 2 +- crates/e2e-test-utils/src/lib.rs | 5 +- crates/e2e-test-utils/src/setup_builder.rs | 8 +- crates/engine/primitives/src/config.rs | 20 +- crates/engine/tree/Cargo.toml | 2 +- crates/engine/tree/src/tree/cached_state.rs | 1039 ++++++++++------- .../tree/src/tree/payload_processor/mod.rs | 87 +- .../src/tree/payload_processor/multiproof.rs | 5 +- .../src/tree/payload_processor/prewarm.rs | 2 +- crates/node/core/src/args/engine.rs | 6 +- crates/node/core/src/node_config.rs | 2 +- 12 files changed, 709 insertions(+), 614 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b59ce14029..13f08781ab7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.28" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3842d8c52fcd3378039f4703dba392dca8b546b1c8ed6183048f8dab95b2be78" +checksum = "ef3a72a2247c34a8545ee99e562b1b9b69168e5000567257ae51e91b4e6b1193" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -495,7 +495,7 @@ dependencies = [ "async-stream", "async-trait", "auto_impl", - "dashmap 6.1.0", + "dashmap", "either", "futures", "futures-utils-wasm", @@ -1774,7 +1774,7 @@ dependencies = [ "bytemuck", "cfg-if", "cow-utils", - "dashmap 6.1.0", + "dashmap", "dynify", "fast-float2", "float16", @@ 
-1968,12 +1968,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" -[[package]] -name = "bytecount" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" - [[package]] name = "bytemuck" version = "1.24.0" @@ -2063,19 +2057,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "cargo_metadata" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" -dependencies = [ - "camino", - "cargo-platform 0.1.9", - "semver 1.0.27", - "serde", - "serde_json", -] - [[package]] name = "cargo_metadata" version = "0.19.2" @@ -2127,9 +2108,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.53" +version = "1.2.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" +checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" dependencies = [ "find-msvc-tools", "jobserver", @@ -2930,19 +2911,6 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "dashmap" -version = "5.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" -dependencies = [ - "cfg-if", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core", -] - [[package]] name = "dashmap" version = "6.1.0" @@ -3495,15 +3463,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] - [[package]] name = "ethereum_hashing" version = "0.7.0" @@ 
-4082,11 +4041,12 @@ checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixed-cache" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25d3af83468398d500e9bc19e001812dcb1a11e4d3d6a5956c789aa3c11a8cb5" +checksum = "0aaafa7294e9617eb29e5c684a3af33324ef512a1bf596af2d1938a03798da29" dependencies = [ "equivalent", + "typeid", ] [[package]] @@ -4851,7 +4811,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.1", + "socket2 0.6.2", "tokio", "tower-service", "tracing", @@ -5967,21 +5927,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "mini-moka" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c325dfab65f261f386debee8b0969da215b3fa0037e74c8a1234db7ba986d803" -dependencies = [ - "crossbeam-channel", - "crossbeam-utils", - "dashmap 5.5.3", - "skeptic", - "smallvec", - "tagptr", - "triomphe", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -6515,9 +6460,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl-probe" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "opentelemetry" @@ -7022,9 +6967,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -7172,17 +7117,6 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "pulldown-cmark" -version = "0.9.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" -dependencies = [ - "bitflags 2.10.0", - "memchr", - "unicase", -] - [[package]] name = "quanta" version = "0.12.6" @@ -7226,7 +7160,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2 0.6.1", + "socket2 0.6.2", "thiserror 2.0.18", "tokio", "tracing", @@ -7263,16 +7197,16 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.1", + "socket2 0.6.2", "tracing", "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.43" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] @@ -8465,13 +8399,13 @@ dependencies = [ "assert_matches", "codspeed-criterion-compat", "crossbeam-channel", - "dashmap 6.1.0", + "dashmap", "derive_more", "eyre", + "fixed-cache", "futures", "metrics", "metrics-util", - "mini-moka", "moka", "parking_lot", "proptest", @@ -9111,7 +9045,7 @@ dependencies = [ "bitflags 2.10.0", "byteorder", "codspeed-criterion-compat", - "dashmap 6.1.0", + "dashmap", "derive_more", "parking_lot", "rand 0.9.2", @@ -10218,7 +10152,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "assert_matches", - "dashmap 6.1.0", + "dashmap", "eyre", "itertools 0.14.0", "metrics", @@ -11245,7 +11179,7 @@ dependencies = [ "alloy-rlp", "codspeed-criterion-compat", "crossbeam-channel", - "dashmap 6.1.0", + "dashmap", "derive_more", "itertools 0.14.0", "metrics", @@ -12381,21 +12315,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" -[[package]] -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" -dependencies = [ - "bytecount", - "cargo_metadata 0.14.2", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", -] - [[package]] name = "sketches-ddsketch" version = "0.3.0" @@ -12464,9 +12383,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" dependencies = [ "libc", "windows-sys 0.60.2", @@ -12956,7 +12875,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.1", + "socket2 0.6.2", "tokio-macros", "windows-sys 0.61.2", ] @@ -13436,12 +13355,6 @@ dependencies = [ "rlp", ] -[[package]] -name = "triomphe" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd69c5aa8f924c7519d6372789a74eac5b94fb0f8fcf0d4a97eb0bfc3e785f39" - [[package]] name = "try-lock" version = "0.2.5" @@ -13467,6 +13380,12 @@ dependencies = [ "utf-8", ] +[[package]] +name = "typeid" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" + [[package]] name = "typenum" version = "1.19.0" diff --git a/Cargo.toml b/Cargo.toml index bc5df15fb5e..9e83760c22c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -588,7 +588,7 @@ tracing-appender = "0.2" url = { version = "2.3", default-features = false } zstd = "0.13" byteorder = "1" -mini-moka = "0.10" +fixed-cache = { version = "0.1.7", features = ["stats"] } moka = "0.12" tar-no-std = { version = "0.3.2", default-features = false } miniz_oxide = { version = "0.8.4", default-features = false } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index f5a2d1b030f..aadf101eb72 100644 --- 
a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -103,7 +103,10 @@ where N: NodeBuilderHelper, { E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) - .with_tree_config_modifier(move |_| tree_config.clone()) + .with_tree_config_modifier(move |base| { + // Apply caller's tree_config but preserve the small cache size from base + tree_config.clone().with_cross_block_cache_size(base.cross_block_cache_size()) + }) .with_node_config_modifier(move |config| config.set_dev(is_dev)) .with_connect_nodes(connect_nodes) .build() diff --git a/crates/e2e-test-utils/src/setup_builder.rs b/crates/e2e-test-utils/src/setup_builder.rs index 8f38b66eb5c..30f7b1d28a0 100644 --- a/crates/e2e-test-utils/src/setup_builder.rs +++ b/crates/e2e-test-utils/src/setup_builder.rs @@ -112,11 +112,13 @@ where ..NetworkArgs::default() }; - // Apply tree config modifier if present + // Apply tree config modifier if present, with test-appropriate defaults + let base_tree_config = + reth_node_api::TreeConfig::default().with_cross_block_cache_size(1024 * 1024); let tree_config = if let Some(modifier) = self.tree_config_modifier { - modifier(reth_node_api::TreeConfig::default()) + modifier(base_tree_config) } else { - reth_node_api::TreeConfig::default() + base_tree_config }; let mut nodes = (0..self.num_nodes) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 0b72e1d6243..20902705dc7 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -50,7 +50,17 @@ pub const DEFAULT_PREWARM_MAX_CONCURRENCY: usize = 16; const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = EPOCH_SLOTS as u32 * 2; const DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH: u32 = 256; const DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE: usize = 4; -const DEFAULT_CROSS_BLOCK_CACHE_SIZE: u64 = 4 * 1024 * 1024 * 1024; +const DEFAULT_CROSS_BLOCK_CACHE_SIZE: usize = default_cross_block_cache_size(); + +const fn 
default_cross_block_cache_size() -> usize { + if cfg!(test) { + 1024 * 1024 // 1 MB in tests + } else if cfg!(target_pointer_width = "32") { + usize::MAX // max possible on wasm32 / 32-bit + } else { + 4 * 1024 * 1024 * 1024 // 4 GB on 64-bit + } +} /// Determines if the host has enough parallelism to run the payload processor. /// @@ -105,7 +115,7 @@ pub struct TreeConfig { /// Whether to enable state provider metrics. state_provider_metrics: bool, /// Cross-block cache size in bytes. - cross_block_cache_size: u64, + cross_block_cache_size: usize, /// Whether the host has enough parallelism to run state root task. has_enough_parallelism: bool, /// Whether multiproof task should chunk proof targets. @@ -193,7 +203,7 @@ impl TreeConfig { disable_prewarming: bool, disable_parallel_sparse_trie: bool, state_provider_metrics: bool, - cross_block_cache_size: u64, + cross_block_cache_size: usize, has_enough_parallelism: bool, multiproof_chunking_enabled: bool, multiproof_chunk_size: usize, @@ -321,7 +331,7 @@ impl TreeConfig { } /// Returns the cross-block cache size. - pub const fn cross_block_cache_size(&self) -> u64 { + pub const fn cross_block_cache_size(&self) -> usize { self.cross_block_cache_size } @@ -424,7 +434,7 @@ impl TreeConfig { } /// Setter for cross block cache size. 
- pub const fn with_cross_block_cache_size(mut self, cross_block_cache_size: u64) -> Self { + pub const fn with_cross_block_cache_size(mut self, cross_block_cache_size: usize) -> Self { self.cross_block_cache_size = cross_block_cache_size; self } diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index b2124098eae..4f2a4540baf 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -53,7 +53,7 @@ revm-primitives.workspace = true futures.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread", "sync", "macros"] } -mini-moka = { workspace = true, features = ["sync"] } +fixed-cache.workspace = true moka = { workspace = true, features = ["sync"] } smallvec.workspace = true diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index 0f0b23b4ea2..dfdcafa49e9 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -1,7 +1,11 @@ //! Execution cache implementation for block processing. 
-use alloy_primitives::{Address, StorageKey, StorageValue, B256}; -use metrics::Gauge; -use mini_moka::sync::CacheBuilder; +use alloy_primitives::{ + map::{DefaultHashBuilder, FbBuildHasher}, + Address, StorageKey, StorageValue, B256, +}; +use fixed_cache::{AnyRef, CacheConfig, Stats, StatsHandler}; +use metrics::{Counter, Gauge, Histogram}; +use parking_lot::Once; use reth_errors::ProviderResult; use reth_metrics::Metrics; use reth_primitives_traits::{Account, Bytecode}; @@ -14,12 +18,62 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; -use revm_primitives::map::DefaultHashBuilder; -use std::{sync::Arc, time::Duration}; -use tracing::{debug_span, instrument, trace}; +use revm_primitives::eip7907::MAX_CODE_SIZE; +use std::{ + mem::size_of, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; +use tracing::{debug_span, instrument, trace, warn}; + +/// Alignment in bytes for entries in the fixed-cache. +/// +/// Each bucket in `fixed-cache` is aligned to 128 bytes (cache line) due to +/// `#[repr(C, align(128))]` on the internal `Bucket` struct. +const FIXED_CACHE_ALIGNMENT: usize = 128; + +/// Overhead per entry in the fixed-cache (the `AtomicUsize` tag field). +const FIXED_CACHE_ENTRY_OVERHEAD: usize = size_of::(); + +/// Calculates the actual size of a fixed-cache entry for a given key-value pair. +/// +/// The entry size is `overhead + size_of::() + size_of::()`, rounded up to the +/// next multiple of [`FIXED_CACHE_ALIGNMENT`] (128 bytes). +const fn fixed_cache_entry_size() -> usize { + fixed_cache_key_size_with_value::(size_of::()) +} + +/// Calculates the actual size of a fixed-cache entry for a given key-value pair. +/// +/// The entry size is `overhead + size_of::() + size_of::()`, rounded up to the +/// next multiple of [`FIXED_CACHE_ALIGNMENT`] (128 bytes). 
+const fn fixed_cache_key_size_with_value(value: usize) -> usize { + let raw_size = FIXED_CACHE_ENTRY_OVERHEAD + size_of::() + value; + // Round up to next multiple of alignment + raw_size.div_ceil(FIXED_CACHE_ALIGNMENT) * FIXED_CACHE_ALIGNMENT +} + +/// Size in bytes of a single code cache entry. +const CODE_CACHE_ENTRY_SIZE: usize = fixed_cache_key_size_with_value::
(MAX_CODE_SIZE); + +/// Size in bytes of a single storage cache entry. +const STORAGE_CACHE_ENTRY_SIZE: usize = + fixed_cache_entry_size::<(Address, StorageKey), StorageValue>(); + +/// Size in bytes of a single account cache entry. +const ACCOUNT_CACHE_ENTRY_SIZE: usize = fixed_cache_entry_size::>(); -pub(crate) type Cache = - mini_moka::sync::Cache; +/// Cache configuration with epoch tracking enabled for O(1) cache invalidation. +struct EpochCacheConfig; +impl CacheConfig for EpochCacheConfig { + const EPOCHS: bool = true; +} + +/// Type alias for the fixed-cache used for accounts and storage. +type FixedCache = fixed_cache::Cache; /// A wrapper of a state provider and a shared cache. pub(crate) struct CachedStateProvider { @@ -71,45 +125,63 @@ impl CachedStateProvider { } } -/// Metrics for the cached state provider, showing hits / misses for each cache +/// Metrics for the cached state provider, showing hits / misses / size for each cache. +/// +/// This struct combines both the provider-level metrics (hits/misses tracked by the provider) +/// and the fixed-cache internal stats (collisions, size, capacity). #[derive(Metrics, Clone)] #[metrics(scope = "sync.caching")] pub(crate) struct CachedStateMetrics { + /// Number of times a new execution cache was created + execution_cache_created_total: Counter, + + /// Duration of execution cache creation in seconds + execution_cache_creation_duration_seconds: Histogram, + /// Code cache hits code_cache_hits: Gauge, /// Code cache misses code_cache_misses: Gauge, - /// Code cache size - /// - /// NOTE: this uses the moka caches' `entry_count`, NOT the `weighted_size` method to calculate - /// size. 
+ /// Code cache size (number of entries) code_cache_size: Gauge, + /// Code cache capacity (maximum entries) + code_cache_capacity: Gauge, + + /// Code cache collisions (hash collisions causing eviction) + code_cache_collisions: Gauge, + /// Storage cache hits storage_cache_hits: Gauge, /// Storage cache misses storage_cache_misses: Gauge, - /// Storage cache size - /// - /// NOTE: this uses the moka caches' `entry_count`, NOT the `weighted_size` method to calculate - /// size. + /// Storage cache size (number of entries) storage_cache_size: Gauge, + /// Storage cache capacity (maximum entries) + storage_cache_capacity: Gauge, + + /// Storage cache collisions (hash collisions causing eviction) + storage_cache_collisions: Gauge, + /// Account cache hits account_cache_hits: Gauge, /// Account cache misses account_cache_misses: Gauge, - /// Account cache size - /// - /// NOTE: this uses the moka caches' `entry_count`, NOT the `weighted_size` method to calculate - /// size. + /// Account cache size (number of entries) account_cache_size: Gauge, + + /// Account cache capacity (maximum entries) + account_cache_capacity: Gauge, + + /// Account cache collisions (hash collisions causing eviction) + account_cache_collisions: Gauge, } impl CachedStateMetrics { @@ -118,14 +190,17 @@ impl CachedStateMetrics { // code cache self.code_cache_hits.set(0); self.code_cache_misses.set(0); + self.code_cache_collisions.set(0); // storage cache self.storage_cache_hits.set(0); self.storage_cache_misses.set(0); + self.storage_cache_collisions.set(0); // account cache self.account_cache_hits.set(0); self.account_cache_misses.set(0); + self.account_cache_collisions.set(0); } /// Returns a new zeroed-out instance of [`CachedStateMetrics`]. @@ -134,35 +209,135 @@ impl CachedStateMetrics { zeroed.reset(); zeroed } + + /// Records a new execution cache creation with its duration. 
+ pub(crate) fn record_cache_creation(&self, duration: Duration) { + self.execution_cache_created_total.increment(1); + self.execution_cache_creation_duration_seconds.record(duration.as_secs_f64()); + } } -impl AccountReader for CachedStateProvider { - fn basic_account(&self, address: &Address) -> ProviderResult> { - if let Some(res) = self.caches.account_cache.get(address) { - self.metrics.account_cache_hits.increment(1); - return Ok(res) - } +/// A stats handler for fixed-cache that tracks collisions and size. +/// +/// Note: Hits and misses are tracked directly by the [`CachedStateProvider`] via +/// [`CachedStateMetrics`], not here. The stats handler is used for: +/// - Collision detection (hash collisions causing eviction of a different key) +/// - Size tracking +/// +/// ## Size Tracking +/// +/// Size is tracked via `on_insert` and `on_remove` callbacks: +/// - `on_insert`: increment size only when inserting into an empty bucket (no eviction) +/// - `on_remove`: always decrement size +/// +/// Collisions (evicting a different key) don't change size since they replace an existing entry. +#[derive(Debug)] +pub(crate) struct CacheStatsHandler { + collisions: AtomicU64, + size: AtomicUsize, + capacity: usize, +} + +impl CacheStatsHandler { + /// Creates a new stats handler with all counters initialized to zero. + pub(crate) const fn new(capacity: usize) -> Self { + Self { collisions: AtomicU64::new(0), size: AtomicUsize::new(0), capacity } + } + + /// Returns the number of cache collisions. + pub(crate) fn collisions(&self) -> u64 { + self.collisions.load(Ordering::Relaxed) + } + + /// Returns the current size (number of entries). + pub(crate) fn size(&self) -> usize { + self.size.load(Ordering::Relaxed) + } + + /// Returns the capacity (maximum number of entries). + pub(crate) const fn capacity(&self) -> usize { + self.capacity + } + + /// Increments the size counter. Called on cache insert. 
+ pub(crate) fn increment_size(&self) { + let _ = self.size.fetch_add(1, Ordering::Relaxed); + } + + /// Decrements the size counter. Called on cache remove. + pub(crate) fn decrement_size(&self) { + let _ = self.size.fetch_sub(1, Ordering::Relaxed); + } + + /// Resets size to zero. Called on cache clear. + pub(crate) fn reset_size(&self) { + self.size.store(0, Ordering::Relaxed); + } + + /// Resets collision counter to zero (but not size). + pub(crate) fn reset_stats(&self) { + self.collisions.store(0, Ordering::Relaxed); + } +} + +impl StatsHandler for CacheStatsHandler { + fn on_hit(&self, _key: &K, _value: &V) {} + + fn on_miss(&self, _key: AnyRef<'_>) {} - self.metrics.account_cache_misses.increment(1); + fn on_insert(&self, key: &K, _value: &V, evicted: Option<(&K, &V)>) { + match evicted { + None => { + // Inserting into an empty bucket + self.increment_size(); + } + Some((evicted_key, _)) if evicted_key != key => { + // Collision: evicting a different key + self.collisions.fetch_add(1, Ordering::Relaxed); + } + Some(_) => { + // Updating the same key, size unchanged + } + } + } - let res = self.state_provider.basic_account(address)?; + fn on_remove(&self, _key: &K, _value: &V) { + self.decrement_size(); + } +} +impl AccountReader for CachedStateProvider { + fn basic_account(&self, address: &Address) -> ProviderResult> { if self.is_prewarm() { - self.caches.account_cache.insert(*address, res); + match self.caches.get_or_try_insert_account_with(*address, || { + self.state_provider.basic_account(address) + })? 
{ + CachedStatus::NotCached(value) => { + self.metrics.account_cache_misses.increment(1); + Ok(value) + } + CachedStatus::Cached(value) => { + self.metrics.account_cache_hits.increment(1); + Ok(value) + } + } + } else if let Some(account) = self.caches.account_cache.get(address) { + self.metrics.account_cache_hits.increment(1); + Ok(account) + } else { + self.metrics.account_cache_misses.increment(1); + self.state_provider.basic_account(address) } - Ok(res) } } -/// Represents the status of a storage slot in the cache. +/// Represents the status of a key in the cache. #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) enum SlotStatus { - /// The account's storage cache doesn't exist. - NotCached, - /// The storage slot exists in cache and is empty (value is zero). - Empty, - /// The storage slot exists in cache and has a specific non-zero value. - Value(StorageValue), +pub(crate) enum CachedStatus { + /// The key is not in the cache (or was invalidated). The value was recalculated. + NotCached(T), + /// The key exists in cache and has a specific value. + Cached(T), } impl StateProvider for CachedStateProvider { @@ -171,54 +346,55 @@ impl StateProvider for CachedStateProvider { account: Address, storage_key: StorageKey, ) -> ProviderResult> { - match self.caches.get_storage(&account, &storage_key) { - (SlotStatus::NotCached, maybe_cache) => { - let final_res = self.state_provider.storage(account, storage_key)?; - - if self.is_prewarm() { - let account_cache = maybe_cache.unwrap_or_default(); - account_cache.insert_storage(storage_key, final_res); - // we always need to insert the value to update the weights. 
- // Note: there exists a race when the storage cache did not exist yet and two - // consumers looking up the a storage value for this account for the first time, - // however we can assume that this will only happen for the very first - // (mostlikely the same) value, and don't expect that this - // will accidentally replace an account storage cache with - // additional values. - self.caches.insert_storage_cache(account, account_cache); + if self.is_prewarm() { + match self.caches.get_or_try_insert_storage_with(account, storage_key, || { + self.state_provider.storage(account, storage_key).map(Option::unwrap_or_default) + })? { + CachedStatus::NotCached(value) => { + self.metrics.storage_cache_misses.increment(1); + // The slot that was never written to is indistinguishable from a slot + // explicitly set to zero. We return `None` in both cases. + Ok(Some(value).filter(|v| !v.is_zero())) + } + CachedStatus::Cached(value) => { + self.metrics.storage_cache_hits.increment(1); + // The slot that was never written to is indistinguishable from a slot + // explicitly set to zero. We return `None` in both cases. 
+ Ok(Some(value).filter(|v| !v.is_zero())) } - - self.metrics.storage_cache_misses.increment(1); - Ok(final_res) - } - (SlotStatus::Empty, _) => { - self.metrics.storage_cache_hits.increment(1); - Ok(None) - } - (SlotStatus::Value(value), _) => { - self.metrics.storage_cache_hits.increment(1); - Ok(Some(value)) } + } else if let Some(value) = self.caches.storage_cache.get(&(account, storage_key)) { + self.metrics.storage_cache_hits.increment(1); + Ok(Some(value).filter(|v| !v.is_zero())) + } else { + self.metrics.storage_cache_misses.increment(1); + self.state_provider.storage(account, storage_key) } } } impl BytecodeReader for CachedStateProvider { fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { - if let Some(res) = self.caches.code_cache.get(code_hash) { - self.metrics.code_cache_hits.increment(1); - return Ok(res) - } - - self.metrics.code_cache_misses.increment(1); - - let final_res = self.state_provider.bytecode_by_hash(code_hash)?; - if self.is_prewarm() { - self.caches.code_cache.insert(*code_hash, final_res.clone()); + match self.caches.get_or_try_insert_code_with(*code_hash, || { + self.state_provider.bytecode_by_hash(code_hash) + })? { + CachedStatus::NotCached(code) => { + self.metrics.code_cache_misses.increment(1); + Ok(code) + } + CachedStatus::Cached(code) => { + self.metrics.code_cache_hits.increment(1); + Ok(code) + } + } + } else if let Some(code) = self.caches.code_cache.get(code_hash) { + self.metrics.code_cache_hits.increment(1); + Ok(code) + } else { + self.metrics.code_cache_misses.increment(1); + self.state_provider.bytecode_by_hash(code_hash) } - - Ok(final_res) } } @@ -291,18 +467,6 @@ impl StorageRootProvider for CachedStateProvider { self.state_provider.storage_proof(address, slot, hashed_storage) } - /// Generate a storage multiproof for multiple storage slots. 
- /// - /// A **storage multiproof** is a cryptographic proof that can verify the values - /// of multiple storage slots for a single account in a single verification step. - /// Instead of generating separate proofs for each slot (which would be inefficient), - /// a multiproof bundles the necessary trie nodes to prove all requested slots. - /// - /// ## How it works: - /// 1. Takes an account address and a list of storage slot keys - /// 2. Traverses the account's storage trie to collect proof nodes - /// 3. Returns a [`StorageMultiProof`] containing the minimal set of trie nodes needed to verify - /// all the requested storage slots fn storage_multiproof( &self, address: Address, @@ -338,89 +502,166 @@ impl HashedPostStateProvider for CachedStateProvider /// Optimizes state access by maintaining in-memory copies of frequently accessed /// accounts, storage slots, and bytecode. Works in conjunction with prewarming /// to reduce database I/O during block execution. +/// +/// ## Storage Invalidation +/// +/// Since EIP-6780, SELFDESTRUCT only works within the same transaction where the +/// contract was created, so we don't need to handle clearing the storage. #[derive(Debug, Clone)] pub(crate) struct ExecutionCache { /// Cache for contract bytecode, keyed by code hash. - code_cache: Cache>, + code_cache: Arc, FbBuildHasher<32>>>, - /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s - /// storage slots. - storage_cache: Cache>, + /// Flat storage cache: maps `(Address, StorageKey)` to storage value. + storage_cache: Arc>, /// Cache for basic account information (nonce, balance, code hash). - account_cache: Cache>, + account_cache: Arc, FbBuildHasher<20>>>, + + /// Stats handler for the code cache. + code_stats: Arc, + + /// Stats handler for the storage cache. + storage_stats: Arc, + + /// Stats handler for the account cache. 
+ account_stats: Arc, + + /// One-time notification when SELFDESTRUCT is encountered + selfdestruct_encountered: Arc, } impl ExecutionCache { - /// Get storage value from hierarchical cache. + /// Minimum cache size required when epochs are enabled. + /// With EPOCHS=true, fixed-cache requires 12 bottom bits to be zero (2 needed + 10 epoch). + const MIN_CACHE_SIZE_WITH_EPOCHS: usize = 1 << 12; // 4096 + + /// Converts a byte size to number of cache entries, rounding down to a power of two. /// - /// Returns a tuple of: - /// - `SlotStatus` indicating whether: - /// - `NotCached`: The account's storage cache doesn't exist - /// - `Empty`: The slot exists in the account's cache but is empty - /// - `Value`: The slot exists and has a specific value - /// - `Option>`: The account's storage cache if it exists - pub(crate) fn get_storage( + /// Fixed-cache requires power-of-two sizes for efficient indexing. + /// With epochs enabled, the minimum size is 4096 entries. + pub(crate) const fn bytes_to_entries(size_bytes: usize, entry_size: usize) -> usize { + let entries = size_bytes / entry_size; + // Round down to nearest power of two + let rounded = if entries == 0 { 1 } else { (entries + 1).next_power_of_two() >> 1 }; + // Ensure minimum size for epoch tracking + if rounded < Self::MIN_CACHE_SIZE_WITH_EPOCHS { + Self::MIN_CACHE_SIZE_WITH_EPOCHS + } else { + rounded + } + } + + /// Build an [`ExecutionCache`] struct, so that execution caches can be easily cloned. 
+ pub(crate) fn new(total_cache_size: usize) -> Self { + let storage_cache_size = (total_cache_size * 8888) / 10000; // 88.88% of total + let account_cache_size = (total_cache_size * 556) / 10000; // 5.56% of total + let code_cache_size = (total_cache_size * 556) / 10000; // 5.56% of total + + let code_capacity = Self::bytes_to_entries(code_cache_size, CODE_CACHE_ENTRY_SIZE); + let storage_capacity = Self::bytes_to_entries(storage_cache_size, STORAGE_CACHE_ENTRY_SIZE); + let account_capacity = Self::bytes_to_entries(account_cache_size, ACCOUNT_CACHE_ENTRY_SIZE); + + let code_stats = Arc::new(CacheStatsHandler::new(code_capacity)); + let storage_stats = Arc::new(CacheStatsHandler::new(storage_capacity)); + let account_stats = Arc::new(CacheStatsHandler::new(account_capacity)); + + Self { + code_cache: Arc::new( + FixedCache::new(code_capacity, FbBuildHasher::<32>::default()) + .with_stats(Some(Stats::new(code_stats.clone()))), + ), + storage_cache: Arc::new( + FixedCache::new(storage_capacity, DefaultHashBuilder::default()) + .with_stats(Some(Stats::new(storage_stats.clone()))), + ), + account_cache: Arc::new( + FixedCache::new(account_capacity, FbBuildHasher::<20>::default()) + .with_stats(Some(Stats::new(account_stats.clone()))), + ), + code_stats, + storage_stats, + account_stats, + selfdestruct_encountered: Arc::default(), + } + } + + /// Gets code from cache, or inserts using the provided function. 
+ pub(crate) fn get_or_try_insert_code_with( &self, - address: &Address, - key: &StorageKey, - ) -> (SlotStatus, Option>) { - match self.storage_cache.get(address) { - None => (SlotStatus::NotCached, None), - Some(account_cache) => { - let status = account_cache.get_storage(key); - (status, Some(account_cache)) - } + hash: B256, + f: impl FnOnce() -> Result, E>, + ) -> Result>, E> { + let mut miss = false; + let result = self.code_cache.get_or_try_insert_with(hash, |_| { + miss = true; + f() + })?; + + if miss { + Ok(CachedStatus::NotCached(result)) + } else { + Ok(CachedStatus::Cached(result)) } } - /// Insert storage value into hierarchical cache - #[cfg(test)] - pub(crate) fn insert_storage( + /// Gets storage from cache, or inserts using the provided function. + pub(crate) fn get_or_try_insert_storage_with( &self, address: Address, key: StorageKey, - value: Option, - ) { - self.insert_storage_bulk(address, [(key, value)]); + f: impl FnOnce() -> Result, + ) -> Result, E> { + let mut miss = false; + let result = self.storage_cache.get_or_try_insert_with((address, key), |_| { + miss = true; + f() + })?; + + if miss { + Ok(CachedStatus::NotCached(result)) + } else { + Ok(CachedStatus::Cached(result)) + } } - /// Insert multiple storage values into hierarchical cache for a single account - /// - /// This method is optimized for inserting multiple storage values for the same address - /// by doing the account cache lookup only once instead of for each key-value pair. - pub(crate) fn insert_storage_bulk(&self, address: Address, storage_entries: I) - where - I: IntoIterator)>, - { - let account_cache = self.storage_cache.get(&address).unwrap_or_default(); - - for (key, value) in storage_entries { - account_cache.insert_storage(key, value); + /// Gets account from cache, or inserts using the provided function. 
+ pub(crate) fn get_or_try_insert_account_with( + &self, + address: Address, + f: impl FnOnce() -> Result, E>, + ) -> Result>, E> { + let mut miss = false; + let result = self.account_cache.get_or_try_insert_with(address, |_| { + miss = true; + f() + })?; + + if miss { + Ok(CachedStatus::NotCached(result)) + } else { + Ok(CachedStatus::Cached(result)) } - - // Insert to the cache so that moka picks up on the changed size, even though the actual - // value (the Arc) is the same - self.storage_cache.insert(address, account_cache); } - /// Inserts the [`AccountStorageCache`]. - pub(crate) fn insert_storage_cache( + /// Insert storage value into cache. + pub(crate) fn insert_storage( &self, address: Address, - storage_cache: Arc, + key: StorageKey, + value: Option, ) { - self.storage_cache.insert(address, storage_cache); + self.storage_cache.insert((address, key), value.unwrap_or_default()); } - /// Invalidate storage for specific account - pub(crate) fn invalidate_account_storage(&self, address: &Address) { - self.storage_cache.invalidate(address); + /// Insert code into cache. + fn insert_code(&self, hash: B256, code: Option) { + self.code_cache.insert(hash, code); } - /// Returns the total number of storage slots cached across all accounts - pub(crate) fn total_storage_slots(&self) -> usize { - self.storage_cache.iter().map(|addr| addr.len()).sum() + /// Insert account into cache. + fn insert_account(&self, address: Address, account: Option) { + self.account_cache.insert(address, account); } /// Inserts the post-execution state changes into the cache. 
@@ -448,7 +689,7 @@ impl ExecutionCache { .entered(); // Insert bytecodes for (code_hash, bytecode) in &state_updates.contracts { - self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); + self.insert_code(*code_hash, Some(Bytecode(bytecode.clone()))); } drop(_enter); @@ -467,12 +708,31 @@ impl ExecutionCache { continue } - // If the account was destroyed, invalidate from the account / storage caches + // If the original account had code (was a contract), we must clear the entire cache + // because we can't efficiently invalidate all storage slots for a single address. + // This should only happen on pre-Dencun networks. + // + // If the original account had no code (was an EOA or a not yet deployed contract), we + // just remove the account from cache - no storage exists for it. if account.was_destroyed() { - // Invalidate the account cache entry if destroyed - self.account_cache.invalidate(addr); + let had_code = + account.original_info.as_ref().is_some_and(|info| !info.is_empty_code_hash()); + if had_code { + self.selfdestruct_encountered.call_once(|| { + warn!( + target: "engine::caching", + address = ?addr, + info = ?account.info, + original_info = ?account.original_info, + "Encountered an inter-transaction SELFDESTRUCT that reset the storage cache. Are you running a pre-Dencun network?" 
+ ); + }); + self.clear(); + return Ok(()) + } - self.invalidate_account_storage(addr); + self.account_cache.remove(addr); + self.account_stats.decrement_size(); continue } @@ -485,108 +745,47 @@ impl ExecutionCache { }; // Now we iterate over all storage and make updates to the cached storage values - // Use bulk insertion to optimize cache lookups - only lookup the account cache once - // instead of for each storage key - let storage_entries = account.storage.iter().map(|(storage_key, slot)| { - // We convert the storage key from U256 to B256 because that is how it's represented - // in the cache - ((*storage_key).into(), Some(slot.present_value)) - }); - self.insert_storage_bulk(*addr, storage_entries); + for (key, slot) in &account.storage { + self.insert_storage(*addr, (*key).into(), Some(slot.present_value)); + } // Insert will update if present, so we just use the new account info as the new value // for the account cache - self.account_cache.insert(*addr, Some(Account::from(account_info))); + self.insert_account(*addr, Some(Account::from(account_info))); } Ok(()) } -} - -/// A builder for [`ExecutionCache`]. -#[derive(Debug)] -pub(crate) struct ExecutionCacheBuilder { - /// Code cache entries - code_cache_entries: u64, - - /// Storage cache entries - storage_cache_entries: u64, - - /// Account cache entries - account_cache_entries: u64, -} - -impl ExecutionCacheBuilder { - /// Build an [`ExecutionCache`] struct, so that execution caches can be easily cloned. 
- pub(crate) fn build_caches(self, total_cache_size: u64) -> ExecutionCache { - let storage_cache_size = (total_cache_size * 8888) / 10000; // 88.88% of total - let account_cache_size = (total_cache_size * 556) / 10000; // 5.56% of total - let code_cache_size = (total_cache_size * 556) / 10000; // 5.56% of total - const EXPIRY_TIME: Duration = Duration::from_secs(7200); // 2 hours - const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour - - let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &Address, value: &Arc| -> u32 { - // values based on results from measure_storage_cache_overhead test - let base_weight = 39_000; - let slots_weight = value.len() * 218; - (base_weight + slots_weight) as u32 - }) - .max_capacity(storage_cache_size) - .time_to_live(EXPIRY_TIME) - .time_to_idle(TIME_TO_IDLE) - .build_with_hasher(DefaultHashBuilder::default()); - - let account_cache = CacheBuilder::new(self.account_cache_entries) - .weigher(|_key: &Address, value: &Option| -> u32 { - // Account has a fixed size (none, balance,code_hash) - 20 + size_of_val(value) as u32 - }) - .max_capacity(account_cache_size) - .time_to_live(EXPIRY_TIME) - .time_to_idle(TIME_TO_IDLE) - .build_with_hasher(DefaultHashBuilder::default()); - - let code_cache = CacheBuilder::new(self.code_cache_entries) - .weigher(|_key: &B256, value: &Option| -> u32 { - let code_size = match value { - Some(bytecode) => { - // base weight + actual (padded) bytecode size + size of the jump table - (size_of_val(value) + - bytecode.bytecode().len() + - bytecode - .legacy_jump_table() - .map(|table| table.as_slice().len()) - .unwrap_or_default()) as u32 - } - None => size_of_val(value) as u32, - }; - 32 + code_size - }) - .max_capacity(code_cache_size) - .time_to_live(EXPIRY_TIME) - .time_to_idle(TIME_TO_IDLE) - .build_with_hasher(DefaultHashBuilder::default()); - - ExecutionCache { code_cache, storage_cache, account_cache } + /// Clears storage and account caches, resetting 
them to empty state.
+    ///
+    /// We do not clear the bytecodes cache, because its mapping can never change, as it's
+    /// `keccak256(bytecode) => bytecode`.
+    pub(crate) fn clear(&self) {
+        self.storage_cache.clear();
+        self.account_cache.clear();
+
+        self.storage_stats.reset_size();
+        self.account_stats.reset_size();
     }
-}
 
-impl Default for ExecutionCacheBuilder {
-    fn default() -> Self {
-        // With weigher and max_capacity in place, these numbers represent
-        // the maximum number of entries that can be stored, not the actual
-        // memory usage which is controlled by max_capacity.
-        //
-        // Code cache: up to 10M entries but limited to 0.5GB
-        // Storage cache: up to 10M accounts but limited to 8GB
-        // Account cache: up to 10M accounts but limited to 0.5GB
-        Self {
-            code_cache_entries: 10_000_000,
-            storage_cache_entries: 10_000_000,
-            account_cache_entries: 10_000_000,
-        }
+    /// Updates the provided metrics with the current stats from the cache's stats handlers,
+    /// and resets the collision counters (hit/miss gauges are tracked by the provider itself).
+    pub(crate) fn update_metrics(&self, metrics: &CachedStateMetrics) {
+        metrics.code_cache_size.set(self.code_stats.size() as f64);
+        metrics.code_cache_capacity.set(self.code_stats.capacity() as f64);
+        metrics.code_cache_collisions.set(self.code_stats.collisions() as f64);
+        self.code_stats.reset_stats();
+
+        metrics.storage_cache_size.set(self.storage_stats.size() as f64);
+        metrics.storage_cache_capacity.set(self.storage_stats.capacity() as f64);
+        metrics.storage_cache_collisions.set(self.storage_stats.collisions() as f64);
+        self.storage_stats.reset_stats();
+
+        metrics.account_cache_size.set(self.account_stats.size() as f64);
+        metrics.account_cache_capacity.set(self.account_stats.capacity() as f64);
+        metrics.account_cache_collisions.set(self.account_stats.collisions() as f64);
+        self.account_stats.reset_stats();
     }
 }
 
@@ -600,7 +799,7 @@ pub(crate) struct SavedCache {
     /// The caches used for the provider.
caches: ExecutionCache,
 
-    /// Metrics for the cached state provider
+    /// Metrics for the cached state provider (includes size/capacity/collisions from fixed-cache)
     metrics: CachedStateMetrics,
 
     /// A guard to track in-flight usage of this cache.
@@ -653,17 +852,20 @@ impl SavedCache {
         &self.metrics
     }
 
-    /// Updates the metrics for the [`ExecutionCache`].
+    /// Updates the cache metrics (size/capacity/collisions) from the stats handlers.
     ///
-    /// Note: This can be expensive with large cached state as it iterates over
-    /// all storage entries. Use `with_disable_cache_metrics(true)` to skip.
+    /// Note: This only reads atomic counters from the stats handlers, so it is cheap;
+    /// `with_disable_cache_metrics(true)` skips it entirely.
     pub(crate) fn update_metrics(&self) {
         if self.disable_cache_metrics {
-            return;
+            return
         }
-        self.metrics.storage_cache_size.set(self.caches.total_storage_slots() as f64);
-        self.metrics.account_cache_size.set(self.caches.account_cache.entry_count() as f64);
-        self.metrics.code_cache_size.set(self.caches.code_cache.entry_count() as f64);
+        self.caches.update_metrics(&self.metrics);
+    }
+
+    /// Clears all caches, resetting them to empty state.
+    pub(crate) fn clear(&self) {
+        self.caches.clear();
     }
 }
 
@@ -674,174 +876,27 @@ impl SavedCache {
     }
 }
 
-/// Cache for an individual account's storage slots.
-///
-/// This represents the second level of the hierarchical storage cache.
-/// Each account gets its own `AccountStorageCache` to store accessed storage slots.
-#[derive(Debug, Clone)]
-pub(crate) struct AccountStorageCache {
-    /// Map of storage keys to their cached values.
-    slots: Cache>,
-}
-
-impl AccountStorageCache {
-    /// Create a new [`AccountStorageCache`]
-    pub(crate) fn new(max_slots: u64) -> Self {
-        Self {
-            slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()),
-        }
-    }
-
-    /// Get a storage value from this account's cache.
- /// - `NotCached`: The slot is not in the cache - /// - `Empty`: The slot is empty - /// - `Value`: The slot has a specific value - pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus { - match self.slots.get(key) { - None => SlotStatus::NotCached, - Some(None) => SlotStatus::Empty, - Some(Some(value)) => SlotStatus::Value(value), - } - } - - /// Insert a storage value - pub(crate) fn insert_storage(&self, key: StorageKey, value: Option) { - self.slots.insert(key, value); - } - - /// Returns the number of slots in the cache - pub(crate) fn len(&self) -> usize { - self.slots.entry_count() as usize - } -} - -impl Default for AccountStorageCache { - fn default() -> Self { - // With weigher and max_capacity in place, this number represents - // the maximum number of entries that can be stored, not the actual - // memory usage which is controlled by storage cache's max_capacity. - Self::new(1_000_000) - } -} - #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{B256, U256}; - use rand::Rng; + use alloy_primitives::{map::HashMap, U256}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; - use std::mem::size_of; - - mod tracking_allocator { - use std::{ - alloc::{GlobalAlloc, Layout, System}, - sync::atomic::{AtomicUsize, Ordering}, - }; - - #[derive(Debug)] - pub(crate) struct TrackingAllocator { - allocated: AtomicUsize, - total_allocated: AtomicUsize, - inner: System, - } - - impl TrackingAllocator { - pub(crate) const fn new() -> Self { - Self { - allocated: AtomicUsize::new(0), - total_allocated: AtomicUsize::new(0), - inner: System, - } - } - - pub(crate) fn reset(&self) { - self.allocated.store(0, Ordering::SeqCst); - self.total_allocated.store(0, Ordering::SeqCst); - } - - pub(crate) fn total_allocated(&self) -> usize { - self.total_allocated.load(Ordering::SeqCst) - } - } - - unsafe impl GlobalAlloc for TrackingAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let ret = unsafe { self.inner.alloc(layout) 
}; - if !ret.is_null() { - self.allocated.fetch_add(layout.size(), Ordering::SeqCst); - self.total_allocated.fetch_add(layout.size(), Ordering::SeqCst); - } - ret - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - self.allocated.fetch_sub(layout.size(), Ordering::SeqCst); - unsafe { self.inner.dealloc(ptr, layout) } - } - } - } - - use tracking_allocator::TrackingAllocator; - - #[global_allocator] - static ALLOCATOR: TrackingAllocator = TrackingAllocator::new(); - - fn measure_allocation(f: F) -> (usize, T) - where - F: FnOnce() -> T, - { - ALLOCATOR.reset(); - let result = f(); - let total = ALLOCATOR.total_allocated(); - (total, result) - } - - #[test] - fn measure_storage_cache_overhead() { - let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000)); - println!("Base AccountStorageCache overhead: {base_overhead} bytes"); - let mut rng = rand::rng(); - - let key = StorageKey::random(); - let value = StorageValue::from(rng.random::()); - let (first_slot, _) = measure_allocation(|| { - cache.insert_storage(key, Some(value)); - }); - println!("First slot insertion overhead: {first_slot} bytes"); - - const TOTAL_SLOTS: usize = 10_000; - let (test_slots, _) = measure_allocation(|| { - for _ in 0..TOTAL_SLOTS { - let key = StorageKey::random(); - let value = StorageValue::from(rng.random::()); - cache.insert_storage(key, Some(value)); - } - }); - println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS); - - println!("\nTheoretical sizes:"); - println!("StorageKey size: {} bytes", size_of::()); - println!("StorageValue size: {} bytes", size_of::()); - println!("Option size: {} bytes", size_of::>()); - println!("Option size: {} bytes", size_of::>()); - } + use reth_revm::db::{AccountStatus, BundleAccount}; + use revm_state::AccountInfo; #[test] fn test_empty_storage_cached_state_provider() { - // make sure when we have an empty value in storage, we return `Empty` and not `NotCached` let address = 
Address::random(); let storage_key = StorageKey::random(); let account = ExtendedAccount::new(0, U256::ZERO); - // note there is no storage here let provider = MockEthProvider::default(); provider.extend_accounts(vec![(address, account)]); - let caches = ExecutionCacheBuilder::default().build_caches(1000); + let caches = ExecutionCache::new(1000); let state_provider = CachedStateProvider::new(provider, caches, CachedStateMetrics::zeroed()); - // check that the storage is empty let res = state_provider.storage(address, storage_key); assert!(res.is_ok()); assert_eq!(res.unwrap(), None); @@ -849,22 +904,19 @@ mod tests { #[test] fn test_uncached_storage_cached_state_provider() { - // make sure when we have something uncached, we get the cached value let address = Address::random(); let storage_key = StorageKey::random(); let storage_value = U256::from(1); let account = ExtendedAccount::new(0, U256::ZERO).extend_storage(vec![(storage_key, storage_value)]); - // note that we extend storage here with one value let provider = MockEthProvider::default(); provider.extend_accounts(vec![(address, account)]); - let caches = ExecutionCacheBuilder::default().build_caches(1000); + let caches = ExecutionCache::new(1000); let state_provider = CachedStateProvider::new(provider, caches, CachedStateMetrics::zeroed()); - // check that the storage returns the expected value let res = state_provider.storage(address, storage_key); assert!(res.is_ok()); assert_eq!(res.unwrap(), Some(storage_value)); @@ -872,88 +924,191 @@ mod tests { #[test] fn test_get_storage_populated() { - // make sure when we have something cached, we get the cached value in the `SlotStatus` let address = Address::random(); let storage_key = StorageKey::random(); let storage_value = U256::from(1); - // insert into caches directly - let caches = ExecutionCacheBuilder::default().build_caches(1000); + let caches = ExecutionCache::new(1000); caches.insert_storage(address, storage_key, Some(storage_value)); - // check that 
the storage returns the cached value - let (slot_status, _) = caches.get_storage(&address, &storage_key); - assert_eq!(slot_status, SlotStatus::Value(storage_value)); - } - - #[test] - fn test_get_storage_not_cached() { - // make sure when we have nothing cached, we get the `NotCached` value in the `SlotStatus` - let storage_key = StorageKey::random(); - let address = Address::random(); - - // just create empty caches - let caches = ExecutionCacheBuilder::default().build_caches(1000); - - // check that the storage is not cached - let (slot_status, _) = caches.get_storage(&address, &storage_key); - assert_eq!(slot_status, SlotStatus::NotCached); + let result = caches + .get_or_try_insert_storage_with(address, storage_key, || Ok::<_, ()>(U256::from(999))); + assert_eq!(result.unwrap(), CachedStatus::Cached(storage_value)); } #[test] fn test_get_storage_empty() { - // make sure when we insert an empty value to the cache, we get the `Empty` value in the - // `SlotStatus` let address = Address::random(); let storage_key = StorageKey::random(); - // insert into caches directly - let caches = ExecutionCacheBuilder::default().build_caches(1000); + let caches = ExecutionCache::new(1000); caches.insert_storage(address, storage_key, None); - // check that the storage is empty - let (slot_status, _) = caches.get_storage(&address, &storage_key); - assert_eq!(slot_status, SlotStatus::Empty); + let result = caches + .get_or_try_insert_storage_with(address, storage_key, || Ok::<_, ()>(U256::from(999))); + assert_eq!(result.unwrap(), CachedStatus::Cached(U256::ZERO)); } - // Tests for SavedCache locking mechanism #[test] fn test_saved_cache_is_available() { - let execution_cache = ExecutionCacheBuilder::default().build_caches(1000); + let execution_cache = ExecutionCache::new(1000); let cache = SavedCache::new(B256::ZERO, execution_cache, CachedStateMetrics::zeroed()); - // Initially, the cache should be available (only one reference) assert!(cache.is_available(), "Cache should be 
available initially"); - // Clone the usage guard (simulating it being handed out) let _guard = cache.clone_guard_for_test(); - // Now the cache should not be available (two references) assert!(!cache.is_available(), "Cache should not be available with active guard"); } #[test] fn test_saved_cache_multiple_references() { - let execution_cache = ExecutionCacheBuilder::default().build_caches(1000); + let execution_cache = ExecutionCache::new(1000); let cache = SavedCache::new(B256::from([2u8; 32]), execution_cache, CachedStateMetrics::zeroed()); - // Create multiple references to the usage guard let guard1 = cache.clone_guard_for_test(); let guard2 = cache.clone_guard_for_test(); let guard3 = guard1.clone(); - // Cache should not be available with multiple guards assert!(!cache.is_available()); - // Drop guards one by one drop(guard1); - assert!(!cache.is_available()); // Still not available + assert!(!cache.is_available()); drop(guard2); - assert!(!cache.is_available()); // Still not available + assert!(!cache.is_available()); drop(guard3); - assert!(cache.is_available()); // Now available + assert!(cache.is_available()); + } + + #[test] + fn test_insert_state_destroyed_account_with_code_clears_cache() { + let caches = ExecutionCache::new(1000); + + // Pre-populate caches with some data + let addr1 = Address::random(); + let addr2 = Address::random(); + let storage_key = StorageKey::random(); + caches.insert_account(addr1, Some(Account::default())); + caches.insert_account(addr2, Some(Account::default())); + caches.insert_storage(addr1, storage_key, Some(U256::from(42))); + + // Verify caches are populated + assert!(caches.account_cache.get(&addr1).is_some()); + assert!(caches.account_cache.get(&addr2).is_some()); + assert!(caches.storage_cache.get(&(addr1, storage_key)).is_some()); + + let bundle = BundleState { + // BundleState with a destroyed contract (had code) + state: HashMap::from_iter([( + Address::random(), + BundleAccount::new( + Some(AccountInfo { + 
balance: U256::ZERO, + nonce: 1, + code_hash: B256::random(), // Non-empty code hash + code: None, + account_id: None, + }), + None, // Destroyed, so no current info + Default::default(), + AccountStatus::Destroyed, + ), + )]), + contracts: Default::default(), + reverts: Default::default(), + state_size: 0, + reverts_size: 0, + }; + + // Insert state should clear all caches because a contract was destroyed + let result = caches.insert_state(&bundle); + assert!(result.is_ok()); + + // Verify all caches were cleared + assert!(caches.account_cache.get(&addr1).is_none()); + assert!(caches.account_cache.get(&addr2).is_none()); + assert!(caches.storage_cache.get(&(addr1, storage_key)).is_none()); + } + + #[test] + fn test_insert_state_destroyed_account_without_code_removes_only_account() { + let caches = ExecutionCache::new(1000); + + // Pre-populate caches with some data + let addr1 = Address::random(); + let addr2 = Address::random(); + let storage_key = StorageKey::random(); + caches.insert_account(addr1, Some(Account::default())); + caches.insert_account(addr2, Some(Account::default())); + caches.insert_storage(addr1, storage_key, Some(U256::from(42))); + + let bundle = BundleState { + // BundleState with a destroyed EOA (no code) + state: HashMap::from_iter([( + addr1, + BundleAccount::new( + Some(AccountInfo { + balance: U256::from(100), + nonce: 1, + code_hash: alloy_primitives::KECCAK256_EMPTY, // Empty code hash = EOA + code: None, + account_id: None, + }), + None, // Destroyed + Default::default(), + AccountStatus::Destroyed, + ), + )]), + contracts: Default::default(), + reverts: Default::default(), + state_size: 0, + reverts_size: 0, + }; + + // Insert state should only remove the destroyed account + assert!(caches.insert_state(&bundle).is_ok()); + + // Verify only addr1 was removed, other data is still present + assert!(caches.account_cache.get(&addr1).is_none()); + assert!(caches.account_cache.get(&addr2).is_some()); + 
assert!(caches.storage_cache.get(&(addr1, storage_key)).is_some()); + } + + #[test] + fn test_insert_state_destroyed_account_no_original_info_removes_only_account() { + let caches = ExecutionCache::new(1000); + + // Pre-populate caches + let addr1 = Address::random(); + let addr2 = Address::random(); + caches.insert_account(addr1, Some(Account::default())); + caches.insert_account(addr2, Some(Account::default())); + + let bundle = BundleState { + // BundleState with a destroyed account (has no original info) + state: HashMap::from_iter([( + addr1, + BundleAccount::new( + None, // No original info + None, // Destroyed + Default::default(), + AccountStatus::Destroyed, + ), + )]), + contracts: Default::default(), + reverts: Default::default(), + state_size: 0, + reverts_size: 0, + }; + + // Insert state should only remove the destroyed account (no code = no full clear) + assert!(caches.insert_state(&bundle).is_ok()); + + // Verify only addr1 was removed + assert!(caches.account_cache.get(&addr1).is_none()); + assert!(caches.account_cache.get(&addr2).is_some()); } } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 23da7c23cc4..24af9148731 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -2,10 +2,7 @@ use super::precompile_cache::PrecompileCacheMap; use crate::tree::{ - cached_state::{ - CachedStateMetrics, CachedStateProvider, ExecutionCache as StateExecutionCache, - ExecutionCacheBuilder, SavedCache, - }, + cached_state::{CachedStateMetrics, CachedStateProvider, ExecutionCache, SavedCache}, payload_processor::{ prewarm::{PrewarmCacheTask, PrewarmContext, PrewarmMode, PrewarmTaskEvent}, sparse_trie::StateRootComputeOutcome, @@ -116,11 +113,11 @@ where /// The executor used by to spawn tasks. executor: WorkloadExecutor, /// The most recent cache used for execution. 
- execution_cache: ExecutionCache, + execution_cache: PayloadExecutionCache, /// Metrics for trie operations trie_metrics: MultiProofTaskMetrics, /// Cross-block cache size in bytes. - cross_block_cache_size: u64, + cross_block_cache_size: usize, /// Whether transactions should not be executed on prewarming task. disable_transaction_prewarming: bool, /// Whether state cache should be disable @@ -313,7 +310,7 @@ where // Build a state provider for the multiproof task let provider = provider_builder.build().expect("failed to build provider"); let provider = if let Some(saved_cache) = saved_cache { - let (cache, metrics, _) = saved_cache.split(); + let (cache, metrics, _disable_metrics) = saved_cache.split(); Box::new(CachedStateProvider::new(provider, cache, metrics)) as Box } else { @@ -495,8 +492,11 @@ where cache } else { debug!("creating new execution cache on cache miss"); - let cache = ExecutionCacheBuilder::default().build_caches(self.cross_block_cache_size); - SavedCache::new(parent_hash, cache, CachedStateMetrics::zeroed()) + let start = Instant::now(); + let cache = ExecutionCache::new(self.cross_block_cache_size); + let metrics = CachedStateMetrics::zeroed(); + metrics.record_cache_creation(start.elapsed()); + SavedCache::new(parent_hash, cache, metrics) .with_disable_cache_metrics(self.disable_cache_metrics) } } @@ -587,28 +587,27 @@ where parent_hash = %block_with_parent.parent, "Cannot find cache for parent hash, skip updating cache with new state for inserted executed block", ); - return; + return } // Take existing cache (if any) or create fresh caches - let (caches, cache_metrics) = match cached.take() { - Some(existing) => { - let (c, m, _) = existing.split(); - (c, m) - } + let (caches, cache_metrics, _) = match cached.take() { + Some(existing) => existing.split(), None => ( - ExecutionCacheBuilder::default().build_caches(self.cross_block_cache_size), + ExecutionCache::new(self.cross_block_cache_size), CachedStateMetrics::zeroed(), + false, ), }; 
// Insert the block's bundle state into cache - let new_cache = SavedCache::new(block_with_parent.block.hash, caches, cache_metrics) - .with_disable_cache_metrics(disable_cache_metrics); + let new_cache = + SavedCache::new(block_with_parent.block.hash, caches, cache_metrics) + .with_disable_cache_metrics(disable_cache_metrics); if new_cache.cache().insert_state(bundle_state).is_err() { *cached = None; debug!(target: "engine::caching", "cleared execution cache on update error"); - return; + return } new_cache.update_metrics(); @@ -672,7 +671,7 @@ impl PayloadHandle { } /// Returns a clone of the caches used by prewarming - pub(super) fn caches(&self) -> Option { + pub(super) fn caches(&self) -> Option { self.prewarm_handle.saved_cache.as_ref().map(|cache| cache.cache().clone()) } @@ -776,29 +775,29 @@ impl Drop for CacheTaskHandle { /// ## Cache Safety /// /// **CRITICAL**: Cache update operations require exclusive access. All concurrent cache users -/// (such as prewarming tasks) must be terminated before calling `update_with_guard`, otherwise -/// the cache may be corrupted or cleared. +/// (such as prewarming tasks) must be terminated before calling +/// [`PayloadExecutionCache::update_with_guard`], otherwise the cache may be corrupted or cleared. /// /// ## Cache vs Prewarming Distinction /// -/// **`ExecutionCache`**: +/// **[`PayloadExecutionCache`]**: /// - Stores parent block's execution state after completion /// - Used to fetch parent data for next block's execution /// - Must be exclusively accessed during save operations /// -/// **`PrewarmCacheTask`**: +/// **[`PrewarmCacheTask`]**: /// - Speculatively loads accounts/storage that might be used in transaction execution /// - Prepares data for state root proof computation /// - Runs concurrently but must not interfere with cache saves #[derive(Clone, Debug, Default)] -struct ExecutionCache { +struct PayloadExecutionCache { /// Guarded cloneable cache identified by a block hash. 
inner: Arc>>, /// Metrics for cache operations. metrics: ExecutionCacheMetrics, } -impl ExecutionCache { +impl PayloadExecutionCache { /// Returns the cache for `parent_hash` if it's available for use. /// /// A cache is considered available when: @@ -834,11 +833,15 @@ impl ExecutionCache { "Existing cache found" ); - if hash_matches && available { - return Some(c.clone()); - } - - if hash_matches && !available { + if available { + // If the has is available (no other threads are using it), but has a mismatching + // parent hash, we can just clear it and keep using without re-creating from + // scratch. + if !hash_matches { + c.clear(); + } + return Some(c.clone()) + } else if hash_matches { self.metrics.execution_cache_in_use.increment(1); } } else { @@ -911,9 +914,9 @@ where #[cfg(test)] mod tests { - use super::ExecutionCache; + use super::PayloadExecutionCache; use crate::tree::{ - cached_state::{CachedStateMetrics, ExecutionCacheBuilder, SavedCache}, + cached_state::{CachedStateMetrics, ExecutionCache, SavedCache}, payload_processor::{ evm_state_to_hashed_post_state, executor::WorkloadExecutor, PayloadProcessor, }, @@ -943,13 +946,13 @@ mod tests { use std::sync::Arc; fn make_saved_cache(hash: B256) -> SavedCache { - let execution_cache = ExecutionCacheBuilder::default().build_caches(1_000); + let execution_cache = ExecutionCache::new(1_000); SavedCache::new(hash, execution_cache, CachedStateMetrics::zeroed()) } #[test] fn execution_cache_allows_single_checkout() { - let execution_cache = ExecutionCache::default(); + let execution_cache = PayloadExecutionCache::default(); let hash = B256::from([1u8; 32]); execution_cache.update_with_guard(|slot| *slot = Some(make_saved_cache(hash))); @@ -968,7 +971,7 @@ mod tests { #[test] fn execution_cache_checkout_releases_on_drop() { - let execution_cache = ExecutionCache::default(); + let execution_cache = PayloadExecutionCache::default(); let hash = B256::from([2u8; 32]); execution_cache.update_with_guard(|slot| *slot = 
Some(make_saved_cache(hash))); @@ -984,19 +987,21 @@ mod tests { } #[test] - fn execution_cache_mismatch_parent_returns_none() { - let execution_cache = ExecutionCache::default(); + fn execution_cache_mismatch_parent_clears_and_returns() { + let execution_cache = PayloadExecutionCache::default(); let hash = B256::from([3u8; 32]); execution_cache.update_with_guard(|slot| *slot = Some(make_saved_cache(hash))); - let miss = execution_cache.get_cache_for(B256::from([4u8; 32])); - assert!(miss.is_none(), "checkout should fail for different parent hash"); + // When the parent hash doesn't match, the cache is cleared and returned for reuse + let different_hash = B256::from([4u8; 32]); + let cache = execution_cache.get_cache_for(different_hash); + assert!(cache.is_some(), "cache should be returned for reuse after clearing") } #[test] fn execution_cache_update_after_release_succeeds() { - let execution_cache = ExecutionCache::default(); + let execution_cache = PayloadExecutionCache::default(); let initial = B256::from([5u8; 32]); execution_cache.update_with_guard(|slot| *slot = Some(make_saved_cache(initial))); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 12514e2dc9a..472ea08a6c6 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1516,8 +1516,9 @@ where #[cfg(test)] mod tests { + use crate::tree::cached_state::CachedStateProvider; + use super::*; - use crate::tree::cached_state::{CachedStateProvider, ExecutionCacheBuilder}; use alloy_eip7928::{AccountChanges, BalanceChange}; use alloy_primitives::Address; use reth_provider::{ @@ -1577,7 +1578,7 @@ mod tests { { let db_provider = factory.database_provider_ro().unwrap(); let state_provider: StateProviderBox = Box::new(LatestStateProvider::new(db_provider)); - let cache = ExecutionCacheBuilder::default().build_caches(1000); + let cache = 
crate::tree::cached_state::ExecutionCache::new(1000); CachedStateProvider::new(state_provider, cache, Default::default()) } diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 5c782ed1f50..e68342112a3 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -17,7 +17,7 @@ use crate::tree::{ bal::{total_slots, BALSlotIter}, executor::WorkloadExecutor, multiproof::{MultiProofMessage, VersionedMultiProofTargets}, - ExecutionCache as PayloadExecutionCache, + PayloadExecutionCache, }, precompile_cache::{CachedPrecompile, PrecompileCacheMap}, ExecutionEnv, StateProviderBuilder, diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index d7c320fc52d..bd16e3b359b 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -24,7 +24,7 @@ pub struct DefaultEngineValues { prewarming_disabled: bool, parallel_sparse_trie_disabled: bool, state_provider_metrics: bool, - cross_block_cache_size: u64, + cross_block_cache_size: usize, state_root_task_compare_updates: bool, accept_execution_requests_hash: bool, multiproof_chunking_enabled: bool, @@ -94,7 +94,7 @@ impl DefaultEngineValues { } /// Set the default cross-block cache size in MB - pub const fn with_cross_block_cache_size(mut self, v: u64) -> Self { + pub const fn with_cross_block_cache_size(mut self, v: usize) -> Self { self.cross_block_cache_size = v; self } @@ -262,7 +262,7 @@ pub struct EngineArgs { /// Configure the size of cross-block cache in megabytes #[arg(long = "engine.cross-block-cache-size", default_value_t = DefaultEngineValues::get_global().cross_block_cache_size)] - pub cross_block_cache_size: u64, + pub cross_block_cache_size: usize, /// Enable comparing trie updates from the state root task to the trie updates from the regular /// state root calculation. 
diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 225c957c1cf..5d4d8cfe52c 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -39,7 +39,7 @@ pub use reth_engine_primitives::{ }; /// Default size of cross-block cache in megabytes. -pub const DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB: u64 = 4 * 1024; +pub const DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB: usize = 4 * 1024; /// This includes all necessary configuration to launch the node. /// The individual configuration options can be overwritten before launching the node. From dd0c6d279fe88a0df364362b6bcc8725d88b4ead Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 23 Jan 2026 10:09:19 -0800 Subject: [PATCH 179/267] revert: perf(trie): parallelize merge_ancestors_into_overlay (#21202) (#21370) --- crates/chain-state/src/deferred_trie.rs | 49 +------------------------ crates/trie/common/src/hashed_state.rs | 32 +--------------- crates/trie/common/src/updates.rs | 31 ---------------- 3 files changed, 3 insertions(+), 109 deletions(-) diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs index 9755b54b99d..1b4a3d43a35 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -243,53 +243,8 @@ impl DeferredTrieData { /// In normal operation, the parent always has a cached overlay and this /// function is never called. /// - /// When the `rayon` feature is enabled, uses parallel collection and merge: - /// 1. Collects ancestor data in parallel (each `wait_cloned()` may compute) - /// 2. Merges hashed state and trie updates in parallel with each other - /// 3. 
Uses tree reduction within each merge for O(log n) depth - #[cfg(feature = "rayon")] - fn merge_ancestors_into_overlay( - ancestors: &[Self], - sorted_hashed_state: &HashedPostStateSorted, - sorted_trie_updates: &TrieUpdatesSorted, - ) -> TrieInputSorted { - // Early exit: no ancestors means just wrap current block's data - if ancestors.is_empty() { - return TrieInputSorted::new( - Arc::new(sorted_trie_updates.clone()), - Arc::new(sorted_hashed_state.clone()), - Default::default(), - ); - } - - // Collect ancestor data, unzipping states and updates into Arc slices - let (states, updates): (Vec<_>, Vec<_>) = ancestors - .iter() - .map(|a| { - let data = a.wait_cloned(); - (data.hashed_state, data.trie_updates) - }) - .unzip(); - - // Merge state and nodes in parallel with each other using tree reduction - let (state, nodes) = rayon::join( - || { - let mut merged = HashedPostStateSorted::merge_parallel(&states); - merged.extend_ref_and_sort(sorted_hashed_state); - merged - }, - || { - let mut merged = TrieUpdatesSorted::merge_parallel(&updates); - merged.extend_ref_and_sort(sorted_trie_updates); - merged - }, - ); - - TrieInputSorted::new(Arc::new(nodes), Arc::new(state), Default::default()) - } - - /// Merge all ancestors and current block's data into a single overlay (sequential fallback). - #[cfg(not(feature = "rayon"))] + /// Iterates ancestors oldest -> newest, then extends with current block's data, + /// so later state takes precedence. 
fn merge_ancestors_into_overlay( ancestors: &[Self], sorted_hashed_state: &HashedPostStateSorted, diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 3273e65829b..315bda49a45 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -6,7 +6,7 @@ use crate::{ utils::{extend_sorted_vec, kway_merge_sorted}, KeyHasher, MultiProofTargets, Nibbles, }; -use alloc::{borrow::Cow, sync::Arc, vec::Vec}; +use alloc::{borrow::Cow, vec::Vec}; use alloy_primitives::{ keccak256, map::{hash_map, B256Map, HashMap, HashSet}, @@ -710,36 +710,6 @@ impl HashedPostStateSorted { self.accounts.clear(); self.storages.clear(); } - - /// Parallel batch-merge sorted hashed post states. Slice is **oldest to newest**. - /// - /// This is more efficient than sequential `extend_ref` calls when merging many states, - /// as it processes all states in parallel with tree reduction using divide-and-conquer. - #[cfg(feature = "rayon")] - pub fn merge_parallel(states: &[Arc]) -> Self { - fn parallel_merge_tree(states: &[Arc]) -> HashedPostStateSorted { - match states.len() { - 0 => HashedPostStateSorted::default(), - 1 => states[0].as_ref().clone(), - 2 => { - let mut acc = states[0].as_ref().clone(); - acc.extend_ref_and_sort(&states[1]); - acc - } - n => { - let mid = n / 2; - let (mut left, right) = rayon::join( - || parallel_merge_tree(&states[..mid]), - || parallel_merge_tree(&states[mid..]), - ); - left.extend_ref_and_sort(&right); - left - } - } - } - - parallel_merge_tree(states) - } } impl AsRef for HashedPostStateSorted { diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 0155e0e4846..26985108089 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -4,7 +4,6 @@ use crate::{ }; use alloc::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - sync::Arc, vec::Vec, }; use alloy_primitives::{ @@ -698,36 +697,6 @@ impl 
TrieUpdatesSorted { Self { account_nodes, storage_tries }.into() } - - /// Parallel batch-merge sorted trie updates. Slice is **oldest to newest**. - /// - /// This is more efficient than sequential `extend_ref` calls when merging many updates, - /// as it processes all updates in parallel with tree reduction using divide-and-conquer. - #[cfg(feature = "rayon")] - pub fn merge_parallel(updates: &[Arc]) -> Self { - fn parallel_merge_tree(updates: &[Arc]) -> TrieUpdatesSorted { - match updates.len() { - 0 => TrieUpdatesSorted::default(), - 1 => updates[0].as_ref().clone(), - 2 => { - let mut acc = updates[0].as_ref().clone(); - acc.extend_ref_and_sort(&updates[1]); - acc - } - n => { - let mid = n / 2; - let (mut left, right) = rayon::join( - || parallel_merge_tree(&updates[..mid]), - || parallel_merge_tree(&updates[mid..]), - ); - left.extend_ref_and_sort(&right); - left - } - } - } - - parallel_merge_tree(updates) - } } impl AsRef for TrieUpdatesSorted { From d7bf87da52da107b556746a80c31ea4eb03510e5 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 23 Jan 2026 10:21:44 -0800 Subject: [PATCH 180/267] feat(engine): add metric for state root task fallback success (#21371) --- crates/engine/tree/src/tree/metrics.rs | 2 ++ crates/engine/tree/src/tree/payload_validator.rs | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index fb6508d99b3..e97decbcc16 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -331,6 +331,8 @@ pub(crate) struct BlockValidationMetrics { pub(crate) state_root_storage_tries_updated_total: Counter, /// Total number of times the parallel state root computation fell back to regular. pub(crate) state_root_parallel_fallback_total: Counter, + /// Total number of times the state root task failed but the fallback succeeded. 
+ pub(crate) state_root_task_fallback_success_total: Counter, /// Latest state root duration, ie the time spent blocked waiting for the state root. pub(crate) state_root_duration: Gauge, /// Histogram for state root duration ie the time spent blocked waiting for the state root diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 2372256cabf..637d9fb2ad8 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -503,6 +503,7 @@ where let root_time = Instant::now(); let mut maybe_state_root = None; + let mut state_root_task_failed = false; match strategy { StateRootStrategy::StateRootTask => { @@ -521,10 +522,12 @@ where block_state_root = ?block.header().state_root(), "State root task returned incorrect state root" ); + state_root_task_failed = true; } } Err(error) => { debug!(target: "engine::tree::payload_validator", %error, "State root task failed"); + state_root_task_failed = true; } } } @@ -569,6 +572,11 @@ where self.compute_state_root_serial(overlay_factory.clone(), &hashed_state), block ); + + if state_root_task_failed { + self.metrics.block_validation.state_root_task_fallback_success_total.increment(1); + } + (root, updates, root_time.elapsed()) }; From ee1ec8f9f0d726cbd6057d6db4082c540faacd4a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 23 Jan 2026 19:31:04 +0100 Subject: [PATCH 181/267] perf(trie): parallelize COW extend operations with rayon (#21375) --- crates/chain-state/src/deferred_trie.rs | 30 +++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs index 1b4a3d43a35..479c86cad5b 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -206,11 +206,33 @@ impl DeferredTrieData { Default::default(), // prefix_sets are per-block, not cumulative ); // Only 
trigger COW clone if there's actually data to add. - if !sorted_hashed_state.is_empty() { - Arc::make_mut(&mut overlay.state).extend_ref_and_sort(&sorted_hashed_state); + #[cfg(feature = "rayon")] + { + rayon::join( + || { + if !sorted_hashed_state.is_empty() { + Arc::make_mut(&mut overlay.state) + .extend_ref_and_sort(&sorted_hashed_state); + } + }, + || { + if !sorted_trie_updates.is_empty() { + Arc::make_mut(&mut overlay.nodes) + .extend_ref_and_sort(&sorted_trie_updates); + } + }, + ); } - if !sorted_trie_updates.is_empty() { - Arc::make_mut(&mut overlay.nodes).extend_ref_and_sort(&sorted_trie_updates); + #[cfg(not(feature = "rayon"))] + { + if !sorted_hashed_state.is_empty() { + Arc::make_mut(&mut overlay.state) + .extend_ref_and_sort(&sorted_hashed_state); + } + if !sorted_trie_updates.is_empty() { + Arc::make_mut(&mut overlay.nodes) + .extend_ref_and_sort(&sorted_trie_updates); + } } overlay } From decb56fae1c9e056548cd4d9f81744f14cae87d8 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 23 Jan 2026 19:28:10 +0000 Subject: [PATCH 182/267] feat(rocksdb): changeset-based crash recovery healing for history indices (#21341) --- .../src/providers/rocksdb/invariants.rs | 1132 +++++++++++------ 1 file changed, 726 insertions(+), 406 deletions(-) diff --git a/crates/storage/provider/src/providers/rocksdb/invariants.rs b/crates/storage/provider/src/providers/rocksdb/invariants.rs index 75be8ca5adf..63901ac74e6 100644 --- a/crates/storage/provider/src/providers/rocksdb/invariants.rs +++ b/crates/storage/provider/src/providers/rocksdb/invariants.rs @@ -14,9 +14,15 @@ use reth_db_api::{tables, transaction::DbTx}; use reth_stages_types::StageId; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - DBProvider, StageCheckpointReader, StorageSettingsCache, TransactionsProvider, + ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader, + StorageSettingsCache, TransactionsProvider, }; use 
reth_storage_errors::provider::ProviderResult; +use std::collections::HashSet; + +/// Batch size for changeset iteration during history healing. +/// Balances memory usage against iteration overhead. +const HEAL_HISTORY_BATCH_SIZE: u64 = 10_000; impl RocksDBProvider { /// Checks consistency of `RocksDB` tables against MDBX stage checkpoints. @@ -32,10 +38,8 @@ impl RocksDBProvider { /// - If `RocksDB` is ahead, excess entries are pruned (healed). /// - If `RocksDB` is behind, an unwind is required. /// - /// For `StoragesHistory`: - /// - The maximum block number in shards should not exceed the `IndexStorageHistory` stage - /// checkpoint. - /// - Similar healing/unwind logic applies. + /// For `StoragesHistory` and `AccountsHistory`: + /// - Uses changesets to heal stale entries when static file tip > checkpoint. /// /// # Requirements /// @@ -51,6 +55,8 @@ impl RocksDBProvider { + StageCheckpointReader + StorageSettingsCache + StaticFileProviderFactory + + StorageChangeSetReader + + ChangeSetReader + TransactionsProvider, { let mut unwind_target: Option = None; @@ -62,16 +68,16 @@ impl RocksDBProvider { unwind_target = Some(unwind_target.map_or(target, |t| t.min(target))); } - // Check StoragesHistory if stored in RocksDB + // Heal StoragesHistory if stored in RocksDB if provider.cached_storage_settings().storages_history_in_rocksdb && - let Some(target) = self.check_storages_history(provider)? + let Some(target) = self.heal_storages_history(provider)? { unwind_target = Some(unwind_target.map_or(target, |t| t.min(target))); } - // Check AccountsHistory if stored in RocksDB + // Heal AccountsHistory if stored in RocksDB if provider.cached_storage_settings().account_history_in_rocksdb && - let Some(target) = self.check_accounts_history(provider)? + let Some(target) = self.heal_accounts_history(provider)? 
{ unwind_target = Some(unwind_target.map_or(target, |t| t.min(target))); } @@ -221,251 +227,192 @@ impl RocksDBProvider { Ok(()) } - /// Checks invariants for the `StoragesHistory` table. + /// Heals the `StoragesHistory` table by removing stale entries. /// - /// Returns a block number to unwind to if `RocksDB` is behind the checkpoint. - /// If `RocksDB` is ahead of the checkpoint, excess entries are pruned (healed). - fn check_storages_history( + /// Returns an unwind target if static file tip is behind checkpoint (cannot heal). + /// Otherwise iterates changesets in batches to identify and unwind affected keys. + fn heal_storages_history( &self, provider: &Provider, ) -> ProviderResult> where - Provider: DBProvider + StageCheckpointReader, + Provider: + DBProvider + StageCheckpointReader + StaticFileProviderFactory + StorageChangeSetReader, { - // Get the IndexStorageHistory stage checkpoint let checkpoint = provider .get_stage_checkpoint(StageId::IndexStorageHistory)? .map(|cp| cp.block_number) .unwrap_or(0); - // Check if RocksDB has any data - let rocks_first = self.first::()?; + // Fast path: if checkpoint is 0 and RocksDB has data, clear everything. + if checkpoint == 0 && self.first::()?.is_some() { + tracing::info!( + target: "reth::providers::rocksdb", + "StoragesHistory has data but checkpoint is 0, clearing all" + ); + self.clear::()?; + return Ok(None); + } - match rocks_first { - Some(_) => { - // If checkpoint is 0 but we have data, clear everything - if checkpoint == 0 { - tracing::info!( - target: "reth::providers::rocksdb", - "StoragesHistory has data but checkpoint is 0, clearing all" - ); - self.prune_storages_history_above(0)?; - return Ok(None); - } + let sf_tip = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::StorageChangeSets) + .unwrap_or(0); - // Find the max highest_block_number (excluding u64::MAX sentinel) across all - // entries. Also track if we found any non-sentinel entries. 
- let mut max_highest_block = 0u64; - let mut found_non_sentinel = false; - for result in self.iter::()? { - let (key, _) = result?; - let highest = key.sharded_key.highest_block_number; - if highest != u64::MAX { - found_non_sentinel = true; - if highest > max_highest_block { - max_highest_block = highest; - } - } - } + if sf_tip < checkpoint { + // This should never happen in normal operation - static files are always + // committed before RocksDB. If we get here, something is seriously wrong. + // The unwind is a best-effort attempt but is probably futile. + tracing::warn!( + target: "reth::providers::rocksdb", + sf_tip, + checkpoint, + "StoragesHistory: static file tip behind checkpoint, unwind needed" + ); + return Ok(Some(sf_tip)); + } - // If all entries are sentinel entries (u64::MAX), treat as first-run scenario. - // This means no completed shards exist (only sentinel shards with - // highest_block_number=u64::MAX), so no actual history has been indexed. - if !found_non_sentinel { - return Ok(None); - } + if sf_tip == checkpoint { + return Ok(None); + } - // If any entry has highest_block > checkpoint, prune excess - if max_highest_block > checkpoint { - tracing::info!( - target: "reth::providers::rocksdb", - rocks_highest = max_highest_block, - checkpoint, - "StoragesHistory ahead of checkpoint, pruning excess data" - ); - self.prune_storages_history_above(checkpoint)?; - } else if max_highest_block < checkpoint { - // RocksDB is behind checkpoint, return highest block to signal unwind needed - tracing::warn!( - target: "reth::providers::rocksdb", - rocks_highest = max_highest_block, - checkpoint, - "StoragesHistory behind checkpoint, unwind needed" - ); - return Ok(Some(max_highest_block)); - } + let total_blocks = sf_tip - checkpoint; + tracing::info!( + target: "reth::providers::rocksdb", + checkpoint, + sf_tip, + total_blocks, + "StoragesHistory: healing via changesets" + ); - Ok(None) - } - None => { - // Empty RocksDB table, nothing to check. 
- Ok(None) - } - } - } + let mut batch_start = checkpoint + 1; + let mut batch_num = 0u64; + let total_batches = total_blocks.div_ceil(HEAL_HISTORY_BATCH_SIZE); - /// Prunes `StoragesHistory` entries where `highest_block_number` > `max_block`. - /// - /// For `StoragesHistory`, the key contains `highest_block_number`, so we can iterate - /// and delete entries where `key.sharded_key.highest_block_number > max_block`. - /// - /// TODO(): this iterates the whole table, - /// which is inefficient. Use changeset-based pruning instead. - fn prune_storages_history_above(&self, max_block: BlockNumber) -> ProviderResult<()> { - use reth_db_api::models::storage_sharded_key::StorageShardedKey; - - let mut to_delete: Vec = Vec::new(); - for result in self.iter::()? { - let (key, _) = result?; - let highest_block = key.sharded_key.highest_block_number; - if max_block == 0 || (highest_block != u64::MAX && highest_block > max_block) { - to_delete.push(key); - } - } + while batch_start <= sf_tip { + let batch_end = (batch_start + HEAL_HISTORY_BATCH_SIZE - 1).min(sf_tip); + batch_num += 1; - let deleted = to_delete.len(); - if deleted > 0 { - tracing::info!( - target: "reth::providers::rocksdb", - deleted_count = deleted, - max_block, - "Pruning StoragesHistory entries" - ); + let changesets = provider.storage_changesets_range(batch_start..=batch_end)?; - let mut batch = self.batch(); - for key in to_delete { - batch.delete::(key)?; + let unique_keys: HashSet<_> = changesets + .into_iter() + .map(|(block_addr, entry)| (block_addr.address(), entry.key, checkpoint + 1)) + .collect(); + let indices: Vec<_> = unique_keys.into_iter().collect(); + + if !indices.is_empty() { + tracing::info!( + target: "reth::providers::rocksdb", + batch_num, + total_batches, + batch_start, + batch_end, + indices_count = indices.len(), + "StoragesHistory: unwinding batch" + ); + + let batch = self.unwind_storage_history_indices(&indices)?; + self.commit_batch(batch)?; } - batch.commit()?; + + batch_start 
= batch_end + 1; } - Ok(()) + Ok(None) } - /// Checks invariants for the `AccountsHistory` table. + /// Heals the `AccountsHistory` table by removing stale entries. /// - /// Returns a block number to unwind to if `RocksDB` is behind the checkpoint. - /// If `RocksDB` is ahead of the checkpoint, excess entries are pruned (healed). - fn check_accounts_history( + /// Returns an unwind target if static file tip is behind checkpoint (cannot heal). + /// Otherwise iterates changesets in batches to identify and unwind affected keys. + fn heal_accounts_history( &self, provider: &Provider, ) -> ProviderResult> where - Provider: DBProvider + StageCheckpointReader, + Provider: DBProvider + StageCheckpointReader + StaticFileProviderFactory + ChangeSetReader, { - // Get the IndexAccountHistory stage checkpoint let checkpoint = provider .get_stage_checkpoint(StageId::IndexAccountHistory)? .map(|cp| cp.block_number) .unwrap_or(0); - // Check if RocksDB has any data - let rocks_first = self.first::()?; + // Fast path: if checkpoint is 0 and RocksDB has data, clear everything. + if checkpoint == 0 && self.first::()?.is_some() { + tracing::info!( + target: "reth::providers::rocksdb", + "AccountsHistory has data but checkpoint is 0, clearing all" + ); + self.clear::()?; + return Ok(None); + } - match rocks_first { - Some(_) => { - // If checkpoint is 0 but we have data, clear everything - if checkpoint == 0 { - tracing::info!( - target: "reth::providers::rocksdb", - "AccountsHistory has data but checkpoint is 0, clearing all" - ); - self.prune_accounts_history_above(0)?; - return Ok(None); - } + let sf_tip = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::AccountChangeSets) + .unwrap_or(0); - // Find the max highest_block_number (excluding u64::MAX sentinel) across all - // entries. Also track if we found any non-sentinel entries. - let mut max_highest_block = 0u64; - let mut found_non_sentinel = false; - for result in self.iter::()? 
{ - let (key, _) = result?; - let highest = key.highest_block_number; - if highest != u64::MAX { - found_non_sentinel = true; - if highest > max_highest_block { - max_highest_block = highest; - } - } - } + if sf_tip < checkpoint { + // This should never happen in normal operation - static files are always + // committed before RocksDB. If we get here, something is seriously wrong. + // The unwind is a best-effort attempt but is probably futile. + tracing::warn!( + target: "reth::providers::rocksdb", + sf_tip, + checkpoint, + "AccountsHistory: static file tip behind checkpoint, unwind needed" + ); + return Ok(Some(sf_tip)); + } - // If all entries are sentinel entries (u64::MAX), treat as first-run scenario. - // This means no completed shards exist (only sentinel shards with - // highest_block_number=u64::MAX), so no actual history has been indexed. - if !found_non_sentinel { - return Ok(None); - } + if sf_tip == checkpoint { + return Ok(None); + } - // If any entry has highest_block > checkpoint, prune excess - if max_highest_block > checkpoint { - tracing::info!( - target: "reth::providers::rocksdb", - rocks_highest = max_highest_block, - checkpoint, - "AccountsHistory ahead of checkpoint, pruning excess data" - ); - self.prune_accounts_history_above(checkpoint)?; - return Ok(None); - } + let total_blocks = sf_tip - checkpoint; + tracing::info!( + target: "reth::providers::rocksdb", + checkpoint, + sf_tip, + total_blocks, + "AccountsHistory: healing via changesets" + ); - // If RocksDB is behind the checkpoint, request an unwind to rebuild. 
- if max_highest_block < checkpoint { - tracing::warn!( - target: "reth::providers::rocksdb", - rocks_highest = max_highest_block, - checkpoint, - "AccountsHistory behind checkpoint, unwind needed" - ); - return Ok(Some(max_highest_block)); - } + let mut batch_start = checkpoint + 1; + let mut batch_num = 0u64; + let total_batches = total_blocks.div_ceil(HEAL_HISTORY_BATCH_SIZE); - Ok(None) - } - None => { - // Empty RocksDB table, nothing to check. - Ok(None) - } - } - } + while batch_start <= sf_tip { + let batch_end = (batch_start + HEAL_HISTORY_BATCH_SIZE - 1).min(sf_tip); + batch_num += 1; - /// Prunes `AccountsHistory` entries where `highest_block_number` > `max_block`. - /// - /// For `AccountsHistory`, the key is `ShardedKey
` which contains - /// `highest_block_number`, so we can iterate and delete entries where - /// `key.highest_block_number > max_block`. - /// - /// TODO(): this iterates the whole table, - /// which is inefficient. Use changeset-based pruning instead. - fn prune_accounts_history_above(&self, max_block: BlockNumber) -> ProviderResult<()> { - use alloy_primitives::Address; - use reth_db_api::models::ShardedKey; + let changesets = provider.account_changesets_range(batch_start..=batch_end)?; - let mut to_delete: Vec> = Vec::new(); - for result in self.iter::()? { - let (key, _) = result?; - let highest_block = key.highest_block_number; - if max_block == 0 || (highest_block != u64::MAX && highest_block > max_block) { - to_delete.push(key); - } - } + let mut addresses = HashSet::with_capacity(changesets.len()); + addresses.extend(changesets.iter().map(|(_, cs)| cs.address)); + let unwind_from = checkpoint + 1; + let indices: Vec<_> = addresses.into_iter().map(|addr| (addr, unwind_from)).collect(); - let deleted = to_delete.len(); - if deleted > 0 { - tracing::info!( - target: "reth::providers::rocksdb", - deleted_count = deleted, - max_block, - "Pruning AccountsHistory entries" - ); + if !indices.is_empty() { + tracing::info!( + target: "reth::providers::rocksdb", + batch_num, + total_batches, + batch_start, + batch_end, + indices_count = indices.len(), + "AccountsHistory: unwinding batch" + ); - let mut batch = self.batch(); - for key in to_delete { - batch.delete::(key)?; + let batch = self.unwind_account_history_indices(&indices)?; + self.commit_batch(batch)?; } - batch.commit()?; + + batch_start = batch_end + 1; } - Ok(()) + Ok(None) } } @@ -473,8 +420,9 @@ impl RocksDBProvider { mod tests { use super::*; use crate::{ - providers::rocksdb::RocksDBBuilder, test_utils::create_test_provider_factory, BlockWriter, - DatabaseProviderFactory, StageCheckpointWriter, TransactionsProvider, + providers::{rocksdb::RocksDBBuilder, static_file::StaticFileWriter}, + 
test_utils::create_test_provider_factory, + BlockWriter, DatabaseProviderFactory, StageCheckpointWriter, TransactionsProvider, }; use alloy_primitives::{Address, B256}; use reth_db::cursor::DbCursorRW; @@ -680,9 +628,10 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); // RocksDB is empty but checkpoint says block 100 was processed. - // This is treated as a first-run/migration scenario - no unwind needed. + // Since sf_tip=0 < checkpoint=100, we return unwind target of 0. + // This should never happen in normal operation. let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, None, "Empty RocksDB with checkpoint is treated as first run"); + assert_eq!(result, Some(0), "sf_tip=0 < checkpoint=100 returns unwind target"); } #[test] @@ -721,46 +670,6 @@ mod tests { ); } - #[test] - fn test_check_consistency_storages_history_behind_checkpoint_needs_unwind() { - let temp_dir = TempDir::new().unwrap(); - let rocksdb = RocksDBBuilder::new(temp_dir.path()) - .with_table::() - .build() - .unwrap(); - - // Insert data into RocksDB with max highest_block_number = 80 - let key_block_50 = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50); - let key_block_80 = StorageShardedKey::new(Address::ZERO, B256::from([1u8; 32]), 80); - let key_block_max = StorageShardedKey::new(Address::ZERO, B256::from([2u8; 32]), u64::MAX); - - let block_list = BlockNumberList::new_pre_sorted([10, 20, 30]); - rocksdb.put::(key_block_50, &block_list).unwrap(); - rocksdb.put::(key_block_80, &block_list).unwrap(); - rocksdb.put::(key_block_max, &block_list).unwrap(); - - // Create a test provider factory for MDBX - let factory = create_test_provider_factory(); - factory.set_storage_settings_cache( - StorageSettings::legacy().with_storages_history_in_rocksdb(true), - ); - - // Set checkpoint to block 100 - { - let provider = factory.database_provider_rw().unwrap(); - provider - .save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(100)) 
- .unwrap(); - provider.commit().unwrap(); - } - - let provider = factory.database_provider_ro().unwrap(); - - // RocksDB max highest_block (80) is behind checkpoint (100) - let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, Some(80), "Should unwind to the highest block present in RocksDB"); - } - #[test] fn test_check_consistency_mdbx_behind_checkpoint_needs_unwind() { let temp_dir = TempDir::new().unwrap(); @@ -923,67 +832,6 @@ mod tests { } } - #[test] - fn test_check_consistency_storages_history_ahead_of_checkpoint_prunes_excess() { - let temp_dir = TempDir::new().unwrap(); - let rocksdb = RocksDBBuilder::new(temp_dir.path()) - .with_table::() - .build() - .unwrap(); - - // Insert data into RocksDB with different highest_block_numbers - let key_block_50 = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50); - let key_block_100 = StorageShardedKey::new(Address::ZERO, B256::from([1u8; 32]), 100); - let key_block_150 = StorageShardedKey::new(Address::ZERO, B256::from([2u8; 32]), 150); - let key_block_max = StorageShardedKey::new(Address::ZERO, B256::from([3u8; 32]), u64::MAX); - - let block_list = BlockNumberList::new_pre_sorted([10, 20, 30]); - rocksdb.put::(key_block_50.clone(), &block_list).unwrap(); - rocksdb.put::(key_block_100.clone(), &block_list).unwrap(); - rocksdb.put::(key_block_150.clone(), &block_list).unwrap(); - rocksdb.put::(key_block_max.clone(), &block_list).unwrap(); - - // Create a test provider factory for MDBX - let factory = create_test_provider_factory(); - factory.set_storage_settings_cache( - StorageSettings::legacy().with_storages_history_in_rocksdb(true), - ); - - // Set checkpoint to block 100 - { - let provider = factory.database_provider_rw().unwrap(); - provider - .save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(100)) - .unwrap(); - provider.commit().unwrap(); - } - - let provider = factory.database_provider_ro().unwrap(); - - // RocksDB has entries with highest_block = 150 
which exceeds checkpoint (100) - // Should prune entries where highest_block > 100 (but not u64::MAX sentinel) - let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, None, "Should heal by pruning, no unwind needed"); - - // Verify key_block_150 was pruned, but others remain - assert!( - rocksdb.get::(key_block_50).unwrap().is_some(), - "Entry with highest_block=50 should remain" - ); - assert!( - rocksdb.get::(key_block_100).unwrap().is_some(), - "Entry with highest_block=100 should remain" - ); - assert!( - rocksdb.get::(key_block_150).unwrap().is_none(), - "Entry with highest_block=150 should be pruned" - ); - assert!( - rocksdb.get::(key_block_max).unwrap().is_some(), - "Entry with highest_block=u64::MAX (sentinel) should remain" - ); - } - #[test] fn test_check_consistency_storages_history_sentinel_only_with_checkpoint_is_first_run() { let temp_dir = TempDir::new().unwrap(); @@ -1020,13 +868,11 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // RocksDB has only sentinel entries (no completed shards) but checkpoint is set. - // This is treated as a first-run/migration scenario - no unwind needed. + // RocksDB has only sentinel entries but checkpoint is set. + // Since sf_tip=0 < checkpoint=100, we return unwind target of 0. + // This should never happen in normal operation. let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!( - result, None, - "Sentinel-only entries with checkpoint should be treated as first run" - ); + assert_eq!(result, Some(0), "sf_tip=0 < checkpoint=100 returns unwind target"); } #[test] @@ -1066,53 +912,11 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // RocksDB has only sentinel entries (no completed shards) but checkpoint is set. - // This is treated as a first-run/migration scenario - no unwind needed. 
- let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!( - result, None, - "Sentinel-only entries with checkpoint should be treated as first run" - ); - } - - #[test] - fn test_check_consistency_storages_history_behind_checkpoint_single_entry() { - use reth_db_api::models::storage_sharded_key::StorageShardedKey; - - let temp_dir = TempDir::new().unwrap(); - let rocksdb = RocksDBBuilder::new(temp_dir.path()) - .with_table::() - .build() - .unwrap(); - - // Insert data into RocksDB with highest_block_number below checkpoint - let key_block_50 = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50); - let block_list = BlockNumberList::new_pre_sorted([10, 20, 30, 50]); - rocksdb.put::(key_block_50, &block_list).unwrap(); - - let factory = create_test_provider_factory(); - factory.set_storage_settings_cache( - StorageSettings::legacy().with_storages_history_in_rocksdb(true), - ); - - // Set checkpoint to block 100 - { - let provider = factory.database_provider_rw().unwrap(); - provider - .save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(100)) - .unwrap(); - provider.commit().unwrap(); - } - - let provider = factory.database_provider_ro().unwrap(); - - // RocksDB only has data up to block 50, but checkpoint says block 100 was processed + // RocksDB has only sentinel entries but checkpoint is set. + // Since sf_tip=0 < checkpoint=100, we return unwind target of 0. + // This should never happen in normal operation. let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!( - result, - Some(50), - "Should require unwind to block 50 to rebuild StoragesHistory" - ); + assert_eq!(result, Some(0), "sf_tip=0 < checkpoint=100 returns unwind target"); } /// Test that pruning works by fetching transactions and computing their hashes, @@ -1257,9 +1061,10 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); // RocksDB is empty but checkpoint says block 100 was processed. 
- // This is treated as a first-run/migration scenario - no unwind needed. + // Since sf_tip=0 < checkpoint=100, we return unwind target of 0. + // This should never happen in normal operation. let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, None, "Empty RocksDB with checkpoint is treated as first run"); + assert_eq!(result, Some(0), "sf_tip=0 < checkpoint=100 returns unwind target"); } #[test] @@ -1301,8 +1106,10 @@ mod tests { } #[test] - fn test_check_consistency_accounts_history_ahead_of_checkpoint_prunes_excess() { + fn test_check_consistency_accounts_history_sf_tip_equals_checkpoint_no_action() { + use reth_db::models::AccountBeforeTx; use reth_db_api::models::ShardedKey; + use reth_static_file_types::StaticFileSegment; let temp_dir = TempDir::new().unwrap(); let rocksdb = RocksDBBuilder::new(temp_dir.path()) @@ -1310,17 +1117,21 @@ mod tests { .build() .unwrap(); - // Insert data into RocksDB with different highest_block_numbers - let key_block_50 = ShardedKey::new(Address::ZERO, 50); - let key_block_100 = ShardedKey::new(Address::random(), 100); - let key_block_150 = ShardedKey::new(Address::random(), 150); - let key_block_max = ShardedKey::new(Address::random(), u64::MAX); - - let block_list = BlockNumberList::new_pre_sorted([10, 20, 30]); - rocksdb.put::(key_block_50.clone(), &block_list).unwrap(); - rocksdb.put::(key_block_100.clone(), &block_list).unwrap(); - rocksdb.put::(key_block_150.clone(), &block_list).unwrap(); - rocksdb.put::(key_block_max.clone(), &block_list).unwrap(); + // Insert some AccountsHistory entries with various highest_block_numbers + let key1 = ShardedKey::new(Address::ZERO, 50); + let key2 = ShardedKey::new(Address::random(), 75); + let key3 = ShardedKey::new(Address::random(), u64::MAX); // sentinel + let block_list1 = BlockNumberList::new_pre_sorted([10, 20, 30, 50]); + let block_list2 = BlockNumberList::new_pre_sorted([40, 60, 75]); + let block_list3 = BlockNumberList::new_pre_sorted([80, 90, 
100]); + rocksdb.put::(key1, &block_list1).unwrap(); + rocksdb.put::(key2, &block_list2).unwrap(); + rocksdb.put::(key3, &block_list3).unwrap(); + + // Capture RocksDB state before consistency check + let entries_before: Vec<_> = + rocksdb.iter::().unwrap().map(|r| r.unwrap()).collect(); + assert_eq!(entries_before.len(), 3, "Should have 3 entries before check"); // Create a test provider factory for MDBX let factory = create_test_provider_factory(); @@ -1328,7 +1139,21 @@ mod tests { StorageSettings::legacy().with_account_history_in_rocksdb(true), ); - // Set checkpoint to block 100 + // Write account changesets to static files for blocks 0-100 + { + let sf_provider = factory.static_file_provider(); + let mut writer = + sf_provider.latest_writer(StaticFileSegment::AccountChangeSets).unwrap(); + + for block_num in 0..=100 { + let changeset = vec![AccountBeforeTx { address: Address::random(), info: None }]; + writer.append_account_changeset(changeset, block_num).unwrap(); + } + + writer.commit().unwrap(); + } + + // Set IndexAccountHistory checkpoint to block 100 (same as sf_tip) { let provider = factory.database_provider_rw().unwrap(); provider @@ -1339,33 +1164,285 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // RocksDB has entries with highest_block = 150 which exceeds checkpoint (100) - // Should prune entries where highest_block > 100 (but not u64::MAX sentinel) + // Verify sf_tip equals checkpoint (both at 100) + let sf_tip = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::AccountChangeSets) + .unwrap(); + assert_eq!(sf_tip, 100, "Static file tip should be 100"); + + // Run check_consistency - should return None (no unwind needed) let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, None, "Should heal by pruning, no unwind needed"); + assert_eq!(result, None, "sf_tip == checkpoint should not require unwind"); - // Verify key_block_150 was pruned, but others remain - 
assert!( - rocksdb.get::(key_block_50).unwrap().is_some(), - "Entry with highest_block=50 should remain" + // Verify NO entries are deleted - RocksDB state unchanged + let entries_after: Vec<_> = + rocksdb.iter::().unwrap().map(|r| r.unwrap()).collect(); + + assert_eq!( + entries_after.len(), + entries_before.len(), + "RocksDB entry count should be unchanged when sf_tip == checkpoint" ); - assert!( - rocksdb.get::(key_block_100).unwrap().is_some(), - "Entry with highest_block=100 should remain" + + // Verify exact entries are preserved + for (before, after) in entries_before.iter().zip(entries_after.iter()) { + assert_eq!(before.0.key, after.0.key, "Entry key should be unchanged"); + assert_eq!( + before.0.highest_block_number, after.0.highest_block_number, + "Entry highest_block_number should be unchanged" + ); + assert_eq!(before.1, after.1, "Entry block list should be unchanged"); + } + } + + /// Tests `StoragesHistory` changeset-based healing with enough blocks to trigger batching. + /// + /// Scenario: + /// 1. Generate 15,000 blocks worth of storage changeset data (to exceed the 10k batch size) + /// 2. Each block has 1 storage change (address + slot + value) + /// 3. Write storage changesets to static files for all 15k blocks + /// 4. Set `IndexStorageHistory` checkpoint to block 5000 + /// 5. Insert stale `StoragesHistory` entries in `RocksDB` for (address, slot) pairs that + /// changed in blocks 5001-15000 + /// 6. Run `check_consistency` + /// 7. 
Verify stale entries for blocks > 5000 are pruned and batching worked + #[test] + fn test_check_consistency_storages_history_heals_via_changesets_large_range() { + use alloy_primitives::U256; + use reth_db_api::models::StorageBeforeTx; + + const TOTAL_BLOCKS: u64 = 15_000; + const CHECKPOINT_BLOCK: u64 = 5_000; + + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy() + .with_storages_history_in_rocksdb(true) + .with_storage_changesets_in_static_files(true), ); - assert!( - rocksdb.get::(key_block_150).unwrap().is_none(), - "Entry with highest_block=150 should be pruned" + + // Helper to generate address from block number (reuses stack arrays) + #[inline] + fn make_address(block_num: u64) -> Address { + let mut addr_bytes = [0u8; 20]; + addr_bytes[0..8].copy_from_slice(&block_num.to_le_bytes()); + Address::from(addr_bytes) + } + + // Helper to generate slot from block number (reuses stack arrays) + #[inline] + fn make_slot(block_num: u64) -> B256 { + let mut slot_bytes = [0u8; 32]; + slot_bytes[0..8].copy_from_slice(&block_num.to_le_bytes()); + B256::from(slot_bytes) + } + + // Write storage changesets to static files for 15k blocks. + // Each block has 1 storage change with a unique (address, slot) pair. 
+ { + let sf_provider = factory.static_file_provider(); + let mut writer = + sf_provider.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + // Reuse changeset vec to avoid repeated allocations + let mut changeset = Vec::with_capacity(1); + + for block_num in 0..TOTAL_BLOCKS { + changeset.clear(); + changeset.push(StorageBeforeTx { + address: make_address(block_num), + key: make_slot(block_num), + value: U256::from(block_num), + }); + + writer.append_storage_changeset(changeset.clone(), block_num).unwrap(); + } + + writer.commit().unwrap(); + } + + // Verify static files have data up to block 14999 + { + let sf_provider = factory.static_file_provider(); + let highest = sf_provider + .get_highest_static_file_block(StaticFileSegment::StorageChangeSets) + .unwrap(); + assert_eq!(highest, TOTAL_BLOCKS - 1, "Static files should have blocks 0..14999"); + } + + // Set IndexStorageHistory checkpoint to block 5000 + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint( + StageId::IndexStorageHistory, + StageCheckpoint::new(CHECKPOINT_BLOCK), + ) + .unwrap(); + provider.commit().unwrap(); + } + + // Insert stale StoragesHistory entries for blocks 5001-14999 + // These are (address, slot) pairs that changed after the checkpoint + for block_num in (CHECKPOINT_BLOCK + 1)..TOTAL_BLOCKS { + let key = + StorageShardedKey::new(make_address(block_num), make_slot(block_num), block_num); + let block_list = BlockNumberList::new_pre_sorted([block_num]); + rocksdb.put::(key, &block_list).unwrap(); + } + + // Verify RocksDB has stale entries before healing + let count_before: usize = rocksdb.iter::().unwrap().count(); + assert_eq!( + count_before, + (TOTAL_BLOCKS - CHECKPOINT_BLOCK - 1) as usize, + "Should have {} stale entries before healing", + TOTAL_BLOCKS - CHECKPOINT_BLOCK - 1 ); - assert!( - rocksdb.get::(key_block_max).unwrap().is_some(), - "Entry with highest_block=u64::MAX (sentinel) should remain" + + // Run 
check_consistency - this should heal by pruning stale entries + let provider = factory.database_provider_ro().unwrap(); + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Should heal via changesets, no unwind needed"); + + // Verify all stale entries were pruned + // After healing, entries with highest_block_number > checkpoint should be gone + let mut remaining_stale = 0; + for result in rocksdb.iter::().unwrap() { + let (key, _) = result.unwrap(); + if key.sharded_key.highest_block_number > CHECKPOINT_BLOCK { + remaining_stale += 1; + } + } + assert_eq!( + remaining_stale, 0, + "All stale entries (block > {}) should be pruned", + CHECKPOINT_BLOCK + ); + } + + /// Tests that healing preserves entries at exactly the checkpoint block. + /// + /// This catches off-by-one bugs where checkpoint block data is incorrectly deleted. + #[test] + fn test_check_consistency_storages_history_preserves_checkpoint_block() { + use alloy_primitives::U256; + use reth_db_api::models::StorageBeforeTx; + + const CHECKPOINT_BLOCK: u64 = 100; + const SF_TIP: u64 = 200; + + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy() + .with_storages_history_in_rocksdb(true) + .with_storage_changesets_in_static_files(true), ); + + let checkpoint_addr = Address::repeat_byte(0xAA); + let checkpoint_slot = B256::repeat_byte(0xBB); + let stale_addr = Address::repeat_byte(0xCC); + let stale_slot = B256::repeat_byte(0xDD); + + // Write storage changesets to static files + { + let sf_provider = factory.static_file_provider(); + let mut writer = + sf_provider.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + for block_num in 0..=SF_TIP { + let changeset = if block_num == CHECKPOINT_BLOCK { + vec![StorageBeforeTx { + address: checkpoint_addr, + key: 
checkpoint_slot, + value: U256::from(block_num), + }] + } else if block_num > CHECKPOINT_BLOCK { + vec![StorageBeforeTx { + address: stale_addr, + key: stale_slot, + value: U256::from(block_num), + }] + } else { + vec![StorageBeforeTx { + address: Address::ZERO, + key: B256::ZERO, + value: U256::ZERO, + }] + }; + writer.append_storage_changeset(changeset, block_num).unwrap(); + } + writer.commit().unwrap(); + } + + // Set checkpoint + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint( + StageId::IndexStorageHistory, + StageCheckpoint::new(CHECKPOINT_BLOCK), + ) + .unwrap(); + provider.commit().unwrap(); + } + + // Insert entry AT the checkpoint block (should be preserved) + let checkpoint_key = + StorageShardedKey::new(checkpoint_addr, checkpoint_slot, CHECKPOINT_BLOCK); + let checkpoint_list = BlockNumberList::new_pre_sorted([CHECKPOINT_BLOCK]); + rocksdb.put::(checkpoint_key.clone(), &checkpoint_list).unwrap(); + + // Insert stale entry AFTER the checkpoint (should be removed) + let stale_key = StorageShardedKey::new(stale_addr, stale_slot, SF_TIP); + let stale_list = BlockNumberList::new_pre_sorted([CHECKPOINT_BLOCK + 1, SF_TIP]); + rocksdb.put::(stale_key.clone(), &stale_list).unwrap(); + + // Run healing + let provider = factory.database_provider_ro().unwrap(); + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Should heal without unwind"); + + // Verify checkpoint block entry is PRESERVED + let preserved = rocksdb.get::(checkpoint_key).unwrap(); + assert!(preserved.is_some(), "Entry at checkpoint block should be preserved, not deleted"); + + // Verify stale entry is removed or unwound + let stale = rocksdb.get::(stale_key).unwrap(); + assert!(stale.is_none(), "Stale entry after checkpoint should be removed"); } + /// Tests `AccountsHistory` changeset-based healing with enough blocks to trigger batching. + /// + /// Scenario: + /// 1. 
Generate 15,000 blocks worth of account changeset data (to exceed the 10k batch size) + /// 2. Each block has 1 account change (simple - just random addresses) + /// 3. Write account changesets to static files for all 15k blocks + /// 4. Set `IndexAccountHistory` checkpoint to block 5000 + /// 5. Insert stale `AccountsHistory` entries in `RocksDB` for addresses that changed in blocks + /// 5001-15000 + /// 6. Run `check_consistency` + /// 7. Verify: + /// - Stale entries for blocks > 5000 are pruned + /// - The batching worked (no OOM, completed successfully) #[test] - fn test_check_consistency_accounts_history_behind_checkpoint_needs_unwind() { + fn test_check_consistency_accounts_history_heals_via_changesets_large_range() { + use reth_db::models::AccountBeforeTx; use reth_db_api::models::ShardedKey; + use reth_static_file_types::StaticFileSegment; let temp_dir = TempDir::new().unwrap(); let rocksdb = RocksDBBuilder::new(temp_dir.path()) @@ -1373,33 +1450,276 @@ mod tests { .build() .unwrap(); - // Insert data into RocksDB with highest_block_number below checkpoint - let key_block_50 = ShardedKey::new(Address::ZERO, 50); - let block_list = BlockNumberList::new_pre_sorted([10, 20, 30, 50]); - rocksdb.put::(key_block_50, &block_list).unwrap(); + // Create test provider factory + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy() + .with_account_history_in_rocksdb(true) + .with_account_changesets_in_static_files(true), + ); + + const TOTAL_BLOCKS: u64 = 15_000; + const CHECKPOINT_BLOCK: u64 = 5_000; + + // Helper to generate address from block number (avoids pre-allocating 15k addresses) + #[inline] + fn make_address(block_num: u64) -> Address { + let mut addr = Address::ZERO; + addr.0[0..8].copy_from_slice(&block_num.to_le_bytes()); + addr + } + + // Write account changesets to static files for all 15k blocks + { + let sf_provider = factory.static_file_provider(); + let mut writer = + 
sf_provider.latest_writer(StaticFileSegment::AccountChangeSets).unwrap(); + + // Reuse changeset vec to avoid repeated allocations + let mut changeset = Vec::with_capacity(1); + + for block_num in 0..TOTAL_BLOCKS { + changeset.clear(); + changeset.push(AccountBeforeTx { address: make_address(block_num), info: None }); + writer.append_account_changeset(changeset.clone(), block_num).unwrap(); + } + + writer.commit().unwrap(); + } + + // Insert stale AccountsHistory entries in RocksDB for addresses that changed + // in blocks 5001-15000 (i.e., blocks after the checkpoint) + // These should be pruned by check_consistency + for block_num in (CHECKPOINT_BLOCK + 1)..TOTAL_BLOCKS { + let key = ShardedKey::new(make_address(block_num), block_num); + let block_list = BlockNumberList::new_pre_sorted([block_num]); + rocksdb.put::(key, &block_list).unwrap(); + } + + // Also insert some valid entries for blocks <= 5000 that should NOT be pruned + for block_num in [100u64, 500, 1000, 2500, 5000] { + let key = ShardedKey::new(make_address(block_num), block_num); + let block_list = BlockNumberList::new_pre_sorted([block_num]); + rocksdb.put::(key, &block_list).unwrap(); + } + + // Verify we have entries before healing + let entries_before: usize = rocksdb.iter::().unwrap().count(); + let stale_count = (TOTAL_BLOCKS - CHECKPOINT_BLOCK - 1) as usize; + let valid_count = 5usize; + assert_eq!( + entries_before, + stale_count + valid_count, + "Should have {} stale + {} valid entries before healing", + stale_count, + valid_count + ); + + // Set IndexAccountHistory checkpoint to block 5000 + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint( + StageId::IndexAccountHistory, + StageCheckpoint::new(CHECKPOINT_BLOCK), + ) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // Verify sf_tip > checkpoint + let sf_tip = provider + .static_file_provider() + 
.get_highest_static_file_block(StaticFileSegment::AccountChangeSets) + .unwrap(); + assert_eq!(sf_tip, TOTAL_BLOCKS - 1, "Static file tip should be 14999"); + assert!(sf_tip > CHECKPOINT_BLOCK, "sf_tip should be > checkpoint to trigger healing"); + + // Run check_consistency - this should trigger batched changeset-based healing + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Healing should succeed without requiring unwind"); + + // Verify: all stale entries for blocks > 5000 should be pruned + // Count remaining entries with highest_block_number > checkpoint + let mut remaining_stale = 0; + for result in rocksdb.iter::().unwrap() { + let (key, _) = result.unwrap(); + if key.highest_block_number > CHECKPOINT_BLOCK && key.highest_block_number != u64::MAX { + remaining_stale += 1; + } + } + assert_eq!( + remaining_stale, 0, + "All stale entries (block > {}) should be pruned", + CHECKPOINT_BLOCK + ); + } + + /// Tests that accounts history healing preserves entries at exactly the checkpoint block. 
+ #[test] + fn test_check_consistency_accounts_history_preserves_checkpoint_block() { + use reth_db::models::AccountBeforeTx; + use reth_db_api::models::ShardedKey; + + const CHECKPOINT_BLOCK: u64 = 100; + const SF_TIP: u64 = 200; + + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); let factory = create_test_provider_factory(); factory.set_storage_settings_cache( - StorageSettings::legacy().with_account_history_in_rocksdb(true), + StorageSettings::legacy() + .with_account_history_in_rocksdb(true) + .with_account_changesets_in_static_files(true), ); - // Set checkpoint to block 100 + let checkpoint_addr = Address::repeat_byte(0xAA); + let stale_addr = Address::repeat_byte(0xCC); + + // Write account changesets to static files + { + let sf_provider = factory.static_file_provider(); + let mut writer = + sf_provider.latest_writer(StaticFileSegment::AccountChangeSets).unwrap(); + + for block_num in 0..=SF_TIP { + let changeset = if block_num == CHECKPOINT_BLOCK { + vec![AccountBeforeTx { address: checkpoint_addr, info: None }] + } else if block_num > CHECKPOINT_BLOCK { + vec![AccountBeforeTx { address: stale_addr, info: None }] + } else { + vec![AccountBeforeTx { address: Address::ZERO, info: None }] + }; + writer.append_account_changeset(changeset, block_num).unwrap(); + } + writer.commit().unwrap(); + } + + // Set checkpoint { let provider = factory.database_provider_rw().unwrap(); provider - .save_stage_checkpoint(StageId::IndexAccountHistory, StageCheckpoint::new(100)) + .save_stage_checkpoint( + StageId::IndexAccountHistory, + StageCheckpoint::new(CHECKPOINT_BLOCK), + ) + .unwrap(); + provider.commit().unwrap(); + } + + // Insert entry AT the checkpoint block (should be preserved) + let checkpoint_key = ShardedKey::new(checkpoint_addr, CHECKPOINT_BLOCK); + let checkpoint_list = BlockNumberList::new_pre_sorted([CHECKPOINT_BLOCK]); + rocksdb.put::(checkpoint_key.clone(), 
&checkpoint_list).unwrap(); + + // Insert stale entry AFTER the checkpoint (should be removed) + let stale_key = ShardedKey::new(stale_addr, SF_TIP); + let stale_list = BlockNumberList::new_pre_sorted([CHECKPOINT_BLOCK + 1, SF_TIP]); + rocksdb.put::(stale_key.clone(), &stale_list).unwrap(); + + // Run healing + let provider = factory.database_provider_ro().unwrap(); + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Should heal without unwind"); + + // Verify checkpoint block entry is PRESERVED + let preserved = rocksdb.get::(checkpoint_key).unwrap(); + assert!(preserved.is_some(), "Entry at checkpoint block should be preserved, not deleted"); + + // Verify stale entry is removed or unwound + let stale = rocksdb.get::(stale_key).unwrap(); + assert!(stale.is_none(), "Stale entry after checkpoint should be removed"); + } + + #[test] + fn test_check_consistency_storages_history_sf_tip_equals_checkpoint_no_action() { + use alloy_primitives::U256; + use reth_db::models::StorageBeforeTx; + use reth_static_file_types::StaticFileSegment; + + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Insert StoragesHistory entries into RocksDB + let key1 = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50); + let key2 = StorageShardedKey::new(Address::random(), B256::random(), 80); + let block_list1 = BlockNumberList::new_pre_sorted([10, 20, 30, 50]); + let block_list2 = BlockNumberList::new_pre_sorted([40, 60, 80]); + rocksdb.put::(key1, &block_list1).unwrap(); + rocksdb.put::(key2, &block_list2).unwrap(); + + // Capture entries before consistency check + let entries_before: Vec<_> = + rocksdb.iter::().unwrap().map(|r| r.unwrap()).collect(); + + // Create a test provider factory + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + // Write 
storage changesets to static files for blocks 0-100 + { + let sf_provider = factory.static_file_provider(); + let mut writer = + sf_provider.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + for block_num in 0..=100u64 { + let changeset = vec![StorageBeforeTx { + address: Address::ZERO, + key: B256::with_last_byte(block_num as u8), + value: U256::from(block_num), + }]; + writer.append_storage_changeset(changeset, block_num).unwrap(); + } + writer.commit().unwrap(); + } + + // Set IndexStorageHistory checkpoint to block 100 (same as sf_tip) + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(100)) .unwrap(); provider.commit().unwrap(); } let provider = factory.database_provider_ro().unwrap(); - // RocksDB only has data up to block 50, but checkpoint says block 100 was processed + // Verify sf_tip equals checkpoint (both at 100) + let sf_tip = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::StorageChangeSets) + .unwrap(); + assert_eq!(sf_tip, 100, "Static file tip should be 100"); + + // Run check_consistency - should return None (no unwind needed) let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "sf_tip == checkpoint should not require unwind"); + + // Verify NO entries are deleted - RocksDB state unchanged + let entries_after: Vec<_> = + rocksdb.iter::().unwrap().map(|r| r.unwrap()).collect(); + assert_eq!( - result, - Some(50), - "Should require unwind to block 50 to rebuild AccountsHistory" + entries_after.len(), + entries_before.len(), + "RocksDB entry count should be unchanged when sf_tip == checkpoint" ); + + // Verify exact entries are preserved + for (before, after) in entries_before.iter().zip(entries_after.iter()) { + assert_eq!(before.0, after.0, "Entry key should be unchanged"); + assert_eq!(before.1, after.1, "Entry block list should be unchanged"); + } } } From 
ab418642b463df6410e6ea3ad4173e39d543d1d4 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 23 Jan 2026 19:28:52 +0000 Subject: [PATCH 183/267] fix(stages): commit RocksDB batches before flush and configure immediate WAL cleanup (#21374) Co-authored-by: Amp Co-authored-by: Georgios Konstantopoulos --- .../src/stages/index_account_history.rs | 8 ++++-- .../src/stages/index_storage_history.rs | 7 +++-- crates/stages/stages/src/stages/tx_lookup.rs | 8 ++++-- .../src/providers/blockchain_provider.rs | 5 ++++ .../provider/src/providers/database/mod.rs | 5 ++++ .../src/providers/database/provider.rs | 9 +++++++ .../src/providers/rocksdb/provider.rs | 27 +++++++++++-------- .../storage/provider/src/test_utils/noop.rs | 7 +++-- .../provider/src/traits/rocksdb_provider.rs | 12 +++++++++ 9 files changed, 69 insertions(+), 19 deletions(-) diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 91334c10cfb..d6871475ce8 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -1,7 +1,9 @@ use super::collect_account_history_indices; use crate::stages::utils::{collect_history_indices, load_account_history}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; -use reth_db_api::{models::ShardedKey, table::Table, tables, transaction::DbTxMut}; +#[cfg(all(unix, feature = "rocksdb"))] +use reth_db_api::Tables; +use reth_db_api::{models::ShardedKey, tables, transaction::DbTxMut}; use reth_provider::{ DBProvider, EitherWriter, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, RocksDBProviderFactory, StorageSettingsCache, @@ -142,8 +144,10 @@ where Ok(((), writer.into_raw_rocksdb_batch())) })?; + #[cfg(all(unix, feature = "rocksdb"))] if use_rocksdb { - provider.rocksdb_provider().flush(&[tables::AccountsHistory::NAME])?; + provider.commit_pending_rocksdb_batches()?; + 
provider.rocksdb_provider().flush(&[Tables::AccountsHistory.name()])?; } Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true }) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 0990575500a..7b7d39f6d67 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -1,9 +1,10 @@ use super::{collect_history_indices, collect_storage_history_indices}; use crate::{stages::utils::load_storage_history, StageCheckpoint, StageId}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; +#[cfg(all(unix, feature = "rocksdb"))] +use reth_db_api::Tables; use reth_db_api::{ models::{storage_sharded_key::StorageShardedKey, AddressStorageKey, BlockNumberAddress}, - table::Table, tables, transaction::DbTxMut, }; @@ -147,8 +148,10 @@ where Ok(((), writer.into_raw_rocksdb_batch())) })?; + #[cfg(all(unix, feature = "rocksdb"))] if use_rocksdb { - provider.rocksdb_provider().flush(&[tables::StoragesHistory::NAME])?; + provider.commit_pending_rocksdb_batches()?; + provider.rocksdb_provider().flush(&[Tables::StoragesHistory.name()])?; } Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true }) diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 404cecae56c..1af65fb8d76 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -2,8 +2,10 @@ use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; +#[cfg(all(unix, feature = "rocksdb"))] +use reth_db_api::Tables; use reth_db_api::{ - table::{Decode, Decompress, Table, Value}, + table::{Decode, Decompress, Value}, tables, transaction::DbTxMut, }; @@ -200,8 +202,10 @@ where } } + #[cfg(all(unix, feature = "rocksdb"))] if 
provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb { - provider.rocksdb_provider().flush(&[tables::TransactionHashNumbers::NAME])?; + provider.commit_pending_rocksdb_batches()?; + provider.rocksdb_provider().flush(&[Tables::TransactionHashNumbers.name()])?; } Ok(ExecOutput { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 1644219428b..141a5074b63 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -186,6 +186,11 @@ impl RocksDBProviderFactory for BlockchainProvider { fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) { unimplemented!("BlockchainProvider wraps ProviderFactory - use DatabaseProvider::set_pending_rocksdb_batch instead") } + + #[cfg(all(unix, feature = "rocksdb"))] + fn commit_pending_rocksdb_batches(&self) -> ProviderResult<()> { + unimplemented!("BlockchainProvider wraps ProviderFactory - use DatabaseProvider::commit_pending_rocksdb_batches instead") + } } impl HeaderProvider for BlockchainProvider { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index b35af670c94..b9f7f1ccdd4 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -171,6 +171,11 @@ impl RocksDBProviderFactory for ProviderFactory { fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) { unimplemented!("ProviderFactory is a factory, not a provider - use DatabaseProvider::set_pending_rocksdb_batch instead") } + + #[cfg(all(unix, feature = "rocksdb"))] + fn commit_pending_rocksdb_batches(&self) -> ProviderResult<()> { + unimplemented!("ProviderFactory is a factory, not a provider - use DatabaseProvider::commit_pending_rocksdb_batches instead") + } } impl>> ProviderFactory { diff 
--git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 940424da847..d693791fa5e 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -321,6 +321,15 @@ impl RocksDBProviderFactory for DatabaseProvider { fn set_pending_rocksdb_batch(&self, batch: rocksdb::WriteBatchWithTransaction) { self.pending_rocksdb_batches.lock().push(batch); } + + #[cfg(all(unix, feature = "rocksdb"))] + fn commit_pending_rocksdb_batches(&self) -> ProviderResult<()> { + let batches = std::mem::take(&mut *self.pending_rocksdb_batches.lock()); + for batch in batches { + self.rocksdb_provider.commit_batch(batch)?; + } + Ok(()) + } } impl> ChainSpecProvider diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 7f322c74927..0cc85f43c49 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -175,6 +175,11 @@ impl RocksDBBuilder { options.set_log_level(log_level); + // Delete obsolete WAL files immediately after all column families have flushed. + // Both set to 0 means "delete ASAP, no archival". + options.set_wal_ttl_seconds(0); + options.set_wal_size_limit_mb(0); + // Statistics can view from RocksDB log file if enable_statistics { options.enable_statistics(); @@ -836,8 +841,8 @@ impl RocksDBProvider { /// Flushes pending writes for the specified tables to disk. /// /// This performs a flush of: - /// 1. The Write-Ahead Log (WAL) with sync - /// 2. The column family memtables for the specified table names to SST files + /// 1. The column family memtables for the specified table names to SST files + /// 2. The Write-Ahead Log (WAL) with sync /// /// After this call completes, all data for the specified tables is durably persisted to disk. 
/// @@ -847,15 +852,6 @@ impl RocksDBProvider { pub fn flush(&self, tables: &[&'static str]) -> ProviderResult<()> { let db = self.0.db_rw(); - db.flush_wal(true).map_err(|e| { - ProviderError::Database(DatabaseError::Write(Box::new(DatabaseWriteError { - info: DatabaseErrorInfo { message: e.to_string().into(), code: -1 }, - operation: DatabaseWriteOperation::Flush, - table_name: "WAL", - key: Vec::new(), - }))) - })?; - for cf_name in tables { if let Some(cf) = db.cf_handle(cf_name) { db.flush_cf(&cf).map_err(|e| { @@ -869,6 +865,15 @@ impl RocksDBProvider { } } + db.flush_wal(true).map_err(|e| { + ProviderError::Database(DatabaseError::Write(Box::new(DatabaseWriteError { + info: DatabaseErrorInfo { message: e.to_string().into(), code: -1 }, + operation: DatabaseWriteOperation::Flush, + table_name: "WAL", + key: Vec::new(), + }))) + })?; + Ok(()) } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 64eff68b03f..4f1e620bbba 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -31,7 +31,10 @@ impl RocksDBProviderFactory for NoopProvider< } #[cfg(all(unix, feature = "rocksdb"))] - fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) { - // No-op for NoopProvider + fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) {} + + #[cfg(all(unix, feature = "rocksdb"))] + fn commit_pending_rocksdb_batches(&self) -> ProviderResult<()> { + Ok(()) } } diff --git a/crates/storage/provider/src/traits/rocksdb_provider.rs b/crates/storage/provider/src/traits/rocksdb_provider.rs index 06548d22752..02332a9ccfd 100644 --- a/crates/storage/provider/src/traits/rocksdb_provider.rs +++ b/crates/storage/provider/src/traits/rocksdb_provider.rs @@ -19,6 +19,14 @@ pub trait RocksDBProviderFactory { #[cfg(all(unix, feature = "rocksdb"))] fn set_pending_rocksdb_batch(&self, batch: rocksdb::WriteBatchWithTransaction); 
+ /// Takes all pending `RocksDB` batches and commits them. + /// + /// This drains the pending batches from the lock and commits each one using the `RocksDB` + /// provider. Can be called before flush to persist `RocksDB` writes independently of the + /// full commit path. + #[cfg(all(unix, feature = "rocksdb"))] + fn commit_pending_rocksdb_batches(&self) -> ProviderResult<()>; + /// Executes a closure with a `RocksDB` transaction for reading. /// /// This helper encapsulates all the cfg-gated `RocksDB` transaction handling for reads. @@ -154,6 +162,10 @@ mod tests { } fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) {} + + fn commit_pending_rocksdb_batches(&self) -> ProviderResult<()> { + Ok(()) + } } #[test] From 3648483512044997d97b8020b2a2d2790e081d02 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 23 Jan 2026 19:59:10 +0000 Subject: [PATCH 184/267] feat(rocksdb): add WAL size tracking metric and Grafana dashboard (#21295) Co-authored-by: Amp --- crates/cli/commands/src/db/stats.rs | 10 +++ crates/node/metrics/src/server.rs | 5 ++ crates/storage/provider/src/providers/mod.rs | 3 +- .../provider/src/providers/rocksdb/mod.rs | 3 +- .../src/providers/rocksdb/provider.rs | 62 +++++++++++++++++++ .../provider/src/providers/rocksdb_stub.rs | 24 +++++++ etc/grafana/dashboards/overview.json | 38 ++++++++++-- 7 files changed, 139 insertions(+), 6 deletions(-) diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 0b73727a608..62c8af1f407 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -205,6 +205,16 @@ impl Command { .add_cell(Cell::new(human_bytes(total_size as f64))) .add_cell(Cell::new(human_bytes(total_pending as f64))); table.add_row(row); + + let wal_size = tool.provider_factory.rocksdb_provider().wal_size_bytes(); + let mut row = Row::new(); + row.add_cell(Cell::new("WAL")) + .add_cell(Cell::new("")) + 
.add_cell(Cell::new("")) + .add_cell(Cell::new("")) + .add_cell(Cell::new(human_bytes(wal_size as f64))) + .add_cell(Cell::new("")); + table.add_row(row); } table diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index ea24e6572ee..9ef68cf3033 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -257,6 +257,11 @@ fn describe_rocksdb_metrics() { Unit::Bytes, "The size of memtables for a RocksDB table" ); + describe_gauge!( + "rocksdb.wal_size", + Unit::Bytes, + "The total size of WAL (Write-Ahead Log) files. Important: this is not included in table_size or sst_size metrics" + ); } #[cfg(all(feature = "jemalloc", unix))] diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index c477ccbb987..91aff23fe9d 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -39,7 +39,8 @@ pub use consistent::ConsistentProvider; pub(crate) mod rocksdb; pub use rocksdb::{ - RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBTableStats, RocksTx, + RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBStats, RocksDBTableStats, + RocksTx, }; /// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy diff --git a/crates/storage/provider/src/providers/rocksdb/mod.rs b/crates/storage/provider/src/providers/rocksdb/mod.rs index efab03e2afd..219a3ebfbe9 100644 --- a/crates/storage/provider/src/providers/rocksdb/mod.rs +++ b/crates/storage/provider/src/providers/rocksdb/mod.rs @@ -6,5 +6,6 @@ mod provider; pub(crate) use provider::{PendingRocksDBBatches, RocksDBWriteCtx}; pub use provider::{ - RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBTableStats, RocksTx, + RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBStats, RocksDBTableStats, + RocksTx, }; diff --git 
a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 0cc85f43c49..06e5837a917 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -57,6 +57,19 @@ pub struct RocksDBTableStats { pub pending_compaction_bytes: u64, } +/// Database-level statistics for `RocksDB`. +/// +/// Contains both per-table statistics and DB-level metrics like WAL size. +#[derive(Debug, Clone)] +pub struct RocksDBStats { + /// Statistics for each table (column family). + pub tables: Vec, + /// Total size of WAL (Write-Ahead Log) files in bytes. + /// + /// WAL is shared across all tables and not included in per-table metrics. + pub wal_size_bytes: u64, +} + /// Context for `RocksDB` block writes. #[derive(Clone)] pub(crate) struct RocksDBWriteCtx { @@ -457,6 +470,31 @@ impl RocksDBProviderInner { } } + /// Returns the path to the database directory. + fn path(&self) -> &Path { + match self { + Self::ReadWrite { db, .. } => db.path(), + Self::ReadOnly { db, .. } => db.path(), + } + } + + /// Returns the total size of WAL (Write-Ahead Log) files in bytes. + /// + /// WAL files have a `.log` extension in the `RocksDB` directory. + fn wal_size_bytes(&self) -> u64 { + let path = self.path(); + + match std::fs::read_dir(path) { + Ok(entries) => entries + .filter_map(|e| e.ok()) + .filter(|e| e.path().extension().is_some_and(|ext| ext == "log")) + .filter_map(|e| e.metadata().ok()) + .map(|m| m.len()) + .sum(), + Err(_) => 0, + } + } + /// Returns statistics for all column families in the database. fn table_stats(&self) -> Vec { let mut stats = Vec::new(); @@ -515,6 +553,11 @@ impl RocksDBProviderInner { stats } + + /// Returns database-level statistics including per-table stats and WAL size. 
+ fn db_stats(&self) -> RocksDBStats { + RocksDBStats { tables: self.table_stats(), wal_size_bytes: self.wal_size_bytes() } + } } impl fmt::Debug for RocksDBProviderInner { @@ -595,6 +638,9 @@ impl DatabaseMetrics for RocksDBProvider { )); } + // WAL size (DB-level, shared across all tables) + metrics.push(("rocksdb.wal_size", self.wal_size_bytes() as f64, vec![])); + metrics } } @@ -838,6 +884,22 @@ impl RocksDBProvider { self.0.table_stats() } + /// Returns the total size of WAL (Write-Ahead Log) files in bytes. + /// + /// This scans the `RocksDB` directory for `.log` files and sums their sizes. + /// WAL files can be significant (e.g., 2.7GB observed) and are not included + /// in `table_size`, `sst_size`, or `memtable_size` metrics. + pub fn wal_size_bytes(&self) -> u64 { + self.0.wal_size_bytes() + } + + /// Returns database-level statistics including per-table stats and WAL size. + /// + /// This combines [`Self::table_stats`] and [`Self::wal_size_bytes`] into a single struct. + pub fn db_stats(&self) -> RocksDBStats { + self.0.db_stats() + } + /// Flushes pending writes for the specified tables to disk. /// /// This performs a flush of: diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs index 31c38103e3d..822bafd7e8f 100644 --- a/crates/storage/provider/src/providers/rocksdb_stub.rs +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -32,6 +32,15 @@ pub struct RocksDBTableStats { pub pending_compaction_bytes: u64, } +/// Database-level statistics for `RocksDB` - stub. +#[derive(Debug, Clone)] +pub struct RocksDBStats { + /// Statistics for each table (column family). + pub tables: Vec, + /// Total size of WAL (Write-Ahead Log) files in bytes. + pub wal_size_bytes: u64, +} + /// Context for `RocksDB` block writes (stub). 
#[derive(Debug, Clone)] #[allow(dead_code)] @@ -89,6 +98,21 @@ impl RocksDBProvider { Ok(()) } + /// Returns the total size of WAL (Write-Ahead Log) files in bytes (stub implementation). + /// + /// Returns 0 since there is no `RocksDB` when the feature is disabled. + pub const fn wal_size_bytes(&self) -> u64 { + 0 + } + + /// Returns database-level statistics including per-table stats and WAL size (stub + /// implementation). + /// + /// Returns empty stats since there is no `RocksDB` when the feature is disabled. + pub const fn db_stats(&self) -> RocksDBStats { + RocksDBStats { tables: Vec::new(), wal_size_bytes: 0 } + } + /// Flushes all pending writes to disk (stub implementation). /// /// This is a no-op since there is no `RocksDB` when the feature is disabled. diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index b81d90c6224..62dfcab9655 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -828,7 +828,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(reth_rocksdb_table_size{$instance_label=\"$instance\"}) or vector(0)", + "expr": "(sum(reth_rocksdb_table_size{$instance_label=\"$instance\"}) or vector(0)) + (sum(reth_rocksdb_wal_size{$instance_label=\"$instance\"}) or vector(0))", "hide": false, "instant": false, "legendFormat": "RocksDB", @@ -841,7 +841,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(reth_db_table_size{$instance_label=\"$instance\"}) + sum(reth_db_freelist{$instance_label=\"$instance\"} * reth_db_page_size{$instance_label=\"$instance\"}) + sum(reth_static_files_segment_size{$instance_label=\"$instance\"}) + (sum(reth_rocksdb_table_size{$instance_label=\"$instance\"}) or vector(0))", + "expr": "sum(reth_db_table_size{$instance_label=\"$instance\"}) + sum(reth_db_freelist{$instance_label=\"$instance\"} * reth_db_page_size{$instance_label=\"$instance\"}) + sum(reth_static_files_segment_size{$instance_label=\"$instance\"}) + 
(sum(reth_rocksdb_table_size{$instance_label=\"$instance\"}) or vector(0)) + (sum(reth_rocksdb_wal_size{$instance_label=\"$instance\"}) or vector(0))", "hide": false, "instant": false, "legendFormat": "Total", @@ -6771,6 +6771,17 @@ "legendFormat": "{{table}}", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_rocksdb_wal_size{$instance_label=\"$instance\"}", + "legendFormat": "WAL", + "range": true, + "refId": "B" } ], "title": "RocksDB Tables Size", @@ -7091,7 +7102,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum by (job) ( reth_rocksdb_table_size{$instance_label=\"$instance\"} )", + "expr": "sum by (job) ( reth_rocksdb_table_size{$instance_label=\"$instance\"} ) + (sum by (job) ( reth_rocksdb_wal_size{$instance_label=\"$instance\"} ) or vector(0))", "legendFormat": "__auto", "range": true, "refId": "A" @@ -12441,6 +12452,18 @@ "legendFormat": "__auto", "range": true, "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "label_replace(reth_rocksdb_wal_size{$instance_label=\"$instance\"}, \"table\", \"WAL\", \"\", \"\")", + "format": "table", + "legendFormat": "__auto", + "range": true, + "refId": "C" } ], "transformations": [ @@ -12464,6 +12487,12 @@ ], "operation": "aggregate" }, + "Value #C": { + "aggregations": [ + "lastNotNull" + ], + "operation": "aggregate" + }, "table": { "aggregations": [], "operation": "groupby" @@ -12489,7 +12518,8 @@ "renameByName": { "table": "Table", "Value #A (lastNotNull)": "SST Size", - "Value #B (lastNotNull)": "Memtable Size" + "Value #B (lastNotNull)": "Memtable Size", + "Value #C (lastNotNull)": "WAL Size" } } } From 963c26550a65fdc886c94ca990331a774eefd5be Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 23 Jan 2026 20:13:01 +0000 Subject: [PATCH 185/267] fix(trie): only clone required keys 
in on_prefetch_proofs (#21378) --- .../tree/src/tree/payload_processor/multiproof.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 472ea08a6c6..dce45447649 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -777,10 +777,21 @@ impl MultiProofTask { // [`MultiAddedRemovedKeys`]. Even if there are not any known removed keys for the account, // we still want to optimistically fetch extension children for the leaf addition case. // V2 multiproofs don't need this. + // + // Only clone the AddedRemovedKeys for accounts in the targets, not the entire accumulated + // set, to avoid O(n) cloning with many buffered blocks. let multi_added_removed_keys = if let VersionedMultiProofTargets::Legacy(legacy_targets) = &targets { self.multi_added_removed_keys.touch_accounts(legacy_targets.keys().copied()); - Some(Arc::new(self.multi_added_removed_keys.clone())) + Some(Arc::new(MultiAddedRemovedKeys { + account: self.multi_added_removed_keys.account.clone(), + storages: legacy_targets + .keys() + .filter_map(|k| { + self.multi_added_removed_keys.storages.get(k).map(|v| (*k, v.clone())) + }) + .collect(), + })) } else { None }; From 9a4c6d8a118c15115763f5f92cda1723218a8b91 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 23 Jan 2026 20:11:47 +0000 Subject: [PATCH 186/267] feat(rocksdb): static file based healing for TransactionHashNumbers (#21343) --- .../src/providers/rocksdb/invariants.rs | 221 +++++++++--------- 1 file changed, 113 insertions(+), 108 deletions(-) diff --git a/crates/storage/provider/src/providers/rocksdb/invariants.rs b/crates/storage/provider/src/providers/rocksdb/invariants.rs index 63901ac74e6..286e3a41ecc 100644 --- 
a/crates/storage/provider/src/providers/rocksdb/invariants.rs +++ b/crates/storage/provider/src/providers/rocksdb/invariants.rs @@ -9,13 +9,12 @@ use crate::StaticFileProviderFactory; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::BlockNumber; use rayon::prelude::*; -use reth_db::cursor::DbCursorRO; -use reth_db_api::{tables, transaction::DbTx}; +use reth_db_api::tables; use reth_stages_types::StageId; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader, - StorageSettingsCache, TransactionsProvider, + BlockBodyIndicesProvider, ChangeSetReader, DBProvider, StageCheckpointReader, + StorageChangeSetReader, StorageSettingsCache, TransactionsProvider, }; use reth_storage_errors::provider::ProviderResult; use std::collections::HashSet; @@ -55,15 +54,16 @@ impl RocksDBProvider { + StageCheckpointReader + StorageSettingsCache + StaticFileProviderFactory + + BlockBodyIndicesProvider + StorageChangeSetReader + ChangeSetReader + TransactionsProvider, { let mut unwind_target: Option = None; - // Check TransactionHashNumbers if stored in RocksDB + // Heal TransactionHashNumbers if stored in RocksDB if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb && - let Some(target) = self.check_transaction_hash_numbers(provider)? + let Some(target) = self.heal_transaction_hash_numbers(provider)? { unwind_target = Some(unwind_target.map_or(target, |t| t.min(target))); } @@ -85,19 +85,13 @@ impl RocksDBProvider { Ok(unwind_target) } - /// Checks invariants for the `TransactionHashNumbers` table. + /// Heals the `TransactionHashNumbers` table. /// - /// Returns a block number to unwind to if MDBX is behind the checkpoint. - /// If static files are ahead of MDBX, excess `RocksDB` entries are pruned (healed). 
- /// - /// # Approach - /// - /// Instead of iterating `RocksDB` entries (which is expensive and doesn't give us the - /// tx range we need), we use static files and MDBX to determine what needs pruning: - /// - Static files are committed before `RocksDB`, so they're at least at the same height - /// - MDBX `TransactionBlocks` tells us what's been fully committed - /// - If static files have more transactions than MDBX, prune the excess range - fn check_transaction_hash_numbers( + /// - Fast path: if checkpoint == 0 AND `RocksDB` has data, clear everything + /// - If `sf_tip` < checkpoint, return unwind target (static files behind) + /// - If `sf_tip` == checkpoint, nothing to do + /// - If `sf_tip` > checkpoint, heal via transaction ranges in batches + fn heal_transaction_hash_numbers( &self, provider: &Provider, ) -> ProviderResult> @@ -105,73 +99,99 @@ impl RocksDBProvider { Provider: DBProvider + StageCheckpointReader + StaticFileProviderFactory + + BlockBodyIndicesProvider + TransactionsProvider, { - // Get the TransactionLookup stage checkpoint let checkpoint = provider .get_stage_checkpoint(StageId::TransactionLookup)? .map(|cp| cp.block_number) .unwrap_or(0); - // Get last tx_num from MDBX - this tells us what MDBX has fully committed - let mut cursor = provider.tx_ref().cursor_read::()?; - let mdbx_last = cursor.last()?; + let sf_tip = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Transactions) + .unwrap_or(0); + + // Fast path: if checkpoint is 0 and RocksDB has data, clear everything. 
+ if checkpoint == 0 && self.first::()?.is_some() { + tracing::info!( + target: "reth::providers::rocksdb", + "TransactionHashNumbers has data but checkpoint is 0, clearing all" + ); + self.clear::()?; + return Ok(None); + } - // Get highest tx_num from static files - this tells us what tx data is available - let highest_static_tx = provider + if sf_tip < checkpoint { + // This should never happen in normal operation - static files are always committed + // before RocksDB. If we get here, something is seriously wrong. The unwind is a + // best-effort attempt but is probably futile. + tracing::warn!( + target: "reth::providers::rocksdb", + sf_tip, + checkpoint, + "TransactionHashNumbers: static file tip behind checkpoint, unwind needed" + ); + return Ok(Some(sf_tip)); + } + + // sf_tip == checkpoint - nothing to do + if sf_tip == checkpoint { + return Ok(None); + } + + // Get end tx from static files (authoritative for sf_tip) + let sf_tip_end_tx = provider .static_file_provider() - .get_highest_static_file_tx(StaticFileSegment::Transactions); + .get_highest_static_file_tx(StaticFileSegment::Transactions) + .unwrap_or(0); - match (mdbx_last, highest_static_tx) { - (Some((mdbx_tx, mdbx_block)), Some(highest_tx)) if highest_tx > mdbx_tx => { - // Static files are ahead of MDBX - prune RocksDB entries for the excess range. - // This is the common case during recovery from a crash during unwinding. 
- tracing::info!( - target: "reth::providers::rocksdb", - mdbx_last_tx = mdbx_tx, - mdbx_block, - highest_static_tx = highest_tx, - "Static files ahead of MDBX, pruning TransactionHashNumbers excess data" - ); - self.prune_transaction_hash_numbers_in_range(provider, (mdbx_tx + 1)..=highest_tx)?; - - // After pruning, check if MDBX is behind checkpoint - if checkpoint > mdbx_block { - tracing::warn!( - target: "reth::providers::rocksdb", - mdbx_block, - checkpoint, - "MDBX behind checkpoint after pruning, unwind needed" - ); - return Ok(Some(mdbx_block)); - } - } - (Some((_mdbx_tx, mdbx_block)), _) => { - // MDBX and static files are in sync (or static files don't have more data). - // Check if MDBX is behind checkpoint. - if checkpoint > mdbx_block { - tracing::warn!( - target: "reth::providers::rocksdb", - mdbx_block, - checkpoint, - "MDBX behind checkpoint, unwind needed" - ); - return Ok(Some(mdbx_block)); - } - } - (None, Some(highest_tx)) => { - // MDBX has no transactions but static files have data. - // This means RocksDB might have stale entries - prune them all. - tracing::info!( - target: "reth::providers::rocksdb", - highest_static_tx = highest_tx, - "MDBX empty but static files have data, pruning all TransactionHashNumbers" - ); - self.prune_transaction_hash_numbers_in_range(provider, 0..=highest_tx)?; - } - (None, None) => { - // Both MDBX and static files are empty, nothing to check. - } + // Get the first tx after the checkpoint block from MDBX (authoritative up to checkpoint) + let checkpoint_next_tx = provider + .block_body_indices(checkpoint)? + .map(|indices| indices.next_tx_num()) + .unwrap_or(0); + + if sf_tip_end_tx < checkpoint_next_tx { + // This should never happen in normal operation - static files should have all + // transactions up to sf_tip. If we get here, something is seriously wrong. + // The unwind is a best-effort attempt but is probably futile. 
+ tracing::warn!( + target: "reth::providers::rocksdb", + sf_tip_end_tx, + checkpoint_next_tx, + checkpoint, + sf_tip, + "TransactionHashNumbers: static file tx tip behind checkpoint, unwind needed" + ); + return Ok(Some(sf_tip)); + } + + tracing::info!( + target: "reth::providers::rocksdb", + checkpoint, + sf_tip, + checkpoint_next_tx, + sf_tip_end_tx, + "TransactionHashNumbers: healing via transaction ranges" + ); + + const BATCH_SIZE: u64 = 10_000; + let mut batch_start = checkpoint_next_tx; + + while batch_start <= sf_tip_end_tx { + let batch_end = batch_start.saturating_add(BATCH_SIZE - 1).min(sf_tip_end_tx); + + tracing::debug!( + target: "reth::providers::rocksdb", + batch_start, + batch_end, + "Pruning TransactionHashNumbers batch" + ); + + self.prune_transaction_hash_numbers_in_range(provider, batch_start..=batch_end)?; + + batch_start = batch_end.saturating_add(1); } Ok(None) @@ -425,7 +445,7 @@ mod tests { BlockWriter, DatabaseProviderFactory, StageCheckpointWriter, TransactionsProvider, }; use alloy_primitives::{Address, B256}; - use reth_db::cursor::DbCursorRW; + use reth_db::cursor::{DbCursorRO, DbCursorRW}; use reth_db_api::{ models::{storage_sharded_key::StorageShardedKey, StorageSettings}, tables::{self, BlockNumberList}, @@ -520,13 +540,15 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); // RocksDB is empty but checkpoint says block 100 was processed. - // This is treated as a first-run/migration scenario - no unwind needed. + // Since static file tip defaults to 0 when None, and 0 < 100, an unwind is triggered. let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, None, "Empty data with checkpoint is treated as first run"); + assert_eq!(result, Some(0), "Static file tip (0) behind checkpoint (100) triggers unwind"); } + /// Tests that when checkpoint=0 and `RocksDB` has data, all entries are pruned. + /// This simulates a crash recovery scenario where the checkpoint was lost. 
#[test] - fn test_check_consistency_mdbx_empty_static_files_have_data_prunes_rocksdb() { + fn test_check_consistency_checkpoint_zero_with_rocksdb_data_prunes_all() { let temp_dir = TempDir::new().unwrap(); let rocksdb = RocksDBBuilder::new(temp_dir.path()) .with_table::() @@ -564,22 +586,12 @@ mod tests { provider.commit().unwrap(); } - // Simulate crash recovery: MDBX was reset but static files and RocksDB still have data. - // Clear TransactionBlocks to simulate empty MDBX state. + // Explicitly clear the TransactionLookup checkpoint to simulate crash recovery { let provider = factory.database_provider_rw().unwrap(); - let mut cursor = provider.tx_ref().cursor_write::().unwrap(); - let mut to_delete = Vec::new(); - let mut walker = cursor.walk(Some(0)).unwrap(); - while let Some((tx_num, _)) = walker.next().transpose().unwrap() { - to_delete.push(tx_num); - } - drop(walker); - for tx_num in to_delete { - cursor.seek_exact(tx_num).unwrap(); - cursor.delete_current().unwrap(); - } - // No checkpoint set (checkpoint = 0) + provider + .save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(0)) + .unwrap(); provider.commit().unwrap(); } @@ -588,12 +600,12 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // MDBX TransactionBlocks is empty, but static files have transaction data. - // This means RocksDB has stale data that should be pruned (healed). + // checkpoint = 0 but RocksDB has data. + // This means RocksDB has stale data that should be cleared. 
let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!(result, None, "Should heal by pruning, no unwind needed"); + assert_eq!(result, None, "Should heal by clearing, no unwind needed"); - // Verify data was pruned + // Verify data was cleared for hash in &tx_hashes { assert!( rocksdb.get::(*hash).unwrap().is_none(), @@ -669,7 +681,6 @@ mod tests { "RocksDB should be empty after pruning" ); } - #[test] fn test_check_consistency_mdbx_behind_checkpoint_needs_unwind() { let temp_dir = TempDir::new().unwrap(); @@ -707,9 +718,9 @@ mod tests { provider.commit().unwrap(); } - // Now simulate a scenario where checkpoint is ahead of MDBX. - // This happens when the checkpoint was saved but MDBX data was lost/corrupted. // Set checkpoint to block 10 (beyond our actual data at block 2) + // sf_tip is at block 2, checkpoint is at block 10 + // Since sf_tip < checkpoint, we need to unwind to sf_tip { let provider = factory.database_provider_rw().unwrap(); provider @@ -720,15 +731,9 @@ mod tests { let provider = factory.database_provider_ro().unwrap(); - // MDBX has data up to block 2, but checkpoint says block 10 was processed. - // The static files highest tx matches MDBX last tx (both at block 2). - // Checkpoint > mdbx_block means we need to unwind to rebuild. 
+ // sf_tip (2) < checkpoint (10), so unwind to sf_tip is needed let result = rocksdb.check_consistency(&provider).unwrap(); - assert_eq!( - result, - Some(2), - "Should require unwind to block 2 (MDBX's last block) to rebuild from checkpoint" - ); + assert_eq!(result, Some(2), "sf_tip < checkpoint requires unwind to sf_tip"); } #[test] From 9285f7eafc9b2bf59be4c1e8a1514abe86615474 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 23 Jan 2026 21:14:55 +0000 Subject: [PATCH 187/267] ci: use depot for docker (#20380) Co-authored-by: Georgios Konstantopoulos --- .github/workflows/docker-git.yml | 54 ---------- .github/workflows/docker-nightly.yml | 65 ------------ .github/workflows/docker.yml | 143 ++++++++++++++------------- Dockerfile.depot | 86 ++++++++++++++++ docker-bake.hcl | 104 +++++++++++++++++++ 5 files changed, 263 insertions(+), 189 deletions(-) delete mode 100644 .github/workflows/docker-git.yml delete mode 100644 .github/workflows/docker-nightly.yml create mode 100644 Dockerfile.depot create mode 100644 docker-bake.hcl diff --git a/.github/workflows/docker-git.yml b/.github/workflows/docker-git.yml deleted file mode 100644 index 68bcdad0dfd..00000000000 --- a/.github/workflows/docker-git.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Publishes the Docker image, only to be used with `workflow_dispatch`. The -# images from this workflow will be tagged with the git sha of the branch used -# and will NOT tag it as `latest`. 
- -name: docker-git - -on: - workflow_dispatch: {} - -env: - REPO_NAME: ${{ github.repository_owner }}/reth - IMAGE_NAME: ${{ github.repository_owner }}/reth - OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth - CARGO_TERM_COLOR: always - DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth - OP_DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/op-reth - DOCKER_USERNAME: ${{ github.actor }} - GIT_SHA: ${{ github.sha }} - -jobs: - build: - name: build and push - runs-on: ubuntu-24.04 - permissions: - packages: write - contents: read - strategy: - fail-fast: false - matrix: - build: - - name: 'Build and push the git-sha-tagged reth image' - command: 'make PROFILE=maxperf GIT_SHA=$GIT_SHA docker-build-push-git-sha' - - name: 'Build and push the git-sha-tagged op-reth image' - command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME GIT_SHA=$GIT_SHA PROFILE=maxperf op-docker-build-push-git-sha' - steps: - - uses: actions/checkout@v6 - - uses: rui314/setup-mold@v1 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Install cross main - id: cross_main - run: | - cargo install cross --git https://github.com/cross-rs/cross - - name: Log in to Docker - run: | - echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin - - name: Set up Docker builder - run: | - docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 - docker buildx create --use --name cross-builder - - name: Build and push ${{ matrix.build.name }} - run: ${{ matrix.build.command }} diff --git a/.github/workflows/docker-nightly.yml b/.github/workflows/docker-nightly.yml deleted file mode 100644 index 67d276f094d..00000000000 --- a/.github/workflows/docker-nightly.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Publishes the nightly Docker image. 
- -name: docker-nightly - -on: - workflow_dispatch: - schedule: - - cron: "0 1 * * *" -env: - REPO_NAME: ${{ github.repository_owner }}/reth - IMAGE_NAME: ${{ github.repository_owner }}/reth - OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth - CARGO_TERM_COLOR: always - DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth - OP_DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/op-reth - DOCKER_USERNAME: ${{ github.actor }} - -jobs: - build: - name: build and push - runs-on: ubuntu-24.04 - permissions: - packages: write - contents: read - strategy: - fail-fast: false - matrix: - build: - - name: 'Build and push the nightly reth image' - command: 'make PROFILE=maxperf docker-build-push-nightly' - - name: 'Build and push the nightly edge profiling reth image' - command: 'make PROFILE=profiling docker-build-push-nightly-edge-profiling' - - name: 'Build and push the nightly profiling reth image' - command: 'make PROFILE=profiling docker-build-push-nightly-profiling' - - name: 'Build and push the nightly op-reth image' - command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-nightly' - - name: 'Build and push the nightly edge profiling op-reth image' - command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=profiling op-docker-build-push-nightly-edge-profiling' - - name: 'Build and push the nightly profiling op-reth image' - command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=profiling op-docker-build-push-nightly-profiling' - steps: - - uses: actions/checkout@v6 - - name: Remove bloatware - uses: laverdet/remove-bloatware@v1.0.0 - with: - docker: true - lang: rust - - uses: rui314/setup-mold@v1 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Install cross main - id: cross_main - run: | - cargo install cross --git https://github.com/cross-rs/cross - - 
name: Log in to Docker - run: | - echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin - - name: Set up Docker builder - run: | - docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 - docker buildx create --use --name cross-builder - - name: Build and push ${{ matrix.build.name }} - run: ${{ matrix.build.command }} \ No newline at end of file diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index e9226772251..3b8f5b4e9bd 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,9 @@ -# Publishes the Docker image. +# Publishes Docker images. +# +# Triggers: +# - Push tag v*: builds release (RC or latest) +# - Schedule: builds nightly + profiling +# - Manual: builds git-sha or nightly name: docker @@ -6,84 +11,82 @@ on: push: tags: - v* - -env: - IMAGE_NAME: ${{ github.repository_owner }}/reth - OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth - CARGO_TERM_COLOR: always - DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth - OP_DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/op-reth - DOCKER_USERNAME: ${{ github.actor }} + schedule: + - cron: "0 1 * * *" + workflow_dispatch: + inputs: + build_type: + description: "Build type" + required: true + type: choice + options: + - git-sha + - nightly + default: git-sha + dry_run: + description: "Skip pushing images (dry run)" + required: false + type: boolean + default: false jobs: - build-rc: - if: contains(github.ref, '-rc') - name: build and push as release candidate + build: + name: Build Docker images runs-on: ubuntu-24.04 permissions: packages: write contents: read - strategy: - fail-fast: false - matrix: - build: - - name: "Build and push reth image" - command: "make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push" - - name: "Build and push op-reth image" - command: "make IMAGE_NAME=$OP_IMAGE_NAME 
DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push" + id-token: write steps: - uses: actions/checkout@v6 - - uses: rui314/setup-mold@v1 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + + - name: Set up Depot CLI + uses: depot/setup-action@v1 + + - name: Log in to GHCR + uses: docker/login-action@v3 with: - cache-on-failure: true - - name: Install cross main - id: cross_main - run: | - cargo install cross --git https://github.com/cross-rs/cross - - name: Log in to Docker - run: | - echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin - - name: Set up Docker builder + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Determine build parameters + id: params run: | - docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 - docker buildx create --use --name cross-builder - - name: Build and push ${{ matrix.build.name }} - run: ${{ matrix.build.command }} + REGISTRY="ghcr.io/${{ github.repository_owner }}" - build: - if: ${{ !contains(github.ref, '-rc') }} - name: build and push as latest - runs-on: ubuntu-24.04 - permissions: - packages: write - contents: read - strategy: - fail-fast: false - matrix: - build: - - name: "Build and push reth image" - command: "make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest" - - name: "Build and push op-reth image" - command: "make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest" - steps: - - uses: actions/checkout@v6 - - uses: rui314/setup-mold@v1 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + if [[ "${{ github.event_name }}" == "push" ]]; then + VERSION="${GITHUB_REF#refs/tags/}" + echo "targets=ethereum optimism" >> "$GITHUB_OUTPUT" + + # Add 'latest' tag for non-RC releases + if [[ ! 
"$VERSION" =~ -rc ]]; then + echo "ethereum_tags=${REGISTRY}/reth:${VERSION},${REGISTRY}/reth:latest" >> "$GITHUB_OUTPUT" + echo "optimism_tags=${REGISTRY}/op-reth:${VERSION},${REGISTRY}/op-reth:latest" >> "$GITHUB_OUTPUT" + else + echo "ethereum_tags=${REGISTRY}/reth:${VERSION}" >> "$GITHUB_OUTPUT" + echo "optimism_tags=${REGISTRY}/op-reth:${VERSION}" >> "$GITHUB_OUTPUT" + fi + + elif [[ "${{ github.event_name }}" == "schedule" ]] || [[ "${{ inputs.build_type }}" == "nightly" ]]; then + echo "targets=nightly" >> "$GITHUB_OUTPUT" + echo "ethereum_tags=${REGISTRY}/reth:nightly" >> "$GITHUB_OUTPUT" + echo "optimism_tags=${REGISTRY}/op-reth:nightly" >> "$GITHUB_OUTPUT" + + else + # git-sha build + echo "targets=ethereum optimism" >> "$GITHUB_OUTPUT" + echo "ethereum_tags=${REGISTRY}/reth:${{ github.sha }}" >> "$GITHUB_OUTPUT" + echo "optimism_tags=${REGISTRY}/op-reth:${{ github.sha }}" >> "$GITHUB_OUTPUT" + fi + + - name: Build and push images + uses: depot/bake-action@v1 with: - cache-on-failure: true - - name: Install cross main - id: cross_main - run: | - cargo install cross --git https://github.com/cross-rs/cross - - name: Log in to Docker - run: | - echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin - - name: Set up Docker builder - run: | - docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 - docker buildx create --use --name cross-builder - - name: Build and push ${{ matrix.build.name }} - run: ${{ matrix.build.command }} + project: ${{ vars.DEPOT_PROJECT_ID }} + files: docker-bake.hcl + targets: ${{ steps.params.outputs.targets }} + push: ${{ !(github.event_name == 'workflow_dispatch' && inputs.dry_run) }} + set: | + ethereum.tags=${{ steps.params.outputs.ethereum_tags }} + optimism.tags=${{ steps.params.outputs.optimism_tags }} diff --git a/Dockerfile.depot b/Dockerfile.depot new file mode 100644 index 00000000000..46b76d54730 --- /dev/null +++ b/Dockerfile.depot @@ -0,0 +1,86 @@ +# 
syntax=docker.io/docker/dockerfile:1.7-labs + +# Unified Dockerfile for reth and op-reth, optimized for Depot builds +# Usage: +# reth: --build-arg BINARY=reth +# op-reth: --build-arg BINARY=op-reth --build-arg MANIFEST_PATH=crates/optimism/bin + +FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef +WORKDIR /app + +LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth +LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" + +RUN apt-get update && apt-get install -y libclang-dev pkg-config + +# Install sccache for compilation caching +RUN cargo install sccache --locked +ENV RUSTC_WRAPPER=sccache +ENV SCCACHE_DIR=/sccache + +# Builds a cargo-chef plan +FROM chef AS planner +COPY --exclude=.git . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json + +# Binary to build (reth or op-reth) +ARG BINARY=reth + +# Manifest path for the binary +ARG MANIFEST_PATH=bin/reth + +# Build profile, release by default +ARG BUILD_PROFILE=release +ENV BUILD_PROFILE=$BUILD_PROFILE + +# Extra Cargo flags +ARG RUSTFLAGS="" +ENV RUSTFLAGS="$RUSTFLAGS" + +# Extra Cargo features +ARG FEATURES="" +ENV FEATURES=$FEATURES + +# Build dependencies with cache mounts +RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ + --mount=type=cache,target=/usr/local/cargo/git,sharing=locked \ + --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ + cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --locked --recipe-path recipe.json --manifest-path $MANIFEST_PATH/Cargo.toml + +# Build application with cache mounts +COPY --exclude=.git . . 
+RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ + --mount=type=cache,target=/usr/local/cargo/git,sharing=locked \ + --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ + cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin $BINARY --manifest-path $MANIFEST_PATH/Cargo.toml + +# Copy binary to a known location (ARG not resolved in COPY) +# Note: Custom profiles like maxperf/profiling output to target//, not target/release/ +RUN cp /app/target/$BUILD_PROFILE/$BINARY /app/binary || \ + cp /app/target/release/$BINARY /app/binary + +FROM ubuntu:24.04 AS runtime +WORKDIR /app + +# Binary name for entrypoint +ARG BINARY=reth + +# Install runtime dependencies +RUN apt-get update && \ + apt-get install -y --no-install-recommends ca-certificates && \ + rm -rf /var/lib/apt/lists/* + +# Copy binary from build stage and create canonical symlink for entrypoint +COPY --from=builder /app/binary /usr/local/bin/ +RUN mv /usr/local/bin/binary /usr/local/bin/$BINARY && \ + ln -s /usr/local/bin/$BINARY /usr/local/bin/reth-binary && \ + chmod +x /usr/local/bin/$BINARY + +# Copy licenses +COPY LICENSE-* ./ + +EXPOSE 30303 30303/udp 9001 8545 8546 +ENTRYPOINT ["/usr/local/bin/reth-binary"] diff --git a/docker-bake.hcl b/docker-bake.hcl new file mode 100644 index 00000000000..6dcca621ae1 --- /dev/null +++ b/docker-bake.hcl @@ -0,0 +1,104 @@ +// Docker Bake configuration for reth and op-reth images +// Usage: +// docker buildx bake ethereum # Build reth +// docker buildx bake optimism # Build op-reth +// docker buildx bake # Build all + +variable "REGISTRY" { + default = "ghcr.io/paradigmxyz" +} + +variable "TAG" { + default = "latest" +} + +variable "BUILD_PROFILE" { + default = "maxperf" +} + +variable "FEATURES" { + default = "jemalloc asm-keccak min-debug-logs" +} + +// Common settings for all targets +group "default" { + targets = ["ethereum", "optimism"] +} + +group "nightly" { + targets = ["ethereum", "ethereum-profiling", 
"ethereum-edge-profiling", "optimism", "optimism-profiling", "optimism-edge-profiling"] +} + +// Base target with shared configuration +target "_base" { + dockerfile = "Dockerfile.depot" + platforms = ["linux/amd64", "linux/arm64"] + args = { + BUILD_PROFILE = "${BUILD_PROFILE}" + FEATURES = "${FEATURES}" + } +} + +// Ethereum (reth) +target "ethereum" { + inherits = ["_base"] + args = { + BINARY = "reth" + MANIFEST_PATH = "bin/reth" + } + tags = ["${REGISTRY}/reth:${TAG}"] +} + +target "ethereum-profiling" { + inherits = ["_base"] + args = { + BINARY = "reth" + MANIFEST_PATH = "bin/reth" + BUILD_PROFILE = "profiling" + FEATURES = "jemalloc jemalloc-prof asm-keccak min-debug-logs" + } + tags = ["${REGISTRY}/reth:nightly-profiling"] +} + +target "ethereum-edge-profiling" { + inherits = ["_base"] + args = { + BINARY = "reth" + MANIFEST_PATH = "bin/reth" + BUILD_PROFILE = "profiling" + FEATURES = "jemalloc jemalloc-prof asm-keccak min-debug-logs edge" + } + tags = ["${REGISTRY}/reth:nightly-edge-profiling"] +} + +// Optimism (op-reth) +target "optimism" { + inherits = ["_base"] + args = { + BINARY = "op-reth" + MANIFEST_PATH = "crates/optimism/bin" + } + tags = ["${REGISTRY}/op-reth:${TAG}"] +} + +target "optimism-profiling" { + inherits = ["_base"] + args = { + BINARY = "op-reth" + MANIFEST_PATH = "crates/optimism/bin" + BUILD_PROFILE = "profiling" + FEATURES = "jemalloc jemalloc-prof asm-keccak min-debug-logs" + } + tags = ["${REGISTRY}/op-reth:nightly-profiling"] +} + +target "optimism-edge-profiling" { + inherits = ["_base"] + args = { + BINARY = "op-reth" + MANIFEST_PATH = "crates/optimism/bin" + BUILD_PROFILE = "profiling" + FEATURES = "jemalloc jemalloc-prof asm-keccak min-debug-logs edge" + } + tags = ["${REGISTRY}/op-reth:nightly-edge-profiling"] +} From 820c112e8ecad6a87ebc61ab93eab52b825f1a4f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 23 Jan 2026 21:57:15 +0000 Subject: [PATCH 188/267] feat(engine): add 
metric for forkchoiceUpdated response -> newPayload (#21380) --- crates/engine/tree/src/tree/metrics.rs | 7 +++++++ crates/engine/tree/src/tree/mod.rs | 10 ++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index e97decbcc16..943661a5c5b 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -254,6 +254,8 @@ pub(crate) struct NewPayloadStatusMetrics { pub(crate) time_between_new_payloads: Histogram, /// Time from previous payload start to current payload start (total interval). pub(crate) new_payload_interval: Histogram, + /// Time diff between forkchoice updated call response and the next new payload call request. + pub(crate) forkchoice_updated_new_payload_time_diff: Histogram, } impl NewPayloadStatusMetrics { @@ -261,6 +263,7 @@ impl NewPayloadStatusMetrics { pub(crate) fn update_response_metrics( &mut self, start: Instant, + latest_forkchoice_updated_at: &mut Option, result: &Result, InsertBlockFatalError>, gas_used: u64, ) { @@ -293,6 +296,10 @@ impl NewPayloadStatusMetrics { self.new_payload_messages.increment(1); self.new_payload_latency.record(elapsed); self.new_payload_last.set(elapsed); + if let Some(latest_forkchoice_updated_at) = latest_forkchoice_updated_at.take() { + self.forkchoice_updated_new_payload_time_diff + .record(start - latest_forkchoice_updated_at); + } } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 1e4a6315946..2a5eb68bee2 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1514,10 +1514,12 @@ where let gas_used = payload.gas_used(); let num_hash = payload.num_hash(); let mut output = self.on_new_payload(payload); - self.metrics - .engine - .new_payload - .update_response_metrics(start, &output, gas_used); + self.metrics.engine.new_payload.update_response_metrics( + start, + &mut 
self.metrics.engine.forkchoice_updated.latest_finish_at, + &output, + gas_used, + ); let maybe_event = output.as_mut().ok().and_then(|out| out.event.take()); From ffbef9e3cdb01fa2412aff2cde7dc18550f67382 Mon Sep 17 00:00:00 2001 From: iPLAY888 <133153661+letmehateu@users.noreply.github.com> Date: Sat, 24 Jan 2026 00:59:19 +0300 Subject: [PATCH 189/267] chore: removed needless collect (#21381) --- crates/storage/provider/src/providers/consistent.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 7fadea95dac..076e0e3d1ff 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1985,14 +1985,11 @@ mod tests { database_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) }), - database_changesets - .iter() - .map(|block_changesets| { - block_changesets.iter().map(|(address, account, _)| { - (*address, Some(Some((*account).into())), []) - }) + database_changesets.iter().map(|block_changesets| { + block_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), []) }) - .collect::>(), + }), Vec::new(), ), first_block: first_database_block, From d5a36dcc001a8a085c3ce734e4bf1f48f58fec72 Mon Sep 17 00:00:00 2001 From: ethfanWilliam Date: Sat, 24 Jan 2026 02:26:07 +0400 Subject: [PATCH 190/267] perf(trie): parallelize merge_ancestors_into_overlay extend ops (#21379) --- crates/chain-state/src/deferred_trie.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs index 479c86cad5b..6ba84589008 100644 --- a/crates/chain-state/src/deferred_trie.rs +++ b/crates/chain-state/src/deferred_trie.rs @@ -284,8 +284,17 @@ impl DeferredTrieData { } // Extend with current block's sorted 
data last (takes precedence) - state_mut.extend_ref_and_sort(sorted_hashed_state); - nodes_mut.extend_ref_and_sort(sorted_trie_updates); + #[cfg(feature = "rayon")] + rayon::join( + || state_mut.extend_ref_and_sort(sorted_hashed_state), + || nodes_mut.extend_ref_and_sort(sorted_trie_updates), + ); + + #[cfg(not(feature = "rayon"))] + { + state_mut.extend_ref_and_sort(sorted_hashed_state); + nodes_mut.extend_ref_and_sort(sorted_trie_updates); + } overlay } From fb05a0654f756f6729b69b84a8d57875d18a3303 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 23 Jan 2026 22:32:21 +0000 Subject: [PATCH 191/267] fix(engine): use LazyTrieData::deferred for chain notification (#21383) Co-authored-by: Matthias Seitz --- crates/chain-state/src/in_memory.rs | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 311830dbc69..bd88f8eb75b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -17,7 +17,10 @@ use reth_primitives_traits::{ SignedTransaction, }; use reth_storage_api::StateProviderBox; -use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, LazyTrieData, TrieInputSorted}; +use reth_trie::{ + updates::TrieUpdatesSorted, HashedPostStateSorted, LazyTrieData, SortedTrieData, + TrieInputSorted, +}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; use tokio::sync::{broadcast, watch}; @@ -948,22 +951,36 @@ impl> NewCanonicalChain { match blocks { [] => Chain::default(), [first, rest @ ..] 
=> { + let trie_data_handle = first.trie_data_handle(); let mut chain = Chain::from_block( first.recovered_block().clone(), ExecutionOutcome::from(( first.execution_outcome().clone(), first.block_number(), )), - LazyTrieData::ready(first.hashed_state(), first.trie_updates()), + LazyTrieData::deferred(move || { + let trie_data = trie_data_handle.wait_cloned(); + SortedTrieData { + hashed_state: trie_data.hashed_state, + trie_updates: trie_data.trie_updates, + } + }), ); for exec in rest { + let trie_data_handle = exec.trie_data_handle(); chain.append_block( exec.recovered_block().clone(), ExecutionOutcome::from(( exec.execution_outcome().clone(), exec.block_number(), )), - LazyTrieData::ready(exec.hashed_state(), exec.trie_updates()), + LazyTrieData::deferred(move || { + let trie_data = trie_data_handle.wait_cloned(); + SortedTrieData { + hashed_state: trie_data.hashed_state, + trie_updates: trie_data.trie_updates, + } + }), ); } chain From eb788cc7cf23253209df97208cb17da72e166dee Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 24 Jan 2026 03:21:43 +0100 Subject: [PATCH 192/267] fix(docker): pass vergen git vars as build args (#21384) Co-authored-by: Amp --- .github/workflows/docker.yml | 11 +++++++++++ Dockerfile.depot | 8 ++++++++ docker-bake.hcl | 20 ++++++++++++++++++-- 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 3b8f5b4e9bd..c0b77e4ac0f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -50,6 +50,13 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Get git info for vergen + id: git + run: | + echo "sha=${{ github.sha }}" >> "$GITHUB_OUTPUT" + echo "describe=$(git describe --always --tags)" >> "$GITHUB_OUTPUT" + echo "dirty=false" >> "$GITHUB_OUTPUT" + - name: Determine build parameters id: params run: | @@ -82,6 +89,10 @@ jobs: - name: Build and push images uses: depot/bake-action@v1 + env: + 
VERGEN_GIT_SHA: ${{ steps.git.outputs.sha }} + VERGEN_GIT_DESCRIBE: ${{ steps.git.outputs.describe }} + VERGEN_GIT_DIRTY: ${{ steps.git.outputs.dirty }} with: project: ${{ vars.DEPOT_PROJECT_ID }} files: docker-bake.hcl diff --git a/Dockerfile.depot b/Dockerfile.depot index 46b76d54730..a94eedd11b4 100644 --- a/Dockerfile.depot +++ b/Dockerfile.depot @@ -44,6 +44,14 @@ ENV RUSTFLAGS="$RUSTFLAGS" ARG FEATURES="" ENV FEATURES=$FEATURES +# Git info for vergen (since .git is excluded from Docker context) +ARG VERGEN_GIT_SHA="" +ARG VERGEN_GIT_DESCRIBE="" +ARG VERGEN_GIT_DIRTY="false" +ENV VERGEN_GIT_SHA=$VERGEN_GIT_SHA +ENV VERGEN_GIT_DESCRIBE=$VERGEN_GIT_DESCRIBE +ENV VERGEN_GIT_DIRTY=$VERGEN_GIT_DIRTY + # Build dependencies with cache mounts RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ --mount=type=cache,target=/usr/local/cargo/git,sharing=locked \ diff --git a/docker-bake.hcl b/docker-bake.hcl index 6dcca621ae1..9b6f11788cf 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -20,6 +20,19 @@ variable "FEATURES" { default = "jemalloc asm-keccak min-debug-logs" } +// Git info for vergen (since .git is excluded from Docker context) +variable "VERGEN_GIT_SHA" { + default = "" +} + +variable "VERGEN_GIT_DESCRIBE" { + default = "" +} + +variable "VERGEN_GIT_DIRTY" { + default = "false" +} + // Common settings for all targets group "default" { targets = ["ethereum", "optimism"] @@ -34,8 +47,11 @@ target "_base" { dockerfile = "Dockerfile.depot" platforms = ["linux/amd64", "linux/arm64"] args = { - BUILD_PROFILE = "${BUILD_PROFILE}" - FEATURES = "${FEATURES}" + BUILD_PROFILE = "${BUILD_PROFILE}" + FEATURES = "${FEATURES}" + VERGEN_GIT_SHA = "${VERGEN_GIT_SHA}" + VERGEN_GIT_DESCRIBE = "${VERGEN_GIT_DESCRIBE}" + VERGEN_GIT_DIRTY = "${VERGEN_GIT_DIRTY}" } } From ccff9a08f049158f11717132b998358c2af7b5b4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 24 Jan 2026 04:13:49 +0100 Subject: [PATCH 193/267] chore: fix clippy unnecessary_sort_by 
lint (#21385) --- crates/era-downloader/src/fs.rs | 2 +- crates/net/discv4/src/lib.rs | 2 +- crates/stages/stages/src/stages/hashing_account.rs | 2 +- crates/trie/common/src/updates.rs | 8 ++++---- crates/trie/trie/src/hashed_cursor/post_state.rs | 4 ++-- crates/trie/trie/src/trie_cursor/in_memory.rs | 4 ++-- crates/trie/trie/src/verify.rs | 4 ++-- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/era-downloader/src/fs.rs b/crates/era-downloader/src/fs.rs index eaab1f3f4b4..f504fdd2694 100644 --- a/crates/era-downloader/src/fs.rs +++ b/crates/era-downloader/src/fs.rs @@ -52,7 +52,7 @@ pub fn read_dir( checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?; } - entries.sort_by(|(left, _), (right, _)| left.cmp(right)); + entries.sort_by_key(|(left, _)| *left); Ok(stream::iter(entries.into_iter().skip_while(move |(n, _)| *n < start_index).map( move |(_, path)| { diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 0255dac1a72..4ce240ed2f1 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1631,7 +1631,7 @@ impl Discv4Service { .filter(|entry| entry.node.value.is_expired()) .map(|n| n.node.value) .collect::>(); - nodes.sort_by(|a, b| a.last_seen.cmp(&b.last_seen)); + nodes.sort_by_key(|a| a.last_seen); let to_ping = nodes.into_iter().map(|n| n.record).take(MAX_NODES_PING).collect::>(); for node in to_ping { self.try_ping(node, PingReason::RePing) diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index d9e5c9e2c3a..f38b6384059 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -99,7 +99,7 @@ impl AccountHashingStage { // Account State generator let mut account_cursor = provider.tx_ref().cursor_write::()?; - accounts.sort_by(|a, b| a.0.cmp(&b.0)); + accounts.sort_by_key(|a| a.0); for (addr, acc) in &accounts { 
account_cursor.append(*addr, acc)?; } diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 26985108089..255979df9d2 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -169,7 +169,7 @@ impl TrieUpdates { .collect::>(); account_nodes.extend(self.removed_nodes.drain().map(|path| (path, None))); - account_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + account_nodes.sort_unstable_by_key(|a| a.0); let storage_tries = self .storage_tries @@ -195,7 +195,7 @@ impl TrieUpdates { .filter(|path| !self.account_nodes.contains_key(*path)) .map(|path| (*path, None)), ); - account_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + account_nodes.sort_unstable_by_key(|a| a.0); let storage_tries = self .storage_tries @@ -373,7 +373,7 @@ impl StorageTrieUpdates { .collect::>(); storage_nodes.extend(self.removed_nodes.into_iter().map(|path| (path, None))); - storage_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + storage_nodes.sort_unstable_by_key(|a| a.0); StorageTrieUpdatesSorted { is_deleted: self.is_deleted, storage_nodes } } @@ -394,7 +394,7 @@ impl StorageTrieUpdates { .filter(|path| !self.storage_nodes.contains_key(*path)) .map(|path| (*path, None)), ); - storage_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + storage_nodes.sort_unstable_by_key(|a| a.0); StorageTrieUpdatesSorted { is_deleted: self.is_deleted, storage_nodes } } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index fe2c6b9e0c9..7436e466949 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -420,7 +420,7 @@ mod tests { .into_iter() .map(|(byte, value)| (B256::repeat_byte(byte), value)) .collect(); - result.sort_by(|a, b| a.0.cmp(&b.0)); + result.sort_by_key(|a| a.0); result.dedup_by(|a, b| a.0 == b.0); result }) @@ -438,7 +438,7 @@ mod tests { (B256::repeat_byte(byte), effective_value) }) .collect(); - 
result.sort_by(|a, b| a.0.cmp(&b.0)); + result.sort_by_key(|a| a.0); result.dedup_by(|a, b| a.0 == b.0); result }, diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 941fbf96338..cfde9674099 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -758,7 +758,7 @@ mod tests { .into_iter() .map(|(bytes, node)| (Nibbles::from_nibbles_unchecked(bytes), node)) .collect(); - result.sort_by(|a, b| a.0.cmp(&b.0)); + result.sort_by_key(|a| a.0); result.dedup_by(|a, b| a.0 == b.0); result }) @@ -780,7 +780,7 @@ mod tests { .into_iter() .map(|(bytes, node)| (Nibbles::from_nibbles_unchecked(bytes), node)) .collect(); - result.sort_by(|a, b| a.0.cmp(&b.0)); + result.sort_by_key(|a| a.0); result.dedup_by(|a, b| a.0 == b.0); result }) diff --git a/crates/trie/trie/src/verify.rs b/crates/trie/trie/src/verify.rs index ce17dc30d4a..fc371058ce1 100644 --- a/crates/trie/trie/src/verify.rs +++ b/crates/trie/trie/src/verify.rs @@ -13,7 +13,7 @@ use alloy_primitives::B256; use alloy_trie::BranchNodeCompact; use reth_execution_errors::StateRootError; use reth_storage_errors::db::DatabaseError; -use std::cmp::Ordering; +use std::cmp::{Ordering, Reverse}; use tracing::trace; /// Used by [`StateRootBranchNodesIter`] to iterate over branch nodes in a state root. @@ -141,7 +141,7 @@ impl Iterator for StateRootBranchNodesIter { // By sorting by the account we ensure that we continue with the partially processed // trie (the last of the previous run) first. We sort in reverse order because we pop // off of this Vec. - self.storage_tries.sort_unstable_by(|a, b| b.0.cmp(&a.0)); + self.storage_tries.sort_unstable_by_key(|a| Reverse(a.0)); // loop back to the top. 
} From 0aa922c4e869aeb7e4de8a3826dd8e865d64c45c Mon Sep 17 00:00:00 2001 From: cui Date: Sat, 24 Jan 2026 19:21:47 +0800 Subject: [PATCH 194/267] feat: change from stable sort to unstable sort (#21387) --- crates/transaction-pool/src/pool/pending.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 8f434934103..a1c1af60b0b 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -431,7 +431,7 @@ impl PendingPool { // we prefer removing transactions with lower ordering let mut worst_transactions = self.highest_nonces.values().collect::>(); - worst_transactions.sort(); + worst_transactions.sort_unstable(); // loop through the highest nonces set, removing transactions until we reach the limit for tx in worst_transactions { From 7feb56d5f6690726abb93decc84d84e5477adbcb Mon Sep 17 00:00:00 2001 From: cui Date: Sat, 24 Jan 2026 19:30:34 +0800 Subject: [PATCH 195/267] feat: prealloc vec (#21391) --- crates/evm/evm/src/execute.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 540f86692d4..00010db95c2 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -75,9 +75,11 @@ pub trait Executor: Sized { where I: IntoIterator::Block>>, { - let mut results = Vec::new(); + let blocks_iter = blocks.into_iter(); + let capacity = blocks_iter.size_hint().0; + let mut results = Vec::with_capacity(capacity); let mut first_block = None; - for block in blocks { + for block in blocks_iter { if first_block.is_none() { first_block = Some(block.header().number()); } From 8a1702cd74cdf7ba9607b816dd55f7be649a0ef2 Mon Sep 17 00:00:00 2001 From: YK Date: Sat, 24 Jan 2026 14:07:16 +0100 Subject: [PATCH 196/267] fix(rocksdb): filter history writes to only changed accounts/storage (#21339) Co-authored-by: Tempo AI --- 
.../src/providers/rocksdb/provider.rs | 35 ++++++++++++++----- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs index 06e5837a917..a0e10cf47f4 100644 --- a/crates/storage/provider/src/providers/rocksdb/provider.rs +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -1206,6 +1206,8 @@ impl RocksDBProvider { } /// Writes account history indices for the given blocks. + /// + /// Derives history indices from reverts (same source as changesets) to ensure consistency. #[instrument(level = "debug", target = "providers::rocksdb", skip_all)] fn write_account_history( &self, @@ -1214,11 +1216,17 @@ impl RocksDBProvider { ) -> ProviderResult<()> { let mut batch = self.batch(); let mut account_history: BTreeMap> = BTreeMap::new(); + for (block_idx, block) in blocks.iter().enumerate() { let block_number = ctx.first_block_number + block_idx as u64; - let bundle = &block.execution_outcome().state; - for &address in bundle.state().keys() { - account_history.entry(address).or_default().push(block_number); + let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts(); + + // Iterate through account reverts - these are exactly the accounts that have + // changesets written, ensuring history indices match changeset entries. + for account_block_reverts in reverts.accounts { + for (address, _) in account_block_reverts { + account_history.entry(address).or_default().push(block_number); + } } } @@ -1231,6 +1239,8 @@ impl RocksDBProvider { } /// Writes storage history indices for the given blocks. + /// + /// Derives history indices from reverts (same source as changesets) to ensure consistency. 
#[instrument(level = "debug", target = "providers::rocksdb", skip_all)] fn write_storage_history( &self, @@ -1239,13 +1249,22 @@ impl RocksDBProvider { ) -> ProviderResult<()> { let mut batch = self.batch(); let mut storage_history: BTreeMap<(Address, B256), Vec> = BTreeMap::new(); + for (block_idx, block) in blocks.iter().enumerate() { let block_number = ctx.first_block_number + block_idx as u64; - let bundle = &block.execution_outcome().state; - for (&address, account) in bundle.state() { - for &slot in account.storage.keys() { - let key = B256::new(slot.to_be_bytes()); - storage_history.entry((address, key)).or_default().push(block_number); + let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts(); + + // Iterate through storage reverts - these are exactly the slots that have + // changesets written, ensuring history indices match changeset entries. + for storage_block_reverts in reverts.storage { + for revert in storage_block_reverts { + for (slot, _) in revert.storage_revert { + let key = B256::new(slot.to_be_bytes()); + storage_history + .entry((revert.address, key)) + .or_default() + .push(block_number); + } } } } From 0eea4d76e9f60c1fe74008e5d509e629ad454dc4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 24 Jan 2026 16:49:21 +0100 Subject: [PATCH 197/267] chore: remove unused imports in storage-api (#21400) Co-authored-by: Amp --- crates/storage/storage-api/src/noop.rs | 14 ++++++++------ crates/storage/storage-api/src/storage.rs | 5 ++--- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index a12a6cfcc95..c6f0a30e08a 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -28,9 +28,7 @@ use reth_db_api::mock::{DatabaseMock, TxMock}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_ethereum_primitives::EthPrimitives; use reth_execution_types::ExecutionOutcome; -use 
reth_primitives_traits::{ - Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader, StorageEntry, -}; +use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader}; #[cfg(feature = "db-api")] use reth_prune_types::PruneModes; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -415,7 +413,9 @@ impl StorageChangeSetReader for NoopProvider< fn storage_changeset( &self, _block_number: BlockNumber, - ) -> ProviderResult> { + ) -> ProviderResult< + Vec<(reth_db_api::models::BlockNumberAddress, reth_primitives_traits::StorageEntry)>, + > { Ok(Vec::default()) } @@ -424,14 +424,16 @@ impl StorageChangeSetReader for NoopProvider< _block_number: BlockNumber, _address: Address, _storage_key: B256, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } fn storage_changesets_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult< + Vec<(reth_db_api::models::BlockNumberAddress, reth_primitives_traits::StorageEntry)>, + > { Ok(Vec::default()) } diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index ecd47ff50db..66f74e7f0ce 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -4,7 +4,6 @@ use alloc::{ }; use alloy_primitives::{Address, BlockNumber, B256}; use core::ops::RangeInclusive; -use reth_db_models::StorageBeforeTx; use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; @@ -70,11 +69,11 @@ pub trait StorageChangeSetReader: Send { fn storage_block_changeset( &self, block_number: BlockNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.storage_changeset(block_number).map(|changesets| { changesets .into_iter() - .map(|(block_address, entry)| StorageBeforeTx { + .map(|(block_address, entry)| reth_db_models::StorageBeforeTx { address: block_address.address(), key: entry.key, value: entry.value, From 5440d0d89a544d5cd9300cf5ec9fb819087cfa63 Mon Sep 
17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 25 Jan 2026 10:39:48 +0000 Subject: [PATCH 198/267] chore(deps): weekly `cargo update` (#21406) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13f08781ab7..8d944d04daf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.29" +version = "0.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef3a72a2247c34a8545ee99e562b1b9b69168e5000567257ae51e91b4e6b1193" +checksum = "90f374d3c6d729268bbe2d0e0ff992bb97898b2df756691a62ee1d5f0506bc39" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -249,9 +249,9 @@ dependencies = [ [[package]] name = "alloy-eip7928" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6adac476434bf024279164dcdca299309f0c7d1e3557024eb7a83f8d9d01c6b5" +checksum = "d3231de68d5d6e75332b7489cfcc7f4dfabeba94d990a10e4b923af0e6623540" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -5553,9 +5553,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libp2p-identity" @@ -6153,9 +6153,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = 
"cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-integer" @@ -12786,9 +12786,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.45" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" +checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" dependencies = [ "deranged", "itoa", @@ -12804,15 +12804,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" +checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" dependencies = [ "num-conv", "time-core", @@ -13542,9 +13542,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ "getrandom 0.3.4", "js-sys", From 0ad8c772e1dcd27809d84f06665c19117a593323 Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Sun, 25 Jan 2026 15:36:24 +0100 Subject: [PATCH 199/267] fix(era-utils): export correct era1 CompressedBody payload (#21409) --- crates/era-utils/src/export.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/era-utils/src/export.rs 
b/crates/era-utils/src/export.rs index db8538d3c41..a54d2e83cdd 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -20,6 +20,7 @@ use reth_era::{ }, }; use reth_fs_util as fs; +use reth_primitives_traits::Block; use reth_storage_api::{BlockNumReader, BlockReader, HeaderProvider}; use std::{ path::PathBuf, @@ -295,9 +296,11 @@ where return Err(eyre!("Expected block {expected_block_number}, got {actual_block_number}")); } + // CompressedBody must contain the block *body* (rlp(body)), not the full block (rlp(block)). let body = provider .block_by_number(actual_block_number)? - .ok_or_else(|| eyre!("Block body not found for block {}", actual_block_number))?; + .ok_or_else(|| eyre!("Block not found for block {}", actual_block_number))? + .into_body(); let receipts = provider .receipts_by_block(actual_block_number.into())? From 6870747246d6cbd1c2157a6d24e9bc9e8afeca30 Mon Sep 17 00:00:00 2001 From: Ahsen Kamal <82591228+ahsenkamal@users.noreply.github.com> Date: Sun, 25 Jan 2026 20:17:22 +0530 Subject: [PATCH 200/267] feat(payload): add fn for system transaction check (#21407) Signed-off-by: Ahsen Kamal Co-authored-by: Matthias Seitz Co-authored-by: Amp --- .../tree/src/tree/payload_processor/prewarm.rs | 18 ++---------------- .../primitives/src/transaction/signed.rs | 4 ++++ .../src/transaction/signed.rs | 10 ++++++++++ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index e68342112a3..81e29eea3fa 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -24,14 +24,13 @@ use crate::tree::{ }; use alloy_consensus::transaction::TxHashRef; use alloy_eip7928::BlockAccessList; -use alloy_eips::Typed2718; use alloy_evm::Database; use alloy_primitives::{keccak256, map::B256Set, B256}; use crossbeam_channel::Sender as CrossbeamSender; use 
metrics::{Counter, Gauge, Histogram}; use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, RecoveredTx, SpecFor}; use reth_metrics::Metrics; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, SignedTransaction}; use reth_provider::{ AccountReader, BlockExecutionOutput, BlockReader, StateProvider, StateProviderFactory, StateReader, @@ -66,19 +65,6 @@ struct IndexedTransaction { tx: Tx, } -/// Maximum standard Ethereum transaction type value. -/// -/// Standard transaction types are: -/// - Type 0: Legacy transactions (original Ethereum) -/// - Type 1: EIP-2930 (access list transactions) -/// - Type 2: EIP-1559 (dynamic fee transactions) -/// - Type 3: EIP-4844 (blob transactions) -/// - Type 4: EIP-7702 (set code authorization transactions) -/// -/// Any transaction with a type > 4 is considered a non-standard/system transaction, -/// typically used by L2s for special purposes (e.g., Optimism deposit transactions use type 126). -const MAX_STANDARD_TX_TYPE: u8 = 4; - /// A task that is responsible for caching and prewarming the cache by executing transactions /// individually in parallel. /// @@ -193,7 +179,7 @@ where } let indexed_tx = IndexedTransaction { index: tx_index, tx }; - let is_system_tx = indexed_tx.tx.tx().ty() > MAX_STANDARD_TX_TYPE; + let is_system_tx = indexed_tx.tx.tx().is_system_tx(); // System transactions (type > 4) in the first position set critical metadata // that affects all subsequent transactions (e.g., L1 block info on L2s). 
diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index fc2f63abd83..896e62b3045 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -155,6 +155,10 @@ impl IsTyped2718 for OpTransactionSigned { } impl SignedTransaction for OpTransactionSigned { + fn is_system_tx(&self) -> bool { + self.is_deposit() + } + fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) } diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index a6212a6c687..9c92f221eb3 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -48,6 +48,16 @@ pub trait SignedTransaction: + TxHashRef + IsTyped2718 { + /// Returns whether this is a system transaction. + /// + /// System transactions are created at the protocol level rather than by users. They are + /// typically used by L2s for special purposes (e.g., Optimism deposit transactions with type + /// 126) and may have different validation rules or fee handling compared to standard + /// user-initiated transactions. + fn is_system_tx(&self) -> bool { + false + } + /// Returns whether this transaction type can be __broadcasted__ as full transaction over the /// network. /// From 31fdbe914cb8bcf773d1852899ff911329d4215b Mon Sep 17 00:00:00 2001 From: Hwangjae Lee Date: Mon, 26 Jan 2026 19:19:36 +0900 Subject: [PATCH 201/267] docs(tracing): fix incorrect example description in lib.rs (#21417) Signed-off-by: Hwangjae Lee --- crates/tracing/src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/tracing/src/lib.rs b/crates/tracing/src/lib.rs index c484520b45a..72857de02a7 100644 --- a/crates/tracing/src/lib.rs +++ b/crates/tracing/src/lib.rs @@ -32,8 +32,7 @@ //! } //! ``` //! -//! 
This example sets up a tracer with JSON format logging for journald and terminal-friendly -//! format for file logging. +//! This example sets up a tracer with JSON format logging to stdout. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", From afe164baca920e7da341d79643dec9c4c289fce4 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 26 Jan 2026 02:24:10 -0800 Subject: [PATCH 202/267] test: add E2E test for RocksDB provider functionality (#21419) Co-authored-by: Amp Co-authored-by: yongkangc --- crates/e2e-test-utils/Cargo.toml | 8 + crates/e2e-test-utils/tests/rocksdb/main.rs | 173 ++++++++++++++++++++ 2 files changed, 181 insertions(+) create mode 100644 crates/e2e-test-utils/tests/rocksdb/main.rs diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 673193ddd9a..ad5a30b7ec6 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -72,3 +72,11 @@ derive_more.workspace = true [[test]] name = "e2e_testsuite" path = "tests/e2e-testsuite/main.rs" + +[[test]] +name = "rocksdb" +path = "tests/rocksdb/main.rs" +required-features = ["edge"] + +[features] +edge = ["reth-node-core/edge"] diff --git a/crates/e2e-test-utils/tests/rocksdb/main.rs b/crates/e2e-test-utils/tests/rocksdb/main.rs new file mode 100644 index 00000000000..90289dc2485 --- /dev/null +++ b/crates/e2e-test-utils/tests/rocksdb/main.rs @@ -0,0 +1,173 @@ +//! E2E tests for `RocksDB` provider functionality. 
+
+#![cfg(all(feature = "edge", unix))]
+
+use alloy_consensus::BlockHeader;
+use alloy_primitives::B256;
+use alloy_rpc_types_eth::{Transaction, TransactionReceipt};
+use eyre::Result;
+use jsonrpsee::core::client::ClientT;
+use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET};
+use reth_e2e_test_utils::{transaction::TransactionTestContext, E2ETestSetupBuilder};
+use reth_node_builder::NodeConfig;
+use reth_node_core::args::RocksDbArgs;
+use reth_node_ethereum::EthereumNode;
+use reth_payload_builder::EthPayloadBuilderAttributes;
+use std::sync::Arc;
+
+/// Returns the test chain spec for `RocksDB` tests.
+fn test_chain_spec() -> Arc<ChainSpec> {
+    Arc::new(
+        ChainSpecBuilder::default()
+            .chain(MAINNET.chain)
+            .genesis(
+                serde_json::from_str(include_str!("../../src/testsuite/assets/genesis.json"))
+                    .expect("failed to parse genesis.json"),
+            )
+            .cancun_activated()
+            .build(),
+    )
+}
+
+/// Returns test payload attributes for the given timestamp.
+fn test_attributes_generator(timestamp: u64) -> EthPayloadBuilderAttributes {
+    let attributes = alloy_rpc_types_engine::PayloadAttributes {
+        timestamp,
+        prev_randao: B256::ZERO,
+        suggested_fee_recipient: alloy_primitives::Address::ZERO,
+        withdrawals: Some(vec![]),
+        parent_beacon_block_root: Some(B256::ZERO),
+    };
+    EthPayloadBuilderAttributes::new(B256::ZERO, attributes)
+}
+
+/// Enables `RocksDB` for all supported tables.
+fn with_rocksdb_enabled(mut config: NodeConfig<ChainSpec>) -> NodeConfig<ChainSpec> {
+    config.rocksdb = RocksDbArgs { all: true, ..Default::default() };
+    config
+}
+
+/// Smoke test: node boots with `RocksDB` routing enabled.
+#[tokio::test] +async fn test_rocksdb_node_startup() -> Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = test_chain_spec(); + + let (nodes, _tasks, _wallet) = + E2ETestSetupBuilder::::new(1, chain_spec, test_attributes_generator) + .with_node_config_modifier(with_rocksdb_enabled) + .build() + .await?; + + assert_eq!(nodes.len(), 1); + + // Verify RocksDB directory exists + let rocksdb_path = nodes[0].inner.data_dir.rocksdb(); + assert!(rocksdb_path.exists(), "RocksDB directory should exist at {rocksdb_path:?}"); + assert!( + std::fs::read_dir(&rocksdb_path).map(|mut d| d.next().is_some()).unwrap_or(false), + "RocksDB directory should be non-empty" + ); + + let genesis_hash = nodes[0].block_hash(0); + assert_ne!(genesis_hash, B256::ZERO); + + Ok(()) +} + +/// Block mining works with `RocksDB` storage. +#[tokio::test] +async fn test_rocksdb_block_mining() -> Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = test_chain_spec(); + + let (mut nodes, _tasks, _wallet) = + E2ETestSetupBuilder::::new(1, chain_spec, test_attributes_generator) + .with_node_config_modifier(with_rocksdb_enabled) + .build() + .await?; + + assert_eq!(nodes.len(), 1); + + let genesis_hash = nodes[0].block_hash(0); + assert_ne!(genesis_hash, B256::ZERO); + + // Mine 3 blocks + for i in 1..=3 { + let payload = nodes[0].advance_block().await?; + let block = payload.block(); + assert_eq!(block.number(), i); + assert_ne!(block.hash(), B256::ZERO); + } + + // Verify all blocks are stored + for i in 0..=3 { + let block_hash = nodes[0].block_hash(i); + assert_ne!(block_hash, B256::ZERO); + } + + Ok(()) +} + +/// Tx hash lookup exercises `TransactionHashNumbers` table. 
+#[tokio::test]
+async fn test_rocksdb_transaction_queries() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let chain_spec = test_chain_spec();
+    let chain_id = chain_spec.chain().id();
+
+    let (mut nodes, _tasks, wallet) =
+        E2ETestSetupBuilder::<EthereumNode>::new(1, chain_spec, test_attributes_generator)
+            .with_node_config_modifier(with_rocksdb_enabled)
+            .build()
+            .await?;
+
+    assert_eq!(nodes.len(), 1);
+
+    let mut tx_hashes = Vec::new();
+
+    // Inject and mine 3 transactions (new wallet per tx to avoid nonce tracking)
+    for i in 0..3 {
+        let wallets = wallet.wallet_gen();
+        let signer = wallets[0].clone();
+
+        let raw_tx = TransactionTestContext::transfer_tx_bytes(chain_id, signer).await;
+        let tx_hash = nodes[0].rpc.inject_tx(raw_tx).await?;
+        tx_hashes.push(tx_hash);
+
+        let payload = nodes[0].advance_block().await?;
+        assert_eq!(payload.block().number(), i + 1);
+    }
+
+    let client = nodes[0].rpc_client().expect("RPC client should be available");
+
+    // Query each transaction by hash
+    for (i, tx_hash) in tx_hashes.iter().enumerate() {
+        let expected_block_number = (i + 1) as u64;
+
+        let tx: Option<Transaction> = client.request("eth_getTransactionByHash", [tx_hash]).await?;
+        let tx = tx.expect("Transaction should be found");
+        assert_eq!(tx.block_number, Some(expected_block_number));
+
+        let receipt: Option<TransactionReceipt> =
+            client.request("eth_getTransactionReceipt", [tx_hash]).await?;
+        let receipt = receipt.expect("Receipt should be found");
+        assert_eq!(receipt.block_number, Some(expected_block_number));
+        assert!(receipt.status());
+    }
+
+    // Negative test: querying a non-existent tx hash returns None
+    let missing_hash = B256::from([0xde; 32]);
+    let missing_tx: Option<Transaction> =
+        client.request("eth_getTransactionByHash", [missing_hash]).await?;
+    assert!(missing_tx.is_none(), "expected no transaction for missing hash");
+
+    let missing_receipt: Option<TransactionReceipt> =
+        client.request("eth_getTransactionReceipt", [missing_hash]).await?;
+    assert!(missing_receipt.is_none(), "expected no receipt for 
missing hash"); + + Ok(()) +} From 0b5f79e8c9226ee66c96eb35d2a5f4408e8c48ac Mon Sep 17 00:00:00 2001 From: emmmm <155267286+eeemmmmmm@users.noreply.github.com> Date: Mon, 26 Jan 2026 07:48:35 -0300 Subject: [PATCH 203/267] docs(rpc): add reth_subscribePersistedBlock method (#21420) --- docs/vocs/docs/pages/jsonrpc/reth.mdx | 32 +++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/vocs/docs/pages/jsonrpc/reth.mdx b/docs/vocs/docs/pages/jsonrpc/reth.mdx index 4a7f7d48f58..80e49d33ab6 100644 --- a/docs/vocs/docs/pages/jsonrpc/reth.mdx +++ b/docs/vocs/docs/pages/jsonrpc/reth.mdx @@ -87,3 +87,35 @@ This is particularly useful for applications that need to react immediately to c :::note This subscription is only available over WebSocket and IPC transports, as HTTP does not support server-initiated messages. ::: + +## `reth_subscribePersistedBlock`, `reth_unsubscribePersistedBlock` + +Subscribe to persisted block notifications. This creates a subscription that emits a notification with the block number and hash when a new block is persisted to disk. + +Like other subscription methods, this returns the ID of the subscription, which is then used in all events subsequently. + +To unsubscribe from persisted block notifications, call `reth_unsubscribePersistedBlock` with the subscription ID. 
+ +| Client | Method invocation | +| ------ | -------------------------------------------------------------------------- | +| RPC | `{"method": "reth_subscribePersistedBlock", "params": []}` | +| RPC | `{"method": "reth_unsubscribePersistedBlock", "params": [subscription_id]}` | + +### Example + +```js +// > {"jsonrpc":"2.0","id":1,"method":"reth_subscribePersistedBlock","params":[]} +// responds with subscription ID +{"jsonrpc":"2.0","id":1,"result":"0xab1c2d3e4f590364c09d0fa6a1210faf5"} + +// Example notification when a block is persisted +{"jsonrpc":"2.0","method":"reth_subscription","params":{"subscription":"0xab1c2d3e4f590364c09d0fa6a1210faf5","result":{"number":"0x1a2b3c","hash":"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"}}} + +// Unsubscribe +// > {"jsonrpc":"2.0","id":2,"method":"reth_unsubscribePersistedBlock","params":["0xab1c2d3e4f590364c09d0fa6a1210faf5"]} +{"jsonrpc":"2.0","id":2,"result":true} +``` + +:::note +This subscription is only available over WebSocket and IPC transports, as HTTP does not support server-initiated messages. 
+::: From 4baf2baec44d3fb596cbdab2145af96b0c3dc87c Mon Sep 17 00:00:00 2001 From: Andrey Kolishchak Date: Mon, 26 Jan 2026 07:34:07 -0500 Subject: [PATCH 204/267] fix(net): FetchFullBlockRangeFuture can get stuck forever after partial body fetch + error (#21411) Co-authored-by: Matthias Seitz Co-authored-by: Amp --- crates/net/p2p/src/full_block.rs | 90 ++++++++++++++++++++++++++++++-- 1 file changed, 86 insertions(+), 4 deletions(-) diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 0ff6c898382..a548ebab2bc 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -571,8 +571,8 @@ where debug!(target: "downloaders", %err, ?this.start_hash, "Body range download failed"); } } - if this.bodies.is_empty() { - // received bad response, re-request headers + if this.request.bodies.is_none() && !this.is_bodies_complete() { + // no pending bodies request (e.g., request error), retry remaining bodies // TODO: convert this into two futures, one which is a headers range // future, and one which is a bodies range future. 
// @@ -751,8 +751,12 @@ mod tests { use reth_ethereum_primitives::BlockBody; use super::*; - use crate::test_utils::TestFullBlockClient; - use std::ops::Range; + use crate::{error::RequestError, test_utils::TestFullBlockClient}; + use std::{ + ops::Range, + sync::atomic::{AtomicUsize, Ordering}, + }; + use tokio::time::{timeout, Duration}; #[tokio::test] async fn download_single_full_block() { @@ -800,6 +804,65 @@ mod tests { (sealed_header, body) } + #[derive(Clone, Debug)] + struct FailingBodiesClient { + inner: TestFullBlockClient, + fail_on: usize, + body_requests: Arc, + } + + impl FailingBodiesClient { + fn new(inner: TestFullBlockClient, fail_on: usize) -> Self { + Self { inner, fail_on, body_requests: Arc::new(AtomicUsize::new(0)) } + } + } + + impl DownloadClient for FailingBodiesClient { + fn report_bad_message(&self, peer_id: PeerId) { + self.inner.report_bad_message(peer_id); + } + + fn num_connected_peers(&self) -> usize { + self.inner.num_connected_peers() + } + } + + impl HeadersClient for FailingBodiesClient { + type Header = ::Header; + type Output = ::Output; + + fn get_headers_with_priority( + &self, + request: HeadersRequest, + priority: Priority, + ) -> Self::Output { + self.inner.get_headers_with_priority(request, priority) + } + } + + impl BodiesClient for FailingBodiesClient { + type Body = ::Body; + type Output = ::Output; + + fn get_block_bodies_with_priority_and_range_hint( + &self, + hashes: Vec, + priority: Priority, + range_hint: Option>, + ) -> Self::Output { + let attempt = self.body_requests.fetch_add(1, Ordering::SeqCst); + if attempt == self.fail_on { + return futures::future::ready(Err(RequestError::Timeout)) + } + + self.inner.get_block_bodies_with_priority_and_range_hint(hashes, priority, range_hint) + } + } + + impl BlockClient for FailingBodiesClient { + type Block = reth_ethereum_primitives::Block; + } + #[tokio::test] async fn download_full_block_range() { let client = TestFullBlockClient::default(); @@ -837,6 +900,25 @@ 
mod tests { } } + #[tokio::test] + async fn download_full_block_range_retries_after_body_error() { + let mut client = TestFullBlockClient::default(); + client.set_soft_limit(2); + let (header, _) = insert_headers_into_client(&client, 0..3); + + let client = FailingBodiesClient::new(client, 1); + let body_requests = Arc::clone(&client.body_requests); + let client = FullBlockClient::test_client(client); + + let received = + timeout(Duration::from_secs(1), client.get_full_block_range(header.hash(), 3)) + .await + .expect("body request retry should complete"); + + assert_eq!(received.len(), 3); + assert_eq!(body_requests.load(Ordering::SeqCst), 3); + } + #[tokio::test] async fn download_full_block_range_with_invalid_header() { let client = TestFullBlockClient::default(); From 6d19c0ed8e9d3e6714292c3990effe0955aa375c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 13:36:49 +0100 Subject: [PATCH 205/267] fix(engine): only warn for critical capability mismatches (#21398) Co-authored-by: Amp --- crates/rpc/rpc-engine-api/src/capabilities.rs | 54 +++++++++++++++++-- 1 file changed, 49 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/capabilities.rs b/crates/rpc/rpc-engine-api/src/capabilities.rs index 75583c821e2..cf69279870e 100644 --- a/crates/rpc/rpc-engine-api/src/capabilities.rs +++ b/crates/rpc/rpc-engine-api/src/capabilities.rs @@ -3,6 +3,13 @@ use std::collections::HashSet; use tracing::warn; +/// Critical Engine API method prefixes that warrant warnings on capability mismatches. +/// +/// These are essential for block production and chain synchronization. Missing support +/// for these methods indicates a significant version mismatch that operators should address. +const CRITICAL_METHOD_PREFIXES: &[&str] = + &["engine_forkchoiceUpdated", "engine_getPayload", "engine_newPayload"]; + /// All Engine API capabilities supported by Reth (Ethereum mainnet). /// /// See for updates. 
@@ -72,31 +79,52 @@ impl EngineCapabilities { CapabilityMismatches { missing_in_el, missing_in_cl } } - /// Logs warnings if CL and EL capabilities don't match. + /// Logs warnings if CL and EL capabilities don't match for critical methods. /// /// Called during `engine_exchangeCapabilities` to warn operators about /// version mismatches between the consensus layer and execution layer. + /// + /// Only warns about critical methods (`engine_forkchoiceUpdated`, `engine_getPayload`, + /// `engine_newPayload`) that are essential for block production and chain synchronization. + /// Non-critical methods like `engine_getBlobs` are not warned about since not all + /// clients support them. pub fn log_capability_mismatches(&self, cl_capabilities: &[String]) { let mismatches = self.get_capability_mismatches(cl_capabilities); - if !mismatches.missing_in_el.is_empty() { + let critical_missing_in_el: Vec<_> = + mismatches.missing_in_el.iter().filter(|m| is_critical_method(m)).cloned().collect(); + + let critical_missing_in_cl: Vec<_> = + mismatches.missing_in_cl.iter().filter(|m| is_critical_method(m)).cloned().collect(); + + if !critical_missing_in_el.is_empty() { warn!( target: "rpc::engine", - missing = ?mismatches.missing_in_el, + missing = ?critical_missing_in_el, "CL supports Engine API methods that Reth doesn't. Consider upgrading Reth." ); } - if !mismatches.missing_in_cl.is_empty() { + if !critical_missing_in_cl.is_empty() { warn!( target: "rpc::engine", - missing = ?mismatches.missing_in_cl, + missing = ?critical_missing_in_cl, "Reth supports Engine API methods that CL doesn't. Consider upgrading your consensus client." ); } } } +/// Returns `true` if the method is critical for block production and chain synchronization. +fn is_critical_method(method: &str) -> bool { + CRITICAL_METHOD_PREFIXES.iter().any(|prefix| { + method.starts_with(prefix) && + method[prefix.len()..] 
+ .strip_prefix('V') + .is_some_and(|s| s.chars().next().is_some_and(|c| c.is_ascii_digit())) + }) +} + impl Default for EngineCapabilities { fn default() -> Self { Self::new(CAPABILITIES.iter().copied()) @@ -173,4 +201,20 @@ mod tests { assert_eq!(result.missing_in_el, vec!["a_other", "z_other"]); assert_eq!(result.missing_in_cl, vec!["a_method", "z_method"]); } + + #[test] + fn test_is_critical_method() { + assert!(is_critical_method("engine_forkchoiceUpdatedV1")); + assert!(is_critical_method("engine_forkchoiceUpdatedV3")); + assert!(is_critical_method("engine_getPayloadV1")); + assert!(is_critical_method("engine_getPayloadV4")); + assert!(is_critical_method("engine_newPayloadV1")); + assert!(is_critical_method("engine_newPayloadV4")); + + assert!(!is_critical_method("engine_getBlobsV1")); + assert!(!is_critical_method("engine_getBlobsV3")); + assert!(!is_critical_method("engine_getPayloadBodiesByHashV1")); + assert!(!is_critical_method("engine_getPayloadBodiesByRangeV1")); + assert!(!is_critical_method("engine_getClientVersionV1")); + } } From adbe6d9da0a338b265ed83edd3b6a7f748de9b4d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 13:39:15 +0100 Subject: [PATCH 206/267] fix(rpc): cap simulate_v1 default gas limit to RPC gas cap (#21402) Co-authored-by: Amp --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 359ce9fd0fa..6e7bbdd3898 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -152,7 +152,17 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } if txs_without_gas_limit > 0 { - (block_gas_limit - total_specified_gas) / txs_without_gas_limit as u64 + // Per spec: "gasLimit: blockGasLimit - soFarUsedGasInBlock" + // Divide remaining gas equally among transactions without gas + let 
gas_per_tx = (block_gas_limit - total_specified_gas) / + txs_without_gas_limit as u64; + // Cap to RPC gas limit, matching spec behavior + let call_gas_limit = this.call_gas_limit(); + if call_gas_limit > 0 { + gas_per_tx.min(call_gas_limit) + } else { + gas_per_tx + } } else { 0 } From 963bfeeeed0b77a9b1f1779392f8780e67e62a86 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 13:39:37 +0100 Subject: [PATCH 207/267] fix(rpc): set prevrandao to zero for eth_simulateV1 simulated blocks (#21399) Co-authored-by: Amp --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 6e7bbdd3898..50a293f55cb 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -116,6 +116,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let SimBlock { block_overrides, state_overrides, calls } = block; + // Set prevrandao to zero for simulated blocks by default, + // matching spec behavior where MixDigest is zero-initialized. + // If user provides an override, it will be applied by apply_block_overrides. 
+ evm_env.block_env.inner_mut().prevrandao = Some(B256::ZERO); + if let Some(block_overrides) = block_overrides { // ensure we don't allow uncapped gas limit per block if let Some(gas_limit_override) = block_overrides.gas_limit && From d4f28b02ffcae342d04d12d55de43862dc7a0049 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 13:40:12 +0100 Subject: [PATCH 208/267] feat(rpc): implement movePrecompileToAddress for eth_simulateV1 (#21414) Co-authored-by: Amp --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 30 +++++++-- crates/rpc/rpc-eth-types/src/simulate.rs | 76 ++++++++++++++++++++++ 2 files changed, 100 insertions(+), 6 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 50a293f55cb..7617130dd75 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -20,8 +20,8 @@ use alloy_rpc_types_eth::{ use futures::Future; use reth_errors::{ProviderError, RethError}; use reth_evm::{ - env::BlockEnvironment, ConfigureEvm, Evm, EvmEnvFor, HaltReasonFor, InspectorFor, - TransactionEnv, TxEnvFor, + env::BlockEnvironment, execute::BlockBuilder, ConfigureEvm, Evm, EvmEnvFor, HaltReasonFor, + InspectorFor, TransactionEnv, TxEnvFor, }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; @@ -135,8 +135,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA evm_env.block_env.inner_mut(), ); } - if let Some(state_overrides) = state_overrides { - apply_state_overrides(state_overrides, &mut db) + if let Some(ref state_overrides) = state_overrides { + apply_state_overrides(state_overrides.clone(), &mut db) .map_err(Self::Error::from_eth_err)?; } @@ -192,7 +192,16 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let evm = this .evm_config() .evm_with_env_and_inspector(&mut db, evm_env, inspector); - let builder = this.evm_config().create_block_builder(evm, &parent, ctx); 
+ let mut builder = this.evm_config().create_block_builder(evm, &parent, ctx); + + if let Some(ref state_overrides) = state_overrides { + simulate::apply_precompile_overrides( + state_overrides, + builder.evm_mut().precompiles_mut(), + ) + .map_err(|e| Self::Error::from_eth_err(EthApiError::other(e)))?; + } + simulate::execute_transactions( builder, calls, @@ -203,7 +212,16 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA .map_err(map_err)? } else { let evm = this.evm_config().evm_with_env(&mut db, evm_env); - let builder = this.evm_config().create_block_builder(evm, &parent, ctx); + let mut builder = this.evm_config().create_block_builder(evm, &parent, ctx); + + if let Some(ref state_overrides) = state_overrides { + simulate::apply_precompile_overrides( + state_overrides, + builder.evm_mut().precompiles_mut(), + ) + .map_err(|e| Self::Error::from_eth_err(EthApiError::other(e)))?; + } + simulate::execute_transactions( builder, calls, diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 98658a75b52..2c30556c1cc 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -6,9 +6,11 @@ use crate::{ }; use alloy_consensus::{transaction::TxHashRef, BlockHeader, Transaction as _}; use alloy_eips::eip2718::WithEncoded; +use alloy_evm::precompiles::PrecompilesMap; use alloy_network::TransactionBuilder; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, + state::StateOverride, BlockTransactionsKind, }; use jsonrpsee_types::ErrorObject; @@ -79,6 +81,9 @@ pub enum EthSimulateError { /// Multiple `MovePrecompileToAddress` referencing the same address. #[error("Multiple MovePrecompileToAddress referencing the same address")] PrecompileDuplicateAddress, + /// Attempted to move a non-precompile address. 
+ #[error("account {0} is not a precompile")] + NotAPrecompile(Address), } impl EthSimulateError { @@ -98,6 +103,7 @@ impl EthSimulateError { Self::SenderNotEOA => -38024, Self::MaxInitCodeSizeExceeded => -38025, Self::GasLimitReached => -38026, + Self::NotAPrecompile(_) => -32000, } } } @@ -108,6 +114,76 @@ impl ToRpcError for EthSimulateError { } } +/// Applies precompile move overrides from state overrides to the EVM's precompiles map. +/// +/// This function processes `movePrecompileToAddress` entries from the state overrides and +/// moves precompiles from their original addresses to new addresses. The original address +/// is cleared (precompile removed) and the precompile is installed at the destination address. +/// +/// # Validation +/// +/// - The source address must be a precompile (exists in the precompiles map) +/// - Moving multiple precompiles to the same destination is allowed +/// - Self-references (moving to the same address) are not explicitly forbidden here since that +/// would be a no-op +/// +/// # Arguments +/// +/// * `state_overrides` - The state overrides containing potential `movePrecompileToAddress` entries +/// * `precompiles` - Mutable reference to the EVM's precompiles map +/// +/// # Returns +/// +/// Returns `Ok(())` on success, or an `EthSimulateError::NotAPrecompile` if a source address +/// is not a precompile. 
+pub fn apply_precompile_overrides( + state_overrides: &StateOverride, + precompiles: &mut PrecompilesMap, +) -> Result<(), EthSimulateError> { + use alloy_evm::precompiles::DynPrecompile; + + let moves: Vec<_> = state_overrides + .iter() + .filter_map(|(source, account_override)| { + account_override.move_precompile_to.map(|dest| (*source, dest)) + }) + .collect(); + + if moves.is_empty() { + return Ok(()); + } + + for (source, _dest) in &moves { + if precompiles.get(source).is_none() { + return Err(EthSimulateError::NotAPrecompile(*source)); + } + } + + let mut extracted: Vec<(Address, Address, DynPrecompile)> = Vec::with_capacity(moves.len()); + + for (source, dest) in moves { + if source == dest { + continue; + } + + let mut found_precompile: Option = None; + precompiles.apply_precompile(&source, |existing| { + found_precompile = existing; + None + }); + + if let Some(precompile) = found_precompile { + extracted.push((source, dest, precompile)); + } + } + + for (_source, dest, precompile) in extracted { + precompiles.apply_precompile(&dest, |_| Some(precompile)); + } + + Ok(()) +} + /// Converts all [`TransactionRequest`]s into [`Recovered`] transactions and applies them to the /// given [`BlockExecutor`]. 
/// From 934f462d0131457261eaa97aed3e673ce4e54fdb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 13:41:06 +0100 Subject: [PATCH 209/267] feat(cli): make stopping on invalid block the default for reth import (#21403) Co-authored-by: Amp --- crates/cli/commands/src/import.rs | 47 ++++++++++++- crates/cli/commands/src/import_core.rs | 90 +++++++++++++++++++++--- docs/vocs/docs/pages/cli/reth/import.mdx | 7 ++ 3 files changed, 132 insertions(+), 12 deletions(-) diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index e8493c9ab33..bbf48209f8d 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -26,6 +26,14 @@ pub struct ImportCommand { #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, + /// Fail immediately when an invalid block is encountered. + /// + /// By default, the import will stop at the last valid block if an invalid block is + /// encountered during execution or validation, leaving the database at the last valid + /// block state. When this flag is set, the import will instead fail with an error. + #[arg(long, verbatim_doc_comment)] + fail_on_invalid_block: bool, + /// The path(s) to block file(s) for import. 
/// /// The online stages (headers and bodies) are replaced by a file import, after which the @@ -52,7 +60,11 @@ impl> ImportComm info!(target: "reth::cli", "Starting import of {} file(s)", self.paths.len()); - let import_config = ImportConfig { no_state: self.no_state, chunk_len: self.chunk_len }; + let import_config = ImportConfig { + no_state: self.no_state, + chunk_len: self.chunk_len, + fail_on_invalid_block: self.fail_on_invalid_block, + }; let executor = components.evm_config().clone(); let consensus = Arc::new(components.consensus().clone()); @@ -81,7 +93,20 @@ impl> ImportComm total_decoded_blocks += result.total_decoded_blocks; total_decoded_txns += result.total_decoded_txns; - if !result.is_complete() { + // Check if we stopped due to an invalid block + if result.stopped_on_invalid_block { + info!(target: "reth::cli", + "Stopped at last valid block {} due to invalid block {} in file: {}. Imported {} blocks, {} transactions", + result.last_valid_block.unwrap_or(0), + result.bad_block.unwrap_or(0), + path.display(), + result.total_imported_blocks, + result.total_imported_txns); + // Stop importing further files and exit successfully + break; + } + + if !result.is_successful() { return Err(eyre::eyre!( "Chain was partially imported from file: {}. Imported {}/{} blocks, {}/{} transactions", path.display(), @@ -98,7 +123,7 @@ impl> ImportComm } info!(target: "reth::cli", - "All files imported successfully. Total: {}/{} blocks, {}/{} transactions", + "Import complete. 
Total: {}/{} blocks, {}/{} transactions", total_imported_blocks, total_decoded_blocks, total_imported_txns, total_decoded_txns); Ok(()) @@ -139,4 +164,20 @@ mod tests { assert_eq!(args.paths[1], PathBuf::from("file2.rlp")); assert_eq!(args.paths[2], PathBuf::from("file3.rlp")); } + + #[test] + fn parse_import_command_with_fail_on_invalid_block() { + let args: ImportCommand = + ImportCommand::parse_from(["reth", "--fail-on-invalid-block", "chain.rlp"]); + assert!(args.fail_on_invalid_block); + assert_eq!(args.paths.len(), 1); + assert_eq!(args.paths[0], PathBuf::from("chain.rlp")); + } + + #[test] + fn parse_import_command_default_stops_on_invalid_block() { + let args: ImportCommand = + ImportCommand::parse_from(["reth", "chain.rlp"]); + assert!(!args.fail_on_invalid_block); + } } diff --git a/crates/cli/commands/src/import_core.rs b/crates/cli/commands/src/import_core.rs index b5bf55a6b59..37e0cf0868c 100644 --- a/crates/cli/commands/src/import_core.rs +++ b/crates/cli/commands/src/import_core.rs @@ -22,11 +22,11 @@ use reth_provider::{ StageCheckpointReader, }; use reth_prune::PruneModes; -use reth_stages::{prelude::*, Pipeline, StageId, StageSet}; +use reth_stages::{prelude::*, ControlFlow, Pipeline, StageId, StageSet}; use reth_static_file::StaticFileProducer; use std::{path::Path, sync::Arc}; use tokio::sync::watch; -use tracing::{debug, error, info}; +use tracing::{debug, error, info, warn}; /// Configuration for importing blocks from RLP files. #[derive(Debug, Clone, Default)] @@ -35,6 +35,9 @@ pub struct ImportConfig { pub no_state: bool, /// Chunk byte length to read from file. pub chunk_len: Option, + /// If true, fail immediately when an invalid block is encountered. + /// By default (false), the import stops at the last valid block and exits successfully. + pub fail_on_invalid_block: bool, } /// Result of an import operation. 
@@ -48,6 +51,12 @@ pub struct ImportResult { pub total_imported_blocks: usize, /// Total number of transactions imported into the database. pub total_imported_txns: usize, + /// Whether the import was stopped due to an invalid block. + pub stopped_on_invalid_block: bool, + /// The block number that was invalid, if any. + pub bad_block: Option, + /// The last valid block number when stopped due to invalid block. + pub last_valid_block: Option, } impl ImportResult { @@ -56,6 +65,14 @@ impl ImportResult { self.total_decoded_blocks == self.total_imported_blocks && self.total_decoded_txns == self.total_imported_txns } + + /// Returns true if the import was successful, considering stop-on-invalid-block mode. + /// + /// In stop-on-invalid-block mode, a partial import is considered successful if we + /// stopped due to an invalid block (leaving the DB at the last valid block). + pub fn is_successful(&self) -> bool { + self.is_complete() || self.stopped_on_invalid_block + } } /// Imports blocks from an RLP-encoded file into the database. @@ -103,6 +120,11 @@ where let static_file_producer = StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + // Track if we stopped due to an invalid block + let mut stopped_on_invalid_block = false; + let mut bad_block_number: Option = None; + let mut last_valid_block_number: Option = None; + while let Some(file_client) = reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? { @@ -137,12 +159,51 @@ where // Run pipeline info!(target: "reth::import", "Starting sync pipeline"); - tokio::select! { - res = pipeline.run() => res?, - _ = tokio::signal::ctrl_c() => { - info!(target: "reth::import", "Import interrupted by user"); - break; - }, + if import_config.fail_on_invalid_block { + // Original behavior: fail on unwind + tokio::select! 
{ + res = pipeline.run() => res?, + _ = tokio::signal::ctrl_c() => { + info!(target: "reth::import", "Import interrupted by user"); + break; + }, + } + } else { + // Default behavior: Use run_loop() to handle unwinds gracefully + let result = tokio::select! { + res = pipeline.run_loop() => res, + _ = tokio::signal::ctrl_c() => { + info!(target: "reth::import", "Import interrupted by user"); + break; + }, + }; + + match result { + Ok(ControlFlow::Unwind { target, bad_block }) => { + // An invalid block was encountered; stop at last valid block + let bad = bad_block.block.number; + warn!( + target: "reth::import", + bad_block = bad, + last_valid_block = target, + "Invalid block encountered during import; stopping at last valid block" + ); + stopped_on_invalid_block = true; + bad_block_number = Some(bad); + last_valid_block_number = Some(target); + break; + } + Ok(ControlFlow::Continue { block_number }) => { + debug!(target: "reth::import", block_number, "Pipeline chunk completed"); + } + Ok(ControlFlow::NoProgress { block_number }) => { + debug!(target: "reth::import", ?block_number, "Pipeline made no progress"); + } + Err(e) => { + // Propagate other pipeline errors + return Err(e.into()); + } + } } sealed_header = provider_factory @@ -160,9 +221,20 @@ where total_decoded_txns, total_imported_blocks, total_imported_txns, + stopped_on_invalid_block, + bad_block: bad_block_number, + last_valid_block: last_valid_block_number, }; - if !result.is_complete() { + if result.stopped_on_invalid_block { + info!(target: "reth::import", + total_imported_blocks, + total_imported_txns, + bad_block = ?result.bad_block, + last_valid_block = ?result.last_valid_block, + "Import stopped at last valid block due to invalid block" + ); + } else if !result.is_complete() { error!(target: "reth::import", total_decoded_blocks, total_imported_blocks, diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 50ed891bcf9..ed6a5d7f599 100644 --- 
a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -187,6 +187,13 @@ RocksDB: --chunk-len Chunk byte length to read from file. + --fail-on-invalid-block + Fail immediately when an invalid block is encountered. + + By default, the import will stop at the last valid block if an invalid block is + encountered during execution or validation, leaving the database at the last valid + block state. When this flag is set, the import will instead fail with an error. + ... The path(s) to block file(s) for import. From 6cfd369d17f3a01c58e353119960ec1f80a43c12 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 13:41:19 +0100 Subject: [PATCH 210/267] fix(rpc): populate block_hash in eth_simulateV1 logs (#21413) Co-authored-by: Amp --- crates/rpc/rpc-eth-types/src/simulate.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 2c30556c1cc..4ba0f331d04 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -375,6 +375,7 @@ where log_index: Some(log_index - 1), transaction_index: Some(index as u64), transaction_hash: Some(*tx.tx_hash()), + block_hash: Some(block.hash()), block_number: Some(block.header().number()), block_timestamp: Some(block.header().timestamp()), ..Default::default() From 507cf58db0ce4a3923ec9da7c1ee0c60ae8cecb9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 14:47:20 +0100 Subject: [PATCH 211/267] fix(rpc): add block number validation in eth_simulateV1 (#21396) Co-authored-by: Amp --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 19 +++++++++++++++++++ crates/rpc/rpc-eth-types/src/simulate.rs | 11 ++++++++--- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 7617130dd75..d37f0b5bb41 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ 
b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -96,7 +96,23 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA self.spawn_with_state_at_block(block, move |this, mut db| { let mut blocks: Vec>> = Vec::with_capacity(block_state_calls.len()); + + // Track previous block number for validation + let mut prev_block_number = parent.number(); + for block in block_state_calls { + // Validate block number ordering if overridden + if let Some(number) = block.block_overrides.as_ref().and_then(|o| o.number) { + let number: u64 = number.try_into().unwrap_or(u64::MAX); + if number <= prev_block_number { + return Err(EthApiError::other(EthSimulateError::BlockNumberInvalid { + got: number, + parent: prev_block_number, + }) + .into()); + } + } + let mut evm_env = this .evm_config() .next_evm_env(&parent, &this.next_env_attributes(&parent)?) @@ -234,6 +250,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA parent = result.block.clone_sealed_header(); + // Update tracking for next iteration's validation + prev_block_number = parent.number(); + let block = simulate::build_simulated_block::( result.block, results, diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 4ba0f331d04..d7f248f0e02 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -39,8 +39,13 @@ pub enum EthSimulateError { #[error("Client adjustable limit reached")] GasLimitReached, /// Block number in sequence did not increase. - #[error("Block number in sequence did not increase")] - BlockNumberInvalid, + #[error("block numbers must be in order: {got} <= {parent}")] + BlockNumberInvalid { + /// The block number that was provided. + got: u64, + /// The parent block number. + parent: u64, + }, /// Block timestamp in sequence did not increase or stay the same. 
#[error("Block timestamp in sequence did not increase")] BlockTimestampInvalid, @@ -96,7 +101,7 @@ impl EthSimulateError { Self::IntrinsicGasTooLow => -38013, Self::InsufficientFunds { .. } => -38014, Self::BlockGasLimitExceeded => -38015, - Self::BlockNumberInvalid => -38020, + Self::BlockNumberInvalid { .. } => -38020, Self::BlockTimestampInvalid => -38021, Self::PrecompileSelfReference => -38022, Self::PrecompileDuplicateAddress => -38023, From 935a2cc056fcffb3c316b97a34c0329dab2395ac Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 15:06:38 +0100 Subject: [PATCH 212/267] fix(rpc): use correct error codes for eth_simulateV1 reverts and halts (#21412) Co-authored-by: Amp --- crates/rpc/rpc-eth-types/src/simulate.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index d7f248f0e02..25b87bd1b20 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -29,6 +29,16 @@ use revm::{ Database, }; +/// Error code for execution reverted in `eth_simulateV1`. +/// +/// +pub const SIMULATE_REVERT_CODE: i32 = -32000; + +/// Error code for VM execution errors (e.g., out of gas) in `eth_simulateV1`. +/// +/// +pub const SIMULATE_VM_ERROR_CODE: i32 = -32015; + /// Errors which may occur during `eth_simulateV1` execution. 
#[derive(Debug, thiserror::Error)] pub enum EthSimulateError { @@ -344,7 +354,7 @@ where return_data: Bytes::new(), error: Some(SimulateError { message: error.to_string(), - code: error.into().code(), + code: SIMULATE_VM_ERROR_CODE, ..SimulateError::invalid_params() }), gas_used, @@ -359,7 +369,7 @@ where return_data: output, error: Some(SimulateError { message: error.to_string(), - code: error.into().code(), + code: SIMULATE_REVERT_CODE, ..SimulateError::invalid_params() }), gas_used, From c7faafd183c1786c322c75e39939cbe9889390ca Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 15:12:28 +0100 Subject: [PATCH 213/267] fix(rpc): add block timestamp validation in eth_simulateV1 (#21397) Co-authored-by: Amp --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 17 ++++++++++++++++- crates/rpc/rpc-eth-types/src/simulate.rs | 13 +++++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index d37f0b5bb41..836ba3c495c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -97,8 +97,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let mut blocks: Vec>> = Vec::with_capacity(block_state_calls.len()); - // Track previous block number for validation + // Track previous block number and timestamp for validation let mut prev_block_number = parent.number(); + let mut prev_timestamp = parent.timestamp(); for block in block_state_calls { // Validate block number ordering if overridden @@ -112,6 +113,19 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA .into()); } } + // Validate timestamp ordering if overridden + if let Some(time) = block + .block_overrides + .as_ref() + .and_then(|o| o.time) + .filter(|&t| t <= prev_timestamp) + { + return Err(EthApiError::other(EthSimulateError::BlockTimestampInvalid { + got: time, + parent: 
prev_timestamp, + }) + .into()); + } let mut evm_env = this .evm_config() @@ -252,6 +266,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA // Update tracking for next iteration's validation prev_block_number = parent.number(); + prev_timestamp = parent.timestamp(); let block = simulate::build_simulated_block::( result.block, diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 25b87bd1b20..c54e31d8a7b 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -56,9 +56,14 @@ pub enum EthSimulateError { /// The parent block number. parent: u64, }, - /// Block timestamp in sequence did not increase or stay the same. - #[error("Block timestamp in sequence did not increase")] - BlockTimestampInvalid, + /// Block timestamp in sequence did not increase. + #[error("block timestamps must be in order: {got} <= {parent}")] + BlockTimestampInvalid { + /// The block timestamp that was provided. + got: u64, + /// The parent block timestamp. + parent: u64, + }, /// Transaction nonce is too low. #[error("nonce too low: next nonce {state}, tx nonce {tx}")] NonceTooLow { @@ -112,7 +117,7 @@ impl EthSimulateError { Self::InsufficientFunds { .. } => -38014, Self::BlockGasLimitExceeded => -38015, Self::BlockNumberInvalid { .. } => -38020, - Self::BlockTimestampInvalid => -38021, + Self::BlockTimestampInvalid { .. 
} => -38021, Self::PrecompileSelfReference => -38022, Self::PrecompileDuplicateAddress => -38023, Self::SenderNotEOA => -38024, From ab685579f038f445e7601ec877a3914af22a2d66 Mon Sep 17 00:00:00 2001 From: figtracer <1gusredo@gmail.com> Date: Mon, 26 Jan 2026 14:37:53 +0000 Subject: [PATCH 214/267] feat(rpc): add transaction hash caching to EthStateCache (#21180) Co-authored-by: Matthias Seitz Co-authored-by: Amp --- crates/node/core/src/args/rpc_server.rs | 1 + crates/node/core/src/args/rpc_state_cache.rs | 10 ++- crates/rpc/rpc-builder/src/config.rs | 1 + .../rpc-eth-api/src/helpers/transaction.rs | 15 +++- crates/rpc/rpc-eth-types/src/block.rs | 57 ++++++++++++++- crates/rpc/rpc-eth-types/src/cache/config.rs | 5 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 73 ++++++++++++++++++- crates/rpc/rpc-eth-types/src/lib.rs | 1 + crates/rpc/rpc-eth-types/src/simulate.rs | 4 +- crates/rpc/rpc-server-types/src/constants.rs | 3 + docs/vocs/docs/pages/cli/op-reth/node.mdx | 5 ++ docs/vocs/docs/pages/cli/reth/node.mdx | 5 ++ 12 files changed, 169 insertions(+), 11 deletions(-) diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 0b0dcc066a3..e44433b1dba 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -1025,6 +1025,7 @@ mod tests { max_receipts: 2000, max_headers: 1000, max_concurrent_db_requests: 512, + max_cached_tx_hashes: 30_000, }, gas_price_oracle: GasPriceOracleArgs { blocks: 20, diff --git a/crates/node/core/src/args/rpc_state_cache.rs b/crates/node/core/src/args/rpc_state_cache.rs index 9568c09f3d4..054fe78e5e4 100644 --- a/crates/node/core/src/args/rpc_state_cache.rs +++ b/crates/node/core/src/args/rpc_state_cache.rs @@ -1,7 +1,7 @@ use clap::Args; use reth_rpc_server_types::constants::cache::{ DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, - DEFAULT_RECEIPT_CACHE_MAX_LEN, + DEFAULT_MAX_CACHED_TX_HASHES, 
DEFAULT_RECEIPT_CACHE_MAX_LEN, }; /// Parameters to configure RPC state cache. @@ -36,6 +36,13 @@ pub struct RpcStateCacheArgs { default_value_t = DEFAULT_CONCURRENT_DB_REQUESTS, )] pub max_concurrent_db_requests: usize, + + /// Maximum number of transaction hashes to cache for transaction lookups. + #[arg( + long = "rpc-cache.max-cached-tx-hashes", + default_value_t = DEFAULT_MAX_CACHED_TX_HASHES, + )] + pub max_cached_tx_hashes: u32, } impl RpcStateCacheArgs { @@ -54,6 +61,7 @@ impl Default for RpcStateCacheArgs { max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, + max_cached_tx_hashes: DEFAULT_MAX_CACHED_TX_HASHES, } } } diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 1acd6744ed9..2d90d3c55c9 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -122,6 +122,7 @@ impl RethRpcServerConfig for RpcServerArgs { max_receipts: self.rpc_state_cache.max_receipts, max_headers: self.rpc_state_cache.max_headers, max_concurrent_db_requests: self.rpc_state_cache.max_concurrent_db_requests, + max_cached_tx_hashes: self.rpc_state_cache.max_cached_tx_hashes, } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 7861362a382..7a073834904 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -619,7 +619,20 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt { Output = Result>>, Self::Error>, > + Send { async move { - // Try to find the transaction on disk + // First, try the RPC cache + if let Some(cached) = self.cache().get_transaction_by_hash(hash).await && + let Some(tx) = cached.recovered_transaction() + { + return Ok(Some(TransactionSource::Block { + transaction: tx.cloned(), + index: cached.tx_index as u64, + block_hash: 
cached.block.hash(), + block_number: cached.block.number(), + base_fee: cached.block.base_fee_per_gas(), + })); + } + + // Cache miss - try to find the transaction on disk if let Some((tx, meta)) = self .spawn_blocking_io(move |this| { this.provider() diff --git a/crates/rpc/rpc-eth-types/src/block.rs b/crates/rpc/rpc-eth-types/src/block.rs index 8e8420f180f..6316effa369 100644 --- a/crates/rpc/rpc-eth-types/src/block.rs +++ b/crates/rpc/rpc-eth-types/src/block.rs @@ -2,15 +2,68 @@ use std::sync::Arc; -use alloy_consensus::TxReceipt; +use alloy_consensus::{transaction::TxHashRef, TxReceipt}; use alloy_primitives::TxHash; use reth_primitives_traits::{ - BlockTy, IndexedTx, NodePrimitives, ReceiptTy, RecoveredBlock, SealedBlock, + Block, BlockBody, BlockTy, IndexedTx, NodePrimitives, ReceiptTy, Recovered, RecoveredBlock, + SealedBlock, }; use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcTypes}; use crate::utils::calculate_gas_used_and_next_log_index; +/// Cached data for a transaction lookup. +#[derive(Debug, Clone)] +pub struct CachedTransaction { + /// The block containing this transaction. + pub block: Arc>, + /// Index of the transaction within the block. + pub tx_index: usize, + /// Receipts for the block, if available. + pub receipts: Option>>, +} + +impl CachedTransaction { + /// Creates a new cached transaction entry. + pub const fn new( + block: Arc>, + tx_index: usize, + receipts: Option>>, + ) -> Self { + Self { block, tx_index, receipts } + } + + /// Returns the `Recovered<&T>` transaction at the cached index. + pub fn recovered_transaction(&self) -> Option::Transaction>> { + self.block.recovered_transaction(self.tx_index) + } + + /// Converts this cached transaction into an RPC receipt using the given converter. + /// + /// Returns `None` if receipts are not available or the transaction index is out of bounds. 
+ pub fn into_receipt( + self, + converter: &C, + ) -> Option::Receipt, C::Error>> + where + N: NodePrimitives, + R: TxReceipt + Clone, + C: RpcConvert, + { + let receipts = self.receipts?; + let receipt = receipts.get(self.tx_index)?; + let tx_hash = *self.block.body().transactions().get(self.tx_index)?.tx_hash(); + let tx = self.block.find_indexed(tx_hash)?; + convert_transaction_receipt::( + self.block.as_ref(), + receipts.as_ref(), + tx, + receipt, + converter, + ) + } +} + /// A pair of an [`Arc`] wrapped [`RecoveredBlock`] and its corresponding receipts. /// /// This type is used throughout the RPC layer to efficiently pass around diff --git a/crates/rpc/rpc-eth-types/src/cache/config.rs b/crates/rpc/rpc-eth-types/src/cache/config.rs index 001a5b4d4d5..1f908e57aa9 100644 --- a/crates/rpc/rpc-eth-types/src/cache/config.rs +++ b/crates/rpc/rpc-eth-types/src/cache/config.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use reth_rpc_server_types::constants::cache::{ DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, - DEFAULT_RECEIPT_CACHE_MAX_LEN, + DEFAULT_MAX_CACHED_TX_HASHES, DEFAULT_RECEIPT_CACHE_MAX_LEN, }; /// Settings for the [`EthStateCache`](super::EthStateCache). @@ -27,6 +27,8 @@ pub struct EthStateCacheConfig { /// /// Default is 512. pub max_concurrent_db_requests: usize, + /// Maximum number of transaction hashes to cache for transaction lookups. 
+ pub max_cached_tx_hashes: u32, } impl Default for EthStateCacheConfig { @@ -36,6 +38,7 @@ impl Default for EthStateCacheConfig { max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, + max_cached_tx_hashes: DEFAULT_MAX_CACHED_TX_HASHES, } } } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 73d8072e6d8..7ae10da83a6 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,17 +1,18 @@ //! Async caching support for eth RPC use super::{EthStateCacheConfig, MultiConsumerLruCache}; -use alloy_consensus::BlockHeader; +use crate::block::CachedTransaction; +use alloy_consensus::{transaction::TxHashRef, BlockHeader}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::B256; +use alloy_primitives::{TxHash, B256}; use futures::{stream::FuturesOrdered, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::Chain; -use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock}; +use reth_primitives_traits::{Block, BlockBody, NodePrimitives, RecoveredBlock}; use reth_storage_api::{BlockReader, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use schnellru::{ByLength, Limiter}; +use schnellru::{ByLength, Limiter, LruMap}; use std::{ future::Future, pin::Pin, @@ -47,6 +48,9 @@ type HeaderResponseSender = oneshot::Sender>; /// The type that can send the response with a chain of cached blocks type CachedParentBlocksResponseSender = oneshot::Sender>>>; +/// The type that can send the response for a transaction hash lookup +type TransactionHashResponseSender = oneshot::Sender>>; + type BlockLruCache = MultiConsumerLruCache>, L, BlockWithSendersResponseSender>; @@ -79,11 +83,13 @@ impl EthStateCache { max_receipts: u32, max_headers: u32, max_concurrent_db_operations: 
usize, + max_cached_tx_hashes: u32, ) -> (Self, EthStateCacheService) where Provider: BlockReader, { let (to_service, rx) = unbounded_channel(); + let service = EthStateCacheService { provider, full_block_cache: BlockLruCache::new(max_blocks, "blocks"), @@ -93,6 +99,7 @@ impl EthStateCache { action_rx: UnboundedReceiverStream::new(rx), action_task_spawner, rate_limiter: Arc::new(Semaphore::new(max_concurrent_db_operations)), + tx_hash_index: LruMap::new(ByLength::new(max_cached_tx_hashes)), }; let cache = Self { to_service }; (cache, service) @@ -127,6 +134,7 @@ impl EthStateCache { max_receipts, max_headers, max_concurrent_db_requests, + max_cached_tx_hashes, } = config; let (this, service) = Self::create( provider, @@ -135,6 +143,7 @@ impl EthStateCache { max_receipts, max_headers, max_concurrent_db_requests, + max_cached_tx_hashes, ); executor.spawn_critical("eth state cache", Box::pin(service)); this @@ -255,6 +264,19 @@ impl EthStateCache { Some(blocks) } } + + /// Looks up a transaction by its hash in the cache index. + /// + /// Returns the cached block, transaction index, and optionally receipts if the transaction + /// is in a cached block. + pub async fn get_transaction_by_hash( + &self, + tx_hash: TxHash, + ) -> Option> { + let (response_tx, rx) = oneshot::channel(); + let _ = self.to_service.send(CacheAction::GetTransactionByHash { tx_hash, response_tx }); + rx.await.ok()? + } } /// Thrown when the cache service task dropped. #[derive(Debug, thiserror::Error)] @@ -317,6 +339,8 @@ pub(crate) struct EthStateCacheService< /// /// This restricts the max concurrent fetch tasks at the same time. rate_limiter: Arc, + /// LRU index mapping transaction hashes to their block hash and index within the block. + tx_hash_index: LruMap, } impl EthStateCacheService @@ -324,6 +348,29 @@ where Provider: BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { + /// Indexes all transactions in a block by transaction hash. 
+ fn index_block_transactions(&mut self, block: &RecoveredBlock) { + let block_hash = block.hash(); + for (tx_idx, tx) in block.body().transactions().iter().enumerate() { + self.tx_hash_index.insert(*tx.tx_hash(), (block_hash, tx_idx)); + } + } + + /// Removes transaction index entries for a reorged block. + /// + /// Only removes entries that still point to this block, preserving mappings for transactions + /// that were re-mined in a new canonical block. + fn remove_block_transactions(&mut self, block: &RecoveredBlock) { + let block_hash = block.hash(); + for tx in block.body().transactions() { + if let Some((mapped_hash, _)) = self.tx_hash_index.get(tx.tx_hash()) && + *mapped_hash == block_hash + { + self.tx_hash_index.remove(tx.tx_hash()); + } + } + } + fn on_new_block( &mut self, block_hash: B256, @@ -550,6 +597,8 @@ where } CacheAction::CacheNewCanonicalChain { chain_change } => { for block in chain_change.blocks { + // Index transactions before caching the block + this.index_block_transactions(&block); this.on_new_block(block.hash(), Ok(Some(Arc::new(block)))); } @@ -562,6 +611,8 @@ where } CacheAction::RemoveReorgedChain { chain_change } => { for block in chain_change.blocks { + // Remove transaction index entries for reorged blocks + this.remove_block_transactions(&block); this.on_reorg_block(block.hash(), Ok(Some(block))); } @@ -596,6 +647,15 @@ where let _ = response_tx.send(blocks); } + CacheAction::GetTransactionByHash { tx_hash, response_tx } => { + let result = + this.tx_hash_index.get(&tx_hash).and_then(|(block_hash, idx)| { + let block = this.full_block_cache.get(block_hash).cloned()?; + let receipts = this.receipts_cache.get(block_hash).cloned(); + Some(CachedTransaction::new(block, *idx, receipts)) + }); + let _ = response_tx.send(result); + } }; this.update_cached_metrics(); } @@ -649,6 +709,11 @@ enum CacheAction { max_blocks: usize, response_tx: CachedParentBlocksResponseSender, }, + /// Look up a transaction's cached data by its hash + 
GetTransactionByHash { + tx_hash: TxHash, + response_tx: TransactionHashResponseSender, + }, } struct BlockReceipts { diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index f5b7e07cea0..ef234e33aad 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -27,6 +27,7 @@ pub mod tx_forward; pub mod utils; pub use alloy_rpc_types_eth::FillTransaction; +pub use block::CachedTransaction; pub use builder::config::{EthConfig, EthFilterConfig}; pub use cache::{ config::EthStateCacheConfig, db::StateCacheDb, multi_consumer::MultiConsumerLruCache, diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index c54e31d8a7b..70c1267dc5f 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -173,7 +173,7 @@ pub fn apply_precompile_overrides( return Ok(()); } - for (source, _dest) in &moves { + for (source, _) in &moves { if precompiles.get(source).is_none() { return Err(EthSimulateError::NotAPrecompile(*source)); } @@ -197,7 +197,7 @@ pub fn apply_precompile_overrides( } } - for (_source, dest, precompile) in extracted { + for (_, dest, precompile) in extracted { precompiles.apply_precompile(&dest, |_| Some(precompile)); } diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index acf5294fe94..8f52f611dbc 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -132,4 +132,7 @@ pub mod cache { /// Default number of concurrent database requests. pub const DEFAULT_CONCURRENT_DB_REQUESTS: usize = 512; + + /// Default maximum number of transaction hashes to cache for lookups. 
+ pub const DEFAULT_MAX_CACHED_TX_HASHES: u32 = 30_000; } diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 98205ad008e..be719a4d1b2 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -501,6 +501,11 @@ RPC State Cache: [default: 512] + --rpc-cache.max-cached-tx-hashes + Maximum number of transaction hashes to cache for transaction lookups + + [default: 30000] + Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 75ee10f7c17..eb8e4d437c9 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -501,6 +501,11 @@ RPC State Cache: [default: 512] + --rpc-cache.max-cached-tx-hashes + Maximum number of transaction hashes to cache for transaction lookups + + [default: 30000] + Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price From b87cde5479e6eb3d80c3faa71ed21201bd3320e2 Mon Sep 17 00:00:00 2001 From: Rez Date: Tue, 27 Jan 2026 02:27:09 +1100 Subject: [PATCH 215/267] feat: configurable EVM execution limits (#21088) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 5 + crates/chainspec/src/lib.rs | 1 + crates/consensus/common/src/validation.rs | 24 +- crates/ethereum/evm/Cargo.toml | 1 + crates/ethereum/node/src/node.rs | 40 +-- crates/evm/evm/src/lib.rs | 3 +- crates/exex/test-utils/src/lib.rs | 8 +- crates/net/network/Cargo.toml | 3 + crates/net/network/src/test_utils/testnet.rs | 13 +- crates/net/network/tests/it/connect.rs | 4 +- crates/net/network/tests/it/txgossip.rs | 10 +- crates/node/builder/src/components/builder.rs | 125 +++++---- crates/node/builder/src/components/pool.rs | 10 +- crates/optimism/node/src/node.rs | 57 ++-- crates/optimism/node/src/rpc.rs | 4 +- crates/optimism/txpool/Cargo.toml | 1 + crates/optimism/txpool/src/lib.rs | 5 +- 
crates/optimism/txpool/src/transaction.rs | 17 +- crates/optimism/txpool/src/validator.rs | 25 +- .../storage/provider/src/test_utils/mock.rs | 15 + crates/transaction-pool/Cargo.toml | 7 + crates/transaction-pool/src/lib.rs | 59 ++-- crates/transaction-pool/src/maintain.rs | 10 +- .../src/validate/constants.rs | 6 - crates/transaction-pool/src/validate/eth.rs | 264 ++++++++++-------- crates/transaction-pool/src/validate/mod.rs | 4 +- crates/transaction-pool/src/validate/task.rs | 32 ++- examples/custom-node-components/src/main.rs | 35 ++- 28 files changed, 440 insertions(+), 348 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d944d04daf..ea860db7ca8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9130,6 +9130,7 @@ dependencies = [ "reth-eth-wire-types", "reth-ethereum-forks", "reth-ethereum-primitives", + "reth-evm-ethereum", "reth-fs-util", "reth-metrics", "reth-net-banlist", @@ -9997,6 +9998,7 @@ dependencies = [ "parking_lot", "reth-chain-state", "reth-chainspec", + "reth-evm", "reth-metrics", "reth-optimism-chainspec", "reth-optimism-evm", @@ -11051,6 +11053,8 @@ dependencies = [ "reth-chainspec", "reth-eth-wire-types", "reth-ethereum-primitives", + "reth-evm", + "reth-evm-ethereum", "reth-execution-types", "reth-fs-util", "reth-metrics", @@ -11059,6 +11063,7 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", + "revm", "revm-interpreter", "revm-primitives", "rustc-hash", diff --git a/crates/chainspec/src/lib.rs b/crates/chainspec/src/lib.rs index 2ba17ebf2ae..8a30084eaa2 100644 --- a/crates/chainspec/src/lib.rs +++ b/crates/chainspec/src/lib.rs @@ -25,6 +25,7 @@ pub use alloy_chains::{Chain, ChainKind, NamedChain}; /// Re-export for convenience pub use reth_ethereum_forks::*; +pub use alloy_evm::EvmLimitParams; pub use api::EthChainSpec; pub use info::ChainInfo; #[cfg(any(test, feature = "test-utils"))] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 6db265befd6..6ba01122c46 
100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,14 +1,11 @@ //! Collection of methods for block validation. -use alloy_consensus::{BlockHeader as _, Transaction, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{BlockHeader as _, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams}; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; -use reth_consensus::{ConsensusError, TxGasLimitTooHighErr}; +use reth_consensus::ConsensusError; use reth_primitives_traits::{ - constants::{ - GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MAX_TX_GAS_LIMIT_OSAKA, MINIMUM_GAS_LIMIT, - }, - transaction::TxHashRef, + constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, Block, BlockBody, BlockHeader, GotExpected, SealedBlock, SealedHeader, }; @@ -146,7 +143,7 @@ pub fn validate_block_pre_execution( ) -> Result<(), ConsensusError> where B: Block, - ChainSpec: EthereumHardforks, + ChainSpec: EthChainSpec + EthereumHardforks, { post_merge_hardfork_fields(block, chain_spec)?; @@ -154,19 +151,6 @@ where if let Err(error) = block.ensure_transaction_root_valid() { return Err(ConsensusError::BodyTransactionRootDiff(error.into())) } - // EIP-7825 validation - if chain_spec.is_osaka_active_at_timestamp(block.timestamp()) { - for tx in block.body().transactions() { - if tx.gas_limit() > MAX_TX_GAS_LIMIT_OSAKA { - return Err(TxGasLimitTooHighErr { - tx_hash: *tx.tx_hash(), - gas_limit: tx.gas_limit(), - max_allowed: MAX_TX_GAS_LIMIT_OSAKA, - } - .into()); - } - } - } Ok(()) } diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index fbbbeeed836..643e0483246 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -59,6 +59,7 @@ std = [ "reth-storage-errors/std", ] test-utils = [ + "std", "dep:parking_lot", "dep:derive_more", "reth-chainspec/test-utils", diff --git a/crates/ethereum/node/src/node.rs 
b/crates/ethereum/node/src/node.rs index da93390154a..2a645ad2741 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -18,7 +18,7 @@ use reth_evm::{ }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, BlockTy, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, + AddOnsContext, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, PayloadAttributesBuilder, PrimitivesTy, TxTy, }; use reth_node_builder::{ @@ -53,8 +53,8 @@ use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, EthPooledTransaction, EthTransactionPool, PoolPooledTx, - PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, + blobstore::DiskFileBlobStore, EthTransactionPool, PoolPooledTx, PoolTransaction, + TransactionPool, TransactionValidationTaskExecutor, }; use revm::context::TxEnv; use std::{marker::PhantomData, sync::Arc, time::SystemTime}; @@ -456,18 +456,22 @@ pub struct EthereumPoolBuilder { // TODO add options for txpool args } -impl PoolBuilder for EthereumPoolBuilder +impl PoolBuilder for EthereumPoolBuilder where Types: NodeTypes< ChainSpec: EthereumHardforks, Primitives: NodePrimitives, >, Node: FullNodeTypes, + Evm: ConfigureEvm> + Clone + 'static, { - type Pool = - EthTransactionPool>; + type Pool = EthTransactionPool; - async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { + async fn build_pool( + self, + ctx: &BuilderContext, + evm_config: Evm, + ) -> eyre::Result { let pool_config = ctx.pool_config(); let blobs_disabled = ctx.config().txpool.disable_blobs_support || @@ -493,17 +497,17 @@ where let blob_store = reth_node_builder::components::create_blob_store_with_cache(ctx, blob_cache_size)?; - let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) - 
.with_head_timestamp(ctx.head().timestamp) - .set_eip4844(!blobs_disabled) - .kzg_settings(ctx.kzg_settings()?) - .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) - .with_local_transactions_config(pool_config.local_transactions_config.clone()) - .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) - .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) - .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) - .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) - .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); + let validator = + TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone(), evm_config) + .set_eip4844(!blobs_disabled) + .kzg_settings(ctx.kzg_settings()?) + .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) + .with_local_transactions_config(pool_config.local_transactions_config.clone()) + .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) + .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) + .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) + .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) + .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); if validator.validator().eip4844() { // initializing the KZG settings can be expensive, this should be done upfront so that diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index e5bd089255a..bf5ca7ae1de 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -35,7 +35,7 @@ use reth_execution_errors::BlockExecutionError; use reth_primitives_traits::{ BlockTy, HeaderTy, NodePrimitives, ReceiptTy, SealedBlock, SealedHeader, TxTy, }; -use revm::{context::TxEnv, database::State}; +use revm::{context::TxEnv, database::State, primitives::hardfork::SpecId}; pub mod either; /// EVM environment configuration. 
@@ -203,6 +203,7 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { + FromRecoveredTx> + FromTxWithEncoded>, Precompiles = PrecompilesMap, + Spec: Into, >, >; diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 8430ea5d91f..3fc75488e18 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -66,13 +66,17 @@ use tokio::sync::mpsc::{Sender, UnboundedReceiver}; #[non_exhaustive] pub struct TestPoolBuilder; -impl PoolBuilder for TestPoolBuilder +impl PoolBuilder for TestPoolBuilder where Node: FullNodeTypes>>, { type Pool = TestPool; - async fn build_pool(self, _ctx: &BuilderContext) -> eyre::Result { + async fn build_pool( + self, + _ctx: &BuilderContext, + _evm_config: Evm, + ) -> eyre::Result { Ok(testing_pool()) } } diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index cbe93a2386e..62252155e3b 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-evm-ethereum = { workspace = true, optional = true } reth-fs-util.workspace = true reth-primitives-traits.workspace = true reth-net-banlist.workspace = true @@ -136,6 +137,8 @@ test-utils = [ "reth-primitives-traits/test-utils", "reth-provider/test-utils", "reth-ethereum-primitives/test-utils", + "dep:reth-evm-ethereum", + "reth-evm-ethereum?/test-utils", ] [[bench]] diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index aae1f7708e0..2212963c916 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -19,6 +19,7 @@ use reth_eth_wire::{ protocol::Protocol, DisconnectReason, EthNetworkPrimitives, HelloMessageWithProtocols, }; use reth_ethereum_primitives::{PooledTransactionVariant, TransactionSigned}; +use reth_evm_ethereum::EthEvmConfig; use reth_network_api::{ events::{PeerEvent, 
SessionInfo}, test_utils::{PeersHandle, PeersHandleProvider}, @@ -182,17 +183,20 @@ where C: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt - + HeaderProvider + + HeaderProvider
+ Clone + 'static, Pool: TransactionPool, { /// Installs an eth pool on each peer - pub fn with_eth_pool(self) -> Testnet> { + pub fn with_eth_pool( + self, + ) -> Testnet> { self.map_pool(|peer| { let blob_store = InMemoryBlobStore::default(); let pool = TransactionValidationTaskExecutor::eth( peer.client.clone(), + EthEvmConfig::mainnet(), blob_store.clone(), TokioTaskExecutor::default(), ); @@ -208,7 +212,7 @@ where pub fn with_eth_pool_config( self, tx_manager_config: TransactionsManagerConfig, - ) -> Testnet> { + ) -> Testnet> { self.with_eth_pool_config_and_policy(tx_manager_config, Default::default()) } @@ -217,11 +221,12 @@ where self, tx_manager_config: TransactionsManagerConfig, policy: TransactionPropagationKind, - ) -> Testnet> { + ) -> Testnet> { self.map_pool(|peer| { let blob_store = InMemoryBlobStore::default(); let pool = TransactionValidationTaskExecutor::eth( peer.client.clone(), + EthEvmConfig::mainnet(), blob_store.clone(), TokioTaskExecutor::default(), ); diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index d11c6b95418..a8d409ad69b 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -20,6 +20,7 @@ use reth_network_p2p::{ }; use reth_network_peers::{mainnet_nodes, NodeRecord, TrustedPeer}; use reth_network_types::peers::config::PeerBackoffDurations; +use reth_provider::test_utils::MockEthProvider; use reth_storage_api::noop::NoopProvider; use reth_tracing::init_test_tracing; use reth_transaction_pool::test_utils::testing_pool; @@ -655,7 +656,8 @@ async fn new_random_peer( async fn test_connect_many() { reth_tracing::init_test_tracing(); - let net = Testnet::create_with(5, NoopProvider::default()).await; + let provider = MockEthProvider::default().with_genesis_block(); + let net = Testnet::create_with(5, provider).await; // install request handlers let net = net.with_eth_pool(); diff --git a/crates/net/network/tests/it/txgossip.rs 
b/crates/net/network/tests/it/txgossip.rs index d0f192cff5e..1b518c50e9c 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -22,7 +22,7 @@ use tokio::join; async fn test_tx_gossip() { reth_tracing::init_test_tracing(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); let net = Testnet::create_with(2, provider.clone()).await; // install request handlers @@ -61,7 +61,7 @@ async fn test_tx_gossip() { async fn test_tx_propagation_policy_trusted_only() { reth_tracing::init_test_tracing(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); let policy = TransactionPropagationKind::Trusted; let net = Testnet::create_with(2, provider.clone()).await; @@ -129,7 +129,7 @@ async fn test_tx_propagation_policy_trusted_only() { async fn test_tx_ingress_policy_trusted_only() { reth_tracing::init_test_tracing(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); let tx_manager_config = TransactionsManagerConfig { ingress_policy: TransactionIngressPolicy::Trusted, @@ -195,7 +195,7 @@ async fn test_tx_ingress_policy_trusted_only() { #[tokio::test(flavor = "multi_thread")] async fn test_4844_tx_gossip_penalization() { reth_tracing::init_test_tracing(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); let net = Testnet::create_with(2, provider.clone()).await; // install request handlers @@ -246,7 +246,7 @@ async fn test_4844_tx_gossip_penalization() { #[tokio::test(flavor = "multi_thread")] async fn test_sending_invalid_transactions() { reth_tracing::init_test_tracing(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); let net = Testnet::create_with(2, provider.clone()).await; // install request handlers let net = net.with_eth_pool(); 
diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 216025d8e56..1d1600f6a4c 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -62,12 +62,12 @@ impl pool_builder, payload_builder, network_builder, - executor_builder: evm_builder, + executor_builder, consensus_builder, _marker, } = self; ComponentsBuilder { - executor_builder: evm_builder, + executor_builder, pool_builder, payload_builder, network_builder, @@ -149,15 +149,12 @@ where pub fn pool( self, pool_builder: PB, - ) -> ComponentsBuilder - where - PB: PoolBuilder, - { + ) -> ComponentsBuilder { let Self { pool_builder: _, payload_builder, network_builder, - executor_builder: evm_builder, + executor_builder, consensus_builder, _marker, } = self; @@ -165,7 +162,7 @@ where pool_builder, payload_builder, network_builder, - executor_builder: evm_builder, + executor_builder, consensus_builder, _marker, } @@ -185,30 +182,23 @@ where _marker: self._marker, } } -} -impl - ComponentsBuilder -where - Node: FullNodeTypes, - PoolB: PoolBuilder, -{ - /// Configures the network builder. + /// Configures the executor builder. /// - /// This accepts a [`NetworkBuilder`] instance that will be used to create the node's network - /// stack. - pub fn network( + /// This accepts a [`ExecutorBuilder`] instance that will be used to create the node's + /// components for execution. 
+ pub fn executor( self, - network_builder: NB, - ) -> ComponentsBuilder + executor_builder: EB, + ) -> ComponentsBuilder where - NB: NetworkBuilder, + EB: ExecutorBuilder, { let Self { pool_builder, payload_builder, - network_builder: _, - executor_builder: evm_builder, + network_builder, + executor_builder: _, consensus_builder, _marker, } = self; @@ -216,58 +206,65 @@ where pool_builder, payload_builder, network_builder, - executor_builder: evm_builder, + executor_builder, consensus_builder, _marker, } } - /// Configures the payload builder. + /// Configures the consensus builder. /// - /// This accepts a [`PayloadServiceBuilder`] instance that will be used to create the node's - /// payload builder service. - pub fn payload( + /// This accepts a [`ConsensusBuilder`] instance that will be used to create the node's + /// components for consensus. + pub fn consensus( self, - payload_builder: PB, - ) -> ComponentsBuilder + consensus_builder: CB, + ) -> ComponentsBuilder where - ExecB: ExecutorBuilder, - PB: PayloadServiceBuilder, + CB: ConsensusBuilder, { let Self { pool_builder, - payload_builder: _, + payload_builder, network_builder, - executor_builder: evm_builder, - consensus_builder, + executor_builder, + consensus_builder: _, _marker, } = self; ComponentsBuilder { pool_builder, payload_builder, network_builder, - executor_builder: evm_builder, + executor_builder, consensus_builder, _marker, } } +} - /// Configures the executor builder. +impl + ComponentsBuilder +where + Node: FullNodeTypes, + ExecB: ExecutorBuilder, + PoolB: PoolBuilder, +{ + /// Configures the network builder. /// - /// This accepts a [`ExecutorBuilder`] instance that will be used to create the node's - /// components for execution. - pub fn executor( + /// This accepts a [`NetworkBuilder`] instance that will be used to create the node's network + /// stack. 
+ pub fn network( self, - executor_builder: EB, - ) -> ComponentsBuilder + network_builder: NB, + ) -> ComponentsBuilder where - EB: ExecutorBuilder, + NB: NetworkBuilder, { let Self { pool_builder, payload_builder, - network_builder, - executor_builder: _, + network_builder: _, + executor_builder, consensus_builder, _marker, } = self; @@ -281,24 +278,23 @@ where } } - /// Configures the consensus builder. + /// Configures the payload builder. /// - /// This accepts a [`ConsensusBuilder`] instance that will be used to create the node's - /// components for consensus. - pub fn consensus( + /// This accepts a [`PayloadServiceBuilder`] instance that will be used to create the node's + /// payload builder service. + pub fn payload( self, - consensus_builder: CB, - ) -> ComponentsBuilder + payload_builder: PB, + ) -> ComponentsBuilder where - CB: ConsensusBuilder, + PB: PayloadServiceBuilder, { let Self { pool_builder, - payload_builder, + payload_builder: _, network_builder, executor_builder, - consensus_builder: _, - + consensus_builder, _marker, } = self; ComponentsBuilder { @@ -358,7 +354,7 @@ impl NodeComponentsBuilder for ComponentsBuilder where Node: FullNodeTypes, - PoolB: PoolBuilder, + PoolB: PoolBuilder, NetworkB: NetworkBuilder< Node, PoolB::Pool, @@ -384,13 +380,13 @@ where pool_builder, payload_builder, network_builder, - executor_builder: evm_builder, + executor_builder, consensus_builder, _marker, } = self; - let evm_config = evm_builder.build_evm(context).await?; - let pool = pool_builder.build_pool(context).await?; + let evm_config = executor_builder.build_evm(context).await?; + let pool = pool_builder.build_pool(context, evm_config.clone()).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder_handle = payload_builder .spawn_payload_builder_service(context, pool.clone(), evm_config.clone()) @@ -471,14 +467,19 @@ where #[derive(Debug, Clone)] pub struct NoopTransactionPoolBuilder(PhantomData); -impl 
PoolBuilder for NoopTransactionPoolBuilder +impl PoolBuilder for NoopTransactionPoolBuilder where N: FullNodeTypes, Tx: EthPoolTransaction> + Unpin, + Evm: Send, { type Pool = NoopTransactionPool; - async fn build_pool(self, _ctx: &BuilderContext) -> eyre::Result { + async fn build_pool( + self, + _ctx: &BuilderContext, + _evm_config: Evm, + ) -> eyre::Result { Ok(NoopTransactionPool::::new()) } } diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 9f32b279154..3ca42282f40 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -12,7 +12,7 @@ use reth_transaction_pool::{ use std::{collections::HashSet, future::Future}; /// A type that knows how to build the transaction pool. -pub trait PoolBuilder: Send { +pub trait PoolBuilder: Send { /// The transaction pool to build. type Pool: TransactionPool>> + Unpin @@ -22,16 +22,17 @@ pub trait PoolBuilder: Send { fn build_pool( self, ctx: &BuilderContext, + evm_config: Evm, ) -> impl Future> + Send; } -impl PoolBuilder for F +impl PoolBuilder for F where Node: FullNodeTypes, Pool: TransactionPool>> + Unpin + 'static, - F: FnOnce(&BuilderContext) -> Fut + Send, + F: FnOnce(&BuilderContext, Evm) -> Fut + Send, Fut: Future> + Send, { type Pool = Pool; @@ -39,8 +40,9 @@ where fn build_pool( self, ctx: &BuilderContext, + evm_config: Evm, ) -> impl Future> { - self(ctx) + self(ctx, evm_config) } } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 6ee024ace97..51d0ff3022d 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -16,7 +16,7 @@ use reth_network::{ PeersInfo, }; use reth_node_api::{ - AddOnsContext, BlockTy, BuildNextEnv, EngineTypes, FullNodeComponents, HeaderTy, NodeAddOns, + AddOnsContext, BuildNextEnv, EngineTypes, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, PayloadAttributesBuilder, PayloadTypes, PrimitivesTy, TxTy, }; use 
reth_node_builder::{ @@ -165,6 +165,7 @@ impl OpNode { self.args; ComponentsBuilder::default() .node_types::() + .executor(OpExecutorBuilder::default()) .pool( OpPoolBuilder::default() .with_enable_tx_conditional(self.args.enable_tx_conditional) @@ -173,7 +174,6 @@ impl OpNode { self.args.supervisor_safety_level, ), ) - .executor(OpExecutorBuilder::default()) .payload(BasicPayloadServiceBuilder::new( OpPayloadBuilder::new(compute_pending_block) .with_da_config(self.da_config.clone()) @@ -957,14 +957,19 @@ impl OpPoolBuilder { } } -impl PoolBuilder for OpPoolBuilder +impl PoolBuilder for OpPoolBuilder where Node: FullNodeTypes>, T: EthPoolTransaction> + OpPooledTx, + Evm: ConfigureEvm> + Clone + 'static, { - type Pool = OpTransactionPool>; + type Pool = OpTransactionPool; - async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { + async fn build_pool( + self, + ctx: &BuilderContext, + evm_config: Evm, + ) -> eyre::Result { let Self { pool_config_overrides, .. } = self; // supervisor used for interop @@ -982,27 +987,27 @@ where .await; let blob_store = reth_node_builder::components::create_blob_store(ctx)?; - let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) - .no_eip4844() - .with_head_timestamp(ctx.head().timestamp) - .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) - .kzg_settings(ctx.kzg_settings()?) 
- .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) - .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) - .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) - .with_additional_tasks( - pool_config_overrides - .additional_validation_tasks - .unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks), - ) - .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()) - .map(|validator| { - OpTransactionValidator::new(validator) - // In --dev mode we can't require gas fees because we're unable to decode - // the L1 block info - .require_l1_data_gas_fee(!ctx.config().dev.dev) - .with_supervisor(supervisor_client.clone()) - }); + let validator = + TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone(), evm_config) + .no_eip4844() + .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) + .kzg_settings(ctx.kzg_settings()?) + .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) + .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) + .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) + .with_additional_tasks( + pool_config_overrides + .additional_validation_tasks + .unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks), + ) + .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()) + .map(|validator| { + OpTransactionValidator::new(validator) + // In --dev mode we can't require gas fees because we're unable to decode + // the L1 block info + .require_l1_data_gas_fee(!ctx.config().dev.dev) + .with_supervisor(supervisor_client.clone()) + }); let final_pool_config = pool_config_overrides.apply(ctx.pool_config()); diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 9030935d64f..33de471753c 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -52,9 +52,9 @@ //! ComponentsBuilder::default() //! .node_types::>() //! .noop_pool::() -//! .noop_network::() -//! .noop_consensus() //! 
.executor(OpExecutorBuilder::default()) +//! .noop_consensus() +//! .noop_network::() //! .noop_payload(), //! Box::new(()) as Box>, //! ) diff --git a/crates/optimism/txpool/Cargo.toml b/crates/optimism/txpool/Cargo.toml index a524f3f4df1..3737d23cf0d 100644 --- a/crates/optimism/txpool/Cargo.toml +++ b/crates/optimism/txpool/Cargo.toml @@ -23,6 +23,7 @@ alloy-serde.workspace = true # reth reth-chainspec.workspace = true +reth-evm.workspace = true reth-primitives-traits.workspace = true reth-chain-state.workspace = true reth-storage-api.workspace = true diff --git a/crates/optimism/txpool/src/lib.rs b/crates/optimism/txpool/src/lib.rs index b2c240abe14..5cee963064b 100644 --- a/crates/optimism/txpool/src/lib.rs +++ b/crates/optimism/txpool/src/lib.rs @@ -9,7 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg))] mod validator; -use op_alloy_consensus::OpBlock; pub use validator::{OpL1BlockInfo, OpTransactionValidator}; pub mod conditional; @@ -25,8 +24,8 @@ pub mod estimated_da_size; use reth_transaction_pool::{CoinbaseTipOrdering, Pool, TransactionValidationTaskExecutor}; /// Type alias for default optimism transaction pool -pub type OpTransactionPool = Pool< - TransactionValidationTaskExecutor>, +pub type OpTransactionPool = Pool< + TransactionValidationTaskExecutor>, CoinbaseTipOrdering, S, >; diff --git a/crates/optimism/txpool/src/transaction.rs b/crates/optimism/txpool/src/transaction.rs index fa2ec80e4d8..d13ba555ba1 100644 --- a/crates/optimism/txpool/src/transaction.rs +++ b/crates/optimism/txpool/src/transaction.rs @@ -316,7 +316,8 @@ mod tests { use alloy_primitives::{TxKind, U256}; use op_alloy_consensus::TxDeposit; use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_primitives::OpTransactionSigned; + use reth_optimism_evm::OpEvmConfig; + use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, 
TransactionOrigin, @@ -324,12 +325,14 @@ mod tests { }; #[tokio::test] async fn validate_optimism_transaction() { - let client = MockEthProvider::default().with_chain_spec(OP_MAINNET.clone()); - let validator = - EthTransactionValidatorBuilder::new(client) - .no_shanghai() - .no_cancun() - .build::<_, _, reth_optimism_primitives::OpBlock>(InMemoryBlobStore::default()); + let client = MockEthProvider::::new() + .with_chain_spec(OP_MAINNET.clone()) + .with_genesis_block(); + let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); + let validator = EthTransactionValidatorBuilder::new(client, evm_config) + .no_shanghai() + .no_cancun() + .build(InMemoryBlobStore::default()); let validator = OpTransactionValidator::new(validator); let origin = TransactionOrigin::External; diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 900a005d852..99f03e1a1e7 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -1,13 +1,14 @@ use crate::{supervisor::SupervisorClient, InvalidCrossTx, OpPooledTx}; use alloy_consensus::{BlockHeader, Transaction}; -use op_alloy_consensus::OpBlock; use op_revm::L1BlockInfo; use parking_lot::RwLock; use reth_chainspec::ChainSpecProvider; +use reth_evm::ConfigureEvm; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; use reth_primitives_traits::{ - transaction::error::InvalidTransactionError, Block, BlockBody, GotExpected, SealedBlock, + transaction::error::InvalidTransactionError, Block, BlockBody, BlockTy, GotExpected, + SealedBlock, }; use reth_storage_api::{AccountInfoReader, BlockReaderIdExt, StateProviderFactory}; use reth_transaction_pool::{ @@ -40,9 +41,9 @@ impl OpL1BlockInfo { /// Validator for Optimism transactions. #[derive(Debug, Clone)] -pub struct OpTransactionValidator { +pub struct OpTransactionValidator { /// The type that performs the actual validation. 
- inner: Arc>, + inner: Arc>, /// Additional block info required for validation. block_info: Arc, /// If true, ensure that the transaction's sender has enough balance to cover the L1 gas fee @@ -55,7 +56,7 @@ pub struct OpTransactionValidator { fork_tracker: Arc, } -impl OpTransactionValidator { +impl OpTransactionValidator { /// Returns the configured chain spec pub fn chain_spec(&self) -> Arc where @@ -87,15 +88,15 @@ impl OpTransactionValidator { } } -impl OpTransactionValidator +impl OpTransactionValidator where Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt + Sync, Tx: EthPoolTransaction + OpPooledTx, - B: Block, + Evm: ConfigureEvm, { /// Create a new [`OpTransactionValidator`]. - pub fn new(inner: EthTransactionValidator) -> Self { + pub fn new(inner: EthTransactionValidator) -> Self { let this = Self::with_block_info(inner, OpL1BlockInfo::default()); if let Ok(Some(block)) = this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) @@ -114,7 +115,7 @@ where /// Create a new [`OpTransactionValidator`] with the given [`OpL1BlockInfo`]. 
pub fn with_block_info( - inner: EthTransactionValidator, + inner: EthTransactionValidator, block_info: OpL1BlockInfo, ) -> Self { Self { @@ -290,15 +291,15 @@ where } } -impl TransactionValidator for OpTransactionValidator +impl TransactionValidator for OpTransactionValidator where Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt + Sync, Tx: EthPoolTransaction + OpPooledTx, - B: Block, + Evm: ConfigureEvm, { type Transaction = Tx; - type Block = B; + type Block = BlockTy; async fn validate_transaction( &self, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index f9a2f980ef2..d54324c54c4 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -188,6 +188,21 @@ impl MockEthProvider { prune_modes: self.prune_modes, } } + + /// Adds the genesis block from the chain spec to the provider. + /// + /// This is useful for tests that require a valid latest block (e.g., transaction validation). + pub fn with_genesis_block(self) -> Self + where + ChainSpec: EthChainSpec
::Header>, + ::Body: Default, + { + let genesis_hash = self.chain_spec.genesis_hash(); + let genesis_header = self.chain_spec.genesis_header().clone(); + let genesis_block = T::Block::new(genesis_header, Default::default()); + self.add_block(genesis_hash, genesis_block); + self + } } impl Default for MockEthProvider { diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 02030719840..2fe1b88a6b2 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -17,11 +17,14 @@ reth-chain-state.workspace = true reth-ethereum-primitives.workspace = true reth-chainspec.workspace = true reth-eth-wire-types.workspace = true +reth-evm.workspace = true +reth-evm-ethereum.workspace = true reth-primitives-traits.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true reth-tasks.workspace = true +revm.workspace = true revm-interpreter.workspace = true revm-primitives.workspace = true @@ -92,6 +95,7 @@ serde = [ "reth-ethereum-primitives/serde", "reth-chain-state/serde", "reth-storage-api/serde", + "revm/serde", ] test-utils = [ "rand", @@ -103,6 +107,8 @@ test-utils = [ "reth-primitives-traits/test-utils", "reth-ethereum-primitives/test-utils", "alloy-primitives/rand", + "reth-evm/test-utils", + "reth-evm-ethereum/test-utils", ] arbitrary = [ "proptest", @@ -118,6 +124,7 @@ arbitrary = [ "revm-interpreter/arbitrary", "reth-ethereum-primitives/arbitrary", "revm-primitives/arbitrary", + "revm/arbitrary", ] [[bench]] diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 7fbdda5f291..37904e9d65a 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -197,16 +197,22 @@ //! //! ``` //! use reth_chainspec::MAINNET; -//! use reth_storage_api::StateProviderFactory; +//! use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; //! 
use reth_chainspec::ChainSpecProvider; //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool}; //! use reth_transaction_pool::blobstore::InMemoryBlobStore; //! use reth_chainspec::EthereumHardforks; -//! async fn t(client: C) where C: ChainSpecProvider + StateProviderFactory + Clone + 'static{ +//! use reth_evm::ConfigureEvm; +//! use alloy_consensus::Header; +//! async fn t(client: C, evm_config: Evm) +//! where +//! C: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt
+ Clone + 'static, +//! Evm: ConfigureEvm> + 'static, +//! { //! let blob_store = InMemoryBlobStore::default(); //! let pool = Pool::eth_pool( -//! TransactionValidationTaskExecutor::eth(client, blob_store.clone(), TokioTaskExecutor::default()), +//! TransactionValidationTaskExecutor::eth(client, evm_config, blob_store.clone(), TokioTaskExecutor::default()), //! blob_store, //! Default::default(), //! ); @@ -235,18 +241,21 @@ //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool}; //! use reth_transaction_pool::blobstore::InMemoryBlobStore; //! use reth_transaction_pool::maintain::{maintain_transaction_pool_future}; +//! use reth_evm::ConfigureEvm; +//! use reth_ethereum_primitives::EthPrimitives; //! use alloy_consensus::Header; //! -//! async fn t(client: C, stream: St) +//! async fn t(client: C, stream: St, evm_config: Evm) //! where C: StateProviderFactory + BlockReaderIdExt
+ ChainSpecProvider + Clone + 'static, -//! St: Stream + Send + Unpin + 'static, +//! St: Stream> + Send + Unpin + 'static, +//! Evm: ConfigureEvm + 'static, //! { //! let blob_store = InMemoryBlobStore::default(); //! let rt = tokio::runtime::Runtime::new().unwrap(); //! let manager = TaskManager::new(rt.handle().clone()); //! let executor = manager.executor(); //! let pool = Pool::eth_pool( -//! TransactionValidationTaskExecutor::eth(client.clone(), blob_store.clone(), executor.clone()), +//! TransactionValidationTaskExecutor::eth(client.clone(), evm_config, blob_store.clone(), executor.clone()), //! blob_store, //! Default::default(), //! ); @@ -302,9 +311,11 @@ use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_eth_wire_types::HandleMempoolData; +use reth_evm::ConfigureEvm; +use reth_evm_ethereum::EthEvmConfig; use reth_execution_types::ChangedAccount; -use reth_primitives_traits::Recovered; -use reth_storage_api::StateProviderFactory; +use reth_primitives_traits::{HeaderTy, Recovered}; +use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; @@ -328,13 +339,8 @@ mod traits; pub mod test_utils; /// Type alias for default ethereum transaction pool -pub type EthTransactionPool< - Client, - S, - T = EthPooledTransaction, - B = reth_ethereum_primitives::Block, -> = Pool< - TransactionValidationTaskExecutor>, +pub type EthTransactionPool = Pool< + TransactionValidationTaskExecutor>, CoinbaseTipOrdering, S, >; @@ -415,11 +421,15 @@ where } } -impl EthTransactionPool +impl EthTransactionPool where - Client: - ChainSpecProvider + StateProviderFactory + Clone + 'static, + Client: ChainSpecProvider + + StateProviderFactory + + Clone + + BlockReaderIdExt
> + + 'static, S: BlobStore, + Evm: ConfigureEvm + 'static, { /// Returns a new [`Pool`] that uses the default [`TransactionValidationTaskExecutor`] when /// validating [`EthPooledTransaction`]s and ords via [`CoinbaseTipOrdering`] @@ -428,18 +438,25 @@ where /// /// ``` /// use reth_chainspec::MAINNET; - /// use reth_storage_api::StateProviderFactory; + /// use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; /// use reth_tasks::TokioTaskExecutor; /// use reth_chainspec::ChainSpecProvider; /// use reth_transaction_pool::{ /// blobstore::InMemoryBlobStore, Pool, TransactionValidationTaskExecutor, /// }; /// use reth_chainspec::EthereumHardforks; - /// # fn t(client: C) where C: ChainSpecProvider + StateProviderFactory + Clone + 'static { + /// use reth_evm::ConfigureEvm; + /// use alloy_consensus::Header; + /// # fn t(client: C, evm_config: Evm) + /// # where + /// # C: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt
+ Clone + 'static, + /// # Evm: ConfigureEvm> + 'static, + /// # { /// let blob_store = InMemoryBlobStore::default(); /// let pool = Pool::eth_pool( /// TransactionValidationTaskExecutor::eth( /// client, + /// evm_config, /// blob_store.clone(), /// TokioTaskExecutor::default(), /// ), @@ -450,7 +467,7 @@ where /// ``` pub fn eth_pool( validator: TransactionValidationTaskExecutor< - EthTransactionValidator, + EthTransactionValidator, >, blob_store: S, config: PoolConfig, diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index ca4546e7892..efe7a958871 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -857,12 +857,12 @@ mod tests { use super::*; use crate::{ blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, - CoinbaseTipOrdering, EthPooledTransaction, EthTransactionValidator, Pool, - TransactionOrigin, + CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionOrigin, }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; use reth_ethereum_primitives::PooledTransactionVariant; + use reth_evm_ethereum::EthEvmConfig; use reth_fs_util as fs; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_tasks::TaskManager; @@ -886,14 +886,14 @@ mod tests { "02f87201830655c2808505ef61f08482565f94388c818ca8b9251b393131c08a736a67ccb192978801049e39c4b5b1f580c001a01764ace353514e8abdfb92446de356b260e3c1225b73fc4c8876a6258d12a129a04f02294aa61ca7676061cd99f29275491218b4754b46a0248e5e42bc5091f507" ); let tx = PooledTransactionVariant::decode_2718(&mut &tx_bytes[..]).unwrap(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); let transaction = EthPooledTransaction::from_pooled(tx.try_into_recovered().unwrap()); let tx_to_cmp = transaction.clone(); let sender = hex!("1f9090aaE28b8a3dCeaDf281B0F12828e676c326").into(); provider.add_account(sender, 
ExtendedAccount::new(42, U256::MAX)); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _, reth_ethereum_primitives::Block> = - EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); + let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet()) + .build(blob_store.clone()); let txpool = Pool::new( validator, diff --git a/crates/transaction-pool/src/validate/constants.rs b/crates/transaction-pool/src/validate/constants.rs index d4fca5a2aeb..cb207939172 100644 --- a/crates/transaction-pool/src/validate/constants.rs +++ b/crates/transaction-pool/src/validate/constants.rs @@ -10,9 +10,3 @@ pub const TX_SLOT_BYTE_SIZE: usize = 32 * 1024; /// to validate whether they fit into the pool or not. Default is 4 times [`TX_SLOT_BYTE_SIZE`], /// which defaults to 32 KiB, so 128 KiB. pub const DEFAULT_MAX_TX_INPUT_BYTES: usize = 4 * TX_SLOT_BYTE_SIZE; // 128KB - -/// Maximum bytecode to permit for a contract. -pub const MAX_CODE_BYTE_SIZE: usize = revm_primitives::eip170::MAX_CODE_SIZE; - -/// Maximum initcode to permit in a creation transaction and create instructions. 
-pub const MAX_INIT_CODE_BYTE_SIZE: usize = revm_primitives::eip3860::MAX_INITCODE_SIZE; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 1775387c68f..c4c7d7ebda6 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -8,7 +8,7 @@ use crate::{ }, metrics::TxPoolValidationMetrics, traits::TransactionOrigin, - validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_BYTE_SIZE}, + validate::{ValidTransaction, ValidationTask}, Address, BlobTransactionSidecarVariant, EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, @@ -23,20 +23,22 @@ use alloy_consensus::{ }; use alloy_eips::{ eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M, eip4844::env_settings::EnvKzgSettings, - eip7840::BlobParams, + eip7840::BlobParams, BlockId, }; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_evm::ConfigureEvm; use reth_primitives_traits::{ - constants::MAX_TX_GAS_LIMIT_OSAKA, transaction::error::InvalidTransactionError, Account, Block, - GotExpected, + transaction::error::InvalidTransactionError, Account, BlockTy, GotExpected, HeaderTy, + SealedBlock, }; -use reth_storage_api::{AccountInfoReader, BytecodeReader, StateProviderFactory}; +use reth_storage_api::{AccountInfoReader, BlockReaderIdExt, BytecodeReader, StateProviderFactory}; use reth_tasks::TaskSpawner; +use revm::context_interface::Cfg; use revm_primitives::U256; use std::{ marker::PhantomData, sync::{ - atomic::{AtomicBool, AtomicU64}, + atomic::{AtomicBool, AtomicU64, AtomicUsize}, Arc, }, time::{Instant, SystemTime}, @@ -58,7 +60,7 @@ use tokio::sync::Mutex; /// /// And adheres to the configured [`LocalTransactionConfig`]. 
#[derive(Debug)] -pub struct EthTransactionValidator { +pub struct EthTransactionValidator { /// This type fetches account info from the db client: Client, /// Blobstore used for fetching re-injected blob transactions. @@ -89,15 +91,17 @@ pub struct EthTransactionValidator, /// Disable balance checks during transaction validation disable_balance_check: bool, + /// EVM configuration for fetching execution limits + evm_config: Evm, /// Marker for the transaction type - _marker: PhantomData<(T, B)>, + _marker: PhantomData, /// Metrics for tsx pool validation validation_metrics: TxPoolValidationMetrics, /// Bitmap of custom transaction types that are allowed. other_tx_types: U256, } -impl EthTransactionValidator { +impl EthTransactionValidator { /// Returns the configured chain spec pub fn chain_spec(&self) -> Arc where @@ -176,10 +180,11 @@ impl EthTransactionValidator { } } -impl EthTransactionValidator +impl EthTransactionValidator where - Client: ChainSpecProvider + StateProviderFactory, + Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, + Evm: ConfigureEvm, { /// Returns the current max gas limit pub fn block_gas_limit(&self) -> u64 { @@ -361,10 +366,12 @@ where } // Check whether the init code size has been exceeded. - if self.fork_tracker.is_shanghai_activated() && - let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) - { - return Err(TransactionValidationOutcome::Invalid(transaction, err)) + if self.fork_tracker.is_shanghai_activated() { + let max_initcode_size = + self.fork_tracker.max_initcode_size.load(std::sync::atomic::Ordering::Relaxed); + if let Err(err) = transaction.ensure_max_init_code_size(max_initcode_size) { + return Err(TransactionValidationOutcome::Invalid(transaction, err)) + } } // Checks for gas limit @@ -506,10 +513,10 @@ where } } - // Osaka validation of max tx gas. 
- if self.fork_tracker.is_osaka_activated() && - transaction.gas_limit() > MAX_TX_GAS_LIMIT_OSAKA - { + // Transaction gas limit validation (EIP-7825 for Osaka+) + let tx_gas_limit_cap = + self.fork_tracker.tx_gas_limit_cap.load(std::sync::atomic::Ordering::Relaxed); + if tx_gas_limit_cap > 0 && transaction.gas_limit() > tx_gas_limit_cap { return Err(TransactionValidationOutcome::Invalid( transaction, InvalidTransactionError::GasLimitTooHigh.into(), @@ -743,7 +750,7 @@ where .collect() } - fn on_new_head_block(&self, new_tip_block: &T) { + fn on_new_head_block(&self, new_tip_block: &HeaderTy) { // update all forks if self.chain_spec().is_shanghai_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed); @@ -774,6 +781,19 @@ where } self.block_gas_limit.store(new_tip_block.gas_limit(), std::sync::atomic::Ordering::Relaxed); + + // Get EVM limits from evm_config.evm_env() + let evm_env = self + .evm_config + .evm_env(new_tip_block) + .expect("evm_env should not fail for executed block"); + + self.fork_tracker + .max_initcode_size + .store(evm_env.cfg_env.max_initcode_size(), std::sync::atomic::Ordering::Relaxed); + self.fork_tracker + .tx_gas_limit_cap + .store(evm_env.cfg_env.tx_gas_limit_cap(), std::sync::atomic::Ordering::Relaxed); } fn max_gas_limit(&self) -> u64 { @@ -799,14 +819,14 @@ where } } -impl TransactionValidator for EthTransactionValidator +impl TransactionValidator for EthTransactionValidator where - Client: ChainSpecProvider + StateProviderFactory, + Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, - B: Block, + Evm: ConfigureEvm, { type Transaction = Tx; - type Block = B; + type Block = BlockTy; async fn validate_transaction( &self, @@ -831,15 +851,17 @@ where self.validate_batch_with_origin(origin, transactions) } - fn on_new_head_block(&self, new_tip_block: &reth_primitives_traits::SealedBlock) { + fn on_new_head_block(&self, new_tip_block: &SealedBlock) 
{ Self::on_new_head_block(self, new_tip_block.header()) } } /// A builder for [`EthTransactionValidator`] and [`TransactionValidationTaskExecutor`] #[derive(Debug)] -pub struct EthTransactionValidatorBuilder { +pub struct EthTransactionValidatorBuilder { client: Client, + /// The EVM configuration to use for validation. + evm_config: Evm, /// Fork indicator whether we are in the Shanghai stage. shanghai: bool, /// Fork indicator whether we are in the Cancun hardfork. @@ -883,10 +905,14 @@ pub struct EthTransactionValidatorBuilder { disable_balance_check: bool, /// Bitmap of custom transaction types that are allowed. other_tx_types: U256, + /// Cached max initcode size from EVM config + max_initcode_size: usize, + /// Cached transaction gas limit cap from EVM config (0 = no cap) + tx_gas_limit_cap: u64, } -impl EthTransactionValidatorBuilder { - /// Creates a new builder for the given client +impl EthTransactionValidatorBuilder { + /// Creates a new builder for the given client and EVM config /// /// By default this assumes the network is on the `Prague` hardfork and the following /// transactions are allowed: @@ -895,10 +921,24 @@ impl EthTransactionValidatorBuilder { /// - EIP-1559 /// - EIP-4844 /// - EIP-7702 - pub fn new(client: Client) -> Self { + pub fn new(client: Client, evm_config: Evm) -> Self + where + Client: ChainSpecProvider + + BlockReaderIdExt
>, + Evm: ConfigureEvm, + { + let chain_spec = client.chain_spec(); + let tip = client + .header_by_id(BlockId::latest()) + .expect("failed to fetch latest header") + .expect("latest header is not found"); + let evm_env = + evm_config.evm_env(&tip).expect("evm_env should not fail for existing blocks"); + Self { block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M.into(), client, + evm_config, minimum_priority_fee: None, additional_tasks: 1, kzg_settings: EnvKzgSettings::Default, @@ -912,28 +952,26 @@ impl EthTransactionValidatorBuilder { eip4844: true, eip7702: true, - // shanghai is activated by default - shanghai: true, - - // cancun is activated by default - cancun: true, - - // prague is activated by default - prague: true, + shanghai: chain_spec.is_shanghai_active_at_timestamp(tip.timestamp()), + cancun: chain_spec.is_cancun_active_at_timestamp(tip.timestamp()), + prague: chain_spec.is_prague_active_at_timestamp(tip.timestamp()), + osaka: chain_spec.is_osaka_active_at_timestamp(tip.timestamp()), - // osaka not yet activated - osaka: false, + tip_timestamp: tip.timestamp(), - tip_timestamp: 0, - - // max blob count is prague by default - max_blob_count: BlobParams::prague().max_blobs_per_tx, + max_blob_count: chain_spec + .blob_params_at_timestamp(tip.timestamp()) + .unwrap_or_else(BlobParams::prague) + .max_blobs_per_tx, // balance checks are enabled by default disable_balance_check: false, // no custom transaction types by default other_tx_types: U256::ZERO, + + tx_gas_limit_cap: evm_env.cfg_env.tx_gas_limit_cap(), + max_initcode_size: evm_env.cfg_env.max_initcode_size(), } } @@ -1041,28 +1079,6 @@ impl EthTransactionValidatorBuilder { self } - /// Configures validation rules based on the head block's timestamp. - /// - /// For example, whether the Shanghai and Cancun hardfork is activated at launch, or max blob - /// counts. 
- pub fn with_head_timestamp(mut self, timestamp: u64) -> Self - where - Client: ChainSpecProvider, - { - self.shanghai = self.client.chain_spec().is_shanghai_active_at_timestamp(timestamp); - self.cancun = self.client.chain_spec().is_cancun_active_at_timestamp(timestamp); - self.prague = self.client.chain_spec().is_prague_active_at_timestamp(timestamp); - self.osaka = self.client.chain_spec().is_osaka_active_at_timestamp(timestamp); - self.tip_timestamp = timestamp; - self.max_blob_count = self - .client - .chain_spec() - .blob_params_at_timestamp(timestamp) - .unwrap_or_else(BlobParams::cancun) - .max_blobs_per_tx; - self - } - /// Sets a max size in bytes of a single transaction allowed into the pool pub const fn with_max_tx_input_bytes(mut self, max_tx_input_bytes: usize) -> Self { self.max_tx_input_bytes = max_tx_input_bytes; @@ -1104,13 +1120,13 @@ impl EthTransactionValidatorBuilder { } /// Builds a the [`EthTransactionValidator`] without spawning validator tasks. - pub fn build(self, blob_store: S) -> EthTransactionValidator + pub fn build(self, blob_store: S) -> EthTransactionValidator where S: BlobStore, - B: Block, { let Self { client, + evm_config, shanghai, cancun, prague, @@ -1131,6 +1147,8 @@ impl EthTransactionValidatorBuilder { max_blob_count, additional_tasks: _, other_tx_types, + max_initcode_size, + tx_gas_limit_cap, } = self; let fork_tracker = ForkTracker { @@ -1140,6 +1158,8 @@ impl EthTransactionValidatorBuilder { osaka: AtomicBool::new(osaka), tip_timestamp: AtomicU64::new(tip_timestamp), max_blob_count: AtomicU64::new(max_blob_count), + max_initcode_size: AtomicUsize::new(max_initcode_size), + tx_gas_limit_cap: AtomicU64::new(tx_gas_limit_cap), }; EthTransactionValidator { @@ -1158,6 +1178,7 @@ impl EthTransactionValidatorBuilder { max_tx_input_bytes, max_tx_gas_limit, disable_balance_check, + evm_config, _marker: Default::default(), validation_metrics: TxPoolValidationMetrics::default(), other_tx_types, @@ -1170,18 +1191,17 @@ impl 
EthTransactionValidatorBuilder { /// The validator will spawn `additional_tasks` additional tasks for validation. /// /// By default this will spawn 1 additional task. - pub fn build_with_tasks( + pub fn build_with_tasks( self, tasks: T, blob_store: S, - ) -> TransactionValidationTaskExecutor> + ) -> TransactionValidationTaskExecutor> where T: TaskSpawner, S: BlobStore, - B: Block, { let additional_tasks = self.additional_tasks; - let validator = self.build::(blob_store); + let validator = self.build::(blob_store); let (tx, task) = ValidationTask::new(); @@ -1223,6 +1243,10 @@ pub struct ForkTracker { pub max_blob_count: AtomicU64, /// Tracks the timestamp of the tip block. pub tip_timestamp: AtomicU64, + /// Cached max initcode size from EVM config + pub max_initcode_size: AtomicUsize, + /// Cached transaction gas limit cap from EVM config (0 = no cap) + pub tx_gas_limit_cap: AtomicU64, } impl ForkTracker { @@ -1304,8 +1328,14 @@ mod tests { use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; use reth_ethereum_primitives::PooledTransactionVariant; + use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::SignedTransaction; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; + use revm_primitives::eip3860::MAX_INITCODE_SIZE; + + fn test_evm_config() -> EthEvmConfig { + EthEvmConfig::mainnet() + } fn get_transaction() -> EthPooledTransaction { let raw = 
"0x02f914950181ad84b2d05e0085117553845b830f7df88080b9143a6040608081523462000414576200133a803803806200001e8162000419565b9283398101608082820312620004145781516001600160401b03908181116200041457826200004f9185016200043f565b92602092838201519083821162000414576200006d9183016200043f565b8186015190946001600160a01b03821692909183900362000414576060015190805193808511620003145760038054956001938488811c9816801562000409575b89891014620003f3578190601f988981116200039d575b50899089831160011462000336576000926200032a575b505060001982841b1c191690841b1781555b8751918211620003145760049788548481811c9116801562000309575b89821014620002f457878111620002a9575b5087908784116001146200023e5793839491849260009562000232575b50501b92600019911b1c19161785555b6005556007805460ff60a01b19169055600880546001600160a01b0319169190911790553015620001f3575060025469d3c21bcecceda100000092838201809211620001de57506000917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9160025530835282815284832084815401905584519384523093a351610e889081620004b28239f35b601190634e487b7160e01b6000525260246000fd5b90606493519262461bcd60e51b845283015260248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152fd5b0151935038806200013a565b9190601f198416928a600052848a6000209460005b8c8983831062000291575050501062000276575b50505050811b0185556200014a565b01519060f884600019921b161c191690553880808062000267565b86860151895590970196948501948893500162000253565b89600052886000208880860160051c8201928b8710620002ea575b0160051c019085905b828110620002dd5750506200011d565b60008155018590620002cd565b92508192620002c4565b60228a634e487b7160e01b6000525260246000fd5b90607f16906200010b565b634e487b7160e01b600052604160045260246000fd5b015190503880620000dc565b90869350601f19831691856000528b6000209260005b8d8282106200038657505084116200036d575b505050811b018155620000ee565b015160001983861b60f8161c191690553880806200035f565b8385015186558a979095019493840193016200034c565b90915083600052896000208980850160051c8201928c8610620003e9575b918891869594930160051c01915b828
110620003d9575050620000c5565b60008155859450889101620003c9565b92508192620003bb565b634e487b7160e01b600052602260045260246000fd5b97607f1697620000ae565b600080fd5b6040519190601f01601f191682016001600160401b038111838210176200031457604052565b919080601f84011215620004145782516001600160401b038111620003145760209062000475601f8201601f1916830162000419565b92818452828287010111620004145760005b8181106200049d57508260009394955001015290565b85810183015184820184015282016200048756fe608060408181526004918236101561001657600080fd5b600092833560e01c91826306fdde0314610a1c57508163095ea7b3146109f257816318160ddd146109d35781631b4c84d2146109ac57816323b872dd14610833578163313ce5671461081757816339509351146107c357816370a082311461078c578163715018a6146107685781638124f7ac146107495781638da5cb5b1461072057816395d89b411461061d578163a457c2d714610575578163a9059cbb146104e4578163c9567bf914610120575063dd62ed3e146100d557600080fd5b3461011c578060031936011261011c57806020926100f1610b5a565b6100f9610b75565b6001600160a01b0391821683526001865283832091168252845220549051908152f35b5080fd5b905082600319360112610338576008546001600160a01b039190821633036104975760079283549160ff8360a01c1661045557737a250d5630b4cf539739df2c5dacb4c659f2488d92836bffffffffffffffffffffffff60a01b8092161786553087526020938785528388205430156104065730895260018652848920828a52865280858a205584519081527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925863092a38554835163c45a015560e01b815290861685828581845afa9182156103dd57849187918b946103e7575b5086516315ab88c960e31b815292839182905afa9081156103dd576044879289928c916103c0575b508b83895196879586946364e329cb60e11b8652308c870152166024850152165af19081156103b6579086918991610389575b50169060065416176006558385541660604730895288865260c4858a20548860085416928751958694859363f305d71960e01b8552308a86015260248501528d60448501528d606485015260848401524260a48401525af1801561037f579084929161034c575b50604485600654169587541691888551978894859363095ea7b360e01b855284015260001960248401525af1908115610343575061030c575b5050805460ff60a01b191
6600160a01b17905580f35b81813d831161033c575b6103208183610b8b565b8101031261033857518015150361011c5738806102f6565b8280fd5b503d610316565b513d86823e3d90fd5b6060809293503d8111610378575b6103648183610b8b565b81010312610374578290386102bd565b8580fd5b503d61035a565b83513d89823e3d90fd5b6103a99150863d88116103af575b6103a18183610b8b565b810190610e33565b38610256565b503d610397565b84513d8a823e3d90fd5b6103d79150843d86116103af576103a18183610b8b565b38610223565b85513d8b823e3d90fd5b6103ff919450823d84116103af576103a18183610b8b565b92386101fb565b845162461bcd60e51b81528085018790526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608490fd5b6020606492519162461bcd60e51b8352820152601760248201527f74726164696e6720697320616c7265616479206f70656e0000000000000000006044820152fd5b608490602084519162461bcd60e51b8352820152602160248201527f4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6044820152603760f91b6064820152fd5b9050346103385781600319360112610338576104fe610b5a565b9060243593303303610520575b602084610519878633610bc3565b5160018152f35b600594919454808302908382041483151715610562576127109004820391821161054f5750925080602061050b565b634e487b7160e01b815260118552602490fd5b634e487b7160e01b825260118652602482fd5b9050823461061a578260031936011261061a57610590610b5a565b918360243592338152600160205281812060018060a01b03861682526020522054908282106105c9576020856105198585038733610d31565b608490602086519162461bcd60e51b8352820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b6064820152fd5b80fd5b83833461011c578160031936011261011c57805191809380549160019083821c92828516948515610716575b6020958686108114610703578589529081156106df5750600114610687575b6106838787610679828c0383610b8b565b5191829182610b11565b0390f35b81529295507f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b5b8284106106cc57505050826106839461067992820101948680610668565b80548685018801529286019281016106ae565b60ff191688870152505
05050151560051b8301019250610679826106838680610668565b634e487b7160e01b845260228352602484fd5b93607f1693610649565b50503461011c578160031936011261011c5760085490516001600160a01b039091168152602090f35b50503461011c578160031936011261011c576020906005549051908152f35b833461061a578060031936011261061a57600880546001600160a01b031916905580f35b50503461011c57602036600319011261011c5760209181906001600160a01b036107b4610b5a565b16815280845220549051908152f35b82843461061a578160031936011261061a576107dd610b5a565b338252600160209081528383206001600160a01b038316845290528282205460243581019290831061054f57602084610519858533610d31565b50503461011c578160031936011261011c576020905160128152f35b83833461011c57606036600319011261011c5761084e610b5a565b610856610b75565b6044359160018060a01b0381169485815260209560018752858220338352875285822054976000198903610893575b505050906105199291610bc3565b85891061096957811561091a5733156108cc5750948481979861051997845260018a528284203385528a52039120558594938780610885565b865162461bcd60e51b8152908101889052602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608490fd5b865162461bcd60e51b81529081018890526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608490fd5b865162461bcd60e51b8152908101889052601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606490fd5b50503461011c578160031936011261011c5760209060ff60075460a01c1690519015158152f35b50503461011c578160031936011261011c576020906002549051908152f35b50503461011c578060031936011261011c57602090610519610a12610b5a565b6024359033610d31565b92915034610b0d5783600319360112610b0d57600354600181811c9186908281168015610b03575b6020958686108214610af05750848852908115610ace5750600114610a75575b6106838686610679828b0383610b8b565b929550600383527fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b5b828410610abb575050508261068394610679928201019438610a64565b8054868501880152928601928101610a9e565b60ff1
91687860152505050151560051b83010192506106798261068338610a64565b634e487b7160e01b845260229052602483fd5b93607f1693610a44565b8380fd5b6020808252825181830181905290939260005b828110610b4657505060409293506000838284010152601f8019910116010190565b818101860151848201604001528501610b24565b600435906001600160a01b0382168203610b7057565b600080fd5b602435906001600160a01b0382168203610b7057565b90601f8019910116810190811067ffffffffffffffff821117610bad57604052565b634e487b7160e01b600052604160045260246000fd5b6001600160a01b03908116918215610cde5716918215610c8d57600082815280602052604081205491808310610c3957604082827fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef958760209652828652038282205586815220818154019055604051908152a3565b60405162461bcd60e51b815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b6064820152608490fd5b60405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b6064820152608490fd5b60405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b6064820152608490fd5b6001600160a01b03908116918215610de25716918215610d925760207f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925918360005260018252604060002085600052825280604060002055604051908152a3565b60405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608490fd5b60405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608490fd5b90816020910312610b7057516001600160a01b0381168103610b70579056fea2646970667358221220285c200b3978b10818ff576bb83f2dc4a2a7c98dfb6a36ea01170de792aa652764736f6c63430008140033000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000
000000000000000000000000000c0000000000000000000000000d3fd4f95820a9aa848ce716d6c200eaefb9a2e4900000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000003543131000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000035431310000000000000000000000000000000000000000000000000000000000c001a04e551c75810ffdfe6caff57da9f5a8732449f42f0f4c57f935b05250a76db3b6a046cd47e6d01914270c1ec0d9ac7fae7dfb240ec9a8b6ec7898c4d6aa174388f2"; @@ -1327,6 +1357,8 @@ mod tests { osaka: false.into(), tip_timestamp: 0.into(), max_blob_count: 0.into(), + max_initcode_size: AtomicUsize::new(MAX_INITCODE_SIZE), + tx_gas_limit_cap: AtomicU64::new(0), }; let res = ensure_intrinsic_gas(&transaction, &fork_tracker); @@ -1336,14 +1368,14 @@ mod tests { let res = ensure_intrinsic_gas(&transaction, &fork_tracker); assert!(res.is_ok()); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider).build(blob_store.clone()); + let validator = EthTransactionValidatorBuilder::new(provider, test_evm_config()) + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); @@ -1363,17 +1395,16 @@ mod tests { async fn invalid_on_gas_limit_too_high() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - 
.set_block_gas_limit(1_000_000) // tx gas limit is 1_015_288 - .build(blob_store.clone()); + let validator = EthTransactionValidatorBuilder::new(provider, test_evm_config()) + .set_block_gas_limit(1_000_000) // tx gas limit is 1_015_288 + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); @@ -1397,17 +1428,16 @@ mod tests { #[tokio::test] async fn invalid_on_fee_cap_exceeded() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - .set_tx_fee_cap(100) // 100 wei cap - .build(blob_store.clone()); + let validator = EthTransactionValidatorBuilder::new(provider, test_evm_config()) + .set_tx_fee_cap(100) // 100 wei cap + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::Local, transaction.clone()); assert!(outcome.is_invalid()); @@ -1435,17 +1465,16 @@ mod tests { #[tokio::test] async fn valid_on_zero_fee_cap() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - .set_tx_fee_cap(0) // no cap - .build(blob_store); + let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet()) + .set_tx_fee_cap(0) // no cap + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::Local, transaction); assert!(outcome.is_valid()); @@ -1454,17 +1483,16 @@ mod tests { 
#[tokio::test] async fn valid_on_normal_fee_cap() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - .set_tx_fee_cap(2e18 as u128) // 2 ETH cap - .build(blob_store); + let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet()) + .set_tx_fee_cap(2e18 as u128) // 2 ETH cap + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::Local, transaction); assert!(outcome.is_valid()); @@ -1473,17 +1501,16 @@ mod tests { #[tokio::test] async fn invalid_on_max_tx_gas_limit_exceeded() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - .with_max_tx_gas_limit(Some(500_000)) // Set limit lower than transaction gas limit (1_015_288) - .build(blob_store.clone()); + let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet()) + .with_max_tx_gas_limit(Some(500_000)) // Set limit lower than transaction gas limit (1_015_288) + .build(blob_store.clone()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); assert!(outcome.is_invalid()); @@ -1506,17 +1533,16 @@ mod tests { #[tokio::test] async fn valid_on_max_tx_gas_limit_disabled() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); 
provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - .with_max_tx_gas_limit(None) // disabled - .build(blob_store); + let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet()) + .with_max_tx_gas_limit(None) // disabled + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::External, transaction); assert!(outcome.is_valid()); @@ -1525,17 +1551,16 @@ mod tests { #[tokio::test] async fn valid_on_max_tx_gas_limit_within_limit() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), ); let blob_store = InMemoryBlobStore::default(); - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - .with_max_tx_gas_limit(Some(2_000_000)) // Set limit higher than transaction gas limit (1_015_288) - .build(blob_store); + let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet()) + .with_max_tx_gas_limit(Some(2_000_000)) // Set limit higher than transaction gas limit (1_015_288) + .build(blob_store); let outcome = validator.validate_one(TransactionOrigin::External, transaction); assert!(outcome.is_valid()); @@ -1544,7 +1569,7 @@ mod tests { // Helper function to set up common test infrastructure for priority fee tests fn setup_priority_fee_test() -> (EthPooledTransaction, MockEthProvider) { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); provider.add_account( transaction.sender(), ExtendedAccount::new(transaction.nonce(), U256::MAX), @@ -1557,9 +1582,9 @@ mod tests { provider: 
MockEthProvider, minimum_priority_fee: Option, local_config: Option, - ) -> EthTransactionValidator { + ) -> EthTransactionValidator { let blob_store = InMemoryBlobStore::default(); - let mut builder = EthTransactionValidatorBuilder::new(provider) + let mut builder = EthTransactionValidatorBuilder::new(provider, test_evm_config()) .with_minimum_priority_fee(minimum_priority_fee); if let Some(config) = local_config { @@ -1714,7 +1739,7 @@ mod tests { fn reject_oversized_tx() { let mut transaction = get_transaction(); transaction.encoded_length = DEFAULT_MAX_TX_INPUT_BYTES + 1; - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); // No minimum priority fee set (default is None) let validator = create_validator_with_minimum_fee(provider, None, None); @@ -1727,7 +1752,7 @@ mod tests { #[tokio::test] async fn valid_with_disabled_balance_check() { let transaction = get_transaction(); - let provider = MockEthProvider::default(); + let provider = MockEthProvider::default().with_genesis_block(); // Set account with 0 balance provider.add_account( @@ -1736,8 +1761,8 @@ mod tests { ); // Validate with balance check enabled - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider.clone()) + let validator = + EthTransactionValidatorBuilder::new(provider.clone(), EthEvmConfig::mainnet()) .build(InMemoryBlobStore::default()); let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); @@ -1753,10 +1778,9 @@ mod tests { } // Validate with balance check disabled - let validator: EthTransactionValidator<_, _> = - EthTransactionValidatorBuilder::new(provider) - .disable_balance_check() - .build(InMemoryBlobStore::default()); + let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet()) + .disable_balance_check() + .build(InMemoryBlobStore::default()); let outcome = validator.validate_one(TransactionOrigin::External, transaction); 
assert!(outcome.is_valid()); // Should be valid because balance check is disabled diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index d3ec6b36160..7344ac76090 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -21,9 +21,7 @@ pub use eth::*; pub use task::{TransactionValidationTaskExecutor, ValidationTask}; /// Validation constants. -pub use constants::{ - DEFAULT_MAX_TX_INPUT_BYTES, MAX_CODE_BYTE_SIZE, MAX_INIT_CODE_BYTE_SIZE, TX_SLOT_BYTE_SIZE, -}; +pub use constants::{DEFAULT_MAX_TX_INPUT_BYTES, TX_SLOT_BYTE_SIZE}; /// A Result type returned after checking a transaction's validity. #[derive(Debug)] diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 39ae41bc3de..e25c47b2489 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -8,7 +8,10 @@ use crate::{ TransactionValidator, }; use futures_util::{lock::Mutex, StreamExt}; -use reth_primitives_traits::SealedBlock; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::{HeaderTy, SealedBlock}; +use reth_storage_api::BlockReaderIdExt; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::{ @@ -116,8 +119,16 @@ impl Clone for TransactionValidationTaskExecutor { impl TransactionValidationTaskExecutor<()> { /// Convenience method to create a [`EthTransactionValidatorBuilder`] - pub fn eth_builder(client: Client) -> EthTransactionValidatorBuilder { - EthTransactionValidatorBuilder::new(client) + pub fn eth_builder( + client: Client, + evm_config: Evm, + ) -> EthTransactionValidatorBuilder + where + Client: ChainSpecProvider + + BlockReaderIdExt
>, + Evm: ConfigureEvm, + { + EthTransactionValidatorBuilder::new(client, evm_config) } } @@ -139,16 +150,19 @@ impl TransactionValidationTaskExecutor { } } -impl TransactionValidationTaskExecutor> { +impl TransactionValidationTaskExecutor> { /// Creates a new instance for the given client /// /// This will spawn a single validation tasks that performs the actual validation. /// See [`TransactionValidationTaskExecutor::eth_with_additional_tasks`] - pub fn eth(client: Client, blob_store: S, tasks: T) -> Self + pub fn eth(client: Client, evm_config: Evm, blob_store: S, tasks: T) -> Self where T: TaskSpawner, + Client: ChainSpecProvider + + BlockReaderIdExt
>, + Evm: ConfigureEvm, { - Self::eth_with_additional_tasks(client, blob_store, tasks, 0) + Self::eth_with_additional_tasks(client, evm_config, blob_store, tasks, 0) } /// Creates a new instance for the given client @@ -162,14 +176,18 @@ impl TransactionValidationTaskExecutor( client: Client, + evm_config: Evm, blob_store: S, tasks: T, num_additional_tasks: usize, ) -> Self where T: TaskSpawner, + Client: ChainSpecProvider + + BlockReaderIdExt
>, + Evm: ConfigureEvm, { - EthTransactionValidatorBuilder::new(client) + EthTransactionValidatorBuilder::new(client, evm_config) .with_additional_tasks(num_additional_tasks) .build_with_tasks(tasks, blob_store) } diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 2150118173e..ebb1246457a 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -5,15 +5,16 @@ use reth_ethereum::{ chainspec::ChainSpec, cli::interface::Cli, + evm::EthEvmConfig, node::{ - api::{BlockTy, FullNodeTypes, NodeTypes}, + api::{FullNodeTypes, NodeTypes}, builder::{components::PoolBuilder, BuilderContext}, node::EthereumAddOns, EthereumNode, }, pool::{ - blobstore::InMemoryBlobStore, CoinbaseTipOrdering, EthPooledTransaction, - EthTransactionPool, Pool, PoolConfig, TransactionValidationTaskExecutor, + blobstore::InMemoryBlobStore, CoinbaseTipOrdering, EthTransactionPool, Pool, PoolConfig, + TransactionValidationTaskExecutor, }, provider::CanonStateSubscriptions, EthPrimitives, @@ -49,28 +50,24 @@ pub struct CustomPoolBuilder { /// Implement the [`PoolBuilder`] trait for the custom pool builder /// /// This will be used to build the transaction pool and its maintenance tasks during launch. -impl PoolBuilder for CustomPoolBuilder +impl PoolBuilder for CustomPoolBuilder where Node: FullNodeTypes>, { - type Pool = EthTransactionPool< - Node::Provider, - InMemoryBlobStore, - EthPooledTransaction, - BlockTy, - >; + type Pool = EthTransactionPool; - async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { + async fn build_pool( + self, + ctx: &BuilderContext, + evm_config: EthEvmConfig, + ) -> eyre::Result { let data_dir = ctx.config().datadir(); let blob_store = InMemoryBlobStore::default(); - let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) - .with_head_timestamp(ctx.head().timestamp) - .kzg_settings(ctx.kzg_settings()?) 
- .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) - .build_with_tasks::<_, _, _, BlockTy>( - ctx.task_executor().clone(), - blob_store.clone(), - ); + let validator = + TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone(), evm_config) + .kzg_settings(ctx.kzg_settings()?) + .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) + .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); let transaction_pool = Pool::new(validator, CoinbaseTipOrdering::default(), blob_store, self.pool_config); From 768a687189d304355a68f1271f17c1a77011f2d6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 26 Jan 2026 19:49:44 +0400 Subject: [PATCH 216/267] perf: use shared channel for prewarm workers (#21429) --- .../src/tree/payload_processor/prewarm.rs | 67 ++++++------------- 1 file changed, 22 insertions(+), 45 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 81e29eea3fa..4ecb6fd1656 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -26,11 +26,11 @@ use alloy_consensus::transaction::TxHashRef; use alloy_eip7928::BlockAccessList; use alloy_evm::Database; use alloy_primitives::{keccak256, map::B256Set, B256}; -use crossbeam_channel::Sender as CrossbeamSender; +use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use metrics::{Counter, Gauge, Histogram}; use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, RecoveredTx, SpecFor}; use reth_metrics::Metrics; -use reth_primitives_traits::{NodePrimitives, SignedTransaction}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ AccountReader, BlockExecutionOutput, BlockReader, StateProvider, StateProviderFactory, StateReader, @@ -163,8 +163,8 @@ where transaction_count_hint.min(max_concurrency) }; - // Initialize worker handles 
container - let handles = ctx.clone().spawn_workers(workers_needed, &executor, actions_tx.clone(), done_tx.clone()); + // Spawn workers + let tx_sender = ctx.clone().spawn_workers(workers_needed, &executor, actions_tx.clone(), done_tx.clone()); // Distribute transactions to workers let mut tx_index = 0usize; @@ -179,37 +179,18 @@ where } let indexed_tx = IndexedTransaction { index: tx_index, tx }; - let is_system_tx = indexed_tx.tx.tx().is_system_tx(); - - // System transactions (type > 4) in the first position set critical metadata - // that affects all subsequent transactions (e.g., L1 block info on L2s). - // Broadcast the first system transaction to all workers to ensure they have - // the critical state. This is particularly important for L2s like Optimism - // where the first deposit transaction (type 126) contains essential block metadata. - if tx_index == 0 && is_system_tx { - for handle in &handles { - // Ignore send errors: workers listen to terminate_execution and may - // exit early when signaled. Sending to a disconnected worker is - // possible and harmless and should happen at most once due to - // the terminate_execution check above. - let _ = handle.send(indexed_tx.clone()); - } - } else { - // Round-robin distribution for all other transactions - let worker_idx = tx_index % workers_needed; - // Ignore send errors: workers listen to terminate_execution and may - // exit early when signaled. Sending to a disconnected worker is - // possible and harmless and should happen at most once due to - // the terminate_execution check above. - let _ = handles[worker_idx].send(indexed_tx); - } + + // Send transaction to the workers + // Ignore send errors: workers listen to terminate_execution and may + // exit early when signaled. 
+ let _ = tx_sender.send(indexed_tx); tx_index += 1; } - // drop handle and wait for all tasks to finish and drop theirs + // drop sender and wait for all tasks to finish drop(done_tx); - drop(handles); + drop(tx_sender); while done_rx.recv().is_ok() {} let _ = actions_tx @@ -548,7 +529,7 @@ where Some((evm, metrics, terminate_execution, v2_proofs_enabled)) } - /// Accepts an [`mpsc::Receiver`] of transactions and a handle to prewarm task. Executes + /// Accepts a [`CrossbeamReceiver`] of transactions and a handle to prewarm task. Executes /// transactions and streams [`PrewarmTaskEvent::Outcome`] messages for each transaction. /// /// This function processes transactions sequentially from the receiver and emits outcome events @@ -560,7 +541,7 @@ where #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn transact_batch( self, - txs: mpsc::Receiver>, + txs: CrossbeamReceiver>, sender: Sender>, done_tx: Sender<()>, ) where @@ -647,35 +628,31 @@ where let _ = done_tx.send(()); } - /// Spawns a worker task for transaction execution and returns its sender channel. + /// Spawns worker tasks that pull transactions from a shared channel. + /// + /// Returns the sender for distributing transactions to workers. fn spawn_workers( self, workers_needed: usize, task_executor: &WorkloadExecutor, actions_tx: Sender>, done_tx: Sender<()>, - ) -> Vec>> + ) -> CrossbeamSender> where Tx: ExecutableTxFor + Send + 'static, { - let mut handles = Vec::with_capacity(workers_needed); - let mut receivers = Vec::with_capacity(workers_needed); - - for _ in 0..workers_needed { - let (tx, rx) = mpsc::channel(); - handles.push(tx); - receivers.push(rx); - } + let (tx_sender, tx_receiver) = crossbeam_channel::unbounded(); - // Spawn a separate task spawning workers in parallel. 
+ // Spawn workers that all pull from the shared receiver let executor = task_executor.clone(); let span = Span::current(); task_executor.spawn_blocking(move || { let _enter = span.entered(); - for (idx, rx) in receivers.into_iter().enumerate() { + for idx in 0..workers_needed { let ctx = self.clone(); let actions_tx = actions_tx.clone(); let done_tx = done_tx.clone(); + let rx = tx_receiver.clone(); let span = debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm worker", idx); executor.spawn_blocking(move || { let _enter = span.entered(); @@ -684,7 +661,7 @@ where } }); - handles + tx_sender } /// Spawns a worker task for BAL slot prefetching. From f9ec2fafa0e51aa334954581c6133ed540273b6c Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 26 Jan 2026 18:02:06 +0100 Subject: [PATCH 217/267] refactor(trie): always use ParallelSparseTrie, deprecate config flags (#21435) --- crates/engine/primitives/src/config.rs | 19 -- .../configured_sparse_trie.rs | 188 ------------------ .../tree/src/tree/payload_processor/mod.rs | 38 ++-- .../src/tree/payload_processor/sparse_trie.rs | 10 +- crates/node/core/src/args/engine.rs | 23 +-- crates/stateless/src/trie.rs | 4 +- crates/trie/sparse-parallel/src/trie.rs | 7 +- crates/trie/sparse/benches/rlp_node.rs | 2 +- crates/trie/sparse/benches/root.rs | 32 +-- crates/trie/sparse/src/state.rs | 69 +++---- crates/trie/sparse/src/traits.rs | 4 +- crates/trie/sparse/src/trie.rs | 46 +++-- crates/trie/trie/src/witness.rs | 2 +- docs/vocs/docs/pages/cli/op-reth/node.mdx | 3 - docs/vocs/docs/pages/cli/reth/node.mdx | 3 - 15 files changed, 112 insertions(+), 338 deletions(-) delete mode 100644 crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 20902705dc7..6b17a196fd8 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -110,8 +110,6 @@ pub struct 
TreeConfig { disable_state_cache: bool, /// Whether to disable parallel prewarming. disable_prewarming: bool, - /// Whether to disable the parallel sparse trie state root algorithm. - disable_parallel_sparse_trie: bool, /// Whether to enable state provider metrics. state_provider_metrics: bool, /// Cross-block cache size in bytes. @@ -168,7 +166,6 @@ impl Default for TreeConfig { always_compare_trie_updates: false, disable_state_cache: false, disable_prewarming: false, - disable_parallel_sparse_trie: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, has_enough_parallelism: has_enough_parallelism(), @@ -201,7 +198,6 @@ impl TreeConfig { always_compare_trie_updates: bool, disable_state_cache: bool, disable_prewarming: bool, - disable_parallel_sparse_trie: bool, state_provider_metrics: bool, cross_block_cache_size: usize, has_enough_parallelism: bool, @@ -228,7 +224,6 @@ impl TreeConfig { always_compare_trie_updates, disable_state_cache, disable_prewarming, - disable_parallel_sparse_trie, state_provider_metrics, cross_block_cache_size, has_enough_parallelism, @@ -309,11 +304,6 @@ impl TreeConfig { self.state_provider_metrics } - /// Returns whether or not the parallel sparse trie is disabled. - pub const fn disable_parallel_sparse_trie(&self) -> bool { - self.disable_parallel_sparse_trie - } - /// Returns whether or not state cache is disabled. pub const fn disable_state_cache(&self) -> bool { self.disable_state_cache @@ -451,15 +441,6 @@ impl TreeConfig { self } - /// Setter for whether to disable the parallel sparse trie - pub const fn with_disable_parallel_sparse_trie( - mut self, - disable_parallel_sparse_trie: bool, - ) -> Self { - self.disable_parallel_sparse_trie = disable_parallel_sparse_trie; - self - } - /// Setter for whether multiproof task should chunk proof targets. 
pub const fn with_multiproof_chunking_enabled( mut self, diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs deleted file mode 100644 index c4f4e0ec366..00000000000 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ /dev/null @@ -1,188 +0,0 @@ -//! Configured sparse trie enum for switching between serial and parallel implementations. - -use alloy_primitives::B256; -use reth_trie::{BranchNodeMasks, Nibbles, ProofTrieNode, TrieNode}; -use reth_trie_sparse::{ - errors::SparseTrieResult, provider::TrieNodeProvider, LeafLookup, LeafLookupError, - SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, -}; -use reth_trie_sparse_parallel::ParallelSparseTrie; -use std::borrow::Cow; - -/// Enum for switching between serial and parallel sparse trie implementations. -/// -/// This type allows runtime selection between different sparse trie implementations, -/// providing flexibility in choosing the appropriate implementation based on workload -/// characteristics. -#[derive(Debug, Clone)] -pub(crate) enum ConfiguredSparseTrie { - /// Serial implementation of the sparse trie. - Serial(Box), - /// Parallel implementation of the sparse trie. 
- Parallel(Box), -} - -impl From for ConfiguredSparseTrie { - fn from(trie: SerialSparseTrie) -> Self { - Self::Serial(Box::new(trie)) - } -} - -impl From for ConfiguredSparseTrie { - fn from(trie: ParallelSparseTrie) -> Self { - Self::Parallel(Box::new(trie)) - } -} - -impl Default for ConfiguredSparseTrie { - fn default() -> Self { - Self::Serial(Default::default()) - } -} - -impl SparseTrieInterface for ConfiguredSparseTrie { - fn with_root( - self, - root: TrieNode, - masks: Option, - retain_updates: bool, - ) -> SparseTrieResult { - match self { - Self::Serial(trie) => { - trie.with_root(root, masks, retain_updates).map(|t| Self::Serial(Box::new(t))) - } - Self::Parallel(trie) => { - trie.with_root(root, masks, retain_updates).map(|t| Self::Parallel(Box::new(t))) - } - } - } - - fn with_updates(self, retain_updates: bool) -> Self { - match self { - Self::Serial(trie) => Self::Serial(Box::new(trie.with_updates(retain_updates))), - Self::Parallel(trie) => Self::Parallel(Box::new(trie.with_updates(retain_updates))), - } - } - - fn reserve_nodes(&mut self, additional: usize) { - match self { - Self::Serial(trie) => trie.reserve_nodes(additional), - Self::Parallel(trie) => trie.reserve_nodes(additional), - } - } - - fn reveal_node( - &mut self, - path: Nibbles, - node: TrieNode, - masks: Option, - ) -> SparseTrieResult<()> { - match self { - Self::Serial(trie) => trie.reveal_node(path, node, masks), - Self::Parallel(trie) => trie.reveal_node(path, node, masks), - } - } - - fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()> { - match self { - Self::Serial(trie) => trie.reveal_nodes(nodes), - Self::Parallel(trie) => trie.reveal_nodes(nodes), - } - } - - fn update_leaf( - &mut self, - full_path: Nibbles, - value: Vec, - provider: P, - ) -> SparseTrieResult<()> { - match self { - Self::Serial(trie) => trie.update_leaf(full_path, value, provider), - Self::Parallel(trie) => trie.update_leaf(full_path, value, provider), - } - } - - fn remove_leaf( - &mut self, 
- full_path: &Nibbles, - provider: P, - ) -> SparseTrieResult<()> { - match self { - Self::Serial(trie) => trie.remove_leaf(full_path, provider), - Self::Parallel(trie) => trie.remove_leaf(full_path, provider), - } - } - - fn root(&mut self) -> B256 { - match self { - Self::Serial(trie) => trie.root(), - Self::Parallel(trie) => trie.root(), - } - } - - fn update_subtrie_hashes(&mut self) { - match self { - Self::Serial(trie) => trie.update_subtrie_hashes(), - Self::Parallel(trie) => trie.update_subtrie_hashes(), - } - } - - fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { - match self { - Self::Serial(trie) => trie.get_leaf_value(full_path), - Self::Parallel(trie) => trie.get_leaf_value(full_path), - } - } - - fn find_leaf( - &self, - full_path: &Nibbles, - expected_value: Option<&Vec>, - ) -> Result { - match self { - Self::Serial(trie) => trie.find_leaf(full_path, expected_value), - Self::Parallel(trie) => trie.find_leaf(full_path, expected_value), - } - } - - fn take_updates(&mut self) -> SparseTrieUpdates { - match self { - Self::Serial(trie) => trie.take_updates(), - Self::Parallel(trie) => trie.take_updates(), - } - } - - fn wipe(&mut self) { - match self { - Self::Serial(trie) => trie.wipe(), - Self::Parallel(trie) => trie.wipe(), - } - } - - fn clear(&mut self) { - match self { - Self::Serial(trie) => trie.clear(), - Self::Parallel(trie) => trie.clear(), - } - } - - fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { - match self { - Self::Serial(trie) => trie.updates_ref(), - Self::Parallel(trie) => trie.updates_ref(), - } - } - fn shrink_nodes_to(&mut self, size: usize) { - match self { - Self::Serial(trie) => trie.shrink_nodes_to(size), - Self::Parallel(trie) => trie.shrink_nodes_to(size), - } - } - - fn shrink_values_to(&mut self, size: usize) { - match self { - Self::Serial(trie) => trie.shrink_values_to(size), - Self::Parallel(trie) => trie.shrink_values_to(size), - } - } -} diff --git 
a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 24af9148731..dc2ac40068f 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -41,7 +41,7 @@ use reth_trie_parallel::{ }; use reth_trie_sparse::{ provider::{TrieNodeProvider, TrieNodeProviderFactory}, - ClearedSparseStateTrie, SparseStateTrie, SparseTrie, + ClearedSparseStateTrie, RevealableSparseTrie, SparseStateTrie, }; use reth_trie_sparse_parallel::{ParallelSparseTrie, ParallelismThresholds}; use std::{ @@ -57,15 +57,12 @@ use std::{ use tracing::{debug, debug_span, instrument, warn, Span}; pub mod bal; -mod configured_sparse_trie; pub mod executor; pub mod multiproof; pub mod prewarm; pub mod receipt_root_task; pub mod sparse_trie; -use configured_sparse_trie::ConfiguredSparseTrie; - /// Default parallelism thresholds to use with the [`ParallelSparseTrie`]. /// /// These values were determined by performing benchmarks using gradually increasing values to judge @@ -131,12 +128,8 @@ where /// A cleared `SparseStateTrie`, kept around to be reused for the state root computation so /// that allocations can be minimized. sparse_state_trie: Arc< - parking_lot::Mutex< - Option>, - >, + parking_lot::Mutex>>, >, - /// Whether to disable the parallel sparse trie. - disable_parallel_sparse_trie: bool, /// Maximum concurrency for prewarm task. prewarm_max_concurrency: usize, /// Whether to disable cache metrics recording. 
@@ -171,7 +164,6 @@ where precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, sparse_state_trie: Arc::default(), - disable_parallel_sparse_trie: config.disable_parallel_sparse_trie(), prewarm_max_concurrency: config.prewarm_max_concurrency(), disable_cache_metrics: config.disable_cache_metrics(), } @@ -514,7 +506,6 @@ where BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, { let cleared_sparse_trie = Arc::clone(&self.sparse_state_trie); - let disable_parallel_sparse_trie = self.disable_parallel_sparse_trie; let trie_metrics = self.trie_metrics.clone(); let span = Span::current(); @@ -524,14 +515,10 @@ where // Reuse a stored SparseStateTrie, or create a new one using the desired configuration // if there's none to reuse. let sparse_state_trie = cleared_sparse_trie.lock().take().unwrap_or_else(|| { - let default_trie = SparseTrie::blind_from(if disable_parallel_sparse_trie { - ConfiguredSparseTrie::Serial(Default::default()) - } else { - ConfiguredSparseTrie::Parallel(Box::new( - ParallelSparseTrie::default() - .with_parallelism_thresholds(PARALLEL_SPARSE_TRIE_PARALLELISM_THRESHOLDS), - )) - }); + let default_trie = RevealableSparseTrie::blind_from( + ParallelSparseTrie::default() + .with_parallelism_thresholds(PARALLEL_SPARSE_TRIE_PARALLELISM_THRESHOLDS), + ); ClearedSparseStateTrie::from_state_trie( SparseStateTrie::new() .with_accounts_trie(default_trie.clone()) @@ -540,12 +527,13 @@ where ) }); - let task = SparseTrieTask::<_, ConfiguredSparseTrie, ConfiguredSparseTrie>::new_with_cleared_trie( - sparse_trie_rx, - proof_worker_handle, - trie_metrics, - sparse_state_trie, - ); + let task = + SparseTrieTask::<_, ParallelSparseTrie, ParallelSparseTrie>::new_with_cleared_trie( + sparse_trie_rx, + proof_worker_handle, + trie_metrics, + sparse_state_trie, + ); let (result, trie) = task.run(); // Send state root computation result diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs 
b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 052fd8672b2..a1df41ee12f 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -8,7 +8,7 @@ use reth_trie_parallel::{proof_task::ProofResult, root::ParallelStateRootError}; use reth_trie_sparse::{ errors::{SparseStateTrieResult, SparseTrieErrorKind}, provider::{TrieNodeProvider, TrieNodeProviderFactory}, - ClearedSparseStateTrie, SerialSparseTrie, SparseStateTrie, SparseTrieInterface, + ClearedSparseStateTrie, SerialSparseTrie, SparseStateTrie, SparseTrie, }; use smallvec::SmallVec; use std::{ @@ -38,8 +38,8 @@ where BPF: TrieNodeProviderFactory + Send + Sync + Clone, BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, - A: SparseTrieInterface + Send + Sync + Default, - S: SparseTrieInterface + Send + Sync + Default + Clone, + A: SparseTrie + Send + Sync + Default, + S: SparseTrie + Send + Sync + Default + Clone, { /// Creates a new sparse trie, pre-populating with a [`ClearedSparseStateTrie`]. 
pub(super) fn new_with_cleared_trie( @@ -150,8 +150,8 @@ where BPF: TrieNodeProviderFactory + Send + Sync, BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, - A: SparseTrieInterface + Send + Sync + Default, - S: SparseTrieInterface + Send + Sync + Default + Clone, + A: SparseTrie + Send + Sync + Default, + S: SparseTrie + Send + Sync + Default + Clone, { trace!(target: "engine::root::sparse", "Updating sparse trie"); let started_at = Instant::now(); diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index bd16e3b359b..9c3864b367a 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -22,7 +22,6 @@ pub struct DefaultEngineValues { legacy_state_root_task_enabled: bool, state_cache_disabled: bool, prewarming_disabled: bool, - parallel_sparse_trie_disabled: bool, state_provider_metrics: bool, cross_block_cache_size: usize, state_root_task_compare_updates: bool, @@ -81,12 +80,6 @@ impl DefaultEngineValues { self } - /// Set whether to disable parallel sparse trie by default - pub const fn with_parallel_sparse_trie_disabled(mut self, v: bool) -> Self { - self.parallel_sparse_trie_disabled = v; - self - } - /// Set whether to enable state provider metrics by default pub const fn with_state_provider_metrics(mut self, v: bool) -> Self { self.state_provider_metrics = v; @@ -189,7 +182,6 @@ impl Default for DefaultEngineValues { legacy_state_root_task_enabled: false, state_cache_disabled: false, prewarming_disabled: false, - parallel_sparse_trie_disabled: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, state_root_task_compare_updates: false, @@ -244,14 +236,14 @@ pub struct EngineArgs { #[arg(long = "engine.disable-prewarming", alias = "engine.disable-caching-and-prewarming", default_value_t = DefaultEngineValues::get_global().prewarming_disabled)] pub prewarming_disabled: bool, - /// 
CAUTION: This CLI flag has no effect anymore, use --engine.disable-parallel-sparse-trie - /// if you want to disable usage of the `ParallelSparseTrie`. + /// CAUTION: This CLI flag has no effect anymore. The parallel sparse trie is always enabled. #[deprecated] #[arg(long = "engine.parallel-sparse-trie", default_value = "true", hide = true)] pub parallel_sparse_trie_enabled: bool, - /// Disable the parallel sparse trie in the engine. - #[arg(long = "engine.disable-parallel-sparse-trie", default_value_t = DefaultEngineValues::get_global().parallel_sparse_trie_disabled)] + /// CAUTION: This CLI flag has no effect anymore. The parallel sparse trie is always enabled. + #[deprecated] + #[arg(long = "engine.disable-parallel-sparse-trie", default_value = "false", hide = true)] pub parallel_sparse_trie_disabled: bool, /// Enable state provider latency metrics. This allows the engine to collect and report stats @@ -343,7 +335,6 @@ impl Default for EngineArgs { legacy_state_root_task_enabled, state_cache_disabled, prewarming_disabled, - parallel_sparse_trie_disabled, state_provider_metrics, cross_block_cache_size, state_root_task_compare_updates, @@ -369,7 +360,7 @@ impl Default for EngineArgs { state_cache_disabled, prewarming_disabled, parallel_sparse_trie_enabled: true, - parallel_sparse_trie_disabled, + parallel_sparse_trie_disabled: false, state_provider_metrics, cross_block_cache_size, accept_execution_requests_hash, @@ -398,7 +389,6 @@ impl EngineArgs { .with_legacy_state_root(self.legacy_state_root_task_enabled) .without_state_cache(self.state_cache_disabled) .without_prewarming(self.prewarming_disabled) - .with_disable_parallel_sparse_trie(self.parallel_sparse_trie_disabled) .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) @@ -457,7 +447,7 @@ mod tests { state_cache_disabled: true, prewarming_disabled: true, 
parallel_sparse_trie_enabled: true, - parallel_sparse_trie_disabled: true, + parallel_sparse_trie_disabled: false, state_provider_metrics: true, cross_block_cache_size: 256, state_root_task_compare_updates: true, @@ -485,7 +475,6 @@ mod tests { "--engine.legacy-state-root", "--engine.disable-state-cache", "--engine.disable-prewarming", - "--engine.disable-parallel-sparse-trie", "--engine.state-provider-metrics", "--engine.cross-block-cache-size", "256", diff --git a/crates/stateless/src/trie.rs b/crates/stateless/src/trie.rs index 49d1f6cf0fd..c4bfc762afb 100644 --- a/crates/stateless/src/trie.rs +++ b/crates/stateless/src/trie.rs @@ -11,7 +11,7 @@ use reth_trie_common::{HashedPostState, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE}; use reth_trie_sparse::{ errors::SparseStateTrieResult, provider::{DefaultTrieNodeProvider, DefaultTrieNodeProviderFactory}, - SparseStateTrie, SparseTrie, SparseTrieInterface, + RevealableSparseTrie, SparseStateTrie, SparseTrie, }; /// Trait for stateless trie implementations that can be used for stateless validation. 
@@ -245,7 +245,7 @@ fn calculate_state_root( for (address, storage) in state.storages.into_iter().sorted_unstable_by_key(|(addr, _)| *addr) { // Take the existing storage trie (or create an empty, “revealed” one) let mut storage_trie = - trie.take_storage_trie(&address).unwrap_or_else(SparseTrie::revealed_empty); + trie.take_storage_trie(&address).unwrap_or_else(RevealableSparseTrie::revealed_empty); if storage.wiped { storage_trie.wipe()?; diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index af24f510b3d..7b55b66fd40 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -14,7 +14,7 @@ use reth_trie_common::{ }; use reth_trie_sparse::{ provider::{RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieInterface, + LeafLookup, LeafLookupError, RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrie, SparseTrieUpdates, }; use smallvec::SmallVec; @@ -147,7 +147,7 @@ impl Default for ParallelSparseTrie { } } -impl SparseTrieInterface for ParallelSparseTrie { +impl SparseTrie for ParallelSparseTrie { fn with_root( mut self, root: TrieNode, @@ -2704,8 +2704,7 @@ mod tests { use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ provider::{DefaultTrieNodeProvider, RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrieInterface, - SparseTrieUpdates, + LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrie, SparseTrieUpdates, }; use std::collections::{BTreeMap, BTreeSet}; diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs index 9f2337f31b8..8054d30a2d8 100644 --- a/crates/trie/sparse/benches/rlp_node.rs +++ b/crates/trie/sparse/benches/rlp_node.rs @@ -7,7 +7,7 @@ use proptest::{prelude::*, test_runner::TestRunner}; use rand::{seq::IteratorRandom, Rng}; use reth_testing_utils::generators; use 
reth_trie::Nibbles; -use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrieInterface}; +use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; fn update_rlp_node_level(c: &mut Criterion) { let mut rng = generators::rng(); diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index ece0aa5313d..1725f529359 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ HashedStorage, }; use reth_trie_common::{updates::TrieUpdatesSorted, HashBuilder, Nibbles}; -use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; +use reth_trie_sparse::{provider::DefaultTrieNodeProvider, RevealableSparseTrie, SerialSparseTrie}; fn calculate_root_from_leaves(c: &mut Criterion) { let mut group = c.benchmark_group("calculate root from leaves"); @@ -42,19 +42,22 @@ fn calculate_root_from_leaves(c: &mut Criterion) { // sparse trie let provider = DefaultTrieNodeProvider; group.bench_function(BenchmarkId::new("sparse trie", size), |b| { - b.iter_with_setup(SparseTrie::::revealed_empty, |mut sparse| { - for (key, value) in &state { + b.iter_with_setup( + RevealableSparseTrie::::revealed_empty, + |mut sparse| { + for (key, value) in &state { + sparse + .update_leaf( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(value).to_vec(), + &provider, + ) + .unwrap(); + } + sparse.root().unwrap(); sparse - .update_leaf( - Nibbles::unpack(key), - alloy_rlp::encode_fixed_size(value).to_vec(), - &provider, - ) - .unwrap(); - } - sparse.root().unwrap(); - sparse - }) + }, + ) }); } } @@ -206,7 +209,8 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { group.bench_function(benchmark_id, |b| { b.iter_with_setup( || { - let mut sparse = SparseTrie::::revealed_empty(); + let mut sparse = + RevealableSparseTrie::::revealed_empty(); for (key, value) in &init_state { sparse .update_leaf( diff --git 
a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 415938915ad..84f57cde78e 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,7 +1,7 @@ use crate::{ provider::{TrieNodeProvider, TrieNodeProviderFactory}, - traits::SparseTrieInterface, - SerialSparseTrie, SparseTrie, + traits::SparseTrie as SparseTrieTrait, + RevealableSparseTrie, SerialSparseTrie, }; use alloc::{collections::VecDeque, vec::Vec}; use alloy_primitives::{ @@ -31,8 +31,8 @@ pub struct ClearedSparseStateTrie< impl ClearedSparseStateTrie where - A: SparseTrieInterface, - S: SparseTrieInterface, + A: SparseTrieTrait, + S: SparseTrieTrait, { /// Creates a [`ClearedSparseStateTrie`] by clearing all the existing internal state of a /// [`SparseStateTrie`] and then storing that instance for later re-use. @@ -83,7 +83,7 @@ pub struct SparseStateTrie< S = SerialSparseTrie, // Storage trie implementation > { /// Sparse account trie. - state: SparseTrie, + state: RevealableSparseTrie, /// Collection of revealed account trie paths. revealed_account_paths: HashSet, /// State related to storage tries. @@ -118,7 +118,7 @@ where #[cfg(test)] impl SparseStateTrie { /// Create state trie from state trie. - pub fn from_state(state: SparseTrie) -> Self { + pub fn from_state(state: RevealableSparseTrie) -> Self { Self { state, ..Default::default() } } } @@ -130,14 +130,15 @@ impl SparseStateTrie { self } - /// Set the accounts trie to the given `SparseTrie`. - pub fn with_accounts_trie(mut self, trie: SparseTrie) -> Self { + /// Set the accounts trie to the given `RevealableSparseTrie`. + pub fn with_accounts_trie(mut self, trie: RevealableSparseTrie) -> Self { self.state = trie; self } - /// Set the default trie which will be cloned when creating new storage [`SparseTrie`]s. - pub fn with_default_storage_trie(mut self, trie: SparseTrie) -> Self { + /// Set the default trie which will be cloned when creating new storage + /// [`RevealableSparseTrie`]s. 
+ pub fn with_default_storage_trie(mut self, trie: RevealableSparseTrie) -> Self { self.storage.default_trie = trie; self } @@ -145,8 +146,8 @@ impl SparseStateTrie { impl SparseStateTrie where - A: SparseTrieInterface + Default, - S: SparseTrieInterface + Default + Clone, + A: SparseTrieTrait + Default, + S: SparseTrieTrait + Default + Clone, { /// Create new [`SparseStateTrie`] pub fn new() -> Self { @@ -214,12 +215,12 @@ where } /// Takes the storage trie for the provided address. - pub fn take_storage_trie(&mut self, address: &B256) -> Option> { + pub fn take_storage_trie(&mut self, address: &B256) -> Option> { self.storage.tries.remove(address) } /// Inserts storage trie for the provided address. - pub fn insert_storage_trie(&mut self, address: B256, storage_trie: SparseTrie) { + pub fn insert_storage_trie(&mut self, address: B256, storage_trie: RevealableSparseTrie) { self.storage.tries.insert(address, storage_trie); } @@ -270,8 +271,8 @@ where let retain_updates = self.retain_updates; // Process all storage trie revealings in parallel, having first removed the - // `reveal_nodes` tracking and `SparseTrie`s for each account from their HashMaps. - // These will be returned after processing. + // `reveal_nodes` tracking and `RevealableSparseTrie`s for each account from their + // HashMaps. These will be returned after processing. let results: Vec<_> = storages .into_iter() .map(|(account, storage_subtree)| { @@ -293,8 +294,8 @@ where }) .collect(); - // Return `revealed_nodes` and `SparseTrie` for each account, incrementing metrics and - // returning the last error seen if any. + // Return `revealed_nodes` and `RevealableSparseTrie` for each account, incrementing + // metrics and returning the last error seen if any. 
let mut any_err = Ok(()); for (account, revealed_nodes, trie, result) in results { self.storage.revealed_paths.insert(account, revealed_nodes); @@ -352,8 +353,8 @@ where let retain_updates = self.retain_updates; // Process all storage trie revealings in parallel, having first removed the - // `reveal_nodes` tracking and `SparseTrie`s for each account from their HashMaps. - // These will be returned after processing. + // `reveal_nodes` tracking and `RevealableSparseTrie`s for each account from their + // HashMaps. These will be returned after processing. let results: Vec<_> = multiproof .storage_proofs .into_iter() @@ -506,7 +507,7 @@ where account: B256, nodes: Vec, revealed_nodes: &mut HashSet, - trie: &mut SparseTrie, + trie: &mut RevealableSparseTrie, retain_updates: bool, ) -> SparseStateTrieResult { let FilteredV2ProofNodes { root_node, nodes, new_nodes, metric_values } = @@ -566,7 +567,7 @@ where account: B256, storage_subtree: DecodedStorageMultiProof, revealed_nodes: &mut HashSet, - trie: &mut SparseTrie, + trie: &mut RevealableSparseTrie, retain_updates: bool, ) -> SparseStateTrieResult { let FilterMappedProofNodes { root_node, nodes, new_nodes, metric_values } = @@ -707,7 +708,7 @@ where /// If the trie has not been revealed, this function does nothing. #[instrument(target = "trie::sparse", skip_all)] pub fn calculate_subtries(&mut self) { - if let SparseTrie::Revealed(trie) = &mut self.state { + if let RevealableSparseTrie::Revealed(trie) = &mut self.state { trie.update_subtrie_hashes(); } } @@ -725,7 +726,7 @@ where provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<&mut A> { match self.state { - SparseTrie::Blind(_) => { + RevealableSparseTrie::Blind(_) => { let (root_node, hash_mask, tree_mask) = provider_factory .account_node_provider() .trie_node(&Nibbles::default())? 
@@ -738,7 +739,7 @@ where let masks = BranchNodeMasks::from_optional(hash_mask, tree_mask); self.state.reveal_root(root_node, masks, self.retain_updates).map_err(Into::into) } - SparseTrie::Revealed(ref mut trie) => Ok(trie), + RevealableSparseTrie::Revealed(ref mut trie) => Ok(trie), } } @@ -977,18 +978,18 @@ where #[derive(Debug, Default)] struct StorageTries { /// Sparse storage tries. - tries: B256Map>, + tries: B256Map>, /// Cleared storage tries, kept for re-use. - cleared_tries: Vec>, + cleared_tries: Vec>, /// Collection of revealed storage trie paths, per account. revealed_paths: B256Map>, /// Cleared revealed storage trie path collections, kept for re-use. cleared_revealed_paths: Vec>, /// A default cleared trie instance, which will be cloned when creating new tries. - default_trie: SparseTrie, + default_trie: RevealableSparseTrie, } -impl StorageTries { +impl StorageTries { /// Returns all fields to a cleared state, equivalent to the default state, keeping cleared /// collections for re-use later when possible. fn clear(&mut self) { @@ -1025,7 +1026,7 @@ impl StorageTries { } } -impl StorageTries { +impl StorageTries { /// Returns the set of already revealed trie node paths for an account's storage, creating the /// set if it didn't previously exist. fn get_revealed_paths_mut(&mut self, account: B256) -> &mut HashSet { @@ -1034,12 +1035,12 @@ impl StorageTries { .or_insert_with(|| self.cleared_revealed_paths.pop().unwrap_or_default()) } - /// Returns the `SparseTrie` and the set of already revealed trie node paths for an account's - /// storage, creating them if they didn't previously exist. + /// Returns the `RevealableSparseTrie` and the set of already revealed trie node paths for an + /// account's storage, creating them if they didn't previously exist. 
fn get_trie_and_revealed_paths_mut( &mut self, account: B256, - ) -> (&mut SparseTrie, &mut HashSet) { + ) -> (&mut RevealableSparseTrie, &mut HashSet) { let trie = self.tries.entry(account).or_insert_with(|| { self.cleared_tries.pop().unwrap_or_else(|| self.default_trie.clone()) }); @@ -1055,7 +1056,7 @@ impl StorageTries { /// Takes the storage trie for the account from the internal `HashMap`, creating it if it /// doesn't already exist. #[cfg(feature = "std")] - fn take_or_create_trie(&mut self, account: &B256) -> SparseTrie { + fn take_or_create_trie(&mut self, account: &B256) -> RevealableSparseTrie { self.tries.remove(account).unwrap_or_else(|| { self.cleared_tries.pop().unwrap_or_else(|| self.default_trie.clone()) }) diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 1d652284a40..15f474c6a2c 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -17,8 +17,8 @@ use crate::provider::TrieNodeProvider; /// /// This trait abstracts over different sparse trie implementations (serial vs parallel) /// while providing a unified interface for the core trie operations needed by the -/// [`crate::SparseTrie`] enum. -pub trait SparseTrieInterface: Sized + Debug + Send + Sync { +/// [`crate::RevealableSparseTrie`] enum. +pub trait SparseTrie: Sized + Debug + Send + Sync { /// Configures the trie to have the given root node revealed. /// /// # Arguments diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 17809bb4cf1..0ca4a20cb7d 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,6 +1,6 @@ use crate::{ provider::{RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, SparseTrieInterface, SparseTrieUpdates, + LeafLookup, LeafLookupError, SparseTrie as SparseTrieTrait, SparseTrieUpdates, }; use alloc::{ borrow::Cow, @@ -43,14 +43,15 @@ const SPARSE_TRIE_SUBTRIE_HASHES_LEVEL: usize = 2; /// 3. 
Incremental operations - nodes can be revealed as needed without loading the entire trie. /// This is what gives rise to the notion of a "sparse" trie. #[derive(PartialEq, Eq, Debug, Clone)] -pub enum SparseTrie { +pub enum RevealableSparseTrie { /// The trie is blind -- no nodes have been revealed /// /// This is the default state. In this state, the trie cannot be directly queried or modified /// until nodes are revealed. /// - /// In this state the `SparseTrie` can optionally carry with it a cleared `SerialSparseTrie`. - /// This allows for reusing the trie's allocations between payload executions. + /// In this state the `RevealableSparseTrie` can optionally carry with it a cleared + /// `SerialSparseTrie`. This allows for reusing the trie's allocations between payload + /// executions. Blind(Option>), /// Some nodes in the Trie have been revealed. /// @@ -60,21 +61,23 @@ pub enum SparseTrie { Revealed(Box), } -impl Default for SparseTrie { +impl Default for RevealableSparseTrie { fn default() -> Self { Self::Blind(None) } } -impl SparseTrie { +impl RevealableSparseTrie { /// Creates a new revealed but empty sparse trie with `SparseNode::Empty` as root node. /// /// # Examples /// /// ``` - /// use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; + /// use reth_trie_sparse::{ + /// provider::DefaultTrieNodeProvider, RevealableSparseTrie, SerialSparseTrie, + /// }; /// - /// let trie = SparseTrie::::revealed_empty(); + /// let trie = RevealableSparseTrie::::revealed_empty(); /// assert!(!trie.is_blind()); /// ``` pub fn revealed_empty() -> Self { @@ -91,7 +94,7 @@ impl SparseTrie { /// /// # Returns /// - /// A mutable reference to the underlying [`SparseTrieInterface`]. + /// A mutable reference to the underlying [`RevealableSparseTrie`](SparseTrieTrait). 
pub fn reveal_root( &mut self, root: TrieNode, @@ -115,17 +118,19 @@ impl SparseTrie { } } -impl SparseTrie { +impl RevealableSparseTrie { /// Creates a new blind sparse trie. /// /// # Examples /// /// ``` - /// use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; + /// use reth_trie_sparse::{ + /// provider::DefaultTrieNodeProvider, RevealableSparseTrie, SerialSparseTrie, + /// }; /// - /// let trie = SparseTrie::::blind(); + /// let trie = RevealableSparseTrie::::blind(); /// assert!(trie.is_blind()); - /// let trie = SparseTrie::::default(); + /// let trie = RevealableSparseTrie::::default(); /// assert!(trie.is_blind()); /// ``` pub const fn blind() -> Self { @@ -133,7 +138,7 @@ impl SparseTrie { } /// Creates a new blind sparse trie, clearing and later reusing the given - /// [`SparseTrieInterface`]. + /// [`RevealableSparseTrie`](SparseTrieTrait). pub fn blind_from(mut trie: T) -> Self { trie.clear(); Self::Blind(Some(Box::new(trie))) @@ -212,9 +217,10 @@ impl SparseTrie { Some((revealed.root(), revealed.take_updates())) } - /// Returns a [`SparseTrie::Blind`] based on this one. If this instance was revealed, or was - /// itself a `Blind` with a pre-allocated [`SparseTrieInterface`], this will return - /// a `Blind` carrying a cleared pre-allocated [`SparseTrieInterface`]. + /// Returns a [`RevealableSparseTrie::Blind`] based on this one. If this instance was revealed, + /// or was itself a `Blind` with a pre-allocated [`RevealableSparseTrie`](SparseTrieTrait), + /// this will return a `Blind` carrying a cleared pre-allocated + /// [`RevealableSparseTrie`](SparseTrieTrait). 
pub fn clear(self) -> Self { match self { Self::Blind(_) => self, @@ -415,7 +421,7 @@ impl Default for SerialSparseTrie { } } -impl SparseTrieInterface for SerialSparseTrie { +impl SparseTrieTrait for SerialSparseTrie { fn with_root( mut self, root: TrieNode, @@ -2486,8 +2492,8 @@ mod tests { #[test] fn sparse_trie_is_blind() { - assert!(SparseTrie::::blind().is_blind()); - assert!(!SparseTrie::::revealed_empty().is_blind()); + assert!(RevealableSparseTrie::::blind().is_blind()); + assert!(!RevealableSparseTrie::::revealed_empty().is_blind()); } #[test] diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index a740b698fa3..de444815fee 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -7,7 +7,7 @@ use crate::{ use alloy_rlp::EMPTY_STRING_CODE; use alloy_trie::EMPTY_ROOT_HASH; use reth_trie_common::HashedPostState; -use reth_trie_sparse::SparseTrieInterface; +use reth_trie_sparse::SparseTrie; use alloy_primitives::{ keccak256, diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index be719a4d1b2..00eef4064ef 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -954,9 +954,6 @@ Engine: --engine.disable-prewarming Disable parallel prewarming - --engine.disable-parallel-sparse-trie - Disable the parallel sparse trie in the engine - --engine.state-provider-metrics Enable state provider latency metrics. 
This allows the engine to collect and report stats about how long state provider calls took during execution, but this does introduce slight overhead to state provider calls diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index eb8e4d437c9..d0b2aad65d8 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -954,9 +954,6 @@ Engine: --engine.disable-prewarming Disable parallel prewarming - --engine.disable-parallel-sparse-trie - Disable the parallel sparse trie in the engine - --engine.state-provider-metrics Enable state provider latency metrics. This allows the engine to collect and report stats about how long state provider calls took during execution, but this does introduce slight overhead to state provider calls From 7fe60017cfdf5d7634b415f37d39b5d85362ddf6 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 26 Jan 2026 17:54:20 +0000 Subject: [PATCH 218/267] chore(metrics): add a gas_last metric similar to new_payload_last (#21437) --- crates/engine/tree/src/tree/metrics.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 943661a5c5b..5a97eae36df 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -242,6 +242,8 @@ pub(crate) struct NewPayloadStatusMetrics { pub(crate) new_payload_error: Counter, /// The total gas of valid new payload messages received. pub(crate) new_payload_total_gas: Histogram, + /// The gas used for the last valid new payload. + pub(crate) new_payload_total_gas_last: Gauge, /// The gas per second of valid new payload messages received. pub(crate) new_payload_gas_per_second: Histogram, /// The gas per second for the last new payload call. 
@@ -283,6 +285,7 @@ impl NewPayloadStatusMetrics { PayloadStatusEnum::Valid => { self.new_payload_valid.increment(1); self.new_payload_total_gas.record(gas_used as f64); + self.new_payload_total_gas_last.set(gas_used as f64); let gas_per_second = gas_used as f64 / elapsed.as_secs_f64(); self.new_payload_gas_per_second.record(gas_per_second); self.new_payload_gas_per_second_last.set(gas_per_second); From 94235d64a83cc62e62fde458699b912ff6a0facb Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 26 Jan 2026 19:28:18 +0000 Subject: [PATCH 219/267] fix(pruner): prune account and storage changeset static files (#21346) --- Cargo.lock | 1 + crates/prune/prune/Cargo.toml | 1 + crates/prune/prune/src/builder.rs | 7 +- crates/prune/prune/src/segments/set.rs | 5 +- .../src/segments/user/account_history.rs | 354 ++++++++++++++--- .../prune/prune/src/segments/user/history.rs | 63 +++ .../src/segments/user/storage_history.rs | 369 ++++++++++++++---- crates/prune/types/src/segment.rs | 4 +- crates/stages/stages/src/stages/prune.rs | 9 +- .../stages/stages/src/test_utils/test_db.rs | 47 ++- 10 files changed, 728 insertions(+), 132 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea860db7ca8..f6427d8b865 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10217,6 +10217,7 @@ dependencies = [ "reth-stages", "reth-stages-types", "reth-static-file-types", + "reth-storage-api", "reth-testing-utils", "reth-tokio-util", "reth-tracing", diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index ff4d47054e7..d86a35eaf14 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -17,6 +17,7 @@ reth-exex-types.workspace = true reth-db-api.workspace = true reth-errors.workspace = true reth-provider.workspace = true +reth-storage-api.workspace = true reth-tokio-util.workspace = true reth-config.workspace = true reth-prune-types.workspace = true diff --git a/crates/prune/prune/src/builder.rs 
b/crates/prune/prune/src/builder.rs index 41a496bd92b..52b175c66a1 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -10,6 +10,7 @@ use reth_provider::{ StageCheckpointReader, StaticFileProviderFactory, StorageSettingsCache, }; use reth_prune_types::PruneModes; +use reth_storage_api::{ChangeSetReader, StorageChangeSetReader}; use std::time::Duration; use tokio::sync::watch; @@ -82,6 +83,8 @@ impl PrunerBuilder { + ChainStateBlockReader + StorageSettingsCache + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, @@ -116,7 +119,9 @@ impl PrunerBuilder { + PruneCheckpointWriter + PruneCheckpointReader + StorageSettingsCache - + StageCheckpointReader, + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader, { let segments = SegmentSet::::from_components(static_file_provider, self.segments); diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index f5ceae63256..3e56664f26d 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -10,6 +10,7 @@ use reth_provider::{ PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StorageSettingsCache, }; use reth_prune_types::PruneModes; +use reth_storage_api::{ChangeSetReader, StorageChangeSetReader}; /// Collection of [`Segment`]. Thread-safe, allocated on the heap. #[derive(Debug)] @@ -52,7 +53,9 @@ where + PruneCheckpointReader + BlockReader + ChainStateBlockReader - + StorageSettingsCache, + + StorageSettingsCache + + ChangeSetReader + + StorageChangeSetReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. 
diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 317337f050e..9bdd26d1114 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -1,14 +1,22 @@ use crate::{ db_ext::DbTxPruneExt, - segments::{user::history::prune_history_indices, PruneInput, Segment}, + segments::{ + user::history::{finalize_history_prune, HistoryPruneResult}, + PruneInput, Segment, + }, PrunerError, }; -use itertools::Itertools; +use alloy_primitives::BlockNumber; use reth_db_api::{models::ShardedKey, tables, transaction::DbTxMut}; -use reth_provider::DBProvider; +use reth_provider::{ + changeset_walker::StaticFileAccountChangesetWalker, DBProvider, EitherWriter, + StaticFileProviderFactory, StorageSettingsCache, +}; use reth_prune_types::{ PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; +use reth_static_file_types::StaticFileSegment; +use reth_storage_api::ChangeSetReader; use rustc_hash::FxHashMap; use tracing::{instrument, trace}; @@ -31,7 +39,10 @@ impl AccountHistory { impl Segment for AccountHistory where - Provider: DBProvider, + Provider: DBProvider + + StaticFileProviderFactory + + StorageSettingsCache + + ChangeSetReader, { fn segment(&self) -> PruneSegment { PruneSegment::AccountHistory @@ -56,11 +67,33 @@ where }; let range_end = *range.end(); + // Check where account changesets are stored + if EitherWriter::account_changesets_destination(provider).is_static_file() { + self.prune_static_files(provider, input, range, range_end) + } else { + self.prune_database(provider, input, range, range_end) + } + } +} + +impl AccountHistory { + /// Prunes account history when changesets are stored in static files. 
+ fn prune_static_files( + &self, + provider: &Provider, + input: PruneInput, + range: std::ops::RangeInclusive, + range_end: BlockNumber, + ) -> Result + where + Provider: DBProvider + StaticFileProviderFactory + ChangeSetReader, + { let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() { input.limiter.set_deleted_entries_limit(limit / ACCOUNT_HISTORY_TABLES_TO_PRUNE) } else { input.limiter }; + if limiter.is_limit_reached() { return Ok(SegmentOutput::not_done( limiter.interrupt_reason(), @@ -68,15 +101,86 @@ where )) } + // The size of this map it's limited by `prune_delete_limit * blocks_since_last_run / + // ACCOUNT_HISTORY_TABLES_TO_PRUNE`, and with the current defaults it's usually `3500 * 5 / + // 2`, so 8750 entries. Each entry is `160 bit + 64 bit`, so the total size should be up to + // ~0.25MB + some hashmap overhead. `blocks_since_last_run` is additionally limited by the + // `max_reorg_depth`, so no OOM is expected here. + let mut highest_deleted_accounts = FxHashMap::default(); let mut last_changeset_pruned_block = None; + let mut pruned_changesets = 0; + let mut done = true; + + let walker = StaticFileAccountChangesetWalker::new(provider, range); + for result in walker { + if limiter.is_limit_reached() { + done = false; + break; + } + let (block_number, changeset) = result?; + highest_deleted_accounts.insert(changeset.address, block_number); + last_changeset_pruned_block = Some(block_number); + pruned_changesets += 1; + limiter.increment_deleted_entries_count(); + } + + // Delete static file jars below the pruned block + if let Some(last_block) = last_changeset_pruned_block { + provider + .static_file_provider() + .delete_segment_below_block(StaticFileSegment::AccountChangeSets, last_block + 1)?; + } + trace!(target: "pruner", pruned = %pruned_changesets, %done, "Pruned account history (changesets from static files)"); + + let result = HistoryPruneResult { + highest_deleted: highest_deleted_accounts, + last_pruned_block: 
last_changeset_pruned_block, + pruned_count: pruned_changesets, + done, + }; + finalize_history_prune::<_, tables::AccountsHistory, _, _>( + provider, + result, + range_end, + &limiter, + ShardedKey::new, + |a, b| a.key == b.key, + ) + .map_err(Into::into) + } + + fn prune_database( + &self, + provider: &Provider, + input: PruneInput, + range: std::ops::RangeInclusive, + range_end: BlockNumber, + ) -> Result + where + Provider: DBProvider, + { + let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() { + input.limiter.set_deleted_entries_limit(limit / ACCOUNT_HISTORY_TABLES_TO_PRUNE) + } else { + input.limiter + }; + + if limiter.is_limit_reached() { + return Ok(SegmentOutput::not_done( + limiter.interrupt_reason(), + input.previous_checkpoint.map(SegmentOutputCheckpoint::from_prune_checkpoint), + )) + } + // Deleted account changeset keys (account addresses) with the highest block number deleted // for that key. // - // The size of this map it's limited by `prune_delete_limit * blocks_since_last_run / - // ACCOUNT_HISTORY_TABLES_TO_PRUNE`, and with current default it's usually `3500 * 5 - // / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total - // size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is - // additionally limited by the `max_reorg_depth`, so no OOM is expected here. + // The size of this map is limited by `prune_delete_limit * blocks_since_last_run / + // ACCOUNT_HISTORY_TABLES_TO_PRUNE`, and with the current defaults it's usually `3500 * 5 / + // 2`, so 8750 entries. Each entry is `160 bit + 64 bit`, so the total size should be up to + // ~0.25MB + some hashmap overhead. `blocks_since_last_run` is additionally limited by the + // `max_reorg_depth`, so no OOM is expected here. 
+ let mut last_changeset_pruned_block = None; let mut highest_deleted_accounts = FxHashMap::default(); let (pruned_changesets, done) = provider.tx_ref().prune_table_with_range::( @@ -88,69 +192,52 @@ where last_changeset_pruned_block = Some(block_number); }, )?; - trace!(target: "pruner", pruned = %pruned_changesets, %done, "Pruned account history (changesets)"); - - let last_changeset_pruned_block = last_changeset_pruned_block - // If there's more account changesets to prune, set the checkpoint block number to - // previous, so we could finish pruning its account changesets on the next run. - .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) - .unwrap_or(range_end); - - // Sort highest deleted block numbers by account address and turn them into sharded keys. - // We did not use `BTreeMap` from the beginning, because it's inefficient for hashes. - let highest_sharded_keys = highest_deleted_accounts - .into_iter() - .sorted_unstable() // Unstable is fine because no equal keys exist in the map - .map(|(address, block_number)| { - ShardedKey::new(address, block_number.min(last_changeset_pruned_block)) - }); - let outcomes = prune_history_indices::( + trace!(target: "pruner", pruned = %pruned_changesets, %done, "Pruned account history (changesets from database)"); + + let result = HistoryPruneResult { + highest_deleted: highest_deleted_accounts, + last_pruned_block: last_changeset_pruned_block, + pruned_count: pruned_changesets, + done, + }; + finalize_history_prune::<_, tables::AccountsHistory, _, _>( provider, - highest_sharded_keys, + result, + range_end, + &limiter, + ShardedKey::new, |a, b| a.key == b.key, - )?; - trace!(target: "pruner", ?outcomes, %done, "Pruned account history (indices)"); - - let progress = limiter.progress(done); - - Ok(SegmentOutput { - progress, - pruned: pruned_changesets + outcomes.deleted, - checkpoint: Some(SegmentOutputCheckpoint { - block_number: Some(last_changeset_pruned_block), - tx_number: None, 
- }), - }) + ) + .map_err(Into::into) } } #[cfg(test)] mod tests { - use crate::segments::{ - user::account_history::ACCOUNT_HISTORY_TABLES_TO_PRUNE, AccountHistory, PruneInput, - PruneLimiter, Segment, SegmentOutput, - }; + use super::ACCOUNT_HISTORY_TABLES_TO_PRUNE; + use crate::segments::{AccountHistory, PruneInput, PruneLimiter, Segment, SegmentOutput}; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; - use reth_db_api::{tables, BlockNumberList}; + use reth_db_api::{models::StorageSettings, tables, BlockNumberList}; use reth_provider::{DBProvider, DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_storage_api::StorageSettingsCache; use reth_testing_utils::generators::{ self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, }; use std::{collections::BTreeMap, ops::AddAssign}; #[test] - fn prune() { + fn prune_legacy() { let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range( &mut rng, - 1..=5000, + 0..=5000, BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, ); db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); @@ -202,6 +289,9 @@ mod tests { let segment = AccountHistory::new(prune_mode); let provider = db.factory.database_provider_rw().unwrap(); + provider.set_storage_settings_cache( + StorageSettings::default().with_account_changesets_in_static_files(false), + ); let result = segment.prune(&provider, input).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); @@ -239,20 +329,18 @@ mod tests { .map(|(i, _)| i) .unwrap_or_default(); - let mut pruned_changesets = changesets - .iter() - // Skip what we've pruned so far, subtracting one to get last pruned block - // number further down - 
.skip(pruned.saturating_sub(1)); + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + let mut pruned_changesets = changesets.iter().skip(pruned.saturating_sub(1)); let last_pruned_block_number = pruned_changesets - .next() - .map(|(block_number, _)| if result.progress.is_finished() { - *block_number - } else { - block_number.saturating_sub(1) - } as BlockNumber) - .unwrap_or(to_block); + .next() + .map(|(block_number, _)| if result.progress.is_finished() { + *block_number + } else { + block_number.saturating_sub(1) + } as BlockNumber) + .unwrap_or(to_block); let pruned_changesets = pruned_changesets.fold( BTreeMap::<_, Vec<_>>::new(), @@ -303,4 +391,152 @@ mod tests { test_prune(998, 2, (PruneProgress::Finished, 998)); test_prune(1400, 3, (PruneProgress::Finished, 804)); } + + #[test] + fn prune_static_file() { + let db = TestStageDB::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range( + &mut rng, + 0..=5000, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); + + let accounts = random_eoa_accounts(&mut rng, 2).into_iter().collect::>(); + + let (changesets, _) = random_changeset_range( + &mut rng, + blocks.iter(), + accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), + 0..0, + 0..0, + ); + + db.insert_changesets_to_static_files(changesets.clone(), None) + .expect("insert changesets to static files"); + db.insert_history(changesets.clone(), None).expect("insert history"); + + let account_occurrences = db.table::().unwrap().into_iter().fold( + BTreeMap::<_, usize>::new(), + |mut map, (key, _)| { + map.entry(key.key).or_default().add_assign(1); + map + }, + ); + assert!(account_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1)); + + let original_shards = db.table::().unwrap(); + + let test_prune = + |to_block: BlockNumber, run: 
usize, expected_result: (PruneProgress, usize)| { + let prune_mode = PruneMode::Before(to_block); + let deleted_entries_limit = 2000; + let mut limiter = + PruneLimiter::default().set_deleted_entries_limit(deleted_entries_limit); + let input = PruneInput { + previous_checkpoint: db + .factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::AccountHistory) + .unwrap(), + to_block, + limiter: limiter.clone(), + }; + let segment = AccountHistory::new(prune_mode); + + let provider = db.factory.database_provider_rw().unwrap(); + provider.set_storage_settings_cache( + StorageSettings::default().with_account_changesets_in_static_files(true), + ); + let result = segment.prune(&provider, input).unwrap(); + limiter.increment_deleted_entries_count_by(result.pruned); + + assert_matches!( + result, + SegmentOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result + ); + + segment + .save_checkpoint( + &provider, + result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), + ) + .unwrap(); + provider.commit().expect("commit"); + + let changesets = changesets + .iter() + .enumerate() + .flat_map(|(block_number, changeset)| { + changeset.iter().map(move |change| (block_number, change)) + }) + .collect::>(); + + #[expect(clippy::skip_while_next)] + let pruned = changesets + .iter() + .enumerate() + .skip_while(|(i, (block_number, _))| { + *i < deleted_entries_limit / ACCOUNT_HISTORY_TABLES_TO_PRUNE * run && + *block_number <= to_block as usize + }) + .next() + .map(|(i, _)| i) + .unwrap_or_default(); + + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + let mut pruned_changesets = changesets.iter().skip(pruned.saturating_sub(1)); + + let last_pruned_block_number = pruned_changesets + .next() + .map(|(block_number, _)| { + (if result.progress.is_finished() { + *block_number + } else { + block_number.saturating_sub(1) + }) as BlockNumber + }) + .unwrap_or(to_block); + + let 
actual_shards = db.table::().unwrap(); + + let expected_shards = original_shards + .iter() + .filter(|(key, _)| key.highest_block_number > last_pruned_block_number) + .map(|(key, blocks)| { + let new_blocks = + blocks.iter().skip_while(|block| *block <= last_pruned_block_number); + (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) + }) + .collect::>(); + + assert_eq!(actual_shards, expected_shards); + + assert_eq!( + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::AccountHistory) + .unwrap(), + Some(PruneCheckpoint { + block_number: Some(last_pruned_block_number), + tx_number: None, + prune_mode + }) + ); + }; + + test_prune( + 998, + 1, + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 1000), + ); + test_prune(998, 2, (PruneProgress::Finished, 1000)); + test_prune(1400, 3, (PruneProgress::Finished, 804)); + } } diff --git a/crates/prune/prune/src/segments/user/history.rs b/crates/prune/prune/src/segments/user/history.rs index 9d95b2fd3ba..d4e6ddcf78d 100644 --- a/crates/prune/prune/src/segments/user/history.rs +++ b/crates/prune/prune/src/segments/user/history.rs @@ -1,4 +1,6 @@ +use crate::PruneLimiter; use alloy_primitives::BlockNumber; +use itertools::Itertools; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, models::ShardedKey, @@ -7,6 +9,8 @@ use reth_db_api::{ BlockNumberList, DatabaseError, RawKey, RawTable, RawValue, }; use reth_provider::DBProvider; +use reth_prune_types::{SegmentOutput, SegmentOutputCheckpoint}; +use rustc_hash::FxHashMap; enum PruneShardOutcome { Deleted, @@ -21,6 +25,65 @@ pub(crate) struct PrunedIndices { pub(crate) unchanged: usize, } +/// Result of pruning history changesets, used to build the final output. +pub(crate) struct HistoryPruneResult { + /// Map of the highest deleted changeset keys to their block numbers. + pub(crate) highest_deleted: FxHashMap, + /// The last block number that had changesets pruned. 
+ pub(crate) last_pruned_block: Option, + /// Number of changesets pruned. + pub(crate) pruned_count: usize, + /// Whether pruning is complete. + pub(crate) done: bool, +} + +/// Finalizes history pruning by sorting sharded keys, pruning history indices, and building output. +/// +/// This is shared between static file and database pruning for both account and storage history. +pub(crate) fn finalize_history_prune( + provider: &Provider, + result: HistoryPruneResult, + range_end: BlockNumber, + limiter: &PruneLimiter, + to_sharded_key: impl Fn(K, BlockNumber) -> T::Key, + key_matches: impl Fn(&T::Key, &T::Key) -> bool, +) -> Result +where + Provider: DBProvider, + T: Table, + T::Key: AsRef>, + K: Ord, +{ + let HistoryPruneResult { highest_deleted, last_pruned_block, pruned_count, done } = result; + + // If there's more changesets to prune, set the checkpoint block number to previous, + // so we could finish pruning its changesets on the next run. + let last_changeset_pruned_block = last_pruned_block + .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) + .unwrap_or(range_end); + + // Sort highest deleted block numbers and turn them into sharded keys. + // We use `sorted_unstable` because no equal keys exist in the map. + let highest_sharded_keys = + highest_deleted.into_iter().sorted_unstable().map(|(key, block_number)| { + to_sharded_key(key, block_number.min(last_changeset_pruned_block)) + }); + + let outcomes = + prune_history_indices::(provider, highest_sharded_keys, key_matches)?; + + let progress = limiter.progress(done); + + Ok(SegmentOutput { + progress, + pruned: pruned_count + outcomes.deleted, + checkpoint: Some(SegmentOutputCheckpoint { + block_number: Some(last_changeset_pruned_block), + tx_number: None, + }), + }) +} + /// Prune history indices according to the provided list of highest sharded keys. /// /// Returns total number of deleted, updated and unchanged entities. 
diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index 556babb9a7a..7abe709e11e 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -1,20 +1,27 @@ use crate::{ db_ext::DbTxPruneExt, - segments::{user::history::prune_history_indices, PruneInput, Segment, SegmentOutput}, + segments::{ + user::history::{finalize_history_prune, HistoryPruneResult}, + PruneInput, Segment, + }, PrunerError, }; -use itertools::Itertools; +use alloy_primitives::{Address, BlockNumber, B256}; use reth_db_api::{ models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress}, tables, transaction::DbTxMut, }; -use reth_provider::DBProvider; -use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint}; +use reth_provider::{DBProvider, EitherWriter, StaticFileProviderFactory}; +use reth_prune_types::{ + PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, +}; +use reth_static_file_types::StaticFileSegment; +use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache}; use rustc_hash::FxHashMap; use tracing::{instrument, trace}; -/// Number of storage history tables to prune in one step +/// Number of storage history tables to prune in one step. /// /// Storage History consists of two tables: [`tables::StorageChangeSets`] and /// [`tables::StoragesHistory`]. We want to prune them to the same block number. 
@@ -33,7 +40,10 @@ impl StorageHistory { impl Segment for StorageHistory where - Provider: DBProvider, + Provider: DBProvider + + StaticFileProviderFactory + + StorageChangeSetReader + + StorageSettingsCache, { fn segment(&self) -> PruneSegment { PruneSegment::StorageHistory @@ -58,11 +68,32 @@ where }; let range_end = *range.end(); + if EitherWriter::storage_changesets_destination(provider).is_static_file() { + self.prune_static_files(provider, input, range, range_end) + } else { + self.prune_database(provider, input, range, range_end) + } + } +} + +impl StorageHistory { + /// Prunes storage history when changesets are stored in static files. + fn prune_static_files( + &self, + provider: &Provider, + input: PruneInput, + range: std::ops::RangeInclusive, + range_end: BlockNumber, + ) -> Result + where + Provider: DBProvider + StaticFileProviderFactory, + { let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() { input.limiter.set_deleted_entries_limit(limit / STORAGE_HISTORY_TABLES_TO_PRUNE) } else { input.limiter }; + if limiter.is_limit_reached() { return Ok(SegmentOutput::not_done( limiter.interrupt_reason(), @@ -70,15 +101,90 @@ where )) } + // The size of this map is limited by `prune_delete_limit * blocks_since_last_run / + // STORAGE_HISTORY_TABLES_TO_PRUNE`, and with current defaults it's usually `3500 * 5 + // / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total + // size should be up to ~0.5MB + some hashmap overhead. `blocks_since_last_run` is + // additionally limited by the `max_reorg_depth`, so no OOM is expected here. 
+ let mut highest_deleted_storages = FxHashMap::default(); let mut last_changeset_pruned_block = None; + let mut pruned_changesets = 0; + let mut done = true; + + let walker = provider.static_file_provider().walk_storage_changeset_range(range); + for result in walker { + if limiter.is_limit_reached() { + done = false; + break; + } + let (block_address, entry) = result?; + let block_number = block_address.block_number(); + let address = block_address.address(); + highest_deleted_storages.insert((address, entry.key), block_number); + last_changeset_pruned_block = Some(block_number); + pruned_changesets += 1; + limiter.increment_deleted_entries_count(); + } + + // Delete static file jars below the pruned block + if let Some(last_block) = last_changeset_pruned_block { + provider + .static_file_provider() + .delete_segment_below_block(StaticFileSegment::StorageChangeSets, last_block + 1)?; + } + trace!(target: "pruner", pruned = %pruned_changesets, %done, "Pruned storage history (changesets from static files)"); + + let result = HistoryPruneResult { + highest_deleted: highest_deleted_storages, + last_pruned_block: last_changeset_pruned_block, + pruned_count: pruned_changesets, + done, + }; + finalize_history_prune::<_, tables::StoragesHistory, (Address, B256), _>( + provider, + result, + range_end, + &limiter, + |(address, storage_key), block_number| { + StorageShardedKey::new(address, storage_key, block_number) + }, + |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, + ) + .map_err(Into::into) + } + + fn prune_database( + &self, + provider: &Provider, + input: PruneInput, + range: std::ops::RangeInclusive, + range_end: BlockNumber, + ) -> Result + where + Provider: DBProvider, + { + let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() { + input.limiter.set_deleted_entries_limit(limit / STORAGE_HISTORY_TABLES_TO_PRUNE) + } else { + input.limiter + }; + + if limiter.is_limit_reached() { + return Ok(SegmentOutput::not_done( + 
limiter.interrupt_reason(), + input.previous_checkpoint.map(SegmentOutputCheckpoint::from_prune_checkpoint), + )) + } + // Deleted storage changeset keys (account addresses and storage slots) with the highest // block number deleted for that key. // - // The size of this map it's limited by `prune_delete_limit * blocks_since_last_run / - // STORAGE_HISTORY_TABLES_TO_PRUNE`, and with current default it's usually `3500 * 5 + // The size of this map is limited by `prune_delete_limit * blocks_since_last_run / + // STORAGE_HISTORY_TABLES_TO_PRUNE`, and with current defaults it's usually `3500 * 5 // / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total - // size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is + // size should be up to ~0.5MB + some hashmap overhead. `blocks_since_last_run` is // additionally limited by the `max_reorg_depth`, so no OOM is expected here. + let mut last_changeset_pruned_block = None; let mut highest_deleted_storages = FxHashMap::default(); let (pruned_changesets, done) = provider.tx_ref().prune_table_with_range::( @@ -92,64 +198,46 @@ where )?; trace!(target: "pruner", deleted = %pruned_changesets, %done, "Pruned storage history (changesets)"); - let last_changeset_pruned_block = last_changeset_pruned_block - // If there's more storage changesets to prune, set the checkpoint block number to - // previous, so we could finish pruning its storage changesets on the next run. - .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) - .unwrap_or(range_end); - - // Sort highest deleted block numbers by account address and storage key and turn them into - // sharded keys. - // We did not use `BTreeMap` from the beginning, because it's inefficient for hashes. 
- let highest_sharded_keys = highest_deleted_storages - .into_iter() - .sorted_unstable() // Unstable is fine because no equal keys exist in the map - .map(|((address, storage_key), block_number)| { - StorageShardedKey::new( - address, - storage_key, - block_number.min(last_changeset_pruned_block), - ) - }); - let outcomes = prune_history_indices::( + let result = HistoryPruneResult { + highest_deleted: highest_deleted_storages, + last_pruned_block: last_changeset_pruned_block, + pruned_count: pruned_changesets, + done, + }; + finalize_history_prune::<_, tables::StoragesHistory, (Address, B256), _>( provider, - highest_sharded_keys, + result, + range_end, + &limiter, + |(address, storage_key), block_number| { + StorageShardedKey::new(address, storage_key, block_number) + }, |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, - )?; - trace!(target: "pruner", ?outcomes, %done, "Pruned storage history (indices)"); - - let progress = limiter.progress(done); - - Ok(SegmentOutput { - progress, - pruned: pruned_changesets + outcomes.deleted, - checkpoint: Some(SegmentOutputCheckpoint { - block_number: Some(last_changeset_pruned_block), - tx_number: None, - }), - }) + ) + .map_err(Into::into) } } #[cfg(test)] mod tests { - use crate::segments::{ - user::storage_history::STORAGE_HISTORY_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment, - SegmentOutput, StorageHistory, - }; + use super::STORAGE_HISTORY_TABLES_TO_PRUNE; + use crate::segments::{PruneInput, PruneLimiter, Segment, SegmentOutput, StorageHistory}; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; - use reth_db_api::{tables, BlockNumberList}; + use reth_db_api::{models::StorageSettings, tables, BlockNumberList}; use reth_provider::{DBProvider, DatabaseProviderFactory, PruneCheckpointReader}; - use reth_prune_types::{PruneCheckpoint, PruneMode, PruneProgress, PruneSegment}; + use reth_prune_types::{ + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, 
PruneSegment, + }; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_storage_api::StorageSettingsCache; use reth_testing_utils::generators::{ self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, }; use std::{collections::BTreeMap, ops::AddAssign}; #[test] - fn prune() { + fn prune_legacy() { let db = TestStageDB::default(); let mut rng = generators::rng(); @@ -208,6 +296,9 @@ mod tests { let segment = StorageHistory::new(prune_mode); let provider = db.factory.database_provider_rw().unwrap(); + provider.set_storage_settings_cache( + StorageSettings::default().with_storage_changesets_in_static_files(false), + ); let result = segment.prune(&provider, input).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); @@ -247,19 +338,19 @@ mod tests { .map(|(i, _)| i) .unwrap_or_default(); - let mut pruned_changesets = changesets - .iter() - // Skip what we've pruned so far, subtracting one to get last pruned block number - // further down - .skip(pruned.saturating_sub(1)); + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + let mut pruned_changesets = changesets.iter().skip(pruned.saturating_sub(1)); let last_pruned_block_number = pruned_changesets .next() - .map(|(block_number, _, _)| if result.progress.is_finished() { - *block_number - } else { - block_number.saturating_sub(1) - } as BlockNumber) + .map(|(block_number, _, _)| { + (if result.progress.is_finished() { + *block_number + } else { + block_number.saturating_sub(1) + }) as BlockNumber + }) .unwrap_or(to_block); let pruned_changesets = pruned_changesets.fold( @@ -306,14 +397,160 @@ mod tests { test_prune( 998, 1, - ( - PruneProgress::HasMoreData( - reth_prune_types::PruneInterruptReason::DeletedEntriesLimitReached, - ), - 500, - ), + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 500), ); test_prune(998, 2, (PruneProgress::Finished, 499)); test_prune(1200, 3, 
(PruneProgress::Finished, 202)); } + + #[test] + fn prune_static_file() { + let db = TestStageDB::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range( + &mut rng, + 0..=5000, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); + + let accounts = random_eoa_accounts(&mut rng, 2).into_iter().collect::>(); + + let (changesets, _) = random_changeset_range( + &mut rng, + blocks.iter(), + accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), + 1..2, + 1..2, + ); + + db.insert_changesets_to_static_files(changesets.clone(), None) + .expect("insert changesets to static files"); + db.insert_history(changesets.clone(), None).expect("insert history"); + + let storage_occurrences = db.table::().unwrap().into_iter().fold( + BTreeMap::<_, usize>::new(), + |mut map, (key, _)| { + map.entry((key.address, key.sharded_key.key)).or_default().add_assign(1); + map + }, + ); + assert!(storage_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1)); + + let original_shards = db.table::().unwrap(); + + let test_prune = |to_block: BlockNumber, + run: usize, + expected_result: (PruneProgress, usize)| { + let prune_mode = PruneMode::Before(to_block); + let deleted_entries_limit = 1000; + let mut limiter = + PruneLimiter::default().set_deleted_entries_limit(deleted_entries_limit); + let input = PruneInput { + previous_checkpoint: db + .factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::StorageHistory) + .unwrap(), + to_block, + limiter: limiter.clone(), + }; + let segment = StorageHistory::new(prune_mode); + + let provider = db.factory.database_provider_rw().unwrap(); + provider.set_storage_settings_cache( + StorageSettings::default().with_storage_changesets_in_static_files(true), + ); + let result = segment.prune(&provider, input).unwrap(); + 
limiter.increment_deleted_entries_count_by(result.pruned); + + assert_matches!( + result, + SegmentOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result + ); + + segment + .save_checkpoint( + &provider, + result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), + ) + .unwrap(); + provider.commit().expect("commit"); + + let changesets = changesets + .iter() + .enumerate() + .flat_map(|(block_number, changeset)| { + changeset.iter().flat_map(move |(address, _, entries)| { + entries.iter().map(move |entry| (block_number, address, entry)) + }) + }) + .collect::>(); + + #[expect(clippy::skip_while_next)] + let pruned = changesets + .iter() + .enumerate() + .skip_while(|(i, (block_number, _, _))| { + *i < deleted_entries_limit / STORAGE_HISTORY_TABLES_TO_PRUNE * run && + *block_number <= to_block as usize + }) + .next() + .map(|(i, _)| i) + .unwrap_or_default(); + + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + let mut pruned_changesets = changesets.iter().skip(pruned.saturating_sub(1)); + + let last_pruned_block_number = pruned_changesets + .next() + .map(|(block_number, _, _)| { + (if result.progress.is_finished() { + *block_number + } else { + block_number.saturating_sub(1) + }) as BlockNumber + }) + .unwrap_or(to_block); + + let actual_shards = db.table::().unwrap(); + + let expected_shards = original_shards + .iter() + .filter(|(key, _)| key.sharded_key.highest_block_number > last_pruned_block_number) + .map(|(key, blocks)| { + let new_blocks = + blocks.iter().skip_while(|block| *block <= last_pruned_block_number); + (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) + }) + .collect::>(); + + assert_eq!(actual_shards, expected_shards); + + assert_eq!( + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::StorageHistory) + .unwrap(), + Some(PruneCheckpoint { + block_number: Some(last_pruned_block_number), + tx_number: None, + prune_mode + }) + 
); + }; + + test_prune( + 998, + 1, + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 500), + ); + test_prune(998, 2, (PruneProgress::Finished, 500)); + test_prune(1200, 3, (PruneProgress::Finished, 202)); + } } diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index d3643b2ee8d..0e3f4e1edc6 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -24,9 +24,9 @@ pub enum PruneSegment { Receipts, /// Prune segment responsible for some rows in `Receipts` table filtered by logs. ContractLogs, - /// Prune segment responsible for the `AccountChangeSets` and `AccountsHistory` tables. + /// Prunes account changesets (static files/MDBX) and `AccountsHistory`. AccountHistory, - /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables. + /// Prunes storage changesets (static files/MDBX) and `StoragesHistory`. StorageHistory, #[deprecated = "Variant indexes cannot be changed"] #[strum(disabled)] diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index efb17e8e950..98c9a578c64 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -10,6 +10,7 @@ use reth_prune::{ use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; +use reth_storage_api::{ChangeSetReader, StorageChangeSetReader}; use tracing::info; /// The prune stage that runs the pruner with the provided prune modes. 
@@ -46,7 +47,9 @@ where + StageCheckpointReader + StaticFileProviderFactory< Primitives: NodePrimitives, - > + StorageSettingsCache, + > + StorageSettingsCache + + ChangeSetReader + + StorageChangeSetReader, { fn id(&self) -> StageId { StageId::Prune @@ -151,7 +154,9 @@ where + StageCheckpointReader + StaticFileProviderFactory< Primitives: NodePrimitives, - > + StorageSettingsCache, + > + StorageSettingsCache + + ChangeSetReader + + StorageChangeSetReader, { fn id(&self) -> StageId { StageId::PruneSenderRecovery diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index fd9a456cc21..00cd834f700 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -11,7 +11,7 @@ use reth_db_api::{ common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::Database, - models::{AccountBeforeTx, StoredBlockBodyIndices}, + models::{AccountBeforeTx, StorageBeforeTx, StoredBlockBodyIndices}, table::Table, tables, transaction::{DbTx, DbTxMut}, @@ -473,6 +473,51 @@ impl TestStageDB { }) } + /// Insert collection of [`ChangeSet`] into static files (account and storage changesets). 
+ pub fn insert_changesets_to_static_files( + &self, + changesets: I, + block_offset: Option, + ) -> ProviderResult<()> + where + I: IntoIterator, + { + let offset = block_offset.unwrap_or_default(); + let static_file_provider = self.factory.static_file_provider(); + + let mut account_changeset_writer = + static_file_provider.latest_writer(StaticFileSegment::AccountChangeSets)?; + let mut storage_changeset_writer = + static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?; + + for (block, changeset) in changesets.into_iter().enumerate() { + let block_number = offset + block as u64; + + let mut account_changesets = Vec::new(); + let mut storage_changesets = Vec::new(); + + for (address, old_account, old_storage) in changeset { + account_changesets.push(AccountBeforeTx { address, info: Some(old_account) }); + + for entry in old_storage { + storage_changesets.push(StorageBeforeTx { + address, + key: entry.key, + value: entry.value, + }); + } + } + + account_changeset_writer.append_account_changeset(account_changesets, block_number)?; + storage_changeset_writer.append_storage_changeset(storage_changesets, block_number)?; + } + + account_changeset_writer.commit()?; + storage_changeset_writer.commit()?; + + Ok(()) + } + pub fn insert_history(&self, changesets: I, _block_offset: Option) -> ProviderResult<()> where I: IntoIterator, From f1459fcf911e74164f4389192794799e5ec2a2a6 Mon Sep 17 00:00:00 2001 From: ethfanWilliam Date: Mon, 26 Jan 2026 23:43:11 +0400 Subject: [PATCH 220/267] fix(stages): retain RocksDB TempDir in TestStageDB to prevent premature deletion (#21444) --- crates/stages/stages/src/test_utils/test_db.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 00cd834f700..5f00a498c45 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -38,15 +38,17 @@ use 
tempfile::TempDir; pub struct TestStageDB { pub factory: ProviderFactory, pub temp_static_files_dir: TempDir, + pub temp_rocksdb_dir: TempDir, } impl Default for TestStageDB { /// Create a new instance of [`TestStageDB`] fn default() -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); - let (_, rocksdb_dir_path) = create_test_rocksdb_dir(); + let (rocksdb_dir, rocksdb_dir_path) = create_test_rocksdb_dir(); Self { temp_static_files_dir: static_dir, + temp_rocksdb_dir: rocksdb_dir, factory: ProviderFactory::new( create_test_rw_db(), MAINNET.clone(), @@ -61,10 +63,11 @@ impl Default for TestStageDB { impl TestStageDB { pub fn new(path: &Path) -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); - let (_, rocksdb_dir_path) = create_test_rocksdb_dir(); + let (rocksdb_dir, rocksdb_dir_path) = create_test_rocksdb_dir(); Self { temp_static_files_dir: static_dir, + temp_rocksdb_dir: rocksdb_dir, factory: ProviderFactory::new( create_test_rw_db_with_path(path), MAINNET.clone(), From 1ccc174e7be9b023c30ca35fb5d931d910a86f71 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 26 Jan 2026 20:53:55 +0100 Subject: [PATCH 221/267] chore: remove unused docker from makefile (#21445) --- Dockerfile.cross | 15 ----- DockerfileOp.cross | 15 ----- Makefile | 134 --------------------------------------------- 3 files changed, 164 deletions(-) delete mode 100644 Dockerfile.cross delete mode 100644 DockerfileOp.cross diff --git a/Dockerfile.cross b/Dockerfile.cross deleted file mode 100644 index f477f1ed3e0..00000000000 --- a/Dockerfile.cross +++ /dev/null @@ -1,15 +0,0 @@ -# This image is meant to enable cross-architecture builds. 
-# It assumes the reth binary has already been compiled for `$TARGETPLATFORM` and is -# locatable in `./dist/bin/$TARGETARCH` -FROM --platform=$TARGETPLATFORM ubuntu:22.04 - -LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth -LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" - -# Filled by docker buildx -ARG TARGETARCH - -COPY ./dist/bin/$TARGETARCH/reth /usr/local/bin/reth - -EXPOSE 30303 30303/udp 9001 8545 8546 -ENTRYPOINT ["/usr/local/bin/reth"] diff --git a/DockerfileOp.cross b/DockerfileOp.cross deleted file mode 100644 index 47606a82830..00000000000 --- a/DockerfileOp.cross +++ /dev/null @@ -1,15 +0,0 @@ -# This image is meant to enable cross-architecture builds. -# It assumes the reth binary has already been compiled for `$TARGETPLATFORM` and is -# locatable in `./dist/bin/$TARGETARCH` -FROM --platform=$TARGETPLATFORM ubuntu:22.04 - -LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth -LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" - -# Filled by docker buildx -ARG TARGETARCH - -COPY ./dist/bin/$TARGETARCH/op-reth /usr/local/bin/op-reth - -EXPOSE 30303 30303/udp 9001 8545 8546 -ENTRYPOINT ["/usr/local/bin/op-reth"] diff --git a/Makefile b/Makefile index 703fc0b58ae..651adc9d963 100644 --- a/Makefile +++ b/Makefile @@ -35,9 +35,6 @@ EEST_TESTS_TAG := v4.5.0 EEST_TESTS_URL := https://github.com/ethereum/execution-spec-tests/releases/download/$(EEST_TESTS_TAG)/fixtures_stable.tar.gz EEST_TESTS_DIR := ./testing/ef-tests/execution-spec-tests -# The docker image name -DOCKER_IMAGE_NAME ?= ghcr.io/paradigmxyz/reth - ##@ Help .PHONY: help @@ -242,137 +239,6 @@ install-reth-bench: ## Build and install the reth binary under `$(CARGO_HOME)/bi --features "$(FEATURES)" \ --profile "$(PROFILE)" -##@ Docker - -# Note: This requires a buildx builder with emulation support. 
For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --driver docker-container --name cross-builder` -.PHONY: docker-build-push -docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag. - $(call docker_build_push,$(GIT_TAG),$(GIT_TAG)) - -# Note: This requires a buildx builder with emulation support. For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --driver docker-container --name cross-builder` -.PHONY: docker-build-push-git-sha -docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha. - $(call docker_build_push,$(GIT_SHA),$(GIT_SHA)) - -# Note: This requires a buildx builder with emulation support. For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --driver docker-container --name cross-builder` -.PHONY: docker-build-push-latest -docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`. - $(call docker_build_push,$(GIT_TAG),latest) - -# Note: This requires a buildx builder with emulation support. For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --name cross-builder` -.PHONY: docker-build-push-nightly -docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`. - $(call docker_build_push,nightly,nightly) - -.PHONY: docker-build-push-nightly-edge-profiling -docker-build-push-nightly-edge-profiling: FEATURES := $(FEATURES) edge -docker-build-push-nightly-edge-profiling: ## Build and push cross-arch Docker image with edge features tagged with `nightly-edge-profiling`. 
- $(call docker_build_push,nightly-edge-profiling,nightly-edge-profiling) - -# Create a cross-arch Docker image with the given tags and push it -define docker_build_push - $(MAKE) FEATURES="$(FEATURES)" build-x86_64-unknown-linux-gnu - mkdir -p $(BIN_DIR)/amd64 - cp $(CARGO_TARGET_DIR)/x86_64-unknown-linux-gnu/$(PROFILE)/reth $(BIN_DIR)/amd64/reth - - $(MAKE) FEATURES="$(FEATURES)" build-aarch64-unknown-linux-gnu - mkdir -p $(BIN_DIR)/arm64 - cp $(CARGO_TARGET_DIR)/aarch64-unknown-linux-gnu/$(PROFILE)/reth $(BIN_DIR)/arm64/reth - - docker buildx build --file ./Dockerfile.cross . \ - --platform linux/amd64,linux/arm64 \ - --tag $(DOCKER_IMAGE_NAME):$(1) \ - --tag $(DOCKER_IMAGE_NAME):$(2) \ - --provenance=false \ - --push -endef - -##@ Optimism docker - -# Note: This requires a buildx builder with emulation support. For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --driver docker-container --name cross-builder` -.PHONY: op-docker-build-push -op-docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag. - $(call op_docker_build_push,$(GIT_TAG),$(GIT_TAG)) - -# Note: This requires a buildx builder with emulation support. For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --driver docker-container --name cross-builder` -.PHONY: op-docker-build-push-git-sha -op-docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha. - $(call op_docker_build_push,$(GIT_SHA),$(GIT_SHA)) - -# Note: This requires a buildx builder with emulation support. 
For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --driver docker-container --name cross-builder` -.PHONY: op-docker-build-push-latest -op-docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`. - $(call op_docker_build_push,$(GIT_TAG),latest) - -# Note: This requires a buildx builder with emulation support. For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --name cross-builder` -.PHONY: op-docker-build-push-nightly -op-docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`. - $(call op_docker_build_push,nightly,nightly) - -.PHONY: op-docker-build-push-nightly-edge-profiling -op-docker-build-push-nightly-edge-profiling: FEATURES := $(FEATURES) edge -op-docker-build-push-nightly-edge-profiling: ## Build and push cross-arch Docker image with edge features tagged with `nightly-edge-profiling`. - $(call op_docker_build_push,nightly-edge-profiling,nightly-edge-profiling) - -# Note: This requires a buildx builder with emulation support. For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --name cross-builder` -.PHONY: docker-build-push-nightly-profiling -docker-build-push-nightly-profiling: ## Build and push cross-arch Docker image with profiling profile tagged with nightly-profiling. - $(call docker_build_push,nightly-profiling,nightly-profiling) - - # Note: This requires a buildx builder with emulation support. 
For example: -# -# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` -# `docker buildx create --use --name cross-builder` -.PHONY: op-docker-build-push-nightly-profiling -op-docker-build-push-nightly-profiling: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`. - $(call op_docker_build_push,nightly-profiling,nightly-profiling) - - -# Create a cross-arch Docker image with the given tags and push it -define op_docker_build_push - $(MAKE) FEATURES="$(FEATURES)" op-build-x86_64-unknown-linux-gnu - mkdir -p $(BIN_DIR)/amd64 - cp $(CARGO_TARGET_DIR)/x86_64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/amd64/op-reth - - $(MAKE) FEATURES="$(FEATURES)" op-build-aarch64-unknown-linux-gnu - mkdir -p $(BIN_DIR)/arm64 - cp $(CARGO_TARGET_DIR)/aarch64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/arm64/op-reth - - docker buildx build --file ./DockerfileOp.cross . \ - --platform linux/amd64,linux/arm64 \ - --tag $(DOCKER_IMAGE_NAME):$(1) \ - --tag $(DOCKER_IMAGE_NAME):$(2) \ - --provenance=false \ - --push -endef - ##@ Other .PHONY: clean From c41c8e6caed4c94103b4237cef8dc172790a9c6e Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 26 Jan 2026 21:06:09 +0100 Subject: [PATCH 222/267] chore: reduce number of nightly builds (#21446) --- docker-bake.hcl | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index 9b6f11788cf..2cec47ccdb8 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -39,7 +39,7 @@ group "default" { } group "nightly" { - targets = ["ethereum", "ethereum-profiling", "ethereum-edge-profiling", "optimism", "optimism-profiling", "optimism-edge-profiling"] + targets = ["ethereum", "ethereum-profiling", "ethereum-edge-profiling", "optimism", "optimism-profiling"] } // Base target with shared configuration @@ -54,6 +54,10 @@ target "_base" { VERGEN_GIT_DIRTY = 
"${VERGEN_GIT_DIRTY}" } } +target "_base_profiling" { + inherits = ["_base"] + platforms = ["linux/amd64"] +} // Ethereum (reth) target "ethereum" { @@ -66,7 +70,7 @@ target "ethereum" { } target "ethereum-profiling" { - inherits = ["_base"] + inherits = ["_base_profiling"] args = { BINARY = "reth" MANIFEST_PATH = "bin/reth" @@ -77,7 +81,7 @@ target "ethereum-profiling" { } target "ethereum-edge-profiling" { - inherits = ["_base"] + inherits = ["_base_profiling"] args = { BINARY = "reth" MANIFEST_PATH = "bin/reth" @@ -98,7 +102,7 @@ target "optimism" { } target "optimism-profiling" { - inherits = ["_base"] + inherits = ["_base_profiling"] args = { BINARY = "op-reth" MANIFEST_PATH = "crates/optimism/bin" @@ -109,7 +113,7 @@ target "optimism-profiling" { } target "optimism-edge-profiling" { - inherits = ["_base"] + inherits = ["_base_profiling"] args = { BINARY = "op-reth" MANIFEST_PATH = "crates/optimism/bin" From da92733be8fe0f5ca11c6aeb0b5e32d1e16bee08 Mon Sep 17 00:00:00 2001 From: ethfanWilliam Date: Tue, 27 Jan 2026 00:19:28 +0400 Subject: [PATCH 223/267] fix: use unwrap_or_else for lazy evaluation of BlobParams::cancun (#21442) --- crates/rpc/rpc-eth-api/src/helpers/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/config.rs b/crates/rpc/rpc-eth-api/src/helpers/config.rs index fd07651672d..6e6dad6b82c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/config.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/config.rs @@ -79,7 +79,7 @@ where blob_schedule: chain_spec .blob_params_at_timestamp(timestamp) // no blob support, so we set this to original cancun values as defined in eip-4844 - .unwrap_or(BlobParams::cancun()), + .unwrap_or_else(BlobParams::cancun), chain_id: chain_spec.chain().id(), fork_id, precompiles, From 1e33821e19d8cdc59cb6a47da142e8aa39963af1 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 26 Jan 2026 21:37:33 +0100 Subject: [PATCH 224/267] 
ci: use depot cache in Dockerfile.depot (#21450) --- .github/workflows/docker.yml | 1 + Dockerfile.depot | 13 ++++++++----- docker-bake.hcl | 8 +++++++- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c0b77e4ac0f..6d4c4cad181 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -93,6 +93,7 @@ jobs: VERGEN_GIT_SHA: ${{ steps.git.outputs.sha }} VERGEN_GIT_DESCRIBE: ${{ steps.git.outputs.describe }} VERGEN_GIT_DIRTY: ${{ steps.git.outputs.dirty }} + DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }} with: project: ${{ vars.DEPOT_PROJECT_ID }} files: docker-bake.hcl diff --git a/Dockerfile.depot b/Dockerfile.depot index a94eedd11b4..341ce881e64 100644 --- a/Dockerfile.depot +++ b/Dockerfile.depot @@ -1,4 +1,4 @@ -# syntax=docker.io/docker/dockerfile:1.7-labs +# syntax=docker/dockerfile:1 # Unified Dockerfile for reth and op-reth, optimized for Depot builds # Usage: @@ -17,6 +17,7 @@ RUN apt-get update && apt-get install -y libclang-dev pkg-config RUN cargo install sccache --locked ENV RUSTC_WRAPPER=sccache ENV SCCACHE_DIR=/sccache +ENV SCCACHE_WEBDAV_ENDPOINT=https://cache.depot.dev # Builds a cargo-chef plan FROM chef AS planner @@ -52,15 +53,17 @@ ENV VERGEN_GIT_SHA=$VERGEN_GIT_SHA ENV VERGEN_GIT_DESCRIBE=$VERGEN_GIT_DESCRIBE ENV VERGEN_GIT_DIRTY=$VERGEN_GIT_DIRTY -# Build dependencies with cache mounts -RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ +# Build dependencies +RUN --mount=type=secret,id=DEPOT_TOKEN,env=SCCACHE_WEBDAV_TOKEN \ + --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ --mount=type=cache,target=/usr/local/cargo/git,sharing=locked \ --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --locked --recipe-path recipe.json --manifest-path $MANIFEST_PATH/Cargo.toml -# Build application with cache mounts +# Build application COPY --exclude=.git . . 
-RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ +RUN --mount=type=secret,id=DEPOT_TOKEN,env=SCCACHE_WEBDAV_TOKEN \ + --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ --mount=type=cache,target=/usr/local/cargo/git,sharing=locked \ --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin $BINARY --manifest-path $MANIFEST_PATH/Cargo.toml diff --git a/docker-bake.hcl b/docker-bake.hcl index 2cec47ccdb8..ffa824668b7 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -52,7 +52,13 @@ target "_base" { VERGEN_GIT_SHA = "${VERGEN_GIT_SHA}" VERGEN_GIT_DESCRIBE = "${VERGEN_GIT_DESCRIBE}" VERGEN_GIT_DIRTY = "${VERGEN_GIT_DIRTY}" - } + }, + secret = [ + { + type = "env" + id = "DEPOT_TOKEN" + } + ] } target "_base_profiling" { inherits = ["_base"] From 18bec10a0b002fd7d6877e20bbb1cf37887dcbd0 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 26 Jan 2026 13:00:45 -0800 Subject: [PATCH 225/267] perf(docker): use shared cache mounts for parallel builds (#21451) Co-authored-by: Amp --- Dockerfile.depot | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Dockerfile.depot b/Dockerfile.depot index 341ce881e64..a0c5d64f9ff 100644 --- a/Dockerfile.depot +++ b/Dockerfile.depot @@ -55,17 +55,17 @@ ENV VERGEN_GIT_DIRTY=$VERGEN_GIT_DIRTY # Build dependencies RUN --mount=type=secret,id=DEPOT_TOKEN,env=SCCACHE_WEBDAV_TOKEN \ - --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ - --mount=type=cache,target=/usr/local/cargo/git,sharing=locked \ - --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ + --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \ + --mount=type=cache,target=/usr/local/cargo/git,sharing=shared \ + --mount=type=cache,target=$SCCACHE_DIR,sharing=shared \ cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --locked --recipe-path recipe.json --manifest-path 
$MANIFEST_PATH/Cargo.toml # Build application COPY --exclude=.git . . RUN --mount=type=secret,id=DEPOT_TOKEN,env=SCCACHE_WEBDAV_TOKEN \ - --mount=type=cache,target=/usr/local/cargo/registry,sharing=locked \ - --mount=type=cache,target=/usr/local/cargo/git,sharing=locked \ - --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ + --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \ + --mount=type=cache,target=/usr/local/cargo/git,sharing=shared \ + --mount=type=cache,target=$SCCACHE_DIR,sharing=shared \ cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin $BINARY --manifest-path $MANIFEST_PATH/Cargo.toml # Copy binary to a known location (ARG not resolved in COPY) From 0bfa7fa5fa3f7c0338a9619778384f318d9b8252 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 26 Jan 2026 22:39:35 +0100 Subject: [PATCH 226/267] ci: typorino (#21453) --- docker-bake.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index ffa824668b7..51daea8c25f 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -52,7 +52,7 @@ target "_base" { VERGEN_GIT_SHA = "${VERGEN_GIT_SHA}" VERGEN_GIT_DESCRIBE = "${VERGEN_GIT_DESCRIBE}" VERGEN_GIT_DIRTY = "${VERGEN_GIT_DIRTY}" - }, + } secret = [ { type = "env" From 26a37f3c00649a0b8f5381b2b9a8b42a312c5d5f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 26 Jan 2026 23:15:41 +0100 Subject: [PATCH 227/267] chore: use Default::default() for TransactionInfo for forward compatibility (#21454) Co-authored-by: Amp --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- crates/primitives-traits/src/block/recovered.rs | 2 ++ crates/rpc/rpc-eth-api/src/helpers/trace.rs | 2 ++ crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 4 ++++ crates/rpc/rpc-eth-types/src/transaction.rs | 4 ++++ 6 files changed, 15 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6427d8b865..0f424bf169e 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -11408,9 +11408,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1ce3f52a052d78cc251714d57bf05dc8bc75e269677de11805d3153300a2cd" +checksum = "a24ca988ae1f7a0bb5688630579c00e867cd9f1df0a2f040623887f63d3b414c" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", diff --git a/Cargo.toml b/Cargo.toml index 9e83760c22c..9d774429bb7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -481,7 +481,7 @@ revm-primitives = { version = "22.0.0", default-features = false } revm-interpreter = { version = "32.0.0", default-features = false } revm-database-interface = { version = "9.0.0", default-features = false } op-revm = { version = "15.0.0", default-features = false } -revm-inspectors = "0.34.0" +revm-inspectors = "0.34.1" # eth alloy-chains = { version = "0.2.5", default-features = false } diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs index 7d107af6ddb..be4564fc507 100644 --- a/crates/primitives-traits/src/block/recovered.rs +++ b/crates/primitives-traits/src/block/recovered.rs @@ -798,12 +798,14 @@ mod rpc_compat { .zip(senders) .enumerate() .map(|(idx, (tx, sender))| { + #[allow(clippy::needless_update)] let tx_info = TransactionInfo { hash: Some(*tx.tx_hash()), block_hash, block_number: Some(block_number), base_fee, index: Some(idx as u64), + ..Default::default() }; converter(Recovered::new_unchecked(tx, sender), tx_info) diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 13ac2479158..8948efe869d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -310,12 +310,14 @@ pub trait Trace: LoadState> + Call { .evm_factory() .create_tracer(&mut db, evm_env, inspector_setup()) .try_trace_many(block.transactions_recovered().take(max_transactions), |ctx| { + 
#[allow(clippy::needless_update)] let tx_info = TransactionInfo { hash: Some(*ctx.tx.tx_hash()), index: Some(idx), block_hash: Some(block_hash), block_number: Some(block_number), base_fee: Some(base_fee), + ..Default::default() }; idx += 1; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 7a073834904..d8083d20c89 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -290,12 +290,14 @@ pub trait EthTransactions: LoadTransaction { let block_number = block.number(); let base_fee_per_gas = block.base_fee_per_gas(); if let Some((signer, tx)) = block.transactions_with_sender().nth(index) { + #[allow(clippy::needless_update)] let tx_info = TransactionInfo { hash: Some(*tx.tx_hash()), block_hash: Some(block_hash), block_number: Some(block_number), base_fee: base_fee_per_gas, index: Some(index as u64), + ..Default::default() }; return Ok(Some( @@ -366,12 +368,14 @@ pub trait EthTransactions: LoadTransaction { .enumerate() .find(|(_, (signer, tx))| **signer == sender && (*tx).nonce() == nonce) .map(|(index, (signer, tx))| { + #[allow(clippy::needless_update)] let tx_info = TransactionInfo { hash: Some(*tx.tx_hash()), block_hash: Some(block_hash), block_number: Some(block_number), base_fee: base_fee_per_gas, index: Some(index as u64), + ..Default::default() }; Ok(self.converter().fill(tx.clone().with_signer(*signer), tx_info)?) 
}) diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index de3323d61e6..682fa1b1cea 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -49,12 +49,14 @@ impl TransactionSource { match self { Self::Pool(tx) => resp_builder.fill_pending(tx), Self::Block { transaction, index, block_hash, block_number, base_fee } => { + #[allow(clippy::needless_update)] let tx_info = TransactionInfo { hash: Some(transaction.trie_hash()), index: Some(index), block_hash: Some(block_hash), block_number: Some(block_number), base_fee, + ..Default::default() }; resp_builder.fill(transaction, tx_info) @@ -69,6 +71,7 @@ impl TransactionSource { let hash = tx.trie_hash(); (tx, TransactionInfo { hash: Some(hash), ..Default::default() }) } + #[allow(clippy::needless_update)] Self::Block { transaction, index, block_hash, block_number, base_fee } => { let hash = transaction.trie_hash(); ( @@ -79,6 +82,7 @@ impl TransactionSource { block_hash: Some(block_hash), block_number: Some(block_number), base_fee, + ..Default::default() }, ) } From adecbd7814058b308c408e75f74a516a0e21a522 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 26 Jan 2026 23:30:20 +0100 Subject: [PATCH 228/267] chore: log docker sccache stats (#21455) --- Dockerfile.depot | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile.depot b/Dockerfile.depot index a0c5d64f9ff..f04eca5740b 100644 --- a/Dockerfile.depot +++ b/Dockerfile.depot @@ -68,6 +68,8 @@ RUN --mount=type=secret,id=DEPOT_TOKEN,env=SCCACHE_WEBDAV_TOKEN \ --mount=type=cache,target=$SCCACHE_DIR,sharing=shared \ cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin $BINARY --manifest-path $MANIFEST_PATH/Cargo.toml +RUN sccache --show-stats || true + # Copy binary to a known location (ARG not resolved in COPY) # Note: Custom profiles like maxperf/profiling output to target//, not 
target/release/ RUN cp /app/target/$BUILD_PROFILE/$BINARY /app/binary || \ From 71ed68e9445d9feaa07ac736f1f8c32006910631 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 26 Jan 2026 14:49:01 -0800 Subject: [PATCH 229/267] perf(db): flatten HashedPostState before persisting (#21422) --- .../src/providers/database/provider.rs | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index d693791fa5e..ba3292c51b9 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -630,27 +630,27 @@ impl DatabaseProvider Date: Mon, 26 Jan 2026 23:24:48 +0000 Subject: [PATCH 230/267] chore: add logging for internal fcu errors (#21456) --- crates/engine/tree/src/tree/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 2a5eb68bee2..0e4daeef244 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1492,6 +1492,10 @@ where self.on_maybe_tree_event(res.event.take())?; } + if let Err(ref err) = output { + error!(target: "engine::tree", %err, ?state, "Error processing forkchoice update"); + } + self.metrics.engine.forkchoice_updated.update_response_metrics( start, &mut self.metrics.engine.new_payload.latest_finish_at, From 226ce14ca1ca976bd6595c26a5fe89fd6b9909a9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 27 Jan 2026 01:42:26 +0100 Subject: [PATCH 231/267] perf(trie): use is_zero() check to avoid copy in is_storage_empty (#21459) Co-authored-by: Amp --- crates/trie/trie/src/hashed_cursor/post_state.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 7436e466949..9e5bf66990a 
100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -79,7 +79,7 @@ impl HashedPostStateCursorValue for U256 { type NonZero = Self; fn into_option(self) -> Option { - (self != Self::ZERO).then_some(self) + (!self.is_zero()).then_some(self) } } @@ -351,7 +351,7 @@ where /// [`HashedCursor::next`]. fn is_storage_empty(&mut self) -> Result { // Storage is not empty if it has non-zero slots. - if self.post_state_cursor.has_any(|(_, value)| value.into_option().is_some()) { + if self.post_state_cursor.has_any(|(_, value)| !value.is_zero()) { return Ok(false); } From 11d9f3807788620ade985a6dcd63c656b719a6b3 Mon Sep 17 00:00:00 2001 From: YK Date: Tue, 27 Jan 2026 15:08:57 +0800 Subject: [PATCH 232/267] test(e2e): comprehensive RocksDB storage E2E tests (#21423) --- crates/e2e-test-utils/Cargo.toml | 2 +- crates/e2e-test-utils/src/transaction.rs | 12 + crates/e2e-test-utils/tests/rocksdb/main.rs | 383 +++++++++++++++++--- 3 files changed, 353 insertions(+), 44 deletions(-) diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index ad5a30b7ec6..263a2d2e6ad 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -79,4 +79,4 @@ path = "tests/rocksdb/main.rs" required-features = ["edge"] [features] -edge = ["reth-node-core/edge"] +edge = ["reth-node-core/edge", "reth-provider/rocksdb", "reth-cli-commands/edge"] diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index dd49ac76195..4cf0c94e84a 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -38,6 +38,18 @@ impl TransactionTestContext { signed.encoded_2718().into() } + /// Creates a transfer with a specific nonce and signs it, returning bytes. + /// Uses high `max_fee_per_gas` (1000 gwei) to ensure tx acceptance regardless of basefee. 
+ pub async fn transfer_tx_bytes_with_nonce( + chain_id: u64, + wallet: PrivateKeySigner, + nonce: u64, + ) -> Bytes { + let tx = tx(chain_id, 21000, None, None, nonce, Some(1000e9 as u128)); + let signed = Self::sign_tx(wallet, tx).await; + signed.encoded_2718().into() + } + /// Creates a deployment transaction and signs it, returning an envelope. pub async fn deploy_tx( chain_id: u64, diff --git a/crates/e2e-test-utils/tests/rocksdb/main.rs b/crates/e2e-test-utils/tests/rocksdb/main.rs index 90289dc2485..2a3e0f62146 100644 --- a/crates/e2e-test-utils/tests/rocksdb/main.rs +++ b/crates/e2e-test-utils/tests/rocksdb/main.rs @@ -8,12 +8,68 @@ use alloy_rpc_types_eth::{Transaction, TransactionReceipt}; use eyre::Result; use jsonrpsee::core::client::ClientT; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; -use reth_e2e_test_utils::{transaction::TransactionTestContext, E2ETestSetupBuilder}; +use reth_db::tables; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet, E2ETestSetupBuilder}; use reth_node_builder::NodeConfig; use reth_node_core::args::RocksDbArgs; use reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; -use std::sync::Arc; +use reth_provider::RocksDBProviderFactory; +use std::{sync::Arc, time::Duration}; + +const ROCKSDB_POLL_TIMEOUT: Duration = Duration::from_secs(60); +const ROCKSDB_POLL_INTERVAL: Duration = Duration::from_millis(50); + +/// Polls RPC until the given `tx_hash` is visible as pending (not yet mined). +/// Prevents race conditions where `advance_block` is called before txs are in the pool. +/// Returns the pending transaction. 
+async fn wait_for_pending_tx(client: &C, tx_hash: B256) -> Transaction { + let start = std::time::Instant::now(); + loop { + let tx: Option = client + .request("eth_getTransactionByHash", [tx_hash]) + .await + .expect("RPC request failed"); + if let Some(tx) = tx { + assert!( + tx.block_number.is_none(), + "Expected pending tx but tx_hash={tx_hash:?} is already mined in block {:?}", + tx.block_number + ); + return tx; + } + assert!( + start.elapsed() < ROCKSDB_POLL_TIMEOUT, + "Timed out after {:?} waiting for tx_hash={tx_hash:?} to appear in pending pool", + start.elapsed() + ); + tokio::time::sleep(ROCKSDB_POLL_INTERVAL).await; + } +} + +/// Polls `RocksDB` until the given `tx_hash` appears in `TransactionHashNumbers`. +/// Returns the `tx_number` on success, or panics on timeout. +async fn poll_tx_in_rocksdb(provider: &P, tx_hash: B256) -> u64 { + let start = std::time::Instant::now(); + let mut interval = ROCKSDB_POLL_INTERVAL; + loop { + // Re-acquire handle each iteration to avoid stale snapshot reads + let rocksdb = provider.rocksdb_provider(); + let tx_number: Option = + rocksdb.get::(tx_hash).expect("RocksDB get failed"); + if let Some(n) = tx_number { + return n; + } + assert!( + start.elapsed() < ROCKSDB_POLL_TIMEOUT, + "Timed out after {:?} waiting for tx_hash={tx_hash:?} in RocksDB", + start.elapsed() + ); + tokio::time::sleep(interval).await; + // Simple backoff: 50ms -> 100ms -> 200ms (capped) + interval = std::cmp::min(interval * 2, Duration::from_millis(200)); + } +} /// Returns the test chain spec for `RocksDB` tests. fn test_chain_spec() -> Arc { @@ -41,9 +97,15 @@ fn test_attributes_generator(timestamp: u64) -> EthPayloadBuilderAttributes { EthPayloadBuilderAttributes::new(B256::ZERO, attributes) } -/// Enables `RocksDB` for all supported tables. +/// Enables `RocksDB` for `TransactionHashNumbers` table. 
+/// +/// Note: Static file changesets are disabled because `persistence_threshold(0)` causes +/// a race where the static file writer expects sequential block numbers but receives +/// them out of order, resulting in `UnexpectedStaticFileBlockNumber` errors. fn with_rocksdb_enabled(mut config: NodeConfig) -> NodeConfig { - config.rocksdb = RocksDbArgs { all: true, ..Default::default() }; + config.rocksdb = RocksDbArgs { tx_hash: true, ..Default::default() }; + config.static_files.storage_changesets = false; + config.static_files.account_changesets = false; config } @@ -62,13 +124,11 @@ async fn test_rocksdb_node_startup() -> Result<()> { assert_eq!(nodes.len(), 1); - // Verify RocksDB directory exists - let rocksdb_path = nodes[0].inner.data_dir.rocksdb(); - assert!(rocksdb_path.exists(), "RocksDB directory should exist at {rocksdb_path:?}"); - assert!( - std::fs::read_dir(&rocksdb_path).map(|mut d| d.next().is_some()).unwrap_or(false), - "RocksDB directory should be non-empty" - ); + // Verify RocksDB provider is functional (can query without error) + let rocksdb = nodes[0].inner.provider.rocksdb_provider(); + let missing_hash = B256::from([0xab; 32]); + let result: Option = rocksdb.get::(missing_hash)?; + assert!(result.is_none(), "Missing hash should return None"); let genesis_hash = nodes[0].block_hash(0); assert_ne!(genesis_hash, B256::ZERO); @@ -82,6 +142,7 @@ async fn test_rocksdb_block_mining() -> Result<()> { reth_tracing::init_test_tracing(); let chain_spec = test_chain_spec(); + let chain_id = chain_spec.chain().id(); let (mut nodes, _tasks, _wallet) = E2ETestSetupBuilder::::new(1, chain_spec, test_attributes_generator) @@ -94,12 +155,30 @@ async fn test_rocksdb_block_mining() -> Result<()> { let genesis_hash = nodes[0].block_hash(0); assert_ne!(genesis_hash, B256::ZERO); - // Mine 3 blocks - for i in 1..=3 { + // Mine 3 blocks with transactions + let wallets = wallet::Wallet::new(1).with_chain_id(chain_id).wallet_gen(); + let signer = 
wallets[0].clone(); + let client = nodes[0].rpc_client().expect("RPC client should be available"); + + for i in 1..=3u64 { + let raw_tx = + TransactionTestContext::transfer_tx_bytes_with_nonce(chain_id, signer.clone(), i - 1) + .await; + let tx_hash = nodes[0].rpc.inject_tx(raw_tx).await?; + + // Wait for tx to enter pending pool before mining + wait_for_pending_tx(&client, tx_hash).await; + let payload = nodes[0].advance_block().await?; let block = payload.block(); assert_eq!(block.number(), i); assert_ne!(block.hash(), B256::ZERO); + + // Verify tx was actually included in the block + let receipt: Option = + client.request("eth_getTransactionReceipt", [tx_hash]).await?; + let receipt = receipt.expect("Receipt should exist after mining"); + assert_eq!(receipt.block_number, Some(i), "Tx should be in block {i}"); } // Verify all blocks are stored @@ -119,48 +198,54 @@ async fn test_rocksdb_transaction_queries() -> Result<()> { let chain_spec = test_chain_spec(); let chain_id = chain_spec.chain().id(); - let (mut nodes, _tasks, wallet) = - E2ETestSetupBuilder::::new(1, chain_spec, test_attributes_generator) - .with_node_config_modifier(with_rocksdb_enabled) - .build() - .await?; + let (mut nodes, _tasks, _) = E2ETestSetupBuilder::::new( + 1, + chain_spec.clone(), + test_attributes_generator, + ) + .with_node_config_modifier(with_rocksdb_enabled) + .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) + .build() + .await?; assert_eq!(nodes.len(), 1); - let mut tx_hashes = Vec::new(); + // Inject and mine a transaction + let wallets = wallet::Wallet::new(1).with_chain_id(chain_id).wallet_gen(); + let signer = wallets[0].clone(); + let client = nodes[0].rpc_client().expect("RPC client should be available"); - // Inject and mine 3 transactions (new wallet per tx to avoid nonce tracking) - for i in 0..3 { - let wallets = wallet.wallet_gen(); - let signer = wallets[0].clone(); + let raw_tx = TransactionTestContext::transfer_tx_bytes(chain_id, 
signer).await; + let tx_hash = nodes[0].rpc.inject_tx(raw_tx).await?; - let raw_tx = TransactionTestContext::transfer_tx_bytes(chain_id, signer).await; - let tx_hash = nodes[0].rpc.inject_tx(raw_tx).await?; - tx_hashes.push(tx_hash); - - let payload = nodes[0].advance_block().await?; - assert_eq!(payload.block().number(), i + 1); - } + // Wait for tx to enter pending pool before mining + wait_for_pending_tx(&client, tx_hash).await; - let client = nodes[0].rpc_client().expect("RPC client should be available"); + let payload = nodes[0].advance_block().await?; + assert_eq!(payload.block().number(), 1); // Query each transaction by hash - for (i, tx_hash) in tx_hashes.iter().enumerate() { - let expected_block_number = (i + 1) as u64; + let tx: Option = client.request("eth_getTransactionByHash", [tx_hash]).await?; + let tx = tx.expect("Transaction should be found"); + assert_eq!(tx.block_number, Some(1)); - let tx: Option = client.request("eth_getTransactionByHash", [tx_hash]).await?; - let tx = tx.expect("Transaction should be found"); - assert_eq!(tx.block_number, Some(expected_block_number)); + let receipt: Option = + client.request("eth_getTransactionReceipt", [tx_hash]).await?; + let receipt = receipt.expect("Receipt should be found"); + assert_eq!(receipt.block_number, Some(1)); + assert!(receipt.status()); - let receipt: Option = - client.request("eth_getTransactionReceipt", [tx_hash]).await?; - let receipt = receipt.expect("Receipt should be found"); - assert_eq!(receipt.block_number, Some(expected_block_number)); - assert!(receipt.status()); - } + // Direct RocksDB assertion - poll with timeout since persistence is async + let tx_number = poll_tx_in_rocksdb(&nodes[0].inner.provider, tx_hash).await; + assert_eq!(tx_number, 0, "First tx should have TxNumber 0"); - // Negative test: querying a non-existent tx hash returns None + // Verify missing hash returns None let missing_hash = B256::from([0xde; 32]); + let rocksdb = 
nodes[0].inner.provider.rocksdb_provider(); + let missing_tx_number: Option = + rocksdb.get::(missing_hash)?; + assert!(missing_tx_number.is_none()); + let missing_tx: Option = client.request("eth_getTransactionByHash", [missing_hash]).await?; assert!(missing_tx.is_none(), "expected no transaction for missing hash"); @@ -171,3 +256,215 @@ async fn test_rocksdb_transaction_queries() -> Result<()> { Ok(()) } + +/// Multiple transactions in the same block are correctly persisted to `RocksDB`. +#[tokio::test] +async fn test_rocksdb_multi_tx_same_block() -> Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = test_chain_spec(); + let chain_id = chain_spec.chain().id(); + + let (mut nodes, _tasks, _) = E2ETestSetupBuilder::::new( + 1, + chain_spec.clone(), + test_attributes_generator, + ) + .with_node_config_modifier(with_rocksdb_enabled) + .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) + .build() + .await?; + + // Create 3 txs from the same wallet with sequential nonces + let wallets = wallet::Wallet::new(1).with_chain_id(chain_id).wallet_gen(); + let signer = wallets[0].clone(); + let client = nodes[0].rpc_client().expect("RPC client"); + + let mut tx_hashes = Vec::new(); + for nonce in 0..3 { + let raw_tx = + TransactionTestContext::transfer_tx_bytes_with_nonce(chain_id, signer.clone(), nonce) + .await; + let tx_hash = nodes[0].rpc.inject_tx(raw_tx).await?; + tx_hashes.push(tx_hash); + } + + // Wait for all txs to appear in pending pool before mining + for tx_hash in &tx_hashes { + wait_for_pending_tx(&client, *tx_hash).await; + } + + // Mine one block containing all 3 txs + let payload = nodes[0].advance_block().await?; + assert_eq!(payload.block().number(), 1); + + // Verify block contains all 3 txs + let block: Option = + client.request("eth_getBlockByNumber", ("0x1", true)).await?; + let block = block.expect("Block 1 should exist"); + assert_eq!(block.transactions.len(), 3, "Block should contain 3 txs"); + + // Verify 
each tx via RPC + for tx_hash in &tx_hashes { + let tx: Option = client.request("eth_getTransactionByHash", [tx_hash]).await?; + let tx = tx.expect("Transaction should be found"); + assert_eq!(tx.block_number, Some(1), "All txs should be in block 1"); + } + + // Poll RocksDB for all tx hashes and collect tx_numbers + let mut tx_numbers = Vec::new(); + for tx_hash in &tx_hashes { + let n = poll_tx_in_rocksdb(&nodes[0].inner.provider, *tx_hash).await; + tx_numbers.push(n); + } + + // Verify tx_numbers form the set {0, 1, 2} + tx_numbers.sort(); + assert_eq!(tx_numbers, vec![0, 1, 2], "TxNumbers should be 0, 1, 2"); + + Ok(()) +} + +/// Transactions across multiple blocks have globally continuous `tx_numbers`. +#[tokio::test] +async fn test_rocksdb_txs_across_blocks() -> Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = test_chain_spec(); + let chain_id = chain_spec.chain().id(); + + let (mut nodes, _tasks, _) = E2ETestSetupBuilder::::new( + 1, + chain_spec.clone(), + test_attributes_generator, + ) + .with_node_config_modifier(with_rocksdb_enabled) + .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) + .build() + .await?; + + let wallets = wallet::Wallet::new(1).with_chain_id(chain_id).wallet_gen(); + let signer = wallets[0].clone(); + let client = nodes[0].rpc_client().expect("RPC client"); + + // Block 1: 2 transactions + let tx_hash_0 = nodes[0] + .rpc + .inject_tx( + TransactionTestContext::transfer_tx_bytes_with_nonce(chain_id, signer.clone(), 0).await, + ) + .await?; + let tx_hash_1 = nodes[0] + .rpc + .inject_tx( + TransactionTestContext::transfer_tx_bytes_with_nonce(chain_id, signer.clone(), 1).await, + ) + .await?; + + // Wait for both txs to appear in pending pool + wait_for_pending_tx(&client, tx_hash_0).await; + wait_for_pending_tx(&client, tx_hash_1).await; + + let payload1 = nodes[0].advance_block().await?; + assert_eq!(payload1.block().number(), 1); + + // Block 2: 1 transaction + let tx_hash_2 = nodes[0] + 
.rpc + .inject_tx( + TransactionTestContext::transfer_tx_bytes_with_nonce(chain_id, signer.clone(), 2).await, + ) + .await?; + + wait_for_pending_tx(&client, tx_hash_2).await; + + let payload2 = nodes[0].advance_block().await?; + assert_eq!(payload2.block().number(), 2); + + // Verify block contents via RPC + let tx0: Option = client.request("eth_getTransactionByHash", [tx_hash_0]).await?; + let tx1: Option = client.request("eth_getTransactionByHash", [tx_hash_1]).await?; + let tx2: Option = client.request("eth_getTransactionByHash", [tx_hash_2]).await?; + + assert_eq!(tx0.expect("tx0").block_number, Some(1)); + assert_eq!(tx1.expect("tx1").block_number, Some(1)); + assert_eq!(tx2.expect("tx2").block_number, Some(2)); + + // Poll RocksDB and verify global tx_number continuity + let all_tx_hashes = [tx_hash_0, tx_hash_1, tx_hash_2]; + let mut tx_numbers = Vec::new(); + for tx_hash in &all_tx_hashes { + let n = poll_tx_in_rocksdb(&nodes[0].inner.provider, *tx_hash).await; + tx_numbers.push(n); + } + + // Verify they form a continuous sequence {0, 1, 2} + tx_numbers.sort(); + assert_eq!(tx_numbers, vec![0, 1, 2], "TxNumbers should be globally continuous: 0, 1, 2"); + + // Re-query block 1 txs after block 2 is mined (regression guard) + let tx0_again: Option = + client.request("eth_getTransactionByHash", [tx_hash_0]).await?; + assert!(tx0_again.is_some(), "Block 1 tx should still be queryable after block 2"); + + Ok(()) +} + +/// Pending transactions should NOT appear in `RocksDB` until mined. 
+#[tokio::test] +async fn test_rocksdb_pending_tx_not_in_storage() -> Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = test_chain_spec(); + let chain_id = chain_spec.chain().id(); + + let (mut nodes, _tasks, _) = E2ETestSetupBuilder::::new( + 1, + chain_spec.clone(), + test_attributes_generator, + ) + .with_node_config_modifier(with_rocksdb_enabled) + .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) + .build() + .await?; + + let wallets = wallet::Wallet::new(1).with_chain_id(chain_id).wallet_gen(); + let signer = wallets[0].clone(); + + // Inject tx but do NOT mine + let raw_tx = TransactionTestContext::transfer_tx_bytes(chain_id, signer).await; + let tx_hash = nodes[0].rpc.inject_tx(raw_tx).await?; + + // Verify tx is in pending pool via RPC + let client = nodes[0].rpc_client().expect("RPC client"); + wait_for_pending_tx(&client, tx_hash).await; + + let pending_tx: Option = + client.request("eth_getTransactionByHash", [tx_hash]).await?; + assert!(pending_tx.is_some(), "Pending tx should be visible via RPC"); + assert!(pending_tx.unwrap().block_number.is_none(), "Pending tx should have no block_number"); + + // Assert tx is NOT in RocksDB before mining (single check - tx is confirmed pending) + let rocksdb = nodes[0].inner.provider.rocksdb_provider(); + let tx_number: Option = rocksdb.get::(tx_hash)?; + assert!( + tx_number.is_none(), + "Pending tx should NOT be in RocksDB before mining, but found tx_number={:?}", + tx_number + ); + + // Now mine the block + let payload = nodes[0].advance_block().await?; + assert_eq!(payload.block().number(), 1); + + // Poll until tx appears in RocksDB + let tx_number = poll_tx_in_rocksdb(&nodes[0].inner.provider, tx_hash).await; + assert_eq!(tx_number, 0, "First tx should have tx_number 0"); + + // Verify tx is now mined via RPC + let mined_tx: Option = + client.request("eth_getTransactionByHash", [tx_hash]).await?; + assert_eq!(mined_tx.expect("mined tx").block_number, Some(1)); + + 
Ok(()) +} From 1e734936d8956e2e36d356f9d6fe8da23ee07c80 Mon Sep 17 00:00:00 2001 From: YK Date: Tue, 27 Jan 2026 18:34:44 +0800 Subject: [PATCH 233/267] fix(provider): skip storage changeset writes when routed to static files (#21468) --- .github/workflows/e2e.yml | 21 +++++ crates/e2e-test-utils/tests/rocksdb/main.rs | 14 ++- .../src/providers/database/provider.rs | 86 ++++++++++--------- .../src/providers/static_file/manager.rs | 42 ++++----- .../storage/storage-api/src/state_writer.rs | 8 +- 5 files changed, 100 insertions(+), 71 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index f31fefed35f..0a60f59367f 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -44,3 +44,24 @@ jobs: --exclude 'op-reth' \ --exclude 'reth' \ -E 'binary(e2e_testsuite)' + + rocksdb: + name: e2e-rocksdb + runs-on: depot-ubuntu-latest-4 + env: + RUST_BACKTRACE: 1 + timeout-minutes: 60 + steps: + - uses: actions/checkout@v6 + - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 + - uses: taiki-e/install-action@nextest + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Run RocksDB e2e tests + run: | + cargo nextest run \ + --locked --features "edge" \ + -p reth-e2e-test-utils \ + -E 'binary(rocksdb)' diff --git a/crates/e2e-test-utils/tests/rocksdb/main.rs b/crates/e2e-test-utils/tests/rocksdb/main.rs index 2a3e0f62146..178ed5a25c0 100644 --- a/crates/e2e-test-utils/tests/rocksdb/main.rs +++ b/crates/e2e-test-utils/tests/rocksdb/main.rs @@ -98,14 +98,12 @@ fn test_attributes_generator(timestamp: u64) -> EthPayloadBuilderAttributes { } /// Enables `RocksDB` for `TransactionHashNumbers` table. -/// -/// Note: Static file changesets are disabled because `persistence_threshold(0)` causes -/// a race where the static file writer expects sequential block numbers but receives -/// them out of order, resulting in `UnexpectedStaticFileBlockNumber` errors. 
-fn with_rocksdb_enabled(mut config: NodeConfig) -> NodeConfig { - config.rocksdb = RocksDbArgs { tx_hash: true, ..Default::default() }; - config.static_files.storage_changesets = false; - config.static_files.account_changesets = false; +/// Explicitly enables static file changesets to test the fix for double-write bug. +const fn with_rocksdb_enabled(mut config: NodeConfig) -> NodeConfig { + config.rocksdb = + RocksDbArgs { all: true, tx_hash: true, storages_history: true, account_history: true }; + config.static_files.storage_changesets = true; + config.static_files.account_changesets = true; config } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ba3292c51b9..e39336c6a8f 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -627,6 +627,7 @@ impl DatabaseProvider StateWriter config: StateWriteConfig, ) -> ProviderResult<()> { // Write storage changes - tracing::trace!("Writing storage changes"); - let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; - for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() { - let block_number = first_block + block_index as BlockNumber; - - tracing::trace!(block_number, "Writing block change"); - // sort changes by address. - storage_changes.par_sort_unstable_by_key(|a| a.address); - let total_changes = - storage_changes.iter().map(|change| change.storage_revert.len()).sum(); - let mut changeset = Vec::with_capacity(total_changes); - for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { - let mut storage = storage_revert - .into_iter() - .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) - .collect::>(); - // sort storage slots by key. 
- storage.par_sort_unstable_by_key(|a| a.0); - - // If we are writing the primary storage wipe transition, the pre-existing plain - // storage state has to be taken from the database and written to storage history. - // See [StorageWipe::Primary] for more details. - // - // TODO(mediocregopher): This could be rewritten in a way which doesn't require - // collecting wiped entries into a Vec like this, see - // `write_storage_trie_changesets`. - let mut wiped_storage = Vec::new(); - if wiped { - tracing::trace!(?address, "Wiping storage"); - if let Some((_, entry)) = storages_cursor.seek_exact(address)? { - wiped_storage.push((entry.key, entry.value)); - while let Some(entry) = storages_cursor.next_dup_val()? { - wiped_storage.push((entry.key, entry.value)) + if config.write_storage_changesets { + tracing::trace!("Writing storage changes"); + let mut storages_cursor = + self.tx_ref().cursor_dup_write::()?; + for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() { + let block_number = first_block + block_index as BlockNumber; + + tracing::trace!(block_number, "Writing block change"); + // sort changes by address. + storage_changes.par_sort_unstable_by_key(|a| a.address); + let total_changes = + storage_changes.iter().map(|change| change.storage_revert.len()).sum(); + let mut changeset = Vec::with_capacity(total_changes); + for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { + let mut storage = storage_revert + .into_iter() + .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) + .collect::>(); + // sort storage slots by key. + storage.par_sort_unstable_by_key(|a| a.0); + + // If we are writing the primary storage wipe transition, the pre-existing plain + // storage state has to be taken from the database and written to storage + // history. See [StorageWipe::Primary] for more details. 
+ // + // TODO(mediocregopher): This could be rewritten in a way which doesn't require + // collecting wiped entries into a Vec like this, see + // `write_storage_trie_changesets`. + let mut wiped_storage = Vec::new(); + if wiped { + tracing::trace!(?address, "Wiping storage"); + if let Some((_, entry)) = storages_cursor.seek_exact(address)? { + wiped_storage.push((entry.key, entry.value)); + while let Some(entry) = storages_cursor.next_dup_val()? { + wiped_storage.push((entry.key, entry.value)) + } } } - } - tracing::trace!(?address, ?storage, "Writing storage reverts"); - for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { - changeset.push(StorageBeforeTx { address, key, value }); + tracing::trace!(?address, ?storage, "Writing storage reverts"); + for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { + changeset.push(StorageBeforeTx { address, key, value }); + } } - } - let mut storage_changesets_writer = - EitherWriter::new_storage_changesets(self, block_number)?; - storage_changesets_writer.append_storage_changeset(block_number, changeset)?; + let mut storage_changesets_writer = + EitherWriter::new_storage_changesets(self, block_number)?; + storage_changesets_writer.append_storage_changeset(block_number, changeset)?; + } } if !config.write_account_changesets { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index dff1b6d303d..79b7b2a3d92 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -615,13 +615,13 @@ impl StaticFileProvider { let block_number = block.recovered_block().number(); let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts(); - for account_block_reverts in reverts.accounts { - let changeset = account_block_reverts - .into_iter() - .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) - 
.collect::>(); - w.append_account_changeset(changeset, block_number)?; - } + let changeset: Vec<_> = reverts + .accounts + .into_iter() + .flatten() + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) + .collect(); + w.append_account_changeset(changeset, block_number)?; } Ok(()) } @@ -636,21 +636,21 @@ impl StaticFileProvider { let block_number = block.recovered_block().number(); let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts(); - for storage_block_reverts in reverts.storage { - let changeset = storage_block_reverts - .into_iter() - .flat_map(|revert| { - revert.storage_revert.into_iter().map(move |(key, revert_to_slot)| { - StorageBeforeTx { - address: revert.address, - key: B256::new(key.to_be_bytes()), - value: revert_to_slot.to_previous_value(), - } - }) + let changeset: Vec<_> = reverts + .storage + .into_iter() + .flatten() + .flat_map(|revert| { + revert.storage_revert.into_iter().map(move |(key, revert_to_slot)| { + StorageBeforeTx { + address: revert.address, + key: B256::new(key.to_be_bytes()), + value: revert_to_slot.to_previous_value(), + } }) - .collect::>(); - w.append_storage_changeset(changeset, block_number)?; - } + }) + .collect(); + w.append_storage_changeset(changeset, block_number)?; } Ok(()) } diff --git a/crates/storage/storage-api/src/state_writer.rs b/crates/storage/storage-api/src/state_writer.rs index f2c193559b9..36fbf5f94c1 100644 --- a/crates/storage/storage-api/src/state_writer.rs +++ b/crates/storage/storage-api/src/state_writer.rs @@ -136,10 +136,16 @@ pub struct StateWriteConfig { pub write_receipts: bool, /// Whether to write account changesets. pub write_account_changesets: bool, + /// Whether to write storage changesets. 
+ pub write_storage_changesets: bool, } impl Default for StateWriteConfig { fn default() -> Self { - Self { write_receipts: true, write_account_changesets: true } + Self { + write_receipts: true, + write_account_changesets: true, + write_storage_changesets: true, + } } } From ed40ce8c4cc1b99692409a29c2bd0b0583d84743 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 27 Jan 2026 11:44:11 +0000 Subject: [PATCH 234/267] chore: simplify account_changesets_range (#21457) --- .../src/providers/database/provider.rs | 35 +++++-------------- 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e39336c6a8f..82c054f6898 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1359,7 +1359,7 @@ impl StorageChangeSetReader for DatabaseProvider self.tx .cursor_dup_read::()? .walk_range(storage_range)? - .map(|result| -> ProviderResult<_> { Ok(result?) }) + .map(|r| r.map_err(Into::into)) .collect() } } @@ -1392,7 +1392,7 @@ impl StorageChangeSetReader for DatabaseProvider self.tx .cursor_dup_read::()? .walk_range(BlockNumberAddress::range(range))? - .map(|result| -> ProviderResult<_> { Ok(result?) 
}) + .map(|r| r.map_err(Into::into)) .collect() } } @@ -1449,32 +1449,15 @@ impl ChangeSetReader for DatabaseProvider { &self, range: impl core::ops::RangeBounds, ) -> ProviderResult> { - let range = to_range(range); - let mut changesets = Vec::new(); - if self.cached_storage_settings().account_changesets_in_static_files && - let Some(highest) = self - .static_file_provider - .get_highest_static_file_block(StaticFileSegment::AccountChangeSets) - { - let static_end = range.end.min(highest + 1); - if range.start < static_end { - for block in range.start..static_end { - let block_changesets = self.account_block_changeset(block)?; - for changeset in block_changesets { - changesets.push((block, changeset)); - } - } - } + if self.cached_storage_settings().account_changesets_in_static_files { + self.static_file_provider.account_changesets_range(range) } else { - // Fetch from database for blocks not in static files - let mut cursor = self.tx.cursor_read::()?; - for entry in cursor.walk_range(range)? { - let (block_num, account_before) = entry?; - changesets.push((block_num, account_before)); - } + self.tx + .cursor_read::()? + .walk_range(to_range(range))? 
+ .map(|r| r.map_err(Into::into)) + .collect() } - - Ok(changesets) } fn account_changeset_count(&self) -> ProviderResult { From c8245594bcedeba153a52d854cc7e305308206c3 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 27 Jan 2026 11:59:06 +0000 Subject: [PATCH 235/267] fix(db): write genesis history to correct storage backend (#21471) Co-authored-by: Georgios Konstantopoulos Co-authored-by: Amp --- crates/storage/db-common/src/init.rs | 82 +++++++++++++++++++++------- 1 file changed, 63 insertions(+), 19 deletions(-) diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index e5cc7da7558..b4dfea23bb6 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -6,7 +6,12 @@ use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; use reth_chainspec::EthChainSpec; use reth_codecs::Compact; use reth_config::config::EtlConfig; -use reth_db_api::{tables, transaction::DbTxMut, DatabaseError}; +use reth_db_api::{ + models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + tables, + transaction::DbTxMut, + BlockNumberList, DatabaseError, +}; use reth_etl::Collector; use reth_execution_errors::StateRootError; use reth_primitives_traits::{ @@ -14,11 +19,11 @@ use reth_primitives_traits::{ }; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, - BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, - HashingWriter, HeaderProvider, HistoryWriter, MetadataProvider, MetadataWriter, - OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, - StateWriteConfig, StateWriter, StaticFileProviderFactory, StorageSettings, - StorageSettingsCache, TrieWriter, + BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, EitherWriter, + ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, 
MetadataProvider, + MetadataWriter, NodePrimitivesProvider, OriginalValuesKnown, ProviderError, RevertsInit, + RocksDBProviderFactory, StageCheckpointReader, StageCheckpointWriter, StateWriteConfig, + StateWriter, StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -103,6 +108,9 @@ where + TrieWriter + MetadataWriter + ChainSpecProvider + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { @@ -138,6 +146,9 @@ where + TrieWriter + MetadataWriter + ChainSpecProvider + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { @@ -386,37 +397,64 @@ where } /// Inserts history indices for genesis accounts and storage. +/// +/// Writes to either MDBX or `RocksDB` based on storage settings configuration, +/// using [`EitherWriter`] to abstract over the storage backend. pub fn insert_genesis_history<'a, 'b, Provider>( provider: &Provider, alloc: impl Iterator + Clone, ) -> ProviderResult<()> where - Provider: DBProvider + HistoryWriter + ChainSpecProvider, + Provider: DBProvider + + HistoryWriter + + ChainSpecProvider + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider, { let genesis_block_number = provider.chain_spec().genesis_header().number(); insert_history(provider, alloc, genesis_block_number) } /// Inserts history indices for genesis accounts and storage. +/// +/// Writes to either MDBX or `RocksDB` based on storage settings configuration, +/// using [`EitherWriter`] to abstract over the storage backend. pub fn insert_history<'a, 'b, Provider>( provider: &Provider, alloc: impl Iterator + Clone, block: u64, ) -> ProviderResult<()> where - Provider: DBProvider + HistoryWriter, + Provider: DBProvider + + HistoryWriter + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider, { - let account_transitions = alloc.clone().map(|(addr, _)| (*addr, [block])); - provider.insert_account_history_index(account_transitions)?; - - trace!(target: "reth::cli", "Inserted account history"); - - let storage_transitions = alloc - .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage))) - .flat_map(|(addr, storage)| storage.keys().map(|key| ((*addr, *key), [block]))); - provider.insert_storage_history_index(storage_transitions)?; - - trace!(target: "reth::cli", "Inserted storage history"); + provider.with_rocksdb_batch(|batch| { + let mut writer = EitherWriter::new_accounts_history(provider, batch)?; + let list = BlockNumberList::new([block]).expect("single block always fits"); + for (addr, _) in alloc.clone() { 
+ writer.upsert_account_history(ShardedKey::last(*addr), &list)?; + } + trace!(target: "reth::cli", "Inserted account history"); + Ok(((), writer.into_raw_rocksdb_batch())) + })?; + + provider.with_rocksdb_batch(|batch| { + let mut writer = EitherWriter::new_storages_history(provider, batch)?; + let list = BlockNumberList::new([block]).expect("single block always fits"); + for (addr, account) in alloc { + if let Some(storage) = &account.storage { + for key in storage.keys() { + writer.upsert_storage_history(StorageShardedKey::last(*addr, *key), &list)?; + } + } + } + trace!(target: "reth::cli", "Inserted storage history"); + Ok(((), writer.into_raw_rocksdb_batch())) + })?; Ok(()) } @@ -492,6 +530,9 @@ where + HashingWriter + TrieWriter + StateWriter + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider + AsRef, { if etl_config.file_size == 0 { @@ -628,6 +669,9 @@ where + HashingWriter + HistoryWriter + StateWriter + + StorageSettingsCache + + RocksDBProviderFactory + + NodePrimitivesProvider + AsRef, { let accounts_len = collector.len(); From e4e05e9ef99836577b5f0afbe9e5ad379889e959 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 27 Jan 2026 05:13:25 -0800 Subject: [PATCH 236/267] refactor: align RocksDbArgs defaults with StorageSettings::base() (#21472) Co-authored-by: Amp Co-authored-by: yongkangc --- crates/e2e-test-utils/tests/rocksdb/main.rs | 35 ++++++----- crates/node/core/src/args/rocksdb.rs | 60 ++++++++++++++----- docs/vocs/docs/pages/cli/op-reth/db.mdx | 4 +- .../vocs/docs/pages/cli/op-reth/import-op.mdx | 4 +- .../pages/cli/op-reth/import-receipts-op.mdx | 4 +- .../docs/pages/cli/op-reth/init-state.mdx | 4 +- docs/vocs/docs/pages/cli/op-reth/init.mdx | 4 +- docs/vocs/docs/pages/cli/op-reth/node.mdx | 4 +- docs/vocs/docs/pages/cli/op-reth/prune.mdx | 4 +- .../docs/pages/cli/op-reth/re-execute.mdx | 4 +- .../docs/pages/cli/op-reth/stage/drop.mdx | 4 +- .../docs/pages/cli/op-reth/stage/dump.mdx | 4 +- 
.../vocs/docs/pages/cli/op-reth/stage/run.mdx | 4 +- .../docs/pages/cli/op-reth/stage/unwind.mdx | 4 +- docs/vocs/docs/pages/cli/reth/db.mdx | 4 +- docs/vocs/docs/pages/cli/reth/download.mdx | 4 +- docs/vocs/docs/pages/cli/reth/export-era.mdx | 4 +- docs/vocs/docs/pages/cli/reth/import-era.mdx | 4 +- docs/vocs/docs/pages/cli/reth/import.mdx | 4 +- docs/vocs/docs/pages/cli/reth/init-state.mdx | 4 +- docs/vocs/docs/pages/cli/reth/init.mdx | 4 +- docs/vocs/docs/pages/cli/reth/node.mdx | 4 +- docs/vocs/docs/pages/cli/reth/prune.mdx | 4 +- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 4 +- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 4 +- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 4 +- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 4 +- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 4 +- 28 files changed, 117 insertions(+), 82 deletions(-) diff --git a/crates/e2e-test-utils/tests/rocksdb/main.rs b/crates/e2e-test-utils/tests/rocksdb/main.rs index 178ed5a25c0..bca8a6f2e2d 100644 --- a/crates/e2e-test-utils/tests/rocksdb/main.rs +++ b/crates/e2e-test-utils/tests/rocksdb/main.rs @@ -10,11 +10,10 @@ use jsonrpsee::core::client::ClientT; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; use reth_db::tables; use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet, E2ETestSetupBuilder}; -use reth_node_builder::NodeConfig; use reth_node_core::args::RocksDbArgs; use reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_provider::RocksDBProviderFactory; +use reth_provider::{RocksDBProviderFactory, StorageSettings}; use std::{sync::Arc, time::Duration}; const ROCKSDB_POLL_TIMEOUT: Duration = Duration::from_secs(60); @@ -97,14 +96,24 @@ fn test_attributes_generator(timestamp: u64) -> EthPayloadBuilderAttributes { EthPayloadBuilderAttributes::new(B256::ZERO, attributes) } -/// Enables `RocksDB` for `TransactionHashNumbers` table. 
-/// Explicitly enables static file changesets to test the fix for double-write bug. -const fn with_rocksdb_enabled(mut config: NodeConfig) -> NodeConfig { - config.rocksdb = - RocksDbArgs { all: true, tx_hash: true, storages_history: true, account_history: true }; - config.static_files.storage_changesets = true; - config.static_files.account_changesets = true; - config +/// Verifies that `RocksDB` CLI defaults match `StorageSettings::base()`. +#[test] +fn test_rocksdb_defaults_match_storage_settings() { + let args = RocksDbArgs::default(); + let settings = StorageSettings::base(); + + assert_eq!( + args.tx_hash, settings.transaction_hash_numbers_in_rocksdb, + "tx_hash default should match StorageSettings::base()" + ); + assert_eq!( + args.storages_history, settings.storages_history_in_rocksdb, + "storages_history default should match StorageSettings::base()" + ); + assert_eq!( + args.account_history, settings.account_history_in_rocksdb, + "account_history default should match StorageSettings::base()" + ); } /// Smoke test: node boots with `RocksDB` routing enabled. 
@@ -116,7 +125,6 @@ async fn test_rocksdb_node_startup() -> Result<()> { let (nodes, _tasks, _wallet) = E2ETestSetupBuilder::::new(1, chain_spec, test_attributes_generator) - .with_node_config_modifier(with_rocksdb_enabled) .build() .await?; @@ -144,7 +152,6 @@ async fn test_rocksdb_block_mining() -> Result<()> { let (mut nodes, _tasks, _wallet) = E2ETestSetupBuilder::::new(1, chain_spec, test_attributes_generator) - .with_node_config_modifier(with_rocksdb_enabled) .build() .await?; @@ -201,7 +208,6 @@ async fn test_rocksdb_transaction_queries() -> Result<()> { chain_spec.clone(), test_attributes_generator, ) - .with_node_config_modifier(with_rocksdb_enabled) .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) .build() .await?; @@ -268,7 +274,6 @@ async fn test_rocksdb_multi_tx_same_block() -> Result<()> { chain_spec.clone(), test_attributes_generator, ) - .with_node_config_modifier(with_rocksdb_enabled) .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) .build() .await?; @@ -336,7 +341,6 @@ async fn test_rocksdb_txs_across_blocks() -> Result<()> { chain_spec.clone(), test_attributes_generator, ) - .with_node_config_modifier(with_rocksdb_enabled) .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) .build() .await?; @@ -421,7 +425,6 @@ async fn test_rocksdb_pending_tx_not_in_storage() -> Result<()> { chain_spec.clone(), test_attributes_generator, ) - .with_node_config_modifier(with_rocksdb_enabled) .with_tree_config_modifier(|config| config.with_persistence_threshold(0)) .build() .await?; diff --git a/crates/node/core/src/args/rocksdb.rs b/crates/node/core/src/args/rocksdb.rs index ad3b5dc8d31..e7931ef0f41 100644 --- a/crates/node/core/src/args/rocksdb.rs +++ b/crates/node/core/src/args/rocksdb.rs @@ -1,13 +1,27 @@ //! clap [Args](clap::Args) for `RocksDB` table routing configuration use clap::{ArgAction, Args}; +use reth_storage_api::StorageSettings; -/// Default value for `RocksDB` routing flags. 
+/// Default value for `tx_hash` routing flag. /// -/// When the `edge` feature is enabled, defaults to `true` to enable edge storage features. -/// Otherwise defaults to `false` for legacy behavior. -const fn default_rocksdb_flag() -> bool { - cfg!(feature = "edge") +/// Derived from [`StorageSettings::base()`] to ensure CLI defaults match storage defaults. +const fn default_tx_hash_in_rocksdb() -> bool { + StorageSettings::base().transaction_hash_numbers_in_rocksdb +} + +/// Default value for `storages_history` routing flag. +/// +/// Derived from [`StorageSettings::base()`] to ensure CLI defaults match storage defaults. +const fn default_storages_history_in_rocksdb() -> bool { + StorageSettings::base().storages_history_in_rocksdb +} + +/// Default value for `account_history` routing flag. +/// +/// Derived from [`StorageSettings::base()`] to ensure CLI defaults match storage defaults. +const fn default_account_history_in_rocksdb() -> bool { + StorageSettings::base().account_history_in_rocksdb } /// Parameters for `RocksDB` table routing configuration. @@ -28,21 +42,21 @@ pub struct RocksDbArgs { /// /// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. /// Defaults to `true` when the `edge` feature is enabled, `false` otherwise. - #[arg(long = "rocksdb.tx-hash", default_value_t = default_rocksdb_flag(), action = ArgAction::Set)] + #[arg(long = "rocksdb.tx-hash", default_value_t = default_tx_hash_in_rocksdb(), action = ArgAction::Set)] pub tx_hash: bool, /// Route storages history tables to `RocksDB` instead of MDBX. /// /// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. - /// Defaults to `true` when the `edge` feature is enabled, `false` otherwise. - #[arg(long = "rocksdb.storages-history", default_value_t = default_rocksdb_flag(), action = ArgAction::Set)] + /// Defaults to `false`. 
+ #[arg(long = "rocksdb.storages-history", default_value_t = default_storages_history_in_rocksdb(), action = ArgAction::Set)] pub storages_history: bool, /// Route account history tables to `RocksDB` instead of MDBX. /// /// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. - /// Defaults to `true` when the `edge` feature is enabled, `false` otherwise. - #[arg(long = "rocksdb.account-history", default_value_t = default_rocksdb_flag(), action = ArgAction::Set)] + /// Defaults to `false`. + #[arg(long = "rocksdb.account-history", default_value_t = default_account_history_in_rocksdb(), action = ArgAction::Set)] pub account_history: bool, } @@ -50,9 +64,9 @@ impl Default for RocksDbArgs { fn default() -> Self { Self { all: false, - tx_hash: default_rocksdb_flag(), - storages_history: default_rocksdb_flag(), - account_history: default_rocksdb_flag(), + tx_hash: default_tx_hash_in_rocksdb(), + storages_history: default_storages_history_in_rocksdb(), + account_history: default_account_history_in_rocksdb(), } } } @@ -106,7 +120,25 @@ mod tests { fn test_parse_all_flag() { let args = CommandParser::::parse_from(["reth", "--rocksdb.all"]).args; assert!(args.all); - assert_eq!(args.tx_hash, default_rocksdb_flag()); + assert_eq!(args.tx_hash, default_tx_hash_in_rocksdb()); + } + + #[test] + fn test_defaults_match_storage_settings() { + let args = RocksDbArgs::default(); + let settings = StorageSettings::base(); + assert_eq!( + args.tx_hash, settings.transaction_hash_numbers_in_rocksdb, + "tx_hash default should match StorageSettings::base()" + ); + assert_eq!( + args.storages_history, settings.storages_history_in_rocksdb, + "storages_history default should match StorageSettings::base()" + ); + assert_eq!( + args.account_history, settings.account_history_in_rocksdb, + "account_history default should match StorageSettings::base()" + ); } #[test] diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx 
index d8a816e23ae..a4afaab93c0 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -184,7 +184,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -192,7 +192,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx index c5affadf9f5..0a832e34251 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx index 398086f9dc6..90503453cba 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx index 3e3e1ba019e..991a32fc72f 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. 
[default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/init.mdx b/docs/vocs/docs/pages/cli/op-reth/init.mdx index 9a0930b4fe3..2ffe4138423 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 00eef4064ef..3fe597815c4 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -919,7 +919,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. 
- This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -927,7 +927,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/prune.mdx index 603af5d99e4..c858c193031 100644 --- a/docs/vocs/docs/pages/cli/op-reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/prune.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. 
[default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx index c185b91027d..1765aa4c3bb 100644 --- a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx index d5034f0d4b8..c03f4aa36a3 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. 
[default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx index 9150154c31f..05dcf748a7c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx @@ -175,7 +175,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -183,7 +183,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx index 75b39f76c77..cb77b37201a 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. 
- This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx index 37852456cfd..cda368aad0c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx @@ -173,7 +173,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -181,7 +181,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. 
[default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 11f25e69730..09d81c42839 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -184,7 +184,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -192,7 +192,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 02ff7298c7f..257d7ad078e 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. 
- This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 9275a12059f..f67920173f0 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index aa13fd5f56a..866b15460ab 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index ed6a5d7f599..51112f21378 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. 
[default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index cbaf086f7c8..fb966cee1aa 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index bc4fe2c30c8..7bb3fb243e7 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. 
- This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index d0b2aad65d8..31896c640ac 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -919,7 +919,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -927,7 +927,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index a40d116b5f1..fef01ec44a2 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 30f2f8fc213..eb9187a53f5 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. 
[default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 98352862663..d88581492ec 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 9aefa355425..048d66cbf71 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -175,7 +175,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -183,7 +183,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. 
- This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index c06c786879b..6da85ef82b7 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -168,7 +168,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -176,7 +176,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index af442b243d6..542572ff5c2 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -173,7 +173,7 @@ RocksDB: --rocksdb.storages-history Route storages history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. 
+ This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] @@ -181,7 +181,7 @@ RocksDB: --rocksdb.account-history Route account history tables to `RocksDB` instead of MDBX. - This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `true` when the `edge` feature is enabled, `false` otherwise. + This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to `false`. [default: false] [possible values: true, false] From 08cd1cbda612929f1a3f3ddaee353abaf41cbe46 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 27 Jan 2026 14:01:32 +0000 Subject: [PATCH 237/267] fix(static-files): apply minimal blocks per file to all segments (#21479) Co-authored-by: Claude Sonnet 4.5 --- crates/node/core/src/args/static_files.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/node/core/src/args/static_files.rs b/crates/node/core/src/args/static_files.rs index ac710d0c9ee..18aa1bc0066 100644 --- a/crates/node/core/src/args/static_files.rs +++ b/crates/node/core/src/args/static_files.rs @@ -90,7 +90,7 @@ impl StaticFilesArgs { /// args. /// /// If `minimal` is true, uses [`MINIMAL_BLOCKS_PER_FILE`] blocks per file as the default for - /// headers, transactions, and receipts segments. + /// all segments. 
pub fn merge_with_config(&self, config: StaticFilesConfig, minimal: bool) -> StaticFilesConfig { let minimal_blocks_per_file = minimal.then_some(MINIMAL_BLOCKS_PER_FILE); StaticFilesConfig { @@ -109,12 +109,15 @@ impl StaticFilesArgs { .or(config.blocks_per_file.receipts), transaction_senders: self .blocks_per_file_transaction_senders + .or(minimal_blocks_per_file) .or(config.blocks_per_file.transaction_senders), account_change_sets: self .blocks_per_file_account_change_sets + .or(minimal_blocks_per_file) .or(config.blocks_per_file.account_change_sets), storage_change_sets: self .blocks_per_file_storage_change_sets + .or(minimal_blocks_per_file) .or(config.blocks_per_file.storage_change_sets), }, } From bff11ab663aa87b475e23cd34da18988471d7b8e Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Tue, 27 Jan 2026 15:54:56 +0100 Subject: [PATCH 238/267] refactor(trie): reuse shared StorageProofCalculator for V2 sync storage roots and add deferred encoder metrics (#21424) Co-authored-by: Amp --- crates/engine/primitives/src/config.rs | 16 -- .../tree/src/tree/payload_processor/mod.rs | 4 +- crates/trie/parallel/src/proof_task.rs | 207 ++++++++++++------ .../trie/parallel/src/proof_task_metrics.rs | 30 +++ crates/trie/parallel/src/value_encoder.rs | 172 +++++++++++---- crates/trie/trie/src/proof_v2/value.rs | 27 +-- 6 files changed, 295 insertions(+), 161 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 6b17a196fd8..aecf92eb48f 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -34,11 +34,6 @@ fn default_account_worker_count() -> usize { /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 60; -/// The size of proof targets chunk to spawn in one multiproof calculation when V2 proofs are -/// enabled. 
This is 4x the default chunk size to take advantage of more efficient V2 proof -/// computation. -pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2: usize = DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE * 4; - /// Default number of reserved CPU cores for non-reth processes. /// /// This will be deducted from the thread count of main reth global threadpool. @@ -277,17 +272,6 @@ impl TreeConfig { self.multiproof_chunk_size } - /// Return the multiproof task chunk size, using the V2 default if V2 proofs are enabled - /// and the chunk size is at the default value. - pub const fn effective_multiproof_chunk_size(&self) -> usize { - if self.enable_proof_v2 && self.multiproof_chunk_size == DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE - { - DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2 - } else { - self.multiproof_chunk_size - } - } - /// Return the number of reserved CPU cores for non-reth processes pub const fn reserved_cpu_cores(&self) -> usize { self.reserved_cpu_cores diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index dc2ac40068f..5d5fac93ee3 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -286,9 +286,7 @@ where let multi_proof_task = MultiProofTask::new( proof_handle.clone(), to_sparse_trie, - config - .multiproof_chunking_enabled() - .then_some(config.effective_multiproof_chunk_size()), + config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), to_multi_proof.clone(), from_multi_proof, ) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index d8073d1acf5..3cbf5293d82 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -33,7 +33,7 @@ use crate::{ root::ParallelStateRootError, stats::{ParallelTrieStats, ParallelTrieTracker}, targets_v2::MultiProofTargetsV2, - value_encoder::AsyncAccountValueEncoder, + 
value_encoder::{AsyncAccountValueEncoder, ValueEncoderStats}, StorageRootTargets, }; use alloy_primitives::{ @@ -65,6 +65,8 @@ use reth_trie_common::{ }; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ + cell::RefCell, + rc::Rc, sync::{ atomic::{AtomicUsize, Ordering}, mpsc::{channel, Receiver, Sender}, @@ -82,6 +84,22 @@ use crate::proof_task_metrics::{ type TrieNodeProviderResult = Result, SparseTrieError>; +/// Type alias for the V2 account proof calculator. +type V2AccountProofCalculator<'a, Provider> = proof_v2::ProofCalculator< + ::AccountTrieCursor<'a>, + ::AccountCursor<'a>, + AsyncAccountValueEncoder< + ::StorageTrieCursor<'a>, + ::StorageCursor<'a>, + >, +>; + +/// Type alias for the V2 storage proof calculator. +type V2StorageProofCalculator<'a, Provider> = proof_v2::StorageProofCalculator< + ::StorageTrieCursor<'a>, + ::StorageCursor<'a>, +>; + /// A handle that provides type-safe access to proof worker pools. /// /// The handle stores direct senders to both storage and account worker pools, @@ -543,15 +561,6 @@ where ProofBlindedStorageProvider::new(&self.provider, &self.provider, account); storage_node_provider.trie_node(path) } - - /// Process a blinded account node request. - /// - /// Used by account workers to retrieve blinded account trie nodes for proof construction. - fn process_blinded_account_node(&self, path: &Nibbles) -> TrieNodeProviderResult { - let account_node_provider = - ProofBlindedAccountProvider::new(&self.provider, &self.provider); - account_node_provider.trie_node(path) - } } impl TrieNodeProviderFactory for ProofWorkerHandle { type AccountNodeProvider = ProofTaskTrieNodeProvider; @@ -888,7 +897,12 @@ where // Initially mark this worker as available. 
self.available_workers.fetch_add(1, Ordering::Relaxed); + let mut total_idle_time = Duration::ZERO; + let mut idle_start = Instant::now(); + while let Ok(job) = self.work_rx.recv() { + total_idle_time += idle_start.elapsed(); + // Mark worker as busy. self.available_workers.fetch_sub(1, Ordering::Relaxed); @@ -918,6 +932,8 @@ where // Mark worker as available again. self.available_workers.fetch_add(1, Ordering::Relaxed); + + idle_start = Instant::now(); } trace!( @@ -925,12 +941,14 @@ where worker_id = self.worker_id, storage_proofs_processed, storage_nodes_processed, + total_idle_time_us = total_idle_time.as_micros(), "Storage worker shutting down" ); #[cfg(feature = "metrics")] { self.metrics.record_storage_nodes(storage_nodes_processed as usize); + self.metrics.record_storage_worker_idle_time(total_idle_time); self.cursor_metrics.record(&mut cursor_metrics_cache); } @@ -1094,7 +1112,7 @@ struct AccountProofWorker { work_rx: CrossbeamReceiver, /// Unique identifier for this worker (used for tracing) worker_id: usize, - /// Channel for dispatching storage proof work + /// Channel for dispatching storage proof work (for pre-dispatched target proofs) storage_work_tx: CrossbeamSender, /// Counter tracking worker availability available_workers: Arc, @@ -1165,9 +1183,7 @@ where /// If this function panics, the worker thread terminates but other workers /// continue operating and the system degrades gracefully. 
fn run(mut self) -> ProviderResult<()> { - // Create provider from factory let provider = self.task_ctx.factory.database_provider_ro()?; - let proof_tx = ProofTaskTx::new(provider, self.worker_id); trace!( target: "trie::proof_task", @@ -1179,39 +1195,64 @@ where let mut account_nodes_processed = 0u64; let mut cursor_metrics_cache = ProofTaskCursorMetricsCache::default(); - let mut v2_calculator = if self.v2_enabled { - let trie_cursor = proof_tx.provider.account_trie_cursor()?; - let hashed_cursor = proof_tx.provider.hashed_account_cursor()?; - Some(proof_v2::ProofCalculator::<_, _, AsyncAccountValueEncoder>::new( - trie_cursor, - hashed_cursor, - )) + // Create both account and storage calculators for V2 proofs. + // The storage calculator is wrapped in Rc> for sharing with value encoders. + let (mut v2_account_calculator, v2_storage_calculator) = if self.v2_enabled { + let account_trie_cursor = provider.account_trie_cursor()?; + let account_hashed_cursor = provider.hashed_account_cursor()?; + + let storage_trie_cursor = provider.storage_trie_cursor(B256::ZERO)?; + let storage_hashed_cursor = provider.hashed_storage_cursor(B256::ZERO)?; + + ( + Some(proof_v2::ProofCalculator::< + _, + _, + AsyncAccountValueEncoder< + ::StorageTrieCursor<'_>, + ::StorageCursor<'_>, + >, + >::new(account_trie_cursor, account_hashed_cursor)), + Some(Rc::new(RefCell::new(proof_v2::StorageProofCalculator::new_storage( + storage_trie_cursor, + storage_hashed_cursor, + )))), + ) } else { - None + (None, None) }; // Count this worker as available only after successful initialization. self.available_workers.fetch_add(1, Ordering::Relaxed); + let mut total_idle_time = Duration::ZERO; + let mut idle_start = Instant::now(); + let mut value_encoder_stats_cache = ValueEncoderStats::default(); + while let Ok(job) = self.work_rx.recv() { + total_idle_time += idle_start.elapsed(); + // Mark worker as busy. 
self.available_workers.fetch_sub(1, Ordering::Relaxed); match job { AccountWorkerJob::AccountMultiproof { input } => { - self.process_account_multiproof( - &proof_tx, - v2_calculator.as_mut(), + let value_encoder_stats = self.process_account_multiproof( + &provider, + v2_account_calculator.as_mut(), + v2_storage_calculator.clone(), *input, &mut account_proofs_processed, &mut cursor_metrics_cache, ); + total_idle_time += value_encoder_stats.storage_wait_time; + value_encoder_stats_cache.extend(&value_encoder_stats); } AccountWorkerJob::BlindedAccountNode { path, result_sender } => { Self::process_blinded_node( self.worker_id, - &proof_tx, + &provider, path, result_sender, &mut account_nodes_processed, @@ -1221,6 +1262,8 @@ where // Mark worker as available again. self.available_workers.fetch_add(1, Ordering::Relaxed); + + idle_start = Instant::now(); } trace!( @@ -1228,13 +1271,16 @@ where worker_id=self.worker_id, account_proofs_processed, account_nodes_processed, + total_idle_time_us = total_idle_time.as_micros(), "Account worker shutting down" ); #[cfg(feature = "metrics")] { self.metrics.record_account_nodes(account_nodes_processed as usize); + self.metrics.record_account_worker_idle_time(total_idle_time); self.cursor_metrics.record(&mut cursor_metrics_cache); + self.metrics.record_value_encoder_stats(&value_encoder_stats_cache); } Ok(()) @@ -1242,13 +1288,13 @@ where fn compute_legacy_account_multiproof( &self, - proof_tx: &ProofTaskTx, + provider: &Provider, targets: MultiProofTargets, mut prefix_sets: TriePrefixSets, collect_branch_node_masks: bool, multi_added_removed_keys: Option>, proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, - ) -> Result + ) -> Result<(ProofResult, Duration), ParallelStateRootError> where Provider: TrieCursorFactory + HashedCursorFactory, { @@ -1293,28 +1339,27 @@ where cached_storage_roots: &self.cached_storage_roots, }; + let mut storage_wait_time = Duration::ZERO; let result = build_account_multiproof_with_storage_roots( - 
&proof_tx.provider, + provider, ctx, &mut tracker, proof_cursor_metrics, - ); + &mut storage_wait_time, + )?; let stats = tracker.finish(); - result.map(|proof| ProofResult::Legacy(proof, stats)) + Ok((ProofResult::Legacy(result, stats), storage_wait_time)) } - fn compute_v2_account_multiproof( + fn compute_v2_account_multiproof<'a, Provider>( &self, - v2_calculator: &mut proof_v2::ProofCalculator< - ::AccountTrieCursor<'_>, - ::AccountCursor<'_>, - AsyncAccountValueEncoder, - >, + v2_account_calculator: &mut V2AccountProofCalculator<'a, Provider>, + v2_storage_calculator: Rc>>, targets: MultiProofTargetsV2, - ) -> Result + ) -> Result<(ProofResult, ValueEncoderStats), ParallelStateRootError> where - Provider: TrieCursorFactory + HashedCursorFactory, + Provider: TrieCursorFactory + HashedCursorFactory + 'a, { let MultiProofTargetsV2 { mut account_targets, storage_targets } = targets; @@ -1333,64 +1378,75 @@ where dispatch_v2_storage_proofs(&self.storage_work_tx, &account_targets, storage_targets)?; let mut value_encoder = AsyncAccountValueEncoder::new( - self.storage_work_tx.clone(), storage_proof_receivers, self.cached_storage_roots.clone(), + v2_storage_calculator, ); - let proof = DecodedMultiProofV2 { - account_proofs: v2_calculator.proof(&mut value_encoder, &mut account_targets)?, - storage_proofs: value_encoder.into_storage_proofs()?, - }; + let account_proofs = + v2_account_calculator.proof(&mut value_encoder, &mut account_targets)?; + + let (storage_proofs, value_encoder_stats) = value_encoder.finalize()?; - Ok(ProofResult::V2(proof)) + let proof = DecodedMultiProofV2 { account_proofs, storage_proofs }; + + Ok((ProofResult::V2(proof), value_encoder_stats)) } /// Processes an account multiproof request. - fn process_account_multiproof( + /// + /// Returns stats from the value encoder used during proof computation. 
+ fn process_account_multiproof<'a, Provider>( &self, - proof_tx: &ProofTaskTx, - v2_calculator: Option< - &mut proof_v2::ProofCalculator< - ::AccountTrieCursor<'_>, - ::AccountCursor<'_>, - AsyncAccountValueEncoder, - >, - >, + provider: &Provider, + v2_account_calculator: Option<&mut V2AccountProofCalculator<'a, Provider>>, + v2_storage_calculator: Option>>>, input: AccountMultiproofInput, account_proofs_processed: &mut u64, cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, - ) where - Provider: TrieCursorFactory + HashedCursorFactory, + ) -> ValueEncoderStats + where + Provider: TrieCursorFactory + HashedCursorFactory + 'a, { let mut proof_cursor_metrics = ProofTaskCursorMetricsCache::default(); let proof_start = Instant::now(); - let (proof_result_sender, result) = match input { + let (proof_result_sender, result, value_encoder_stats) = match input { AccountMultiproofInput::Legacy { targets, prefix_sets, collect_branch_node_masks, multi_added_removed_keys, proof_result_sender, - } => ( - proof_result_sender, - self.compute_legacy_account_multiproof( - proof_tx, + } => { + let (result, value_encoder_stats) = match self.compute_legacy_account_multiproof( + provider, targets, prefix_sets, collect_branch_node_masks, multi_added_removed_keys, &mut proof_cursor_metrics, - ), - ), - AccountMultiproofInput::V2 { targets, proof_result_sender } => ( - proof_result_sender, - self.compute_v2_account_multiproof::( - v2_calculator.expect("v2 calculator provided"), - targets, - ), - ), + ) { + Ok((proof, wait_time)) => ( + Ok(proof), + ValueEncoderStats { storage_wait_time: wait_time, ..Default::default() }, + ), + Err(e) => (Err(e), ValueEncoderStats::default()), + }; + (proof_result_sender, result, value_encoder_stats) + } + AccountMultiproofInput::V2 { targets, proof_result_sender } => { + let (result, value_encoder_stats) = match self + .compute_v2_account_multiproof::( + v2_account_calculator.expect("v2 account calculator provided"), + 
v2_storage_calculator.expect("v2 storage calculator provided"), + targets, + ) { + Ok((proof, stats)) => (Ok(proof), stats), + Err(e) => (Err(e), ValueEncoderStats::default()), + }; + (proof_result_sender, result, value_encoder_stats) + } }; let ProofResultContext { @@ -1443,12 +1499,14 @@ where #[cfg(feature = "metrics")] // Accumulate per-proof metrics into the worker's cache cursor_metrics_cache.extend(&proof_cursor_metrics); + + value_encoder_stats } /// Processes a blinded account node lookup request. fn process_blinded_node( worker_id: usize, - proof_tx: &ProofTaskTx, + provider: &Provider, path: Nibbles, result_sender: Sender, account_nodes_processed: &mut u64, @@ -1469,7 +1527,8 @@ where ); let start = Instant::now(); - let result = proof_tx.process_blinded_account_node(&path); + let account_node_provider = ProofBlindedAccountProvider::new(provider, provider); + let result = account_node_provider.trie_node(&path); let elapsed = start.elapsed(); *account_nodes_processed += 1; @@ -1500,11 +1559,13 @@ where /// enabling interleaved parallelism between account trie traversal and storage proof computation. /// /// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. +/// Also accumulates the time spent waiting for storage proofs into `storage_wait_time`. fn build_account_multiproof_with_storage_roots

( provider: &P, ctx: AccountMultiproofParams<'_>, tracker: &mut ParallelTrieTracker, proof_cursor_metrics: &mut ProofTaskCursorMetricsCache, + storage_wait_time: &mut Duration, ) -> Result where P: TrieCursorFactory + HashedCursorFactory, @@ -1568,6 +1629,7 @@ where ); // Block on this specific storage proof receiver - enables interleaved // parallelism + let wait_start = Instant::now(); let proof_msg = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot( reth_execution_errors::StorageRootError::Database( @@ -1577,6 +1639,7 @@ where ), ) })?; + *storage_wait_time += wait_start.elapsed(); drop(_guard); @@ -1668,7 +1731,9 @@ where // Consume remaining storage proof receivers for accounts not encountered during trie walk. // Done last to allow storage workers more time to complete while we finalized the account trie. for (hashed_address, receiver) in storage_proof_receivers { + let wait_start = Instant::now(); if let Ok(proof_msg) = receiver.recv() { + *storage_wait_time += wait_start.elapsed(); let proof_result = proof_msg.result?; let proof = Into::>::into(proof_result) .expect("Partial proofs are not yet supported"); diff --git a/crates/trie/parallel/src/proof_task_metrics.rs b/crates/trie/parallel/src/proof_task_metrics.rs index f9b8d70c162..e303df287b9 100644 --- a/crates/trie/parallel/src/proof_task_metrics.rs +++ b/crates/trie/parallel/src/proof_task_metrics.rs @@ -1,9 +1,11 @@ +use crate::value_encoder::ValueEncoderStats; use reth_metrics::{metrics::Histogram, Metrics}; use reth_trie::{ hashed_cursor::{HashedCursorMetrics, HashedCursorMetricsCache}, trie_cursor::{TrieCursorMetrics, TrieCursorMetricsCache}, TrieType, }; +use std::time::Duration; /// Metrics for the proof task. #[derive(Clone, Metrics)] @@ -13,6 +15,17 @@ pub struct ProofTaskTrieMetrics { blinded_account_nodes: Histogram, /// A histogram for the number of blinded storage nodes fetched. 
blinded_storage_nodes: Histogram, + /// Histogram for storage worker idle time in seconds (waiting for proof jobs). + storage_worker_idle_time_seconds: Histogram, + /// Histogram for account worker idle time in seconds (waiting for proof jobs + storage + /// results). + account_worker_idle_time_seconds: Histogram, + /// Histogram for `Dispatched` deferred encoder variant count. + deferred_encoder_dispatched: Histogram, + /// Histogram for `FromCache` deferred encoder variant count. + deferred_encoder_from_cache: Histogram, + /// Histogram for `Sync` deferred encoder variant count. + deferred_encoder_sync: Histogram, } impl ProofTaskTrieMetrics { @@ -25,6 +38,23 @@ impl ProofTaskTrieMetrics { pub fn record_storage_nodes(&self, count: usize) { self.blinded_storage_nodes.record(count as f64); } + + /// Record storage worker idle time. + pub fn record_storage_worker_idle_time(&self, duration: Duration) { + self.storage_worker_idle_time_seconds.record(duration.as_secs_f64()); + } + + /// Record account worker idle time. + pub fn record_account_worker_idle_time(&self, duration: Duration) { + self.account_worker_idle_time_seconds.record(duration.as_secs_f64()); + } + + /// Record value encoder stats (deferred encoder variant counts). + pub(crate) fn record_value_encoder_stats(&self, stats: &ValueEncoderStats) { + self.deferred_encoder_dispatched.record(stats.dispatched_count as f64); + self.deferred_encoder_from_cache.record(stats.from_cache_count as f64); + self.deferred_encoder_sync.record(stats.sync_count as f64); + } } /// Cursor metrics for proof task operations. 
diff --git a/crates/trie/parallel/src/value_encoder.rs b/crates/trie/parallel/src/value_encoder.rs index 7b08d3e1b5e..0b082a08d7d 100644 --- a/crates/trie/parallel/src/value_encoder.rs +++ b/crates/trie/parallel/src/value_encoder.rs @@ -1,36 +1,79 @@ -use crate::proof_task::{ - StorageProofInput, StorageProofResult, StorageProofResultMessage, StorageWorkerJob, -}; +use crate::proof_task::{StorageProofResult, StorageProofResultMessage}; use alloy_primitives::{map::B256Map, B256}; use alloy_rlp::Encodable; use core::cell::RefCell; -use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; +use crossbeam_channel::Receiver as CrossbeamReceiver; use dashmap::DashMap; use reth_execution_errors::trie::StateProofError; use reth_primitives_traits::Account; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - proof_v2::{DeferredValueEncoder, LeafValueEncoder, Target}, + hashed_cursor::HashedStorageCursor, + proof_v2::{DeferredValueEncoder, LeafValueEncoder, StorageProofCalculator}, + trie_cursor::TrieStorageCursor, ProofTrieNode, }; -use std::{rc::Rc, sync::Arc}; +use std::{ + rc::Rc, + sync::Arc, + time::{Duration, Instant}, +}; + +/// Stats collected by [`AsyncAccountValueEncoder`] during proof computation. +/// +/// Tracks time spent waiting for storage proofs and counts of each deferred encoder variant used. +#[derive(Debug, Default, Clone, Copy)] +pub(crate) struct ValueEncoderStats { + /// Accumulated time spent waiting for storage proof results from dispatched workers. + pub(crate) storage_wait_time: Duration, + /// Number of times the `Dispatched` variant was used (proof pre-dispatched to workers). + pub(crate) dispatched_count: u64, + /// Number of times the `FromCache` variant was used (storage root already cached). + pub(crate) from_cache_count: u64, + /// Number of times the `Sync` variant was used (synchronous computation). 
+ pub(crate) sync_count: u64, +} + +impl ValueEncoderStats { + /// Extends this metrics by adding the values from another. + pub(crate) fn extend(&mut self, other: &Self) { + self.storage_wait_time += other.storage_wait_time; + self.dispatched_count += other.dispatched_count; + self.from_cache_count += other.from_cache_count; + self.sync_count += other.sync_count; + } +} /// Returned from [`AsyncAccountValueEncoder`], used to track an async storage root calculation. -pub(crate) enum AsyncAccountDeferredValueEncoder { +pub(crate) enum AsyncAccountDeferredValueEncoder { + /// A storage proof job was dispatched to the worker pool. Dispatched { hashed_address: B256, account: Account, proof_result_rx: Result, DatabaseError>, - // None if results shouldn't be retained for this dispatched proof. - storage_proof_results: Option>>>>, + /// Shared storage proof results. + storage_proof_results: Rc>>>, + /// Shared stats for tracking wait time and counts. + stats: Rc>, }, - FromCache { + /// The storage root was found in cache. + FromCache { account: Account, root: B256 }, + /// Synchronous storage root computation. + Sync { + /// Shared storage proof calculator for computing storage roots. + storage_calculator: Rc>>, + hashed_address: B256, account: Account, - root: B256, + /// Cache to store computed storage roots for future reuse. + cached_storage_roots: Arc>, }, } -impl DeferredValueEncoder for AsyncAccountDeferredValueEncoder { +impl DeferredValueEncoder for AsyncAccountDeferredValueEncoder +where + TC: TrieStorageCursor, + HC: HashedStorageCursor, +{ fn encode(self, buf: &mut Vec) -> Result<(), StateProofError> { let (account, root) = match self { Self::Dispatched { @@ -38,7 +81,9 @@ impl DeferredValueEncoder for AsyncAccountDeferredValueEncoder { account, proof_result_rx, storage_proof_results, + stats, } => { + let wait_start = Instant::now(); let result = proof_result_rx? 
.recv() .map_err(|_| { @@ -47,18 +92,27 @@ impl DeferredValueEncoder for AsyncAccountDeferredValueEncoder { ))) })? .result?; + stats.borrow_mut().storage_wait_time += wait_start.elapsed(); let StorageProofResult::V2 { root: Some(root), proof } = result else { panic!("StorageProofResult is not V2 with root: {result:?}") }; - if let Some(storage_proof_results) = storage_proof_results.as_ref() { - storage_proof_results.borrow_mut().insert(hashed_address, proof); - } + storage_proof_results.borrow_mut().insert(hashed_address, proof); (account, root) } Self::FromCache { account, root } => (account, root), + Self::Sync { storage_calculator, hashed_address, account, cached_storage_roots } => { + let mut calculator = storage_calculator.borrow_mut(); + let proof = calculator.storage_proof(hashed_address, &mut [B256::ZERO.into()])?; + let storage_root = calculator + .compute_root_hash(&proof)? + .expect("storage_proof with dummy target always returns root"); + + cached_storage_roots.insert(hashed_address, storage_root); + (account, storage_root) + } }; let account = account.into_trie_account(root); @@ -67,12 +121,15 @@ impl DeferredValueEncoder for AsyncAccountDeferredValueEncoder { } } -/// Implements the [`LeafValueEncoder`] trait for accounts using a [`CrossbeamSender`] to dispatch -/// and compute storage roots asynchronously. Can also accept a set of already dispatched account -/// storage proofs, for cases where it's possible to determine some necessary accounts ahead of -/// time. -pub(crate) struct AsyncAccountValueEncoder { - storage_work_tx: CrossbeamSender, +/// Implements the [`LeafValueEncoder`] trait for accounts. +/// +/// Accepts a set of pre-dispatched storage proof receivers for accounts whose storage roots are +/// being computed asynchronously by worker threads. 
+/// +/// For accounts without pre-dispatched proofs or cached roots, uses a shared +/// [`StorageProofCalculator`] to compute storage roots synchronously, reusing cursors across +/// multiple accounts. +pub(crate) struct AsyncAccountValueEncoder { /// Storage proof jobs which were dispatched ahead of time. dispatched: B256Map>, /// Storage roots which have already been computed. This can be used only if a storage proof @@ -81,39 +138,59 @@ pub(crate) struct AsyncAccountValueEncoder { /// Tracks storage proof results received from the storage workers. [`Rc`] + [`RefCell`] is /// required because [`DeferredValueEncoder`] cannot have a lifetime. storage_proof_results: Rc>>>, + /// Shared storage proof calculator for synchronous computation. Reuses cursors and internal + /// buffers across multiple storage root calculations. + storage_calculator: Rc>>, + /// Shared stats for tracking wait time and variant counts. + stats: Rc>, } -impl AsyncAccountValueEncoder { - /// Initializes a [`Self`] using a `ProofWorkerHandle` which will be used to calculate storage - /// roots asynchronously. +impl AsyncAccountValueEncoder { + /// Initializes a [`Self`] using a storage proof calculator which will be reused to calculate + /// storage roots synchronously. + /// + /// # Parameters + /// - `dispatched`: Pre-dispatched storage proof receivers for target accounts + /// - `cached_storage_roots`: Shared cache of already-computed storage roots + /// - `storage_calculator`: Shared storage proof calculator for synchronous computation pub(crate) fn new( - storage_work_tx: CrossbeamSender, dispatched: B256Map>, cached_storage_roots: Arc>, + storage_calculator: Rc>>, ) -> Self { Self { - storage_work_tx, dispatched, cached_storage_roots, storage_proof_results: Default::default(), + storage_calculator, + stats: Default::default(), } } - /// Consume [`Self`] and return all collected storage proofs which had been dispatched. 
+ /// Consume [`Self`] and return all collected storage proofs along with accumulated stats. + /// + /// This method collects any remaining dispatched proofs that weren't consumed during proof + /// calculation and includes their wait time in the returned stats. /// /// # Panics /// /// This method panics if any deferred encoders produced by [`Self::deferred_encoder`] have not /// been dropped. - pub(crate) fn into_storage_proofs( + pub(crate) fn finalize( self, - ) -> Result>, StateProofError> { + ) -> Result<(B256Map>, ValueEncoderStats), StateProofError> { let mut storage_proof_results = Rc::into_inner(self.storage_proof_results) .expect("no deferred encoders are still allocated") .into_inner(); - // Any remaining dispatched proofs need to have their results collected + let mut stats = Rc::into_inner(self.stats) + .expect("no deferred encoders are still allocated") + .into_inner(); + + // Any remaining dispatched proofs need to have their results collected. + // These are proofs that were pre-dispatched but not consumed during proof calculation. for (hashed_address, rx) in &self.dispatched { + let wait_start = Instant::now(); let result = rx .recv() .map_err(|_| { @@ -122,6 +199,7 @@ impl AsyncAccountValueEncoder { ))) })? .result?; + stats.storage_wait_time += wait_start.elapsed(); let StorageProofResult::V2 { proof, .. 
} = result else { panic!("StorageProofResult is not V2: {result:?}") @@ -130,13 +208,17 @@ impl AsyncAccountValueEncoder { storage_proof_results.insert(*hashed_address, proof); } - Ok(storage_proof_results) + Ok((storage_proof_results, stats)) } } -impl LeafValueEncoder for AsyncAccountValueEncoder { +impl LeafValueEncoder for AsyncAccountValueEncoder +where + TC: TrieStorageCursor, + HC: HashedStorageCursor, +{ type Value = Account; - type DeferredEncoder = AsyncAccountDeferredValueEncoder; + type DeferredEncoder = AsyncAccountDeferredValueEncoder; fn deferred_encoder( &mut self, @@ -146,11 +228,13 @@ impl LeafValueEncoder for AsyncAccountValueEncoder { // If the proof job has already been dispatched for this account then it's not necessary to // dispatch another. if let Some(rx) = self.dispatched.remove(&hashed_address) { + self.stats.borrow_mut().dispatched_count += 1; return AsyncAccountDeferredValueEncoder::Dispatched { hashed_address, account, proof_result_rx: Ok(rx), - storage_proof_results: Some(self.storage_proof_results.clone()), + storage_proof_results: self.storage_proof_results.clone(), + stats: self.stats.clone(), } } @@ -159,25 +243,17 @@ impl LeafValueEncoder for AsyncAccountValueEncoder { // If the root is already calculated then just use it directly if let Some(root) = self.cached_storage_roots.get(&hashed_address) { + self.stats.borrow_mut().from_cache_count += 1; return AsyncAccountDeferredValueEncoder::FromCache { account, root: *root } } - // Create a proof input which targets a bogus key, so that we calculate the root as a - // side-effect. 
- let input = StorageProofInput::new(hashed_address, vec![Target::new(B256::ZERO)]); - let (tx, rx) = crossbeam_channel::bounded(1); - - let proof_result_rx = self - .storage_work_tx - .send(StorageWorkerJob::StorageProof { input, proof_result_sender: tx }) - .map_err(|_| DatabaseError::Other("storage workers unavailable".to_string())) - .map(|_| rx); - - AsyncAccountDeferredValueEncoder::Dispatched { + // Compute storage root synchronously using the shared calculator + self.stats.borrow_mut().sync_count += 1; + AsyncAccountDeferredValueEncoder::Sync { + storage_calculator: self.storage_calculator.clone(), hashed_address, account, - proof_result_rx, - storage_proof_results: None, + cached_storage_roots: self.cached_storage_roots.clone(), } } } diff --git a/crates/trie/trie/src/proof_v2/value.rs b/crates/trie/trie/src/proof_v2/value.rs index 2b7b0851192..6c15bbdf08a 100644 --- a/crates/trie/trie/src/proof_v2/value.rs +++ b/crates/trie/trie/src/proof_v2/value.rs @@ -109,39 +109,20 @@ where T: TrieCursorFactory, H: HashedCursorFactory, { - // Synchronously computes the storage root for this account and RLP-encodes the resulting - // `TrieAccount` into `buf` fn encode(self, buf: &mut Vec) -> Result<(), StateProofError> { - // Create cursors for storage proof calculation let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; let hashed_cursor = self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; - // Create storage proof calculator with StorageValueEncoder let mut storage_proof_calculator = ProofCalculator::new_storage(trie_cursor, hashed_cursor); - // Compute storage root by calling storage_proof with the root path as a target. - // This returns just the root node of the storage trie. 
+ let proof = storage_proof_calculator + .storage_proof(self.hashed_address, &mut [B256::ZERO.into()])?; let storage_root = storage_proof_calculator - .storage_proof(self.hashed_address, &mut [B256::ZERO.into()]) - .map(|nodes| { - // Encode the root node to RLP and hash it - let root_node = - nodes.first().expect("storage_proof always returns at least the root"); - root_node.node.encode(buf); + .compute_root_hash(&proof)? + .expect("storage_proof with dummy target always returns root"); - let storage_root = alloy_primitives::keccak256(buf.as_slice()); - - // Clear the buffer so we can re-use it to encode the TrieAccount - buf.clear(); - - storage_root - })?; - - // Combine account with storage root to create TrieAccount let trie_account = self.account.into_trie_account(storage_root); - - // Encode the trie account trie_account.encode(buf); Ok(()) From af3601c65d2b1cbf0ffa04d3f3390c18c997bfb5 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 27 Jan 2026 19:17:49 +0400 Subject: [PATCH 239/267] feat: more metrics (#21481) --- crates/engine/tree/src/tree/cached_state.rs | 27 +++---------------- .../src/tree/payload_processor/prewarm.rs | 1 + crates/trie/parallel/src/proof_task.rs | 1 + 3 files changed, 5 insertions(+), 24 deletions(-) diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index dfdcafa49e9..7122b86fcb5 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -312,14 +312,7 @@ impl AccountReader for CachedStateProvider { match self.caches.get_or_try_insert_account_with(*address, || { self.state_provider.basic_account(address) })? 
{ - CachedStatus::NotCached(value) => { - self.metrics.account_cache_misses.increment(1); - Ok(value) - } - CachedStatus::Cached(value) => { - self.metrics.account_cache_hits.increment(1); - Ok(value) - } + CachedStatus::NotCached(value) | CachedStatus::Cached(value) => Ok(value), } } else if let Some(account) = self.caches.account_cache.get(address) { self.metrics.account_cache_hits.increment(1); @@ -350,14 +343,7 @@ impl StateProvider for CachedStateProvider { match self.caches.get_or_try_insert_storage_with(account, storage_key, || { self.state_provider.storage(account, storage_key).map(Option::unwrap_or_default) })? { - CachedStatus::NotCached(value) => { - self.metrics.storage_cache_misses.increment(1); - // The slot that was never written to is indistinguishable from a slot - // explicitly set to zero. We return `None` in both cases. - Ok(Some(value).filter(|v| !v.is_zero())) - } - CachedStatus::Cached(value) => { - self.metrics.storage_cache_hits.increment(1); + CachedStatus::NotCached(value) | CachedStatus::Cached(value) => { // The slot that was never written to is indistinguishable from a slot // explicitly set to zero. We return `None` in both cases. Ok(Some(value).filter(|v| !v.is_zero())) @@ -379,14 +365,7 @@ impl BytecodeReader for CachedStateProvider { match self.caches.get_or_try_insert_code_with(*code_hash, || { self.state_provider.bytecode_by_hash(code_hash) })? 
{ - CachedStatus::NotCached(code) => { - self.metrics.code_cache_misses.increment(1); - Ok(code) - } - CachedStatus::Cached(code) => { - self.metrics.code_cache_hits.increment(1); - Ok(code) - } + CachedStatus::NotCached(code) | CachedStatus::Cached(code) => Ok(code), } } else if let Some(code) = self.caches.code_cache.get(code_hash) { self.metrics.code_cache_hits.increment(1); diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 4ecb6fd1656..cb29b95f460 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -563,6 +563,7 @@ where index, tx_hash = %tx.tx().tx_hash(), is_success = tracing::field::Empty, + gas_used = tracing::field::Empty, ) .entered(); diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 3cbf5293d82..6cf75aa818e 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -1302,6 +1302,7 @@ where target: "trie::proof_task", "Account multiproof calculation", targets = targets.len(), + num_slots = targets.values().map(|slots| slots.len()).sum::(), worker_id=self.worker_id, ); let _span_guard = span.enter(); From ba8c8354e51a9b40f8320937c44a396ca73ecd39 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Tue, 27 Jan 2026 08:10:53 -0800 Subject: [PATCH 240/267] fix(reth-bench): retry up to 5 times on failed transaction fetches in big blocks generate (#21483) --- .../src/bench/generate_big_block.rs | 47 ++++++++++++------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/bin/reth-bench/src/bench/generate_big_block.rs b/bin/reth-bench/src/bench/generate_big_block.rs index 0352d0a39df..7869ee829c8 100644 --- a/bin/reth-bench/src/bench/generate_big_block.rs +++ b/bin/reth-bench/src/bench/generate_big_block.rs @@ -401,25 +401,38 @@ impl Command { let mut current_block = start_block; for 
payload_idx in 0..count { - match collector.collect(current_block).await { - Ok((transactions, total_gas, next_block)) => { - info!( - payload = payload_idx + 1, - tx_count = transactions.len(), - total_gas, - blocks = format!("{}..{}", current_block, next_block), - "Fetched transactions" - ); - current_block = next_block; - - if tx_sender.send(transactions).await.is_err() { - break; + const MAX_RETRIES: u32 = 5; + let mut attempts = 0; + let result = loop { + attempts += 1; + match collector.collect(current_block).await { + Ok(res) => break Some(res), + Err(e) => { + if attempts >= MAX_RETRIES { + warn!(payload = payload_idx + 1, attempts, error = %e, "Failed to fetch transactions after max retries"); + break None; + } + warn!(payload = payload_idx + 1, attempts, error = %e, "Failed to fetch transactions, retrying..."); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; } } - Err(e) => { - warn!(payload = payload_idx + 1, error = %e, "Failed to fetch transactions"); - break; - } + }; + + let Some((transactions, total_gas, next_block)) = result else { + break; + }; + + info!( + payload = payload_idx + 1, + tx_count = transactions.len(), + total_gas, + blocks = format!("{}..{}", current_block, next_block), + "Fetched transactions" + ); + current_block = next_block; + + if tx_sender.send(transactions).await.is_err() { + break; } } }); From 9eaa5a63033ac5e28b57b55385d82ab13e3f96e7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 27 Jan 2026 19:31:40 +0100 Subject: [PATCH 241/267] chore: remove Sync bound from cursor associated types (#21486) Co-authored-by: Amp --- crates/storage/db-api/src/transaction.rs | 9 ++++----- crates/trie/db/src/trie_cursor.rs | 6 +++--- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/crates/storage/db-api/src/transaction.rs b/crates/storage/db-api/src/transaction.rs index 545c0ce39f3..d92c4509800 100644 --- a/crates/storage/db-api/src/transaction.rs +++ b/crates/storage/db-api/src/transaction.rs @@ -20,9 
+20,9 @@ pub type DupCursorMutTy = ::DupCursorMut; /// Read only transaction pub trait DbTx: Debug + Send { /// Cursor type for this read-only transaction - type Cursor: DbCursorRO + Send + Sync; + type Cursor: DbCursorRO + Send; /// `DupCursor` type for this read-only transaction - type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; + type DupCursor: DbDupCursorRO + DbCursorRO + Send; /// Get value by an owned key fn get(&self, key: T::Key) -> Result, DatabaseError>; @@ -51,14 +51,13 @@ pub trait DbTx: Debug + Send { /// Read write transaction that allows writing to database pub trait DbTxMut: Send { /// Read-Write Cursor type - type CursorMut: DbCursorRW + DbCursorRO + Send + Sync; + type CursorMut: DbCursorRW + DbCursorRO + Send; /// Read-Write `DupCursor` type type DupCursorMut: DbDupCursorRW + DbCursorRW + DbDupCursorRO + DbCursorRO - + Send - + Sync; + + Send; /// Put value to database fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index 7b9c402545f..84268d5e541 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -64,7 +64,7 @@ impl DatabaseAccountTrieCursor { impl TrieCursor for DatabaseAccountTrieCursor where - C: DbCursorRO + Send + Sync, + C: DbCursorRO + Send, { /// Seeks an exact match for the provided key in the account trie. fn seek_exact( @@ -160,7 +160,7 @@ where impl TrieCursor for DatabaseStorageTrieCursor where - C: DbCursorRO + DbDupCursorRO + Send + Sync, + C: DbCursorRO + DbDupCursorRO + Send, { /// Seeks an exact match for the given key in the storage trie. 
fn seek_exact( @@ -202,7 +202,7 @@ where impl TrieStorageCursor for DatabaseStorageTrieCursor where - C: DbCursorRO + DbDupCursorRO + Send + Sync, + C: DbCursorRO + DbDupCursorRO + Send, { fn set_hashed_address(&mut self, hashed_address: B256) { self.hashed_address = hashed_address; From 2e05cec84b3e558c75ab4332e6d0c44edd2329af Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 27 Jan 2026 18:43:25 +0000 Subject: [PATCH 242/267] fix: ensure edge enables history in rocksdb (#21478) --- crates/e2e-test-utils/src/setup_import.rs | 12 +++- crates/exex/test-utils/src/lib.rs | 2 +- crates/storage/db-api/src/models/metadata.rs | 4 +- crates/storage/db-common/src/init.rs | 70 ++++++++++++++----- .../src/providers/database/provider.rs | 34 ++++++++- 5 files changed, 96 insertions(+), 26 deletions(-) diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs index e0fda6020c4..d456bb37c9a 100644 --- a/crates/e2e-test-utils/src/setup_import.rs +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -125,7 +125,10 @@ pub async fn setup_engine_with_chain_import( db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())?, - reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path).build().unwrap(), + reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path) + .with_default_tables() + .build() + .unwrap(), )?; // Initialize genesis if needed @@ -328,6 +331,7 @@ mod tests { reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone()) .unwrap(), reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path.clone()) + .with_default_tables() .build() .unwrap(), ) @@ -392,6 +396,7 @@ mod tests { reth_provider::providers::StaticFileProvider::read_only(static_files_path, false) .unwrap(), reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path) + .with_default_tables() .build() .unwrap(), 
) @@ -490,7 +495,10 @@ mod tests { db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(), - reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path).build().unwrap(), + reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path) + .with_default_tables() + .build() + .unwrap(), ) .expect("failed to create provider factory"); diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 3fc75488e18..61989356154 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -251,7 +251,7 @@ pub async fn test_exex_context_with_chain_spec( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), - RocksDBProvider::builder(rocksdb_dir.keep()).build().unwrap(), + RocksDBProvider::builder(rocksdb_dir.keep()).with_default_tables().build().unwrap(), )?; let genesis_hash = init_genesis(&provider_factory)?; diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs index b17dccabfba..211a6bc1773 100644 --- a/crates/storage/db-api/src/models/metadata.rs +++ b/crates/storage/db-api/src/models/metadata.rs @@ -63,9 +63,9 @@ impl StorageSettings { transaction_senders_in_static_files: true, account_changesets_in_static_files: true, storage_changesets_in_static_files: true, - storages_history_in_rocksdb: false, + storages_history_in_rocksdb: true, transaction_hash_numbers_in_rocksdb: true, - account_history_in_rocksdb: false, + account_history_in_rocksdb: true, } } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index b4dfea23bb6..fd3c4fcd0bb 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -932,27 +932,59 @@ mod tests { let factory = create_test_provider_factory_with_chain_spec(chain_spec); init_genesis(&factory).unwrap(); - let provider = factory.provider().unwrap(); 
- - let tx = provider.tx_ref(); + let expected_accounts = vec![ + (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()), + (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap()), + ]; + let expected_storages = vec![( + StorageShardedKey::new(address_with_storage, storage_key, u64::MAX), + IntegerList::new([0]).unwrap(), + )]; + + let collect_from_mdbx = |factory: &ProviderFactory| { + let provider = factory.provider().unwrap(); + let tx = provider.tx_ref(); + ( + collect_table_entries::, tables::AccountsHistory>(tx).unwrap(), + collect_table_entries::, tables::StoragesHistory>(tx).unwrap(), + ) + }; - assert_eq!( - collect_table_entries::, tables::AccountsHistory>(tx) - .expect("failed to collect"), - vec![ - (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()), - (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap()) - ], - ); + #[cfg(feature = "edge")] + { + let settings = factory.cached_storage_settings(); + let rocksdb = factory.rocksdb_provider(); + + let collect_rocksdb = |rocksdb: &reth_provider::providers::RocksDBProvider| { + ( + rocksdb + .iter::() + .unwrap() + .collect::, _>>() + .unwrap(), + rocksdb + .iter::() + .unwrap() + .collect::, _>>() + .unwrap(), + ) + }; + + let (accounts, storages) = if settings.account_history_in_rocksdb { + collect_rocksdb(&rocksdb) + } else { + collect_from_mdbx(&factory) + }; + assert_eq!(accounts, expected_accounts); + assert_eq!(storages, expected_storages); + } - assert_eq!( - collect_table_entries::, tables::StoragesHistory>(tx) - .expect("failed to collect"), - vec![( - StorageShardedKey::new(address_with_storage, storage_key, u64::MAX), - IntegerList::new([0]).unwrap() - )], - ); + #[cfg(not(feature = "edge"))] + { + let (accounts, storages) = collect_from_mdbx(&factory); + assert_eq!(accounts, expected_accounts); + assert_eq!(storages, expected_storages); + } } #[test] diff --git 
a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 82c054f6898..795dbc308b5 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -3319,6 +3319,13 @@ impl BlockWriter Ok(()) } + /// Appends blocks with their execution state to the database. + /// + /// **Note:** This function is only used in tests. + /// + /// History indices are written to the appropriate backend based on storage settings: + /// MDBX when `*_history_in_rocksdb` is false, `RocksDB` when true. + /// /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, @@ -3376,8 +3383,31 @@ impl BlockWriter // Use pre-computed transitions for history indices since static file // writes aren't visible until commit. - self.insert_account_history_index(account_transitions)?; - self.insert_storage_history_index(storage_transitions)?; + // Note: For MDBX we use insert_*_history_index. For RocksDB we use + // append_*_history_shard which handles read-merge-write internally. 
+ let storage_settings = self.cached_storage_settings(); + if storage_settings.account_history_in_rocksdb { + #[cfg(all(unix, feature = "rocksdb"))] + self.with_rocksdb_batch(|mut batch| { + for (address, blocks) in account_transitions { + batch.append_account_history_shard(address, blocks)?; + } + Ok(((), Some(batch.into_inner()))) + })?; + } else { + self.insert_account_history_index(account_transitions)?; + } + if storage_settings.storages_history_in_rocksdb { + #[cfg(all(unix, feature = "rocksdb"))] + self.with_rocksdb_batch(|mut batch| { + for ((address, key), blocks) in storage_transitions { + batch.append_storage_history_shard(address, key, blocks)?; + } + Ok(((), Some(batch.into_inner()))) + })?; + } else { + self.insert_storage_history_index(storage_transitions)?; + } durations_recorder.record_relative(metrics::Action::InsertHistoryIndices); // Update pipeline progress From f12acf17e600f87ea0eaa67ab6da0a8fc7ac7b94 Mon Sep 17 00:00:00 2001 From: katikatidimon <140461101+katikatidimon@users.noreply.github.com> Date: Tue, 27 Jan 2026 22:37:44 +0100 Subject: [PATCH 243/267] chore(txpool): remove redundant locals clone in config (#21477) --- crates/node/core/src/args/txpool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 8c826e1afba..2428fcd6b4b 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -507,7 +507,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { PoolConfig { local_transactions_config: LocalTransactionConfig { no_exemptions: self.no_locals, - local_addresses: self.locals.clone().into_iter().collect(), + local_addresses: self.locals.iter().copied().collect(), propagate_local_transactions: !self.no_local_transactions_propagation, }, pending_limit: SubPoolLimit { From aa5b12af44cfbcb765c540888a23cc48f5de896c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 27 Jan 2026 23:06:41 +0100 Subject: [PATCH 
244/267] refactor(db): make Tx::inner field private with accessor (#21490) Co-authored-by: Amp --- crates/cli/commands/src/db/list.rs | 4 +- crates/cli/commands/src/db/stats.rs | 8 +-- crates/storage/db/benches/criterion.rs | 12 +++-- crates/storage/db/benches/hash_keys.rs | 52 +++++++------------ crates/storage/db/benches/utils.rs | 4 +- .../storage/db/src/implementation/mdbx/mod.rs | 5 +- .../storage/db/src/implementation/mdbx/tx.rs | 7 ++- 7 files changed, 43 insertions(+), 49 deletions(-) diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 8e3db03fb72..eee500eb539 100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -101,8 +101,8 @@ impl TableViewer<()> for ListTableViewer<'_, N> { // We may be using the tui for a long time tx.disable_long_read_transaction_safety(); - let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; - let stats = tx.inner.db_stat(table_db.dbi()).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; + let table_db = tx.inner().open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; + let stats = tx.inner().db_stat(table_db.dbi()).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; let total_entries = stats.entries(); let final_entry_idx = total_entries.saturating_sub(1); if self.args.skip > final_entry_idx { diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 62c8af1f407..85edbeeb4e5 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -92,10 +92,10 @@ impl Command { db_tables.sort(); let mut total_size = 0; for db_table in db_tables { - let table_db = tx.inner.open_db(Some(db_table)).wrap_err("Could not open db.")?; + let table_db = tx.inner().open_db(Some(db_table)).wrap_err("Could not open db.")?; let stats = tx - .inner + .inner() .db_stat(table_db.dbi()) .wrap_err(format!("Could not 
find table: {db_table}"))?; @@ -136,9 +136,9 @@ impl Command { .add_cell(Cell::new(human_bytes(total_size as f64))); table.add_row(row); - let freelist = tx.inner.env().freelist()?; + let freelist = tx.inner().env().freelist()?; let pagesize = - tx.inner.db_stat(mdbx::Database::freelist_db().dbi())?.page_size() as usize; + tx.inner().db_stat(mdbx::Database::freelist_db().dbi())?.page_size() as usize; let freelist_size = freelist * pagesize; let mut row = Row::new(); diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index 7d62384c164..9d667e601dd 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -137,7 +137,8 @@ where for (k, _, v, _) in input { crsr.append(k, &v).expect("submit"); } - tx.inner.commit().unwrap() + drop(crsr); + tx.commit().unwrap() }, ) }); @@ -157,8 +158,8 @@ where let (k, _, v, _) = input.get(index).unwrap().clone(); crsr.insert(k, &v).expect("submit"); } - - tx.inner.commit().unwrap() + drop(crsr); + tx.commit().unwrap() }, ) }); @@ -219,7 +220,8 @@ where for (k, _, v, _) in input { crsr.append_dup(k, v).expect("submit"); } - tx.inner.commit().unwrap() + drop(crsr); + tx.commit().unwrap() }, ) }); @@ -239,7 +241,7 @@ where let (k, _, v, _) = input.get(index).unwrap().clone(); tx.put::(k, v).unwrap(); } - tx.inner.commit().unwrap(); + tx.commit().unwrap() }, ) }); diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index b55965e1e74..ed21213be56 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -16,10 +16,9 @@ use reth_db_api::{ cursor::DbCursorRW, database::Database, table::{Table, TableRow}, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, }; use reth_fs_util as fs; -use std::hint::black_box; mod utils; use utils::*; @@ -178,17 +177,13 @@ fn append(db: DatabaseEnv, input: Vec<(::Key, ::Value where T: Table, { - { - let tx = db.tx_mut().expect("tx"); - let 
mut crsr = tx.cursor_write::().expect("cursor"); - black_box({ - for (k, v) in input { - crsr.append(k, &v).expect("submit"); - } - - tx.inner.commit().unwrap() - }); + let tx = db.tx_mut().expect("tx"); + let mut crsr = tx.cursor_write::().expect("cursor"); + for (k, v) in input { + crsr.append(k, &v).expect("submit"); } + drop(crsr); + tx.commit().unwrap(); db } @@ -196,17 +191,13 @@ fn insert(db: DatabaseEnv, input: Vec<(::Key, ::Value where T: Table, { - { - let tx = db.tx_mut().expect("tx"); - let mut crsr = tx.cursor_write::().expect("cursor"); - black_box({ - for (k, v) in input { - crsr.insert(k, &v).expect("submit"); - } - - tx.inner.commit().unwrap() - }); + let tx = db.tx_mut().expect("tx"); + let mut crsr = tx.cursor_write::().expect("cursor"); + for (k, v) in input { + crsr.insert(k, &v).expect("submit"); } + drop(crsr); + tx.commit().unwrap(); db } @@ -214,16 +205,11 @@ fn put(db: DatabaseEnv, input: Vec<(::Key, ::Value)>) where T: Table, { - { - let tx = db.tx_mut().expect("tx"); - black_box({ - for (k, v) in input { - tx.put::(k, v).expect("submit"); - } - - tx.inner.commit().unwrap() - }); + let tx = db.tx_mut().expect("tx"); + for (k, v) in input { + tx.put::(k, v).expect("submit"); } + tx.commit().unwrap(); db } @@ -243,11 +229,11 @@ where T: Table, { db.view(|tx| { - let table_db = tx.inner.open_db(Some(T::NAME)).map_err(|_| "Could not open db.").unwrap(); + let table_db = tx.inner().open_db(Some(T::NAME)).map_err(|_| "Could not open db.").unwrap(); println!( "{:?}\n", - tx.inner + tx.inner() .db_stat(table_db.dbi()) .map_err(|_| format!("Could not find table: {}", T::NAME)) .map(|stats| { diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 8c430342e72..5abad07c4d2 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -5,7 +5,7 @@ use alloy_primitives::Bytes; use reth_db::{test_utils::create_test_rw_db_with_path, DatabaseEnv}; use reth_db_api::{ table::{Compress, 
Encode, Table, TableRow}, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, Database, }; use reth_fs_util as fs; @@ -68,7 +68,7 @@ where for (k, _, v, _) in pair.clone() { tx.put::(k, v).expect("submit"); } - tx.inner.commit().unwrap(); + tx.commit().unwrap(); } db.into_inner_db() diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 07b09b3ef50..8a95115eee4 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -274,10 +274,11 @@ impl DatabaseMetrics for DatabaseEnv { let _ = self .view(|tx| { for table in Tables::ALL.iter().map(Tables::name) { - let table_db = tx.inner.open_db(Some(table)).wrap_err("Could not open db.")?; + let table_db = + tx.inner().open_db(Some(table)).wrap_err("Could not open db.")?; let stats = tx - .inner + .inner() .db_stat(table_db.dbi()) .wrap_err(format!("Could not find table: {table}"))?; diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index c693c5a80de..5b4acad700d 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -30,7 +30,7 @@ const LONG_TRANSACTION_DURATION: Duration = Duration::from_secs(60); #[derive(Debug)] pub struct Tx { /// Libmdbx-sys transaction. - pub inner: Transaction, + inner: Transaction, /// Cached MDBX DBIs for reuse. dbis: Arc>, @@ -62,6 +62,11 @@ impl Tx { Ok(Self { inner, dbis, metrics_handler }) } + /// Returns a reference to the inner libmdbx transaction. + pub const fn inner(&self) -> &Transaction { + &self.inner + } + /// Gets this transaction ID. 
pub fn id(&self) -> reth_libmdbx::Result { self.metrics_handler.as_ref().map_or_else(|| self.inner.id(), |handler| Ok(handler.txn_id)) From 928bf37297cb5a0a5ed5393f2bdb1503e46f18a4 Mon Sep 17 00:00:00 2001 From: katikatidimon <140461101+katikatidimon@users.noreply.github.com> Date: Tue, 27 Jan 2026 23:26:31 +0100 Subject: [PATCH 245/267] perf: avoid cloning prefix sets in `TrieWitness::compute` (#21352) --- crates/trie/trie/src/witness.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index de444815fee..1a2dbf2d6a4 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -115,9 +115,10 @@ where } else { self.get_proof_targets(&state)? }; + let prefix_sets = core::mem::take(&mut self.prefix_sets); let multiproof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) - .with_prefix_sets_mut(self.prefix_sets.clone()) + .with_prefix_sets_mut(prefix_sets) .multiproof(proof_targets.clone())?; // No need to reconstruct the rest of the trie, we just need to include From 1ca62d069666bcbd4c3c1dfc8e745f25fec652d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E3=81=8B=E3=82=8A=E3=82=93=E3=81=A8=E3=81=86?= Date: Tue, 27 Jan 2026 23:59:58 +0100 Subject: [PATCH 246/267] fix(rpc): populate block fields in mev_simBundle logs (#21491) --- crates/rpc/rpc/src/eth/sim_bundle.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 9701b708516..48dbe4efa76 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -318,9 +318,9 @@ where .map(|inner| { let full_log = alloy_rpc_types_eth::Log { inner, - block_hash: None, - block_number: None, - block_timestamp: None, + block_hash: Some(current_block.hash()), + block_number: Some(current_block.number()), + block_timestamp: Some(current_block.timestamp()), 
transaction_hash: Some(*item.tx.tx_hash()), transaction_index: Some(tx_index as u64), log_index: Some(log_index), From 8417ddc0e84dd790212090eaf2aaf8a73c54d43b Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 28 Jan 2026 11:48:11 +0100 Subject: [PATCH 247/267] fix(engine): guard receipt streaming against duplicate indices (#21512) --- .../payload_processor/receipt_root_task.rs | 18 ++++++++++++++++-- .../engine/tree/src/tree/payload_validator.rs | 17 +++++++++++++---- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs b/crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs index 993a57326dd..08dfc575e8f 100644 --- a/crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs +++ b/crates/engine/tree/src/tree/payload_processor/receipt_root_task.rs @@ -77,8 +77,22 @@ impl ReceiptRootTaskHandle { receipt_with_bloom.encode_2718(&mut encode_buf); aggregated_bloom |= *receipt_with_bloom.bloom_ref(); - builder.push_unchecked(indexed_receipt.index, &encode_buf); - received_count += 1; + match builder.push(indexed_receipt.index, &encode_buf) { + Ok(()) => { + received_count += 1; + } + Err(err) => { + // If a duplicate or out-of-bounds index is streamed, skip it and + // fall back to computing the receipt root from the full receipts + // vector later. 
+ tracing::error!( + target: "engine::tree::payload_processor", + index = indexed_receipt.index, + ?err, + "Receipt root task received invalid receipt index, skipping" + ); + } + } } let Ok(root) = builder.finalize() else { diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 637d9fb2ad8..263c03957f4 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -792,6 +792,11 @@ where // Execute transactions let exec_span = debug_span!(target: "engine::tree", "execution").entered(); let mut transactions = transactions.into_iter(); + // Some executors may execute transactions that do not append receipts during the + // main loop (e.g., system transactions whose receipts are added during finalization). + // In that case, invoking the callback on every transaction would resend the previous + // receipt with the same index and can panic the ordered root builder. + let mut last_sent_len = 0usize; loop { // Measure time spent waiting for next transaction from iterator // (e.g., parallel signature recovery) @@ -818,10 +823,14 @@ where let gas_used = executor.execute_transaction(tx)?; self.metrics.record_transaction_execution(tx_start.elapsed()); - // Send the latest receipt to the background task for incremental root computation - if let Some(receipt) = executor.receipts().last() { - let tx_index = executor.receipts().len() - 1; - let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); + let current_len = executor.receipts().len(); + if current_len > last_sent_len { + last_sent_len = current_len; + // Send the latest receipt to the background task for incremental root computation. 
+ if let Some(receipt) = executor.receipts().last() { + let tx_index = current_len - 1; + let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); + } } enter.record("gas_used", gas_used); From 42765890b5ccb746139ce2a3063cb181ac5bb987 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 28 Jan 2026 11:54:50 +0100 Subject: [PATCH 248/267] feat(trie): Enable proofs v2 by default (#21434) --- crates/engine/primitives/src/config.rs | 39 +++++++++++++------ .../tree/src/tree/payload_processor/mod.rs | 2 +- crates/node/core/src/args/engine.rs | 24 ++++++------ docs/vocs/docs/pages/cli/op-reth/node.mdx | 4 +- docs/vocs/docs/pages/cli/reth/node.mdx | 4 +- 5 files changed, 45 insertions(+), 28 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index aecf92eb48f..0acd4425e46 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -34,6 +34,11 @@ fn default_account_worker_count() -> usize { /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 60; +/// The size of proof targets chunk to spawn in one multiproof calculation when V2 proofs are +/// enabled. This is 4x the default chunk size to take advantage of more efficient V2 proof +/// computation. +pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2: usize = DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE * 4; + /// Default number of reserved CPU cores for non-reth processes. /// /// This will be deducted from the thread count of main reth global threadpool. @@ -143,8 +148,8 @@ pub struct TreeConfig { storage_worker_count: usize, /// Number of account proof worker threads. account_worker_count: usize, - /// Whether to enable V2 storage proofs. - enable_proof_v2: bool, + /// Whether to disable V2 storage proofs. + disable_proof_v2: bool, /// Whether to disable cache metrics recording (can be expensive with large cached state). 
disable_cache_metrics: bool, } @@ -174,7 +179,7 @@ impl Default for TreeConfig { allow_unwind_canonical_header: false, storage_worker_count: default_storage_worker_count(), account_worker_count: default_account_worker_count(), - enable_proof_v2: false, + disable_proof_v2: false, disable_cache_metrics: false, } } @@ -206,7 +211,7 @@ impl TreeConfig { allow_unwind_canonical_header: bool, storage_worker_count: usize, account_worker_count: usize, - enable_proof_v2: bool, + disable_proof_v2: bool, disable_cache_metrics: bool, ) -> Self { Self { @@ -232,7 +237,7 @@ impl TreeConfig { allow_unwind_canonical_header, storage_worker_count, account_worker_count, - enable_proof_v2, + disable_proof_v2, disable_cache_metrics, } } @@ -272,6 +277,18 @@ impl TreeConfig { self.multiproof_chunk_size } + /// Return the multiproof task chunk size, using the V2 default if V2 proofs are enabled + /// and the chunk size is at the default value. + pub const fn effective_multiproof_chunk_size(&self) -> usize { + if !self.disable_proof_v2 && + self.multiproof_chunk_size == DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE + { + DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE_V2 + } else { + self.multiproof_chunk_size + } + } + /// Return the number of reserved CPU cores for non-reth processes pub const fn reserved_cpu_cores(&self) -> usize { self.reserved_cpu_cores @@ -502,14 +519,14 @@ impl TreeConfig { self } - /// Return whether V2 storage proofs are enabled. - pub const fn enable_proof_v2(&self) -> bool { - self.enable_proof_v2 + /// Return whether V2 storage proofs are disabled. + pub const fn disable_proof_v2(&self) -> bool { + self.disable_proof_v2 } - /// Setter for whether to enable V2 storage proofs. - pub const fn with_enable_proof_v2(mut self, enable_proof_v2: bool) -> Self { - self.enable_proof_v2 = enable_proof_v2; + /// Setter for whether to disable V2 storage proofs. 
+ pub const fn with_disable_proof_v2(mut self, disable_proof_v2: bool) -> Self { + self.disable_proof_v2 = disable_proof_v2; self } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 5d5fac93ee3..d856c3ba7be 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -238,7 +238,7 @@ where let (to_multi_proof, from_multi_proof) = crossbeam_channel::unbounded(); // Extract V2 proofs flag early so we can pass it to prewarm - let v2_proofs_enabled = config.enable_proof_v2(); + let v2_proofs_enabled = !config.disable_proof_v2(); // Handle BAL-based optimization if available let prewarm_handle = if let Some(bal) = bal { diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 9c3864b367a..75407b2773a 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -35,7 +35,7 @@ pub struct DefaultEngineValues { allow_unwind_canonical_header: bool, storage_worker_count: Option, account_worker_count: Option, - enable_proof_v2: bool, + disable_proof_v2: bool, cache_metrics_disabled: bool, } @@ -161,9 +161,9 @@ impl DefaultEngineValues { self } - /// Set whether to enable proof V2 by default - pub const fn with_enable_proof_v2(mut self, v: bool) -> Self { - self.enable_proof_v2 = v; + /// Set whether to disable proof V2 by default + pub const fn with_disable_proof_v2(mut self, v: bool) -> Self { + self.disable_proof_v2 = v; self } @@ -195,7 +195,7 @@ impl Default for DefaultEngineValues { allow_unwind_canonical_header: false, storage_worker_count: None, account_worker_count: None, - enable_proof_v2: false, + disable_proof_v2: false, cache_metrics_disabled: false, } } @@ -317,9 +317,9 @@ pub struct EngineArgs { #[arg(long = "engine.account-worker-count", default_value = Resettable::from(DefaultEngineValues::get_global().account_worker_count.map(|v| 
v.to_string().into())))] pub account_worker_count: Option, - /// Enable V2 storage proofs for state root calculations - #[arg(long = "engine.enable-proof-v2", default_value_t = DefaultEngineValues::get_global().enable_proof_v2)] - pub enable_proof_v2: bool, + /// Disable V2 storage proofs for state root calculations + #[arg(long = "engine.disable-proof-v2", default_value_t = DefaultEngineValues::get_global().disable_proof_v2)] + pub disable_proof_v2: bool, /// Disable cache metrics recording, which can take up to 50ms with large cached state. #[arg(long = "engine.disable-cache-metrics", default_value_t = DefaultEngineValues::get_global().cache_metrics_disabled)] @@ -348,7 +348,7 @@ impl Default for EngineArgs { allow_unwind_canonical_header, storage_worker_count, account_worker_count, - enable_proof_v2, + disable_proof_v2, cache_metrics_disabled, } = DefaultEngineValues::get_global().clone(); Self { @@ -374,7 +374,7 @@ impl Default for EngineArgs { allow_unwind_canonical_header, storage_worker_count, account_worker_count, - enable_proof_v2, + disable_proof_v2, cache_metrics_disabled, } } @@ -410,7 +410,7 @@ impl EngineArgs { config = config.with_account_worker_count(count); } - config = config.with_enable_proof_v2(self.enable_proof_v2); + config = config.with_disable_proof_v2(self.disable_proof_v2); config = config.without_cache_metrics(self.cache_metrics_disabled); config @@ -462,7 +462,7 @@ mod tests { allow_unwind_canonical_header: true, storage_worker_count: Some(16), account_worker_count: Some(8), - enable_proof_v2: false, + disable_proof_v2: false, cache_metrics_disabled: true, }; diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 3fe597815c4..ad3c8eff2ba 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -1001,8 +1001,8 @@ Engine: --engine.account-worker-count Configure the number of account proof workers in the Tokio blocking pool. 
If not specified, defaults to the same count as storage workers - --engine.enable-proof-v2 - Enable V2 storage proofs for state root calculations + --engine.disable-proof-v2 + Disable V2 storage proofs for state root calculations --engine.disable-cache-metrics Disable cache metrics recording, which can take up to 50ms with large cached state diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 31896c640ac..6105ff7f008 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -1001,8 +1001,8 @@ Engine: --engine.account-worker-count Configure the number of account proof workers in the Tokio blocking pool. If not specified, defaults to the same count as storage workers - --engine.enable-proof-v2 - Enable V2 storage proofs for state root calculations + --engine.disable-proof-v2 + Disable V2 storage proofs for state root calculations --engine.disable-cache-metrics Disable cache metrics recording, which can take up to 50ms with large cached state From 231292b58e126a21da3289fdb974dccbec16044e Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 28 Jan 2026 11:03:49 +0000 Subject: [PATCH 249/267] fix(provider): cap static file changeset iteration to highest available block (#21510) --- .../src/providers/static_file/manager.rs | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 79b7b2a3d92..14beb0a4d89 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -50,7 +50,7 @@ use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWri use std::{ collections::BTreeMap, fmt::Debug, - ops::{Deref, Range, RangeBounds, RangeInclusive}, + ops::{Bound, Deref, Range, RangeBounds, 
RangeInclusive}, path::{Path, PathBuf}, sync::{atomic::AtomicU64, mpsc, Arc}, thread, @@ -1879,6 +1879,33 @@ impl StaticFileProvider { self.indexes.read().get(segment).map(|index| index.max_block) } + /// Converts a range to a bounded `RangeInclusive` capped to the highest static file block. + /// + /// This is necessary because static file iteration beyond the tip would loop forever: + /// blocks beyond the static file tip return `Ok(empty)` which is indistinguishable from + /// blocks with no changes. We cap the end to the highest available block regardless of + /// whether the input was unbounded or an explicit large value like `BlockNumber::MAX`. + fn bound_range( + &self, + range: impl RangeBounds, + segment: StaticFileSegment, + ) -> RangeInclusive { + let highest_block = self.get_highest_static_file_block(segment).unwrap_or(0); + + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n.saturating_add(1), + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(&n) => n.min(highest_block), + Bound::Excluded(&n) => n.saturating_sub(1).min(highest_block), + Bound::Unbounded => highest_block, + }; + + start..=end + } + /// Gets the highest static file transaction. /// /// If there is nothing on disk for the given segment, this will return [`None`]. 
@@ -2354,6 +2381,7 @@ impl ChangeSetReader for StaticFileProvider { &self, range: impl core::ops::RangeBounds, ) -> ProviderResult> { + let range = self.bound_range(range, StaticFileSegment::AccountChangeSets); self.walk_account_changeset_range(range).collect() } @@ -2473,6 +2501,7 @@ impl StorageChangeSetReader for StaticFileProvider { &self, range: RangeInclusive, ) -> ProviderResult> { + let range = self.bound_range(range, StaticFileSegment::StorageChangeSets); self.walk_storage_changeset_range(range).collect() } From e0a0a0d5fbfe706003f971a42cc46b146316944f Mon Sep 17 00:00:00 2001 From: katikatidimon <140461101+katikatidimon@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:33:10 +0100 Subject: [PATCH 250/267] refactor: remove redundant clone() in CursorSubNode::new (#21493) --- crates/trie/trie/src/trie_cursor/subnode.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs index 9c9b5e03d7d..4bb3747a4cb 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -60,8 +60,9 @@ impl CursorSubNode { let position = node.as_ref().filter(|n| n.root_hash.is_none()).map_or( SubNodePosition::ParentBranch, |n| { + let mut child_index_range = CHILD_INDEX_RANGE; SubNodePosition::Child( - CHILD_INDEX_RANGE.clone().find(|i| n.state_mask.is_bit_set(*i)).unwrap(), + child_index_range.find(|i| n.state_mask.is_bit_set(*i)).unwrap(), ) }, ); From 6aa91b002030acfa7d137c7d3d3342edd9b77d48 Mon Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:39:08 +0200 Subject: [PATCH 251/267] perf(trie-db): preallocate vectors in changeset computation (#21465) --- crates/trie/db/src/changesets.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/trie/db/src/changesets.rs b/crates/trie/db/src/changesets.rs index deccb1df456..8b29273a0b7 100644 
--- a/crates/trie/db/src/changesets.rs +++ b/crates/trie/db/src/changesets.rs @@ -219,11 +219,12 @@ where let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts); // Step 5: Collect all account trie nodes that changed in the target block - let mut account_nodes = Vec::new(); + let account_nodes_ref = changesets.account_nodes_ref(); + let mut account_nodes = Vec::with_capacity(account_nodes_ref.len()); let mut account_cursor = cursor_factory.account_trie_cursor()?; // Iterate over the account nodes from the changesets - for (nibbles, _old_node) in changesets.account_nodes_ref() { + for (nibbles, _old_node) in account_nodes_ref { // Look up the current value of this trie node using the overlay cursor let node_value = account_cursor.seek_exact(*nibbles)?.map(|(_, node)| node); account_nodes.push((*nibbles, node_value)); @@ -235,10 +236,11 @@ where // Iterate over the storage tries from the changesets for (hashed_address, storage_changeset) in changesets.storage_tries_ref() { let mut storage_cursor = cursor_factory.storage_trie_cursor(*hashed_address)?; - let mut storage_nodes = Vec::new(); + let storage_nodes_ref = storage_changeset.storage_nodes_ref(); + let mut storage_nodes = Vec::with_capacity(storage_nodes_ref.len()); // Iterate over the storage nodes for this account - for (nibbles, _old_node) in storage_changeset.storage_nodes_ref() { + for (nibbles, _old_node) in storage_nodes_ref { // Look up the current value of this storage trie node let node_value = storage_cursor.seek_exact(*nibbles)?.map(|(_, node)| node); storage_nodes.push((*nibbles, node_value)); From d53858b3e243785cbe19b29def81f76a9547e8a2 Mon Sep 17 00:00:00 2001 From: ligt Date: Wed, 28 Jan 2026 19:43:30 +0700 Subject: [PATCH 252/267] chore(engine): simplify EngineApiTreeHandler type inference (#21503) --- crates/engine/service/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/engine/service/src/service.rs 
b/crates/engine/service/src/service.rs index 496f994fc43..1983421ead3 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -101,7 +101,7 @@ where let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( blockchain_db, consensus, payload_validator, From 48a999a81b29ee0daaf893c6aae8737a3a77dc4d Mon Sep 17 00:00:00 2001 From: bobtajson <152420524+bobtajson@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:46:19 +0100 Subject: [PATCH 253/267] refactor: using iterator over references (#21506) --- crates/trie/trie/src/proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index cba010f2e11..f9059b1c3c5 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -367,7 +367,7 @@ where let target_nibbles = targets.into_iter().map(Nibbles::unpack).collect::>(); let mut prefix_set = self.prefix_set; - prefix_set.extend_keys(target_nibbles.clone()); + prefix_set.extend_keys(target_nibbles.iter().copied()); let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; From 497985ca86503394263ccee011e6de8b1700989b Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 28 Jan 2026 05:41:55 -0800 Subject: [PATCH 254/267] fix(prune): improve pruner log readability (#21522) Co-authored-by: Amp Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- Cargo.lock | 1 + crates/prune/prune/src/pruner.rs | 16 +---------- crates/prune/types/Cargo.toml | 4 ++- crates/prune/types/src/pruner.rs | 47 +++++++++++++++++++++++++++++++- 4 files changed, 51 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f424bf169e..12b99042572 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10248,6 +10248,7 @@ dependencies = 
[ "strum 0.27.2", "thiserror 2.0.18", "toml", + "tracing", ] [[package]] diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index b700d61028f..bb55f75cb0f 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -149,21 +149,7 @@ where let elapsed = start.elapsed(); self.metrics.duration_seconds.record(elapsed); - let message = match output.progress { - PruneProgress::HasMoreData(_) => "Pruner interrupted and has more data to prune", - PruneProgress::Finished => "Pruner finished", - }; - - debug!( - target: "pruner", - %tip_block_number, - ?elapsed, - ?deleted_entries, - ?limiter, - ?output, - ?stats, - "{message}", - ); + output.debug_log(tip_block_number, deleted_entries, elapsed); self.event_sender.notify(PrunerEvent::Finished { tip_block_number, elapsed, stats }); diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 30adbb14d91..5eba313683f 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -18,6 +18,7 @@ alloy-primitives.workspace = true derive_more.workspace = true strum = { workspace = true, features = ["derive"] } thiserror.workspace = true +tracing.workspace = true modular-bitfield = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"], optional = true } @@ -42,8 +43,9 @@ std = [ "derive_more/std", "serde?/std", "serde_json/std", - "thiserror/std", "strum/std", + "thiserror/std", + "tracing/std", ] test-utils = [ "std", diff --git a/crates/prune/types/src/pruner.rs b/crates/prune/types/src/pruner.rs index fcac3da139f..1ad3ca4ecda 100644 --- a/crates/prune/types/src/pruner.rs +++ b/crates/prune/types/src/pruner.rs @@ -1,7 +1,9 @@ use crate::{PruneCheckpoint, PruneMode, PruneSegment}; -use alloc::vec::Vec; +use alloc::{format, string::ToString, vec::Vec}; use alloy_primitives::{BlockNumber, TxNumber}; +use core::time::Duration; use derive_more::Display; +use tracing::debug; /// Pruner run output. 
#[derive(Debug)] @@ -18,6 +20,49 @@ impl From for PrunerOutput { } } +impl PrunerOutput { + /// Logs a human-readable summary of the pruner run at DEBUG level. + /// + /// Format: `"Pruner finished tip=24328929 deleted=10886 elapsed=148ms + /// segments=AccountHistory[24318865, done] ..."` + #[inline] + pub fn debug_log( + &self, + tip_block_number: BlockNumber, + deleted_entries: usize, + elapsed: Duration, + ) { + let message = match self.progress { + PruneProgress::HasMoreData(_) => "Pruner interrupted, has more data", + PruneProgress::Finished => "Pruner finished", + }; + + let segments: Vec<_> = self + .segments + .iter() + .filter(|(_, seg)| seg.pruned > 0) + .map(|(segment, seg)| { + let block = seg + .checkpoint + .and_then(|c| c.block_number) + .map(|b| b.to_string()) + .unwrap_or_else(|| "?".to_string()); + let status = if seg.progress.is_finished() { "done" } else { "in_progress" }; + format!("{segment}[{block}, {status}]") + }) + .collect(); + + debug!( + target: "pruner", + %tip_block_number, + deleted_entries, + ?elapsed, + segments = %segments.join(" "), + "{message}", + ); + } +} + /// Represents information of a pruner run for a segment. 
#[derive(Debug, Clone, PartialEq, Eq, Display)] #[display("(table={segment}, pruned={pruned}, status={progress})")] From 747c0169a77076712574d32f91658811cdf5f089 Mon Sep 17 00:00:00 2001 From: YK Date: Wed, 28 Jan 2026 21:55:21 +0800 Subject: [PATCH 255/267] feat(trie): add prune method to SparseTrieInterface (#21427) Co-authored-by: Amp Co-authored-by: Georgios Konstantopoulos --- crates/trie/sparse-parallel/src/trie.rs | 578 +++++++++++++++++++++++- crates/trie/sparse/src/lib.rs | 6 + crates/trie/sparse/src/state.rs | 115 ++++- crates/trie/sparse/src/traits.rs | 30 ++ 4 files changed, 725 insertions(+), 4 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 7b55b66fd40..d6a1bfec937 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -15,7 +15,7 @@ use reth_trie_common::{ use reth_trie_sparse::{ provider::{RevealedNode, TrieNodeProvider}, LeafLookup, LeafLookupError, RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrie, - SparseTrieUpdates, + SparseTrieExt, SparseTrieUpdates, }; use smallvec::SmallVec; use std::cmp::{Ord, Ordering, PartialOrd}; @@ -908,6 +908,162 @@ impl SparseTrie for ParallelSparseTrie { } } +impl SparseTrieExt for ParallelSparseTrie { + /// Returns the count of revealed (non-hash) nodes across all subtries. + fn revealed_node_count(&self) -> usize { + let upper_count = self.upper_subtrie.nodes.values().filter(|n| !n.is_hash()).count(); + + let lower_count: usize = self + .lower_subtries + .iter() + .filter_map(|s| s.as_revealed_ref()) + .map(|s| s.nodes.values().filter(|n| !n.is_hash()).count()) + .sum(); + + upper_count + lower_count + } + + fn prune(&mut self, max_depth: usize) -> usize { + // DFS traversal to find nodes at max_depth that can be pruned. + // Collects "effective pruned roots" - children of nodes at max_depth with computed hashes. + // We replace nodes with Hash stubs inline during traversal. 
+ let mut effective_pruned_roots = Vec::<(Nibbles, B256)>::new(); + let mut stack: SmallVec<[(Nibbles, usize); 32]> = SmallVec::new(); + stack.push((Nibbles::default(), 0)); + + // DFS traversal: pop path and depth, skip if subtrie or node not found. + while let Some((path, depth)) = stack.pop() { + // Get children to visit from current node (immutable access) + let children: SmallVec<[Nibbles; 16]> = { + let Some(subtrie) = self.subtrie_for_path(&path) else { continue }; + let Some(node) = subtrie.nodes.get(&path) else { continue }; + + match node { + SparseNode::Empty | SparseNode::Hash(_) | SparseNode::Leaf { .. } => { + SmallVec::new() + } + SparseNode::Extension { key, .. } => { + let mut child = path; + child.extend(key); + SmallVec::from_buf_and_len([child; 16], 1) + } + SparseNode::Branch { state_mask, .. } => { + let mut children = SmallVec::new(); + let mut mask = state_mask.get(); + while mask != 0 { + let nibble = mask.trailing_zeros() as u8; + mask &= mask - 1; + let mut child = path; + child.push_unchecked(nibble); + children.push(child); + } + children + } + } + }; + + // Process children - either continue traversal or prune + for child in children { + if depth == max_depth { + // Check if child has a computed hash and replace inline + let hash = self + .subtrie_for_path(&child) + .and_then(|s| s.nodes.get(&child)) + .filter(|n| !n.is_hash()) + .and_then(|n| n.hash()); + + if let Some(hash) = hash { + self.subtrie_for_path_mut(&child) + .nodes + .insert(child, SparseNode::Hash(hash)); + effective_pruned_roots.push((child, hash)); + } + } else { + stack.push((child, depth + 1)); + } + } + } + + if effective_pruned_roots.is_empty() { + return 0; + } + + let nodes_converted = effective_pruned_roots.len(); + + // Sort roots by subtrie type (upper first), then by path for efficient partitioning. 
+ effective_pruned_roots.sort_unstable_by(|(path_a, _), (path_b, _)| { + let subtrie_type_a = SparseSubtrieType::from_path(path_a); + let subtrie_type_b = SparseSubtrieType::from_path(path_b); + subtrie_type_a.cmp(&subtrie_type_b).then(path_a.cmp(path_b)) + }); + + // Split off upper subtrie roots (they come first due to sorting) + let num_upper_roots = effective_pruned_roots + .iter() + .position(|(p, _)| !SparseSubtrieType::path_len_is_upper(p.len())) + .unwrap_or(effective_pruned_roots.len()); + + let roots_upper = &effective_pruned_roots[..num_upper_roots]; + let roots_lower = &effective_pruned_roots[num_upper_roots..]; + + debug_assert!( + { + let mut all_roots: Vec<_> = effective_pruned_roots.iter().map(|(p, _)| p).collect(); + all_roots.sort_unstable(); + all_roots.windows(2).all(|w| !w[1].starts_with(w[0])) + }, + "prune roots must be prefix-free" + ); + + // Upper prune roots that are prefixes of lower subtrie root paths cause the entire + // subtrie to be cleared (preserving allocations for reuse). 
+ if !roots_upper.is_empty() { + for subtrie in &mut self.lower_subtries { + let should_clear = subtrie.as_revealed_ref().is_some_and(|s| { + let search_idx = roots_upper.partition_point(|(root, _)| root <= &s.path); + search_idx > 0 && s.path.starts_with(&roots_upper[search_idx - 1].0) + }); + if should_clear { + subtrie.clear(); + } + } + } + + // Upper subtrie: prune nodes and values + self.upper_subtrie.nodes.retain(|p, _| !is_strict_descendant_in(roots_upper, p)); + self.upper_subtrie.inner.values.retain(|p, _| { + !starts_with_pruned_in(roots_upper, p) && !starts_with_pruned_in(roots_lower, p) + }); + + // Process lower subtries using chunk_by to group roots by subtrie + for roots_group in roots_lower.chunk_by(|(path_a, _), (path_b, _)| { + SparseSubtrieType::from_path(path_a) == SparseSubtrieType::from_path(path_b) + }) { + let subtrie_idx = path_subtrie_index_unchecked(&roots_group[0].0); + + // Skip unrevealed/blinded subtries - nothing to prune + let Some(subtrie) = self.lower_subtries[subtrie_idx].as_revealed_mut() else { + continue; + }; + + // Retain only nodes/values not descended from any pruned root. + subtrie.nodes.retain(|p, _| !is_strict_descendant_in(roots_group, p)); + subtrie.inner.values.retain(|p, _| !starts_with_pruned_in(roots_group, p)); + } + + // Branch node masks pruning + self.branch_node_masks.retain(|p, _| { + if SparseSubtrieType::path_len_is_upper(p.len()) { + !starts_with_pruned_in(roots_upper, p) + } else { + !starts_with_pruned_in(roots_lower, p) && !starts_with_pruned_in(roots_upper, p) + } + }); + + nodes_converted + } +} + impl ParallelSparseTrie { /// Sets the thresholds that control when parallelism is used during operations. pub const fn with_parallelism_thresholds(mut self, thresholds: ParallelismThresholds) -> Self { @@ -2654,6 +2810,44 @@ fn path_subtrie_index_unchecked(path: &Nibbles) -> usize { path.get_byte_unchecked(0) as usize } +/// Checks if `path` is a strict descendant of any root in a sorted slice. 
+/// +/// Uses binary search to find the candidate root that could be an ancestor. +/// Returns `true` if `path` starts with a root and is longer (strict descendant). +fn is_strict_descendant_in(roots: &[(Nibbles, B256)], path: &Nibbles) -> bool { + if roots.is_empty() { + return false; + } + debug_assert!(roots.windows(2).all(|w| w[0].0 <= w[1].0), "roots must be sorted by path"); + let idx = roots.partition_point(|(root, _)| root <= path); + if idx > 0 { + let candidate = &roots[idx - 1].0; + if path.starts_with(candidate) && path.len() > candidate.len() { + return true; + } + } + false +} + +/// Checks if `path` starts with any root in a sorted slice (inclusive). +/// +/// Uses binary search to find the candidate root that could be a prefix. +/// Returns `true` if `path` starts with a root (including exact match). +fn starts_with_pruned_in(roots: &[(Nibbles, B256)], path: &Nibbles) -> bool { + if roots.is_empty() { + return false; + } + debug_assert!(roots.windows(2).all(|w| w[0].0 <= w[1].0), "roots must be sorted by path"); + let idx = roots.partition_point(|(root, _)| root <= path); + if idx > 0 { + let candidate = &roots[idx - 1].0; + if path.starts_with(candidate) { + return true; + } + } + false +} + /// Used by lower subtries to communicate updates to the top-level [`SparseTrieUpdates`] set. 
#[derive(Clone, Debug, Eq, PartialEq)] enum SparseTrieUpdatesAction { @@ -2704,7 +2898,8 @@ mod tests { use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ provider::{DefaultTrieNodeProvider, RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrie, SparseTrieUpdates, + LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrie, SparseTrieExt, + SparseTrieUpdates, }; use std::collections::{BTreeMap, BTreeSet}; @@ -2749,6 +2944,17 @@ mod tests { Account { nonce, ..Default::default() } } + fn large_account_value() -> Vec { + let account = Account { + nonce: 0x123456789abcdef, + balance: U256::from(0x123456789abcdef0123456789abcdef_u128), + ..Default::default() + }; + let mut buf = Vec::new(); + account.into_trie_account(EMPTY_ROOT_HASH).encode(&mut buf); + buf + } + fn encode_account_value(nonce: u64) -> Vec { let account = Account { nonce, ..Default::default() }; let trie_account = account.into_trie_account(EMPTY_ROOT_HASH); @@ -7106,4 +7312,372 @@ mod tests { // Value should be retrievable assert_eq!(trie.get_leaf_value(&slot_path), Some(&slot_value)); } + + #[test] + fn test_prune_empty_suffix_key_regression() { + // Regression test: when a leaf has an empty suffix key (full path == node path), + // the value must be removed when that path becomes a pruned root. + // This catches the bug where is_strict_descendant fails to remove p == pruned_root. 
+ + use reth_trie_sparse::provider::DefaultTrieNodeProvider; + + let provider = DefaultTrieNodeProvider; + let mut parallel = ParallelSparseTrie::default(); + + // Large value to ensure nodes have hashes (RLP >= 32 bytes) + let value = { + let account = Account { + nonce: 0x123456789abcdef, + balance: U256::from(0x123456789abcdef0123456789abcdef_u128), + ..Default::default() + }; + let mut buf = Vec::new(); + account.into_trie_account(EMPTY_ROOT_HASH).encode(&mut buf); + buf + }; + + // Create a trie with multiple leaves to force a branch at root + for i in 0..16u8 { + parallel + .update_leaf( + Nibbles::from_nibbles([i, 0x1, 0x2, 0x3, 0x4, 0x5]), + value.clone(), + &provider, + ) + .unwrap(); + } + + // Compute root to get hashes + let root_before = parallel.root(); + + // Prune at depth 0: the children of root become pruned roots + parallel.prune(0); + + let root_after = parallel.root(); + assert_eq!(root_before, root_after, "root hash must be preserved"); + + // Key assertion: values under pruned paths must be removed + // With the bug, values at pruned_root paths (not strict descendants) would remain + for i in 0..16u8 { + let path = Nibbles::from_nibbles([i, 0x1, 0x2, 0x3, 0x4, 0x5]); + assert!( + parallel.get_leaf_value(&path).is_none(), + "value at {:?} should be removed after prune", + path + ); + } + } + + #[test] + fn test_prune_at_various_depths() { + for max_depth in [0, 1, 2] { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let value = large_account_value(); + + for i in 0..4u8 { + for j in 0..4u8 { + for k in 0..4u8 { + trie.update_leaf( + Nibbles::from_nibbles([i, j, k, 0x1, 0x2, 0x3]), + value.clone(), + &provider, + ) + .unwrap(); + } + } + } + + let root_before = trie.root(); + let nodes_before = trie.revealed_node_count(); + + trie.prune(max_depth); + + let root_after = trie.root(); + assert_eq!(root_before, root_after, "root hash should be preserved after prune"); + + let nodes_after = 
trie.revealed_node_count(); + assert!( + nodes_after < nodes_before, + "node count should decrease after prune at depth {max_depth}" + ); + + if max_depth == 0 { + assert_eq!(nodes_after, 1, "only root should be revealed after prune(0)"); + } + } + } + + #[test] + fn test_prune_empty_trie() { + let mut trie = ParallelSparseTrie::default(); + trie.prune(2); + let root = trie.root(); + assert_eq!(root, EMPTY_ROOT_HASH, "empty trie should have empty root hash"); + } + + #[test] + fn test_prune_preserves_root_hash() { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let value = large_account_value(); + + for i in 0..8u8 { + for j in 0..4u8 { + trie.update_leaf( + Nibbles::from_nibbles([i, j, 0x3, 0x4, 0x5, 0x6]), + value.clone(), + &provider, + ) + .unwrap(); + } + } + + let root_before = trie.root(); + trie.prune(1); + let root_after = trie.root(); + assert_eq!(root_before, root_after, "root hash must be preserved after prune"); + } + + #[test] + fn test_prune_single_leaf_trie() { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let value = large_account_value(); + trie.update_leaf(Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), value, &provider).unwrap(); + + let root_before = trie.root(); + let nodes_before = trie.revealed_node_count(); + + trie.prune(0); + + let root_after = trie.root(); + assert_eq!(root_before, root_after, "root hash should be preserved"); + assert_eq!(trie.revealed_node_count(), nodes_before, "single leaf trie should not change"); + } + + #[test] + fn test_prune_deep_depth_no_effect() { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let value = large_account_value(); + + for i in 0..4u8 { + trie.update_leaf(Nibbles::from_nibbles([i, 0x2, 0x3, 0x4]), value.clone(), &provider) + .unwrap(); + } + + trie.root(); + let nodes_before = trie.revealed_node_count(); + + trie.prune(100); + + assert_eq!(nodes_before, 
trie.revealed_node_count(), "deep prune should have no effect"); + } + + #[test] + fn test_prune_extension_node_depth_semantics() { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let value = large_account_value(); + + trie.update_leaf(Nibbles::from_nibbles([0, 1, 2, 3, 0, 5, 6, 7]), value.clone(), &provider) + .unwrap(); + trie.update_leaf(Nibbles::from_nibbles([0, 1, 2, 3, 1, 5, 6, 7]), value, &provider) + .unwrap(); + + let root_before = trie.root(); + trie.prune(1); + + assert_eq!(root_before, trie.root(), "root hash should be preserved"); + assert_eq!(trie.revealed_node_count(), 2, "should have root + extension after prune(1)"); + } + + #[test] + fn test_prune_embedded_node_preserved() { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let small_value = vec![0x80]; + trie.update_leaf(Nibbles::from_nibbles([0x0]), small_value.clone(), &provider).unwrap(); + trie.update_leaf(Nibbles::from_nibbles([0x1]), small_value, &provider).unwrap(); + + let root_before = trie.root(); + let nodes_before = trie.revealed_node_count(); + + trie.prune(0); + + assert_eq!(root_before, trie.root(), "root hash must be preserved"); + + if trie.revealed_node_count() == nodes_before { + assert!(trie.get_leaf_value(&Nibbles::from_nibbles([0x0])).is_some()); + assert!(trie.get_leaf_value(&Nibbles::from_nibbles([0x1])).is_some()); + } + } + + #[test] + fn test_prune_mixed_embedded_and_hashed() { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let large_value = large_account_value(); + let small_value = vec![0x80]; + + for i in 0..8u8 { + let value = if i < 4 { large_value.clone() } else { small_value.clone() }; + trie.update_leaf(Nibbles::from_nibbles([i, 0x1, 0x2, 0x3]), value, &provider).unwrap(); + } + + let root_before = trie.root(); + trie.prune(0); + assert_eq!(root_before, trie.root(), "root hash must be preserved"); + } + + #[test] + fn 
test_prune_many_lower_subtries() { + let provider = DefaultTrieNodeProvider; + + let large_value = large_account_value(); + + let mut keys = Vec::new(); + for first in 0..16u8 { + for second in 0..16u8 { + keys.push(Nibbles::from_nibbles([first, second, 0x1, 0x2, 0x3, 0x4])); + } + } + + let mut trie = ParallelSparseTrie::default(); + + for key in &keys { + trie.update_leaf(*key, large_value.clone(), &provider).unwrap(); + } + + let root_before = trie.root(); + let pruned = trie.prune(1); + + assert!(pruned > 0, "should have pruned some nodes"); + assert_eq!(root_before, trie.root(), "root hash should be preserved"); + + for key in &keys { + assert!(trie.get_leaf_value(key).is_none(), "value should be pruned"); + } + } + + #[test] + #[ignore = "profiling test - run manually"] + fn test_prune_profile() { + use std::time::Instant; + + let provider = DefaultTrieNodeProvider; + let large_value = large_account_value(); + + // Generate 65536 keys (16^4) for a large trie + let mut keys = Vec::with_capacity(65536); + for a in 0..16u8 { + for b in 0..16u8 { + for c in 0..16u8 { + for d in 0..16u8 { + keys.push(Nibbles::from_nibbles([a, b, c, d, 0x5, 0x6, 0x7, 0x8])); + } + } + } + } + + // Build base trie once + let mut base_trie = ParallelSparseTrie::default(); + for key in &keys { + base_trie.update_leaf(*key, large_value.clone(), &provider).unwrap(); + } + base_trie.root(); // ensure hashes computed + + // Pre-clone tries to exclude clone time from profiling + let iterations = 100; + let mut tries: Vec<_> = (0..iterations).map(|_| base_trie.clone()).collect(); + + // Measure only prune() + let mut total_pruned = 0; + let start = Instant::now(); + for trie in &mut tries { + total_pruned += trie.prune(2); + } + let elapsed = start.elapsed(); + + println!( + "Prune benchmark: {} iterations, total: {:?}, avg: {:?}, pruned/iter: {}", + iterations, + elapsed, + elapsed / iterations as u32, + total_pruned / iterations + ); + } + + #[test] + fn test_prune_max_depth_overflow() { 
+ // Verify that max_depth > 255 is not truncated (was u8, now usize) + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let value = large_account_value(); + + for i in 0..4u8 { + trie.update_leaf(Nibbles::from_nibbles([i, 0x1, 0x2, 0x3]), value.clone(), &provider) + .unwrap(); + } + + trie.root(); + let nodes_before = trie.revealed_node_count(); + + // If depth were truncated to u8, 300 would become 44 and might prune something + trie.prune(300); + + assert_eq!( + nodes_before, + trie.revealed_node_count(), + "prune(300) should have no effect on a shallow trie" + ); + } + + #[test] + fn test_prune_fast_path_case2_update_after() { + // Test fast-path Case 2: upper prune root is prefix of lower subtrie. + // After pruning, we should be able to update leaves without panic. + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default(); + + let value = large_account_value(); + + // Create keys that span into lower subtries (path.len() >= UPPER_TRIE_MAX_DEPTH) + // UPPER_TRIE_MAX_DEPTH is typically 2, so paths of length 3+ go to lower subtries + for first in 0..4u8 { + for second in 0..4u8 { + trie.update_leaf( + Nibbles::from_nibbles([first, second, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6]), + value.clone(), + &provider, + ) + .unwrap(); + } + } + + let root_before = trie.root(); + + // Prune at depth 0 - upper roots become prefixes of lower subtrie paths + trie.prune(0); + + let root_after = trie.root(); + assert_eq!(root_before, root_after, "root hash should be preserved"); + + // Now try to update a leaf - this should not panic even though lower subtries + // were replaced with Blind(None) + let new_path = Nibbles::from_nibbles([0x5, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); + trie.update_leaf(new_path, value, &provider).unwrap(); + + // The trie should still be functional + let _ = trie.root(); + } } diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index 6b175970481..d63027fde1d 100644 
--- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -5,6 +5,12 @@ extern crate alloc; +/// Default depth to prune sparse tries to for cross-payload caching. +pub const DEFAULT_SPARSE_TRIE_PRUNE_DEPTH: usize = 4; + +/// Default number of storage tries to preserve across payload validations. +pub const DEFAULT_MAX_PRESERVED_STORAGE_TRIES: usize = 100; + mod state; pub use state::*; diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 84f57cde78e..1b032b8dda5 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,6 +1,6 @@ use crate::{ provider::{TrieNodeProvider, TrieNodeProviderFactory}, - traits::SparseTrie as SparseTrieTrait, + traits::{SparseTrie as SparseTrieTrait, SparseTrieExt}, RevealableSparseTrie, SerialSparseTrie, }; use alloc::{collections::VecDeque, vec::Vec}; @@ -972,6 +972,117 @@ where } } +impl SparseStateTrie +where + A: SparseTrieTrait + SparseTrieExt + Default, + S: SparseTrieTrait + SparseTrieExt + Default + Clone, +{ + /// Minimum number of storage tries before parallel pruning is enabled. + const PARALLEL_PRUNE_THRESHOLD: usize = 16; + + /// Returns true if parallelism should be enabled for pruning the given number of tries. + /// Will always return false in `no_std` builds. + const fn is_prune_parallelism_enabled(num_tries: usize) -> bool { + #[cfg(not(feature = "std"))] + return false; + + num_tries >= Self::PARALLEL_PRUNE_THRESHOLD + } + + /// Prunes the account trie and selected storage tries to reduce memory usage. + /// + /// Storage tries not in the top `max_storage_tries` by revealed node count are cleared + /// entirely. + /// + /// # Preconditions + /// + /// Node hashes must be computed via `root()` before calling this method. Otherwise, nodes + /// cannot be converted to hash stubs and pruning will have no effect. 
+ /// + /// # Effects + /// + /// - Clears `revealed_account_paths` and `revealed_paths` for all storage tries + pub fn prune(&mut self, max_depth: usize, max_storage_tries: usize) { + if let Some(trie) = self.state.as_revealed_mut() { + trie.prune(max_depth); + } + self.revealed_account_paths.clear(); + + let mut storage_trie_counts: Vec<(B256, usize)> = self + .storage + .tries + .iter() + .map(|(hash, trie)| { + let count = match trie { + RevealableSparseTrie::Revealed(t) => t.revealed_node_count(), + RevealableSparseTrie::Blind(_) => 0, + }; + (*hash, count) + }) + .collect(); + + // Use O(n) selection instead of O(n log n) sort + let tries_to_keep: HashSet = if storage_trie_counts.len() <= max_storage_tries { + storage_trie_counts.iter().map(|(hash, _)| *hash).collect() + } else { + storage_trie_counts + .select_nth_unstable_by(max_storage_tries.saturating_sub(1), |a, b| b.1.cmp(&a.1)); + storage_trie_counts[..max_storage_tries].iter().map(|(hash, _)| *hash).collect() + }; + + // Collect keys to avoid borrow conflict + let tries_to_clear: Vec = self + .storage + .tries + .keys() + .filter(|hash| !tries_to_keep.contains(*hash)) + .copied() + .collect(); + + // Evict storage tries that exceeded limit, saving cleared allocations for reuse + for hash in tries_to_clear { + if let Some(trie) = self.storage.tries.remove(&hash) { + self.storage.cleared_tries.push(trie.clear()); + } + if let Some(mut paths) = self.storage.revealed_paths.remove(&hash) { + paths.clear(); + self.storage.cleared_revealed_paths.push(paths); + } + } + + // Prune storage tries that are kept + if Self::is_prune_parallelism_enabled(tries_to_keep.len()) { + #[cfg(feature = "std")] + { + use rayon::prelude::*; + + self.storage.tries.par_iter_mut().for_each(|(hash, trie)| { + if tries_to_keep.contains(hash) && + let Some(t) = trie.as_revealed_mut() + { + t.prune(max_depth); + } + }); + } + } else { + for hash in &tries_to_keep { + if let Some(trie) = + self.storage.tries.get_mut(hash).and_then(|t| 
t.as_revealed_mut()) + { + trie.prune(max_depth); + } + } + } + + // Clear revealed_paths for kept tries + for hash in &tries_to_keep { + if let Some(paths) = self.storage.revealed_paths.get_mut(hash) { + paths.clear(); + } + } + } +} + /// The fields of [`SparseStateTrie`] related to storage tries. This is kept separate from the rest /// of [`SparseStateTrie`] both to help enforce allocation re-use and to allow us to implement /// methods like `get_trie_and_revealed_paths` which return multiple mutable borrows. @@ -1260,7 +1371,7 @@ mod tests { use reth_trie::{updates::StorageTrieUpdates, HashBuilder, MultiProof, EMPTY_ROOT_HASH}; use reth_trie_common::{ proof::{ProofNodes, ProofRetainer}, - BranchNode, BranchNodeMasks, LeafNode, StorageMultiProof, TrieMask, + BranchNode, BranchNodeMasks, BranchNodeMasksMap, LeafNode, StorageMultiProof, TrieMask, }; #[test] diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 15f474c6a2c..e235cead63f 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -232,6 +232,36 @@ pub trait SparseTrie: Sized + Debug + Send + Sync { fn shrink_values_to(&mut self, size: usize); } +/// Extension trait for sparse tries that support pruning. +/// +/// This trait provides the `prune` method for sparse trie implementations that support +/// converting nodes beyond a certain depth into hash stubs. This is useful for reducing +/// memory usage when caching tries across payload validations. +pub trait SparseTrieExt: SparseTrie { + /// Returns the number of revealed (non-Hash) nodes in the trie. + fn revealed_node_count(&self) -> usize; + + /// Replaces nodes beyond `max_depth` with hash stubs and removes their descendants. + /// + /// Depth counts nodes traversed (not nibbles), so extension nodes count as 1 depth + /// regardless of key length. `max_depth == 0` prunes all children of the root node. 
+ /// + /// # Preconditions + /// + /// Must be called after `root()` to ensure all nodes have computed hashes. + /// Calling on a trie without computed hashes will result in no pruning. + /// + /// # Behavior + /// + /// - Embedded nodes (RLP < 32 bytes) are preserved since they have no hash + /// - Returns 0 if `max_depth` exceeds trie depth or trie is empty + /// + /// # Returns + /// + /// The number of nodes converted to hash stubs. + fn prune(&mut self, max_depth: usize) -> usize; +} + /// Tracks modifications to the sparse trie structure. /// /// Maintains references to both modified and pruned/removed branches, enabling From 88eb0beeb2332deceee48cd3bd73a742b03c9e12 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 28 Jan 2026 09:53:17 -0500 Subject: [PATCH 256/267] chore(op-reth): remove op-reth dependencies from core reth library crates (#21492) --- Cargo.lock | 5 +-- crates/engine/local/Cargo.toml | 3 -- crates/engine/local/src/payload.rs | 13 +++++-- crates/optimism/chainspec/src/constants.rs | 12 ------ crates/optimism/rpc/src/eth/transaction.rs | 23 +++++++++-- crates/rpc/rpc-convert/Cargo.toml | 4 -- crates/rpc/rpc-convert/src/lib.rs | 3 -- crates/rpc/rpc-convert/src/receipt.rs | 2 +- crates/rpc/rpc-convert/src/transaction.rs | 44 +++------------------- crates/storage/db-api/Cargo.toml | 6 +-- crates/storage/db-api/src/models/mod.rs | 4 +- 11 files changed, 41 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 12b99042572..f523fa4b816 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8041,6 +8041,7 @@ dependencies = [ "derive_more", "metrics", "modular-bitfield", + "op-alloy-consensus", "parity-scale-codec", "proptest", "proptest-arbitrary-interop", @@ -8048,7 +8049,6 @@ dependencies = [ "reth-codecs", "reth-db-models", "reth-ethereum-primitives", - "reth-optimism-primitives", "reth-primitives-traits", "reth-prune-types", "reth-stages-types", @@ -8319,7 +8319,6 @@ dependencies = [ 
"reth-chainspec", "reth-engine-primitives", "reth-ethereum-engine-primitives", - "reth-optimism-chainspec", "reth-payload-builder", "reth-payload-primitives", "reth-primitives-traits", @@ -10526,9 +10525,7 @@ dependencies = [ "op-alloy-rpc-types", "reth-ethereum-primitives", "reth-evm", - "reth-optimism-primitives", "reth-primitives-traits", - "reth-storage-api", "serde_json", "thiserror 2.0.18", ] diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 8bf9e28bcbf..b115ad56267 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -32,9 +32,7 @@ futures-util.workspace = true # misc eyre.workspace = true tracing.workspace = true - op-alloy-rpc-types-engine = { workspace = true, optional = true } -reth-optimism-chainspec = { workspace = true, optional = true } [lints] workspace = true @@ -42,7 +40,6 @@ workspace = true [features] op = [ "dep:op-alloy-rpc-types-engine", - "dep:reth-optimism-chainspec", "reth-payload-primitives/op", "reth-primitives-traits/op", ] diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index dc3be02f17e..55551430967 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -72,13 +72,18 @@ where &self, parent: &SealedHeader, ) -> op_alloy_rpc_types_engine::OpPayloadAttributes { + /// Dummy system transaction for dev mode. + /// OP Mainnet transaction at index 0 in block 124665056. 
+ /// + /// + const TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056: [u8; 251] = alloy_primitives::hex!( + "7ef8f8a0683079df94aa5b9cf86687d739a60a9b4f0835e520ec4d664e2e415dca17a6df94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985" + ); + op_alloy_rpc_types_engine::OpPayloadAttributes { payload_attributes: self.build(parent), // Add dummy system transaction - transactions: Some(vec![ - reth_optimism_chainspec::constants::TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056 - .into(), - ]), + transactions: Some(vec![TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056.into()]), no_tx_pool: None, gas_limit: None, eip_1559_params: None, diff --git a/crates/optimism/chainspec/src/constants.rs b/crates/optimism/chainspec/src/constants.rs index ba69adfe1ee..439c2fed83c 100644 --- a/crates/optimism/chainspec/src/constants.rs +++ b/crates/optimism/chainspec/src/constants.rs @@ -1,7 +1,5 @@ //! OP stack variation of chain spec constants. -use alloy_primitives::hex; - //------------------------------- BASE MAINNET -------------------------------// /// Max gas limit on Base: @@ -11,13 +9,3 @@ pub const BASE_MAINNET_MAX_GAS_LIMIT: u64 = 105_000_000; /// Max gas limit on Base Sepolia: pub const BASE_SEPOLIA_MAX_GAS_LIMIT: u64 = 45_000_000; - -//----------------------------------- DEV ------------------------------------// - -/// Dummy system transaction for dev mode -/// OP Mainnet transaction at index 0 in block 124665056. 
-/// -/// -pub const TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056: [u8; 251] = hex!( - "7ef8f8a0683079df94aa5b9cf86687d739a60a9b4f0835e520ec4d664e2e415dca17a6df94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985" -); diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index f9e7e83375c..af842e71c6f 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -4,14 +4,17 @@ use crate::{OpEthApi, OpEthApiError, SequencerClient}; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use futures::StreamExt; -use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; +use op_alloy_consensus::{ + transaction::{OpDepositInfo, OpTransactionInfo}, + OpTransaction, +}; use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::DepositReceipt; use reth_primitives_traits::{Recovered, SignedTransaction, SignerRecoverable, WithEncoded}; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadReceipt, LoadTransaction, SpawnBlocking}, - try_into_op_tx_info, EthApiTypes as _, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, - RpcReceipt, TxInfoMapper, + EthApiTypes as _, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, RpcReceipt, + TxInfoMapper, }; use reth_rpc_eth_types::{block::convert_transaction_receipt, EthApiError, TransactionSource}; use reth_storage_api::{errors::ProviderError, ProviderTx, ReceiptProvider, TransactionsProvider}; @@ -282,6 +285,18 @@ where type Err = ProviderError; fn try_map(&self, tx: &T, tx_info: 
TransactionInfo) -> Result { - try_into_op_tx_info(&self.provider, tx, tx_info) + let deposit_meta = if tx.is_deposit() { + self.provider.receipt_by_hash(*tx.tx_hash())?.and_then(|receipt| { + receipt.as_deposit_receipt().map(|receipt| OpDepositInfo { + deposit_receipt_version: receipt.deposit_receipt_version, + deposit_nonce: receipt.deposit_nonce, + }) + }) + } else { + None + } + .unwrap_or_default(); + + Ok(OpTransactionInfo::new(tx_info, deposit_meta)) } } diff --git a/crates/rpc/rpc-convert/Cargo.toml b/crates/rpc/rpc-convert/Cargo.toml index 53b8d0541e7..0f66d081bfd 100644 --- a/crates/rpc/rpc-convert/Cargo.toml +++ b/crates/rpc/rpc-convert/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-primitives-traits.workspace = true -reth-storage-api = { workspace = true, optional = true } reth-evm.workspace = true reth-ethereum-primitives.workspace = true @@ -31,7 +30,6 @@ alloy-evm = { workspace = true, features = ["rpc"] } op-alloy-consensus = { workspace = true, optional = true } op-alloy-rpc-types = { workspace = true, optional = true } op-alloy-network = { workspace = true, optional = true } -reth-optimism-primitives = { workspace = true, optional = true } # io jsonrpsee-types.workspace = true @@ -51,8 +49,6 @@ op = [ "dep:op-alloy-consensus", "dep:op-alloy-rpc-types", "dep:op-alloy-network", - "dep:reth-optimism-primitives", - "dep:reth-storage-api", "reth-evm/op", "reth-primitives-traits/op", "alloy-evm/op", diff --git a/crates/rpc/rpc-convert/src/lib.rs b/crates/rpc/rpc-convert/src/lib.rs index 0d33251ce04..1c9ab804324 100644 --- a/crates/rpc/rpc-convert/src/lib.rs +++ b/crates/rpc/rpc-convert/src/lib.rs @@ -24,6 +24,3 @@ pub use transaction::{ }; pub use alloy_evm::rpc::{CallFees, CallFeesError, EthTxEnvError, TryIntoTxEnv}; - -#[cfg(feature = "op")] -pub use transaction::op::*; diff --git a/crates/rpc/rpc-convert/src/receipt.rs b/crates/rpc/rpc-convert/src/receipt.rs index 2da79a9b530..c2974145952 100644 --- 
a/crates/rpc/rpc-convert/src/receipt.rs +++ b/crates/rpc/rpc-convert/src/receipt.rs @@ -29,7 +29,7 @@ impl TryFromReceiptResponse for reth_ethereum_primitive } #[cfg(feature = "op")] -impl TryFromReceiptResponse for reth_optimism_primitives::OpReceipt { +impl TryFromReceiptResponse for op_alloy_consensus::OpReceipt { type Error = Infallible; fn from_receipt_response( diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 1036e151998..9e60f823c63 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -872,40 +872,8 @@ pub mod op { use super::*; use alloy_consensus::SignableTransaction; use alloy_signer::Signature; - use op_alloy_consensus::{ - transaction::{OpDepositInfo, OpTransactionInfo}, - OpTxEnvelope, - }; + use op_alloy_consensus::{transaction::OpTransactionInfo, OpTxEnvelope}; use op_alloy_rpc_types::OpTransactionRequest; - use reth_optimism_primitives::DepositReceipt; - use reth_primitives_traits::SignedTransaction; - use reth_storage_api::{errors::ProviderError, ReceiptProvider}; - - /// Creates [`OpTransactionInfo`] by adding [`OpDepositInfo`] to [`TransactionInfo`] if `tx` is - /// a deposit. 
- pub fn try_into_op_tx_info( - provider: &T, - tx: &Tx, - tx_info: TransactionInfo, - ) -> Result - where - Tx: op_alloy_consensus::OpTransaction + SignedTransaction, - T: ReceiptProvider, - { - let deposit_meta = if tx.is_deposit() { - provider.receipt_by_hash(*tx.tx_hash())?.and_then(|receipt| { - receipt.as_deposit_receipt().map(|receipt| OpDepositInfo { - deposit_receipt_version: receipt.deposit_receipt_version, - deposit_nonce: receipt.deposit_nonce, - }) - }) - } else { - None - } - .unwrap_or_default(); - - Ok(OpTransactionInfo::new(tx_info, deposit_meta)) - } impl FromConsensusTx for op_alloy_rpc_types::Transaction @@ -964,9 +932,7 @@ impl TryFromTransactionResponse } #[cfg(feature = "op")] -impl TryFromTransactionResponse - for reth_optimism_primitives::OpTransactionSigned -{ +impl TryFromTransactionResponse for op_alloy_consensus::OpTxEnvelope { type Error = Infallible; fn from_transaction_response( @@ -1015,7 +981,6 @@ mod transaction_response_tests { fn test_optimism_transaction_conversion() { use op_alloy_consensus::OpTxEnvelope; use op_alloy_network::Optimism; - use reth_optimism_primitives::OpTransactionSigned; let signed_tx = Signed::new_unchecked( TxLegacy::default(), @@ -1038,7 +1003,10 @@ mod transaction_response_tests { deposit_receipt_version: None, }; - let result = >::from_transaction_response(tx_response); + let result = + >::from_transaction_response( + tx_response, + ); assert!(result.is_ok()); } diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 9faf7cb0ab3..b5026043a75 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -28,7 +28,7 @@ alloy-genesis.workspace = true alloy-consensus.workspace = true # optimism -reth-optimism-primitives = { workspace = true, optional = true, features = ["serde", "reth-codec"] } +op-alloy-consensus = { workspace = true, optional = true } # codecs modular-bitfield.workspace = true @@ -85,11 +85,11 @@ arbitrary = [ 
"reth-prune-types/arbitrary", "reth-stages-types/arbitrary", "alloy-consensus/arbitrary", - "reth-optimism-primitives?/arbitrary", + "op-alloy-consensus?/arbitrary", "reth-ethereum-primitives/arbitrary", ] op = [ - "dep:reth-optimism-primitives", + "dep:op-alloy-consensus", "reth-codecs/op", "reth-primitives-traits/op", ] diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 0b6a12f011c..523c17ef437 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -240,9 +240,9 @@ impl_compression_for_compact!( #[cfg(feature = "op")] mod op { use super::*; - use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; + use op_alloy_consensus::{OpReceipt, OpTxEnvelope}; - impl_compression_for_compact!(OpTransactionSigned, OpReceipt); + impl_compression_for_compact!(OpTxEnvelope, OpReceipt); } macro_rules! impl_compression_fixed_compact { From 543a85e9f34d79515b838f75a22ac12a03e338d5 Mon Sep 17 00:00:00 2001 From: SS <154840768+yyakmv@users.noreply.github.com> Date: Wed, 28 Jan 2026 16:12:55 +0100 Subject: [PATCH 257/267] fix: simplify UTF-8 decoding in `StreamCodec` by using `Result::ok` (#21524) --- crates/rpc/ipc/src/stream_codec.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/rpc/ipc/src/stream_codec.rs b/crates/rpc/ipc/src/stream_codec.rs index d4eb20fb5a6..be54eaa0a1d 100644 --- a/crates/rpc/ipc/src/stream_codec.rs +++ b/crates/rpc/ipc/src/stream_codec.rs @@ -117,10 +117,7 @@ impl tokio_util::codec::Decoder for StreamCodec { buf.advance(start_idx); } let bts = buf.split_to(idx + 1 - start_idx); - return match String::from_utf8(bts.into()) { - Ok(val) => Ok(Some(val)), - Err(_) => Ok(None), - } + return Ok(String::from_utf8(bts.into()).ok()) } } Ok(None) From effa0ab4c7ae559c06c7ecf4452d8ef8f10192e2 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:52:08 +0000 Subject: [PATCH 258/267] 
fix(provider): read changesets from static files during unwind (#21528) Co-authored-by: Amp --- .../stages/src/stages/hashing_storage.rs | 6 ++-- .../src/stages/index_storage_history.rs | 2 +- .../src/providers/blockchain_provider.rs | 2 +- .../provider/src/providers/consistent.rs | 2 +- .../src/providers/database/provider.rs | 35 ++++++------------- .../src/providers/static_file/manager.rs | 2 +- .../storage/provider/src/test_utils/mock.rs | 2 +- crates/storage/storage-api/src/hashing.rs | 2 +- crates/storage/storage-api/src/history.rs | 2 +- crates/storage/storage-api/src/noop.rs | 2 +- crates/storage/storage-api/src/storage.rs | 6 ++-- 11 files changed, 23 insertions(+), 40 deletions(-) diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index b2b771cd9a0..19e8936209f 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -3,7 +3,7 @@ use itertools::Itertools; use reth_config::config::{EtlConfig, HashingConfig}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRW}, - models::{BlockNumberAddress, CompactU256}, + models::CompactU256, table::Decompress, tables, transaction::{DbTx, DbTxMut}, @@ -179,7 +179,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_hashing_range(BlockNumberAddress::range(range))?; + provider.unwind_storage_hashing_range(range)?; let mut stage_checkpoint = input.checkpoint.storage_hashing_stage_checkpoint().unwrap_or_default(); @@ -227,7 +227,7 @@ mod tests { use rand::Rng; use reth_db_api::{ cursor::{DbCursorRW, DbDupCursorRO}, - models::StoredBlockBodyIndices, + models::{BlockNumberAddress, StoredBlockBodyIndices}, }; use reth_ethereum_primitives::Block; use reth_primitives_traits::SealedBlock; diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs 
index 7b7d39f6d67..29eb5816d61 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -166,7 +166,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_history_indices_range(BlockNumberAddress::range(range))?; + provider.unwind_storage_history_indices_range(range)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 141a5074b63..a9cf4c38f42 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -728,7 +728,7 @@ impl StorageChangeSetReader for BlockchainProvider { fn storage_changesets_range( &self, - range: RangeInclusive, + range: impl RangeBounds, ) -> ProviderResult> { self.consistent_provider()?.storage_changesets_range(range) } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 076e0e3d1ff..4963708d1b1 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1397,7 +1397,7 @@ impl StorageChangeSetReader for ConsistentProvider { fn storage_changesets_range( &self, - range: RangeInclusive, + range: impl RangeBounds, ) -> ProviderResult> { let range = to_range(range); let mut changesets = Vec::new(); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 795dbc308b5..89c3c65020b 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -40,7 +40,8 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, 
AccountBeforeTx, BlockNumberAddress, - ShardedKey, StorageBeforeTx, StorageSettings, StoredBlockBodyIndices, + BlockNumberAddressRange, ShardedKey, StorageBeforeTx, StorageSettings, + StoredBlockBodyIndices, }, table::Table, tables, @@ -1384,14 +1385,14 @@ impl StorageChangeSetReader for DatabaseProvider fn storage_changesets_range( &self, - range: RangeInclusive, + range: impl RangeBounds, ) -> ProviderResult> { if self.cached_storage_settings().storage_changesets_in_static_files { self.static_file_provider.storage_changesets_range(range) } else { self.tx .cursor_dup_read::()? - .walk_range(BlockNumberAddress::range(range))? + .walk_range(BlockNumberAddressRange::from(range))? .map(|r| r.map_err(Into::into)) .collect() } @@ -2834,11 +2835,7 @@ impl HashingWriter for DatabaseProvi &self, range: impl RangeBounds, ) -> ProviderResult>> { - let changesets = self - .tx - .cursor_read::()? - .walk_range(range)? - .collect::, _>>()?; + let changesets = self.account_changesets_range(range)?; self.unwind_account_hashing(changesets.iter()) } @@ -2896,13 +2893,9 @@ impl HashingWriter for DatabaseProvi fn unwind_storage_hashing_range( &self, - range: impl RangeBounds, + range: impl RangeBounds, ) -> ProviderResult>> { - let changesets = self - .tx - .cursor_read::()? - .walk_range(range)? - .collect::, _>>()?; + let changesets = self.storage_changesets_range(range)?; self.unwind_storage_hashing(changesets.into_iter()) } @@ -2997,11 +2990,7 @@ impl HistoryWriter for DatabaseProvi &self, range: impl RangeBounds, ) -> ProviderResult { - let changesets = self - .tx - .cursor_read::()? - .walk_range(range)? - .collect::, _>>()?; + let changesets = self.account_changesets_range(range)?; self.unwind_account_history_indices(changesets.iter()) } @@ -3063,13 +3052,9 @@ impl HistoryWriter for DatabaseProvi fn unwind_storage_history_indices_range( &self, - range: impl RangeBounds, + range: impl RangeBounds, ) -> ProviderResult { - let changesets = self - .tx - .cursor_read::()? 
- .walk_range(range)? - .collect::, _>>()?; + let changesets = self.storage_changesets_range(range)?; self.unwind_storage_history_indices(changesets.into_iter()) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 14beb0a4d89..a8743f301ca 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -2499,7 +2499,7 @@ impl StorageChangeSetReader for StaticFileProvider { fn storage_changesets_range( &self, - range: RangeInclusive, + range: impl RangeBounds, ) -> ProviderResult> { let range = self.bound_range(range, StaticFileSegment::StorageChangeSets); self.walk_storage_changeset_range(range).collect() diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index d54324c54c4..4d6b1729936 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1025,7 +1025,7 @@ impl StorageChangeSetReader fn storage_changesets_range( &self, - _range: RangeInclusive, + _range: impl RangeBounds, ) -> ProviderResult> { Ok(Vec::default()) } diff --git a/crates/storage/storage-api/src/hashing.rs b/crates/storage/storage-api/src/hashing.rs index 7c1ced53c1b..30d734d09b8 100644 --- a/crates/storage/storage-api/src/hashing.rs +++ b/crates/storage/storage-api/src/hashing.rs @@ -57,7 +57,7 @@ pub trait HashingWriter: Send { /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. fn unwind_storage_hashing_range( &self, - range: impl RangeBounds, + range: impl RangeBounds, ) -> ProviderResult>>; /// Iterates over storages and inserts them to hashing table. 
diff --git a/crates/storage/storage-api/src/history.rs b/crates/storage/storage-api/src/history.rs index d47f354ab6e..a06816a170d 100644 --- a/crates/storage/storage-api/src/history.rs +++ b/crates/storage/storage-api/src/history.rs @@ -44,7 +44,7 @@ pub trait HistoryWriter: Send { /// Returns number of changesets walked. fn unwind_storage_history_indices_range( &self, - range: impl RangeBounds, + range: impl RangeBounds, ) -> ProviderResult; /// Insert storage change index to database. Used inside `StorageHistoryIndex` stage diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index c6f0a30e08a..42620d9f833 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -430,7 +430,7 @@ impl StorageChangeSetReader for NoopProvider< fn storage_changesets_range( &self, - _range: RangeInclusive, + _range: impl core::ops::RangeBounds, ) -> ProviderResult< Vec<(reth_db_api::models::BlockNumberAddress, reth_primitives_traits::StorageEntry)>, > { diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index 66f74e7f0ce..ab92744970b 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -3,7 +3,7 @@ use alloc::{ vec::Vec, }; use alloy_primitives::{Address, BlockNumber, B256}; -use core::ops::RangeInclusive; +use core::ops::{RangeBounds, RangeInclusive}; use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; @@ -53,11 +53,9 @@ pub trait StorageChangeSetReader: Send { ) -> ProviderResult>; /// Get all storage changesets in a range of blocks. - /// - /// NOTE: Get inclusive range of blocks. fn storage_changesets_range( &self, - range: RangeInclusive, + range: impl RangeBounds, ) -> ProviderResult>; /// Get the total count of all storage changes. 
From 013dfdf8c8379045b4d47ee87a68e93e43623683 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 28 Jan 2026 18:10:07 +0000 Subject: [PATCH 259/267] fix(prune): add minimum 64 block retention for receipts and bodies (#21520) --- crates/node/core/src/args/pruning.rs | 19 +++++++----- .../src/segments/user/receipts_by_logs.rs | 6 ++-- crates/prune/types/src/lib.rs | 4 ++- crates/prune/types/src/mode.rs | 30 ++++++++++--------- crates/prune/types/src/segment.rs | 7 +++-- crates/prune/types/src/target.rs | 11 +++++-- .../src/providers/database/provider.rs | 6 ++-- docs/vocs/docs/pages/cli/op-reth/node.mdx | 2 +- docs/vocs/docs/pages/cli/reth/node.mdx | 2 +- 9 files changed, 50 insertions(+), 37 deletions(-) diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 24575a8ff75..9eff92a2abf 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -5,7 +5,9 @@ use alloy_primitives::{Address, BlockNumber}; use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthereumHardforks; use reth_config::config::PruneConfig; -use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; +use reth_prune_types::{ + PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_UNWIND_SAFE_DISTANCE, +}; use std::{collections::BTreeMap, ops::Not, sync::OnceLock}; /// Global static pruning defaults @@ -68,9 +70,9 @@ impl Default for DefaultPruningValues { full_prune_modes: PruneModes { sender_recovery: Some(PruneMode::Full), transaction_lookup: None, - receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + receipts: Some(PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE)), + account_history: Some(PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE)), + storage_history: 
Some(PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE)), // This field is ignored when full_bodies_history_use_pre_merge is true bodies_history: None, receipts_log_filter: Default::default(), @@ -80,9 +82,9 @@ impl Default for DefaultPruningValues { sender_recovery: Some(PruneMode::Full), transaction_lookup: Some(PruneMode::Full), receipts: Some(PruneMode::Full), - account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - bodies_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + account_history: Some(PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE)), + bodies_history: Some(PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE)), receipts_log_filter: Default::default(), }, } @@ -93,7 +95,8 @@ impl Default for DefaultPruningValues { #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] #[command(next_help_heading = "Pruning")] pub struct PruningArgs { - /// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored. + /// Run full node. Only the most recent [`MINIMUM_UNWIND_SAFE_DISTANCE`] block states are + /// stored. 
#[arg(long, default_value_t = false, conflicts_with = "minimal")] pub full: bool, diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 9e57bd2411a..591e77997e5 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -11,7 +11,7 @@ use reth_provider::{ }; use reth_prune_types::{ PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, - MINIMUM_PRUNING_DISTANCE, + MINIMUM_UNWIND_SAFE_DISTANCE, }; use tracing::{instrument, trace}; #[derive(Debug)] @@ -49,8 +49,8 @@ where fn prune(&self, provider: &Provider, input: PruneInput) -> Result { // Contract log filtering removes every receipt possible except the ones in the list. So, // for the other receipts it's as if they had a `PruneMode::Distance()` of - // `MINIMUM_PRUNING_DISTANCE`. - let to_block = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE) + // `MINIMUM_UNWIND_SAFE_DISTANCE`. + let to_block = PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE) .prune_target_block(input.to_block, PruneSegment::ContractLogs, PrunePurpose::User)? .map(|(bn, _)| bn) .unwrap_or_default(); diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 315063278b2..88d2d6490e6 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -30,7 +30,9 @@ pub use pruner::{ SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; -pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE}; +pub use target::{ + PruneModes, UnwindTargetPrunedError, MINIMUM_DISTANCE, MINIMUM_UNWIND_SAFE_DISTANCE, +}; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. 
#[derive(Debug, Clone, PartialEq, Eq, Default)] diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 8f5eaac0d7a..3706094b5fe 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -41,8 +41,12 @@ impl PruneMode { segment: PruneSegment, purpose: PrunePurpose, ) -> Result, PruneSegmentError> { + let min_blocks = segment.min_blocks(); let result = match self { - Self::Full if segment.min_blocks() == 0 => Some((tip, *self)), + Self::Full if min_blocks == 0 => Some((tip, *self)), + // For segments with min_blocks > 0, Full mode behaves like Distance(min_blocks) + Self::Full if min_blocks <= tip => Some((tip - min_blocks, *self)), + Self::Full => None, // Nothing to prune yet Self::Distance(distance) if *distance > tip => None, // Nothing to prune yet Self::Distance(distance) if *distance >= segment.min_blocks() => { Some((tip - distance, *self)) @@ -84,9 +88,7 @@ impl PruneMode { #[cfg(test)] mod tests { - use crate::{ - PruneMode, PrunePurpose, PruneSegment, PruneSegmentError, MINIMUM_PRUNING_DISTANCE, - }; + use crate::{PruneMode, PrunePurpose, PruneSegment, MINIMUM_UNWIND_SAFE_DISTANCE}; use assert_matches::assert_matches; use serde::Deserialize; @@ -96,8 +98,8 @@ mod tests { let segment = PruneSegment::AccountHistory; let tests = vec![ - // MINIMUM_PRUNING_DISTANCE makes this impossible - (PruneMode::Full, Err(PruneSegmentError::Configuration(segment))), + // Full mode with min_blocks > 0 behaves like Distance(min_blocks) + (PruneMode::Full, Ok(Some(tip - segment.min_blocks()))), // Nothing to prune (PruneMode::Distance(tip + 1), Ok(None)), ( @@ -107,12 +109,12 @@ mod tests { // Nothing to prune (PruneMode::Before(tip + 1), Ok(None)), ( - PruneMode::Before(tip - MINIMUM_PRUNING_DISTANCE), - Ok(Some(tip - MINIMUM_PRUNING_DISTANCE - 1)), + PruneMode::Before(tip - MINIMUM_UNWIND_SAFE_DISTANCE), + Ok(Some(tip - MINIMUM_UNWIND_SAFE_DISTANCE - 1)), ), ( - PruneMode::Before(tip - MINIMUM_PRUNING_DISTANCE - 1), - 
Ok(Some(tip - MINIMUM_PRUNING_DISTANCE - 2)), + PruneMode::Before(tip - MINIMUM_UNWIND_SAFE_DISTANCE - 1), + Ok(Some(tip - MINIMUM_UNWIND_SAFE_DISTANCE - 2)), ), // Nothing to prune (PruneMode::Before(tip - 1), Ok(None)), @@ -146,13 +148,13 @@ mod tests { let tests = vec![ (PruneMode::Distance(tip + 1), 1, !should_prune), ( - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE + 1), - tip - MINIMUM_PRUNING_DISTANCE - 1, + PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE + 1), + tip - MINIMUM_UNWIND_SAFE_DISTANCE - 1, !should_prune, ), ( - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE + 1), - tip - MINIMUM_PRUNING_DISTANCE - 2, + PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE + 1), + tip - MINIMUM_UNWIND_SAFE_DISTANCE - 2, should_prune, ), (PruneMode::Before(tip + 1), 1, should_prune), diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index 0e3f4e1edc6..5bc055f8296 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -1,6 +1,6 @@ #![allow(deprecated)] // necessary to all defining deprecated `PruneSegment` variants -use crate::MINIMUM_PRUNING_DISTANCE; +use crate::{MINIMUM_DISTANCE, MINIMUM_UNWIND_SAFE_DISTANCE}; use derive_more::Display; use strum::{EnumIter, IntoEnumIterator}; use thiserror::Error; @@ -65,9 +65,10 @@ impl PruneSegment { /// Returns minimum number of blocks to keep in the database for this segment. 
pub const fn min_blocks(&self) -> u64 { match self { - Self::SenderRecovery | Self::TransactionLookup | Self::Receipts | Self::Bodies => 0, + Self::SenderRecovery | Self::TransactionLookup => 0, + Self::Receipts | Self::Bodies => MINIMUM_DISTANCE, Self::ContractLogs | Self::AccountHistory | Self::StorageHistory => { - MINIMUM_PRUNING_DISTANCE + MINIMUM_UNWIND_SAFE_DISTANCE } #[expect(deprecated)] #[expect(clippy::match_same_arms)] diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index 92a01fc2e5b..7f5c383a652 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -9,7 +9,12 @@ use crate::{PruneCheckpoint, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; /// consensus protocol. /// 2. Another 10k blocks to have a room for maneuver in case when things go wrong and a manual /// unwind is required. -pub const MINIMUM_PRUNING_DISTANCE: u64 = 32 * 2 + 10_000; +pub const MINIMUM_UNWIND_SAFE_DISTANCE: u64 = 32 * 2 + 10_000; + +/// Minimum blocks to retain for receipts and bodies to ensure reorg safety. +/// This prevents pruning data that may be needed when handling chain reorganizations, +/// specifically when `canonical_block_by_hash` needs to reconstruct `ExecutedBlock` from disk. 
+pub const MINIMUM_DISTANCE: u64 = 64; /// Type of history that can be pruned #[derive(Debug, Error, PartialEq, Eq, Clone)] @@ -56,7 +61,7 @@ pub struct PruneModes { any(test, feature = "serde"), serde( skip_serializing_if = "Option::is_none", - deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" ) )] pub account_history: Option, @@ -65,7 +70,7 @@ pub struct PruneModes { any(test, feature = "serde"), serde( skip_serializing_if = "Option::is_none", - deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" + deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::" ) )] pub storage_history: Option, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 89c3c65020b..2ee377093f4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -54,7 +54,7 @@ use reth_primitives_traits::{ Account, Block as _, BlockBody as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry, }; use reth_prune_types::{ - PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, + PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_UNWIND_SAFE_DISTANCE, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -368,7 +368,7 @@ impl DatabaseProvider { changeset_cache, pending_rocksdb_batches: Default::default(), commit_order, - minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, + minimum_pruning_distance: MINIMUM_UNWIND_SAFE_DISTANCE, metrics: metrics::DatabaseProviderMetrics::default(), } } @@ -958,7 +958,7 @@ impl DatabaseProvider { changeset_cache, pending_rocksdb_batches: Default::default(), commit_order: CommitOrder::Normal, - minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, + minimum_pruning_distance: MINIMUM_UNWIND_SAFE_DISTANCE, metrics: 
metrics::DatabaseProviderMetrics::default(), } } diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index ad3c8eff2ba..054fe93391c 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -832,7 +832,7 @@ Dev testnet: Pruning: --full - Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored + Run full node. Only the most recent [`MINIMUM_UNWIND_SAFE_DISTANCE`] block states are stored --minimal Run minimal storage mode with maximum pruning and smaller static files. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 6105ff7f008..2f0c4e36976 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -832,7 +832,7 @@ Dev testnet: Pruning: --full - Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored + Run full node. Only the most recent [`MINIMUM_UNWIND_SAFE_DISTANCE`] block states are stored --minimal Run minimal storage mode with maximum pruning and smaller static files. 
From 50e05915406a064f1996a41ac26e8908ba566737 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 28 Jan 2026 19:16:04 +0100 Subject: [PATCH 260/267] perf(tree): optimistically prepare canonical overlay (#21475) Co-authored-by: Amp --- crates/engine/tree/src/tree/mod.rs | 13 +++ .../engine/tree/src/tree/payload_validator.rs | 14 +++ crates/engine/tree/src/tree/state.rs | 105 +++++++++++++++++- crates/engine/tree/src/tree/tests.rs | 1 + 4 files changed, 132 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 0e4daeef244..ea62aac72c6 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1406,7 +1406,20 @@ where ); self.changeset_cache.evict(eviction_threshold); + // Invalidate cached overlay since the anchor has changed + self.state.tree_state.invalidate_cached_overlay(); + self.on_new_persisted_block()?; + + // Re-prepare overlay for the current canonical head with the new anchor. + // Spawn a background task to trigger computation so it's ready when the next payload + // arrives. + if let Some(overlay) = self.state.tree_state.prepare_canonical_overlay() { + rayon::spawn(move || { + let _ = overlay.get(); + }); + } + Ok(()) } diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 263c03957f4..25168ef3ef0 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -1112,10 +1112,13 @@ where /// while the trie input computation is deferred until the overlay is actually needed. /// /// If parent is on disk (no in-memory blocks), returns `None` for the lazy overlay. + /// + /// Uses a cached overlay if available for the canonical head (the common case). 
fn get_parent_lazy_overlay( parent_hash: B256, state: &EngineApiTreeState, ) -> (Option, B256) { + // Get blocks leading to the parent to determine the anchor let (anchor_hash, blocks) = state.tree_state.blocks_by_hash(parent_hash).unwrap_or_else(|| (parent_hash, vec![])); @@ -1124,6 +1127,17 @@ where return (None, anchor_hash); } + // Try to use the cached overlay if it matches both parent hash and anchor + if let Some(cached) = state.tree_state.get_cached_overlay(parent_hash, anchor_hash) { + debug!( + target: "engine::tree::payload_validator", + %parent_hash, + %anchor_hash, + "Using cached canonical overlay" + ); + return (Some(cached.overlay.clone()), cached.anchor_hash); + } + debug!( target: "engine::tree::payload_validator", %anchor_hash, diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index 0a13207e660..2827997a9d5 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -6,7 +6,7 @@ use alloy_primitives::{ map::{HashMap, HashSet}, BlockNumber, B256, }; -use reth_chain_state::{EthPrimitives, ExecutedBlock}; +use reth_chain_state::{DeferredTrieData, EthPrimitives, ExecutedBlock, LazyOverlay}; use reth_primitives_traits::{AlloyBlockHeader, NodePrimitives, SealedHeader}; use std::{ collections::{btree_map, hash_map, BTreeMap, VecDeque}, @@ -38,6 +38,12 @@ pub struct TreeState { pub(crate) current_canonical_head: BlockNumHash, /// The engine API variant of this handler pub(crate) engine_kind: EngineApiKind, + /// Pre-computed lazy overlay for the canonical head. + /// + /// This is optimistically prepared after the canonical head changes, so that + /// the next payload building on the canonical head can use it immediately + /// without recomputing. 
+ pub(crate) cached_canonical_overlay: Option, } impl TreeState { @@ -49,6 +55,7 @@ impl TreeState { current_canonical_head, parent_to_child: HashMap::default(), engine_kind, + cached_canonical_overlay: None, } } @@ -92,6 +99,66 @@ impl TreeState { Some((parent_hash, blocks)) } + /// Prepares a cached lazy overlay for the current canonical head. + /// + /// This should be called after the canonical head changes to optimistically + /// prepare the overlay for the next payload that will likely build on it. + /// + /// Returns a clone of the [`LazyOverlay`] so the caller can spawn a background + /// task to trigger computation via [`LazyOverlay::get`]. This ensures the overlay + /// is actually computed before the next payload arrives. + pub(crate) fn prepare_canonical_overlay(&mut self) -> Option { + let canonical_hash = self.current_canonical_head.hash; + + // Get blocks leading to the canonical head + let Some((anchor_hash, blocks)) = self.blocks_by_hash(canonical_hash) else { + // Canonical head not in memory (persisted), no overlay needed + self.cached_canonical_overlay = None; + return None; + }; + + // Extract deferred trie data handles from blocks (newest to oldest) + let handles: Vec = blocks.iter().map(|b| b.trie_data_handle()).collect(); + + let overlay = LazyOverlay::new(anchor_hash, handles); + self.cached_canonical_overlay = Some(PreparedCanonicalOverlay { + parent_hash: canonical_hash, + overlay: overlay.clone(), + anchor_hash, + }); + + debug!( + target: "engine::tree", + %canonical_hash, + %anchor_hash, + num_blocks = blocks.len(), + "Prepared cached canonical overlay" + ); + + Some(overlay) + } + + /// Returns the cached overlay if it matches the requested parent hash and anchor. + /// + /// Both parent hash and anchor hash must match to ensure the overlay is valid. + /// This prevents using a stale overlay after persistence has advanced the anchor. 
+ pub(crate) fn get_cached_overlay( + &self, + parent_hash: B256, + expected_anchor: B256, + ) -> Option<&PreparedCanonicalOverlay> { + self.cached_canonical_overlay.as_ref().filter(|cached| { + cached.parent_hash == parent_hash && cached.anchor_hash == expected_anchor + }) + } + + /// Invalidates the cached overlay. + /// + /// Should be called when the anchor changes (e.g., after persistence). + pub(crate) fn invalidate_cached_overlay(&mut self) { + self.cached_canonical_overlay = None; + } + /// Insert executed block into the state. pub(crate) fn insert_executed(&mut self, executed: ExecutedBlock) { let hash = executed.recovered_block().hash(); @@ -288,6 +355,9 @@ impl TreeState { if let Some(finalized_num_hash) = finalized_num_hash { self.prune_finalized_sidechains(finalized_num_hash); } + + // Invalidate the cached overlay since blocks were removed and the anchor may have changed + self.invalidate_cached_overlay(); } /// Updates the canonical head to the given block. @@ -355,6 +425,39 @@ impl TreeState { } } +/// Pre-computed lazy overlay for the canonical head block. +/// +/// This is prepared **optimistically** when the canonical head changes, allowing +/// the next payload (which typically builds on the canonical head) to reuse +/// the pre-computed overlay immediately without re-traversing in-memory blocks. +/// +/// The overlay captures deferred trie data handles from all in-memory blocks +/// between the canonical head and the persisted anchor. When a new payload +/// arrives building on the canonical head, this cached overlay can be used +/// directly instead of calling `blocks_by_hash` and collecting handles again. +/// +/// # Invalidation +/// +/// The cached overlay is invalidated when: +/// - Persistence completes (anchor changes) +/// - The canonical head changes to a different block +#[derive(Debug, Clone)] +pub struct PreparedCanonicalOverlay { + /// The block hash for which this overlay is prepared as a parent. 
+ /// + /// When a payload arrives with this parent hash, the overlay can be reused. + pub parent_hash: B256, + /// The pre-computed lazy overlay containing deferred trie data handles. + /// + /// This is computed optimistically after `set_canonical_head` so subsequent + /// payloads don't need to re-collect the handles. + pub overlay: LazyOverlay, + /// The anchor hash (persisted ancestor) this overlay is based on. + /// + /// Used to verify the overlay is still valid (anchor hasn't changed due to persistence). + pub anchor_hash: B256, +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index dd576ed37f8..b2ea8272a09 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -259,6 +259,7 @@ impl TestHarness { current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(), parent_to_child, engine_kind: EngineApiKind::Ethereum, + cached_canonical_overlay: None, }; let last_executed_block = blocks.last().unwrap().clone(); From 8d58c980349c5af747066617f21625745f04193b Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 28 Jan 2026 19:13:19 +0000 Subject: [PATCH 261/267] feat(reth-bench): add reporting and wait options to replay-payloads (#21537) --- bin/reth-bench/src/bench/mod.rs | 1 + bin/reth-bench/src/bench/new_payload_fcu.rs | 279 ++---------------- bin/reth-bench/src/bench/output.rs | 53 +++- .../src/bench/persistence_waiter.rs | 267 +++++++++++++++++ bin/reth-bench/src/bench/replay_payloads.rs | 266 +++++++++++++---- 5 files changed, 542 insertions(+), 324 deletions(-) create mode 100644 bin/reth-bench/src/bench/persistence_waiter.rs diff --git a/bin/reth-bench/src/bench/mod.rs b/bin/reth-bench/src/bench/mod.rs index 5ccbc775467..3bb84915c95 100644 --- a/bin/reth-bench/src/bench/mod.rs +++ b/bin/reth-bench/src/bench/mod.rs @@ -15,6 +15,7 @@ pub use generate_big_block::{ mod new_payload_fcu; mod new_payload_only; mod output; 
+mod persistence_waiter; mod replay_payloads; mod send_invalid_payload; mod send_payload; diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 62e0aef2594..e92556765be 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -15,19 +15,17 @@ use crate::{ output::{ write_benchmark_results, CombinedResult, NewPayloadResult, TotalGasOutput, TotalGasRow, }, + persistence_waiter::{ + engine_url_to_ws_url, setup_persistence_subscription, PersistenceWaiter, + PERSISTENCE_CHECKPOINT_TIMEOUT, + }, }, valid_payload::{block_to_new_payload, call_forkchoice_updated, call_new_payload}, }; -use alloy_eips::BlockNumHash; -use alloy_network::Ethereum; -use alloy_provider::{Provider, RootProvider}; -use alloy_pubsub::SubscriptionStream; -use alloy_rpc_client::RpcClient; +use alloy_provider::Provider; use alloy_rpc_types_engine::ForkchoiceState; -use alloy_transport_ws::WsConnect; use clap::Parser; use eyre::{Context, OptionExt}; -use futures::StreamExt; use humantime::parse_duration; use reth_cli_runner::CliContext; use reth_engine_primitives::config::DEFAULT_PERSISTENCE_THRESHOLD; @@ -36,8 +34,6 @@ use std::time::{Duration, Instant}; use tracing::{debug, info}; use url::Url; -const PERSISTENCE_CHECKPOINT_TIMEOUT: Duration = Duration::from_secs(60); - /// `reth benchmark new-payload-fcu` command #[derive(Debug, Parser)] pub struct Command { @@ -105,7 +101,8 @@ impl Command { let mut waiter = match (self.wait_time, self.wait_for_persistence) { (Some(duration), _) => Some(PersistenceWaiter::with_duration(duration)), (None, true) => { - let sub = self.setup_persistence_subscription().await?; + let ws_url = self.derive_ws_rpc_url()?; + let sub = setup_persistence_subscription(ws_url).await?; Some(PersistenceWaiter::with_subscription( sub, self.persistence_threshold, @@ -245,17 +242,20 @@ impl Command { results.into_iter().unzip(); if let Some(ref path) = self.benchmark.output { - 
write_benchmark_results(path, &gas_output_results, combined_results)?; + write_benchmark_results(path, &gas_output_results, &combined_results)?; } - let gas_output = TotalGasOutput::new(gas_output_results)?; + let gas_output = + TotalGasOutput::with_combined_results(gas_output_results, &combined_results)?; info!( - total_duration=?gas_output.total_duration, - total_gas_used=?gas_output.total_gas_used, - blocks_processed=?gas_output.blocks_processed, - "Total Ggas/s: {:.4}", - gas_output.total_gigagas_per_second() + total_gas_used = gas_output.total_gas_used, + total_duration = ?gas_output.total_duration, + execution_duration = ?gas_output.execution_duration, + blocks_processed = gas_output.blocks_processed, + wall_clock_ggas_per_second = format_args!("{:.4}", gas_output.total_gigagas_per_second()), + execution_ggas_per_second = format_args!("{:.4}", gas_output.execution_gigagas_per_second()), + "Benchmark complete" ); Ok(()) @@ -289,249 +289,4 @@ impl Command { Ok(derived) } } - - /// Establishes a websocket connection and subscribes to `reth_subscribePersistedBlock`. - async fn setup_persistence_subscription(&self) -> eyre::Result { - let ws_url = self.derive_ws_rpc_url()?; - - info!("Connecting to WebSocket at {} for persistence subscription", ws_url); - - let ws_connect = WsConnect::new(ws_url.to_string()); - let client = RpcClient::connect_pubsub(ws_connect) - .await - .wrap_err("Failed to connect to WebSocket RPC endpoint")?; - let provider: RootProvider = RootProvider::new(client); - - let subscription = provider - .subscribe_to::("reth_subscribePersistedBlock") - .await - .wrap_err("Failed to subscribe to persistence notifications")?; - - info!("Subscribed to persistence notifications"); - - Ok(PersistenceSubscription::new(provider, subscription.into_stream())) - } -} - -/// Converts an engine API URL to the default RPC websocket URL. 
-/// -/// Transformations: -/// - `http` → `ws` -/// - `https` → `wss` -/// - `ws` / `wss` keep their scheme -/// - Port is always set to `8546`, reth's default RPC websocket port. -/// -/// This is used when we only know the engine API URL (typically `:8551`) but -/// need to connect to the node's WS RPC endpoint for persistence events. -fn engine_url_to_ws_url(engine_url: &str) -> eyre::Result { - let url: Url = engine_url - .parse() - .wrap_err_with(|| format!("Failed to parse engine RPC URL: {engine_url}"))?; - - let mut ws_url = url.clone(); - - match ws_url.scheme() { - "http" => ws_url - .set_scheme("ws") - .map_err(|_| eyre::eyre!("Failed to set WS scheme for URL: {url}"))?, - "https" => ws_url - .set_scheme("wss") - .map_err(|_| eyre::eyre!("Failed to set WSS scheme for URL: {url}"))?, - "ws" | "wss" => {} - scheme => { - return Err(eyre::eyre!( - "Unsupported URL scheme '{scheme}' for URL: {url}. Expected http, https, ws, or wss." - )) - } - } - - ws_url.set_port(Some(8546)).map_err(|_| eyre::eyre!("Failed to set port for URL: {url}"))?; - - Ok(ws_url) -} - -/// Waits until the persistence subscription reports that `target` has been persisted. -/// -/// Consumes subscription events until `last_persisted >= target`, or returns an error if: -/// - the subscription stream ends unexpectedly, or -/// - `timeout` elapses before `target` is observed. 
-async fn wait_for_persistence( - stream: &mut SubscriptionStream, - target: u64, - last_persisted: &mut u64, - timeout: Duration, -) -> eyre::Result<()> { - tokio::time::timeout(timeout, async { - while *last_persisted < target { - match stream.next().await { - Some(persisted) => { - *last_persisted = persisted.number; - debug!( - target: "reth-bench", - persisted_block = ?last_persisted, - "Received persistence notification" - ); - } - None => { - return Err(eyre::eyre!("Persistence subscription closed unexpectedly")); - } - } - } - Ok(()) - }) - .await - .map_err(|_| { - eyre::eyre!( - "Persistence timeout: target block {} not persisted within {:?}. Last persisted: {}", - target, - timeout, - last_persisted - ) - })? -} - -/// Wrapper that keeps both the subscription stream and the underlying provider alive. -/// The provider must be kept alive for the subscription to continue receiving events. -struct PersistenceSubscription { - _provider: RootProvider, - stream: SubscriptionStream, -} - -impl PersistenceSubscription { - const fn new( - provider: RootProvider, - stream: SubscriptionStream, - ) -> Self { - Self { _provider: provider, stream } - } - - const fn stream_mut(&mut self) -> &mut SubscriptionStream { - &mut self.stream - } -} - -/// Encapsulates the block waiting logic. -/// -/// Provides a simple `on_block()` interface that handles both: -/// - Fixed duration waits (when `wait_time` is set) -/// - Persistence-based waits (when `subscription` is set) -/// -/// For persistence mode, waits after every `(threshold + 1)` blocks. 
-struct PersistenceWaiter { - wait_time: Option, - subscription: Option, - blocks_sent: u64, - last_persisted: u64, - threshold: u64, - timeout: Duration, -} - -impl PersistenceWaiter { - const fn with_duration(wait_time: Duration) -> Self { - Self { - wait_time: Some(wait_time), - subscription: None, - blocks_sent: 0, - last_persisted: 0, - threshold: 0, - timeout: Duration::ZERO, - } - } - - const fn with_subscription( - subscription: PersistenceSubscription, - threshold: u64, - timeout: Duration, - ) -> Self { - Self { - wait_time: None, - subscription: Some(subscription), - blocks_sent: 0, - last_persisted: 0, - threshold, - timeout, - } - } - - /// Called once per block. Waits based on the configured mode. - #[allow(clippy::manual_is_multiple_of)] - async fn on_block(&mut self, block_number: u64) -> eyre::Result<()> { - if let Some(wait_time) = self.wait_time { - tokio::time::sleep(wait_time).await; - return Ok(()); - } - - let Some(ref mut subscription) = self.subscription else { - return Ok(()); - }; - - self.blocks_sent += 1; - - if self.blocks_sent % (self.threshold + 1) == 0 { - debug!( - target: "reth-bench", - target_block = ?block_number, - last_persisted = self.last_persisted, - blocks_sent = self.blocks_sent, - "Waiting for persistence" - ); - - wait_for_persistence( - subscription.stream_mut(), - block_number, - &mut self.last_persisted, - self.timeout, - ) - .await?; - - debug!( - target: "reth-bench", - persisted = self.last_persisted, - "Persistence caught up" - ); - } - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_engine_url_to_ws_url() { - // http -> ws, always uses port 8546 - let result = engine_url_to_ws_url("http://localhost:8551").unwrap(); - assert_eq!(result.as_str(), "ws://localhost:8546/"); - - // https -> wss - let result = engine_url_to_ws_url("https://localhost:8551").unwrap(); - assert_eq!(result.as_str(), "wss://localhost:8546/"); - - // Custom engine port still maps to 8546 - let result = 
engine_url_to_ws_url("http://localhost:9551").unwrap(); - assert_eq!(result.port(), Some(8546)); - - // Already ws passthrough - let result = engine_url_to_ws_url("ws://localhost:8546").unwrap(); - assert_eq!(result.scheme(), "ws"); - - // Invalid inputs - assert!(engine_url_to_ws_url("ftp://localhost:8551").is_err()); - assert!(engine_url_to_ws_url("not a valid url").is_err()); - } - - #[tokio::test] - async fn test_waiter_with_duration() { - let mut waiter = PersistenceWaiter::with_duration(Duration::from_millis(1)); - - let start = Instant::now(); - waiter.on_block(1).await.unwrap(); - waiter.on_block(2).await.unwrap(); - waiter.on_block(3).await.unwrap(); - - // Should have waited ~3ms total - assert!(start.elapsed() >= Duration::from_millis(3)); - } } diff --git a/bin/reth-bench/src/bench/output.rs b/bin/reth-bench/src/bench/output.rs index 25a1deaf22c..d367f63f3dc 100644 --- a/bin/reth-bench/src/bench/output.rs +++ b/bin/reth-bench/src/bench/output.rs @@ -6,7 +6,7 @@ use csv::Writer; use eyre::OptionExt; use reth_primitives_traits::constants::GIGAGAS; use serde::{ser::SerializeStruct, Deserialize, Serialize}; -use std::{path::Path, time::Duration}; +use std::{fs, path::Path, time::Duration}; use tracing::info; /// This is the suffix for gas output csv files. @@ -158,29 +158,58 @@ pub(crate) struct TotalGasRow { pub(crate) struct TotalGasOutput { /// The total gas used in the benchmark. pub(crate) total_gas_used: u64, - /// The total duration of the benchmark. + /// The total wall-clock duration of the benchmark (includes wait times). pub(crate) total_duration: Duration, - /// The total gas used per second. - pub(crate) total_gas_per_second: f64, + /// The total execution-only duration (excludes wait times). + pub(crate) execution_duration: Duration, /// The number of blocks processed. pub(crate) blocks_processed: u64, } impl TotalGasOutput { - /// Create a new [`TotalGasOutput`] from a list of [`TotalGasRow`]. 
+ /// Create a new [`TotalGasOutput`] from gas rows only. + /// + /// Use this when execution-only timing is not available (e.g., `new_payload_only`). + /// `execution_duration` will equal `total_duration`. pub(crate) fn new(rows: Vec) -> eyre::Result { - // the duration is obtained from the last row let total_duration = rows.last().map(|row| row.time).ok_or_eyre("empty results")?; let blocks_processed = rows.len() as u64; let total_gas_used: u64 = rows.into_iter().map(|row| row.gas_used).sum(); - let total_gas_per_second = total_gas_used as f64 / total_duration.as_secs_f64(); - Ok(Self { total_gas_used, total_duration, total_gas_per_second, blocks_processed }) + Ok(Self { + total_gas_used, + total_duration, + execution_duration: total_duration, + blocks_processed, + }) } - /// Return the total gigagas per second. + /// Create a new [`TotalGasOutput`] from gas rows and combined results. + /// + /// - `rows`: Used for total gas and wall-clock duration + /// - `combined_results`: Used for execution-only duration (sum of `total_latency`) + pub(crate) fn with_combined_results( + rows: Vec, + combined_results: &[CombinedResult], + ) -> eyre::Result { + let total_duration = rows.last().map(|row| row.time).ok_or_eyre("empty results")?; + let blocks_processed = rows.len() as u64; + let total_gas_used: u64 = rows.into_iter().map(|row| row.gas_used).sum(); + + // Sum execution-only time from combined results + let execution_duration: Duration = combined_results.iter().map(|r| r.total_latency).sum(); + + Ok(Self { total_gas_used, total_duration, execution_duration, blocks_processed }) + } + + /// Return the total gigagas per second based on wall-clock time. pub(crate) fn total_gigagas_per_second(&self) -> f64 { - self.total_gas_per_second / GIGAGAS as f64 + self.total_gas_used as f64 / self.total_duration.as_secs_f64() / GIGAGAS as f64 + } + + /// Return the execution-only gigagas per second (excludes wait times). 
+ pub(crate) fn execution_gigagas_per_second(&self) -> f64 { + self.total_gas_used as f64 / self.execution_duration.as_secs_f64() / GIGAGAS as f64 } } @@ -192,8 +221,10 @@ impl TotalGasOutput { pub(crate) fn write_benchmark_results( output_dir: &Path, gas_results: &[TotalGasRow], - combined_results: Vec, + combined_results: &[CombinedResult], ) -> eyre::Result<()> { + fs::create_dir_all(output_dir)?; + let output_path = output_dir.join(COMBINED_OUTPUT_SUFFIX); info!("Writing engine api call latency output to file: {:?}", output_path); let mut writer = Writer::from_path(&output_path)?; diff --git a/bin/reth-bench/src/bench/persistence_waiter.rs b/bin/reth-bench/src/bench/persistence_waiter.rs new file mode 100644 index 00000000000..43022040562 --- /dev/null +++ b/bin/reth-bench/src/bench/persistence_waiter.rs @@ -0,0 +1,267 @@ +//! Persistence waiting utilities for benchmarks. +//! +//! Provides waiting behavior to control benchmark pacing: +//! - **Fixed duration waits**: Sleep for a fixed time between blocks +//! - **Persistence-based waits**: Wait for blocks to be persisted using +//! `reth_subscribePersistedBlock` subscription + +use alloy_eips::BlockNumHash; +use alloy_network::Ethereum; +use alloy_provider::{Provider, RootProvider}; +use alloy_pubsub::SubscriptionStream; +use alloy_rpc_client::RpcClient; +use alloy_transport_ws::WsConnect; +use eyre::Context; +use futures::StreamExt; +use std::time::Duration; +use tracing::{debug, info}; +use url::Url; + +/// Default timeout for waiting on persistence. +pub(crate) const PERSISTENCE_CHECKPOINT_TIMEOUT: Duration = Duration::from_secs(60); + +/// Converts an engine API URL to the default RPC websocket URL. +/// +/// Transformations: +/// - `http` → `ws` +/// - `https` → `wss` +/// - `ws` / `wss` keep their scheme +/// - Port is always set to `8546`, reth's default RPC websocket port. 
+/// +/// This is used when we only know the engine API URL (typically `:8551`) but +/// need to connect to the node's WS RPC endpoint for persistence events. +pub(crate) fn engine_url_to_ws_url(engine_url: &str) -> eyre::Result { + let url: Url = engine_url + .parse() + .wrap_err_with(|| format!("Failed to parse engine RPC URL: {engine_url}"))?; + + let mut ws_url = url.clone(); + + match ws_url.scheme() { + "http" => ws_url + .set_scheme("ws") + .map_err(|_| eyre::eyre!("Failed to set WS scheme for URL: {url}"))?, + "https" => ws_url + .set_scheme("wss") + .map_err(|_| eyre::eyre!("Failed to set WSS scheme for URL: {url}"))?, + "ws" | "wss" => {} + scheme => { + return Err(eyre::eyre!( + "Unsupported URL scheme '{scheme}' for URL: {url}. Expected http, https, ws, or wss." + )) + } + } + + ws_url.set_port(Some(8546)).map_err(|_| eyre::eyre!("Failed to set port for URL: {url}"))?; + + Ok(ws_url) +} + +/// Waits until the persistence subscription reports that `target` has been persisted. +/// +/// Consumes subscription events until `last_persisted >= target`, or returns an error if: +/// - the subscription stream ends unexpectedly, or +/// - `timeout` elapses before `target` is observed. +async fn wait_for_persistence( + stream: &mut SubscriptionStream, + target: u64, + last_persisted: &mut u64, + timeout: Duration, +) -> eyre::Result<()> { + tokio::time::timeout(timeout, async { + while *last_persisted < target { + match stream.next().await { + Some(persisted) => { + *last_persisted = persisted.number; + debug!( + target: "reth-bench", + persisted_block = ?last_persisted, + "Received persistence notification" + ); + } + None => { + return Err(eyre::eyre!("Persistence subscription closed unexpectedly")); + } + } + } + Ok(()) + }) + .await + .map_err(|_| { + eyre::eyre!( + "Persistence timeout: target block {} not persisted within {:?}. Last persisted: {}", + target, + timeout, + last_persisted + ) + })? 
+} + +/// Wrapper that keeps both the subscription stream and the underlying provider alive. +/// The provider must be kept alive for the subscription to continue receiving events. +pub(crate) struct PersistenceSubscription { + _provider: RootProvider, + stream: SubscriptionStream, +} + +impl PersistenceSubscription { + const fn new( + provider: RootProvider, + stream: SubscriptionStream, + ) -> Self { + Self { _provider: provider, stream } + } + + const fn stream_mut(&mut self) -> &mut SubscriptionStream { + &mut self.stream + } +} + +/// Establishes a websocket connection and subscribes to `reth_subscribePersistedBlock`. +pub(crate) async fn setup_persistence_subscription( + ws_url: Url, +) -> eyre::Result { + info!("Connecting to WebSocket at {} for persistence subscription", ws_url); + + let ws_connect = WsConnect::new(ws_url.to_string()); + let client = RpcClient::connect_pubsub(ws_connect) + .await + .wrap_err("Failed to connect to WebSocket RPC endpoint")?; + let provider: RootProvider = RootProvider::new(client); + + let subscription = provider + .subscribe_to::("reth_subscribePersistedBlock") + .await + .wrap_err("Failed to subscribe to persistence notifications")?; + + info!("Subscribed to persistence notifications"); + + Ok(PersistenceSubscription::new(provider, subscription.into_stream())) +} + +/// Encapsulates the block waiting logic. +/// +/// Provides a simple `on_block()` interface that handles both: +/// - Fixed duration waits (when `wait_time` is set) +/// - Persistence-based waits (when `subscription` is set) +/// +/// For persistence mode, waits after every `(threshold + 1)` blocks. 
+pub(crate) struct PersistenceWaiter { + wait_time: Option, + subscription: Option, + blocks_sent: u64, + last_persisted: u64, + threshold: u64, + timeout: Duration, +} + +impl PersistenceWaiter { + pub(crate) const fn with_duration(wait_time: Duration) -> Self { + Self { + wait_time: Some(wait_time), + subscription: None, + blocks_sent: 0, + last_persisted: 0, + threshold: 0, + timeout: Duration::ZERO, + } + } + + pub(crate) const fn with_subscription( + subscription: PersistenceSubscription, + threshold: u64, + timeout: Duration, + ) -> Self { + Self { + wait_time: None, + subscription: Some(subscription), + blocks_sent: 0, + last_persisted: 0, + threshold, + timeout, + } + } + + /// Called once per block. Waits based on the configured mode. + #[allow(clippy::manual_is_multiple_of)] + pub(crate) async fn on_block(&mut self, block_number: u64) -> eyre::Result<()> { + if let Some(wait_time) = self.wait_time { + tokio::time::sleep(wait_time).await; + return Ok(()); + } + + let Some(ref mut subscription) = self.subscription else { + return Ok(()); + }; + + self.blocks_sent += 1; + + if self.blocks_sent % (self.threshold + 1) == 0 { + debug!( + target: "reth-bench", + target_block = ?block_number, + last_persisted = self.last_persisted, + blocks_sent = self.blocks_sent, + "Waiting for persistence" + ); + + wait_for_persistence( + subscription.stream_mut(), + block_number, + &mut self.last_persisted, + self.timeout, + ) + .await?; + + debug!( + target: "reth-bench", + persisted = self.last_persisted, + "Persistence caught up" + ); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Instant; + + #[test] + fn test_engine_url_to_ws_url() { + // http -> ws, always uses port 8546 + let result = engine_url_to_ws_url("http://localhost:8551").unwrap(); + assert_eq!(result.as_str(), "ws://localhost:8546/"); + + // https -> wss + let result = engine_url_to_ws_url("https://localhost:8551").unwrap(); + assert_eq!(result.as_str(), 
"wss://localhost:8546/"); + + // Custom engine port still maps to 8546 + let result = engine_url_to_ws_url("http://localhost:9551").unwrap(); + assert_eq!(result.port(), Some(8546)); + + // Already ws passthrough + let result = engine_url_to_ws_url("ws://localhost:8546").unwrap(); + assert_eq!(result.scheme(), "ws"); + + // Invalid inputs + assert!(engine_url_to_ws_url("ftp://localhost:8551").is_err()); + assert!(engine_url_to_ws_url("not a valid url").is_err()); + } + + #[tokio::test] + async fn test_waiter_with_duration() { + let mut waiter = PersistenceWaiter::with_duration(Duration::from_millis(1)); + + let start = Instant::now(); + waiter.on_block(1).await.unwrap(); + waiter.on_block(2).await.unwrap(); + waiter.on_block(3).await.unwrap(); + + // Should have waited ~3ms total + assert!(start.elapsed() >= Duration::from_millis(3)); + } +} diff --git a/bin/reth-bench/src/bench/replay_payloads.rs b/bin/reth-bench/src/bench/replay_payloads.rs index e6d388d9efe..cf1b82f5be6 100644 --- a/bin/reth-bench/src/bench/replay_payloads.rs +++ b/bin/reth-bench/src/bench/replay_payloads.rs @@ -2,10 +2,27 @@ //! //! This command reads `ExecutionPayloadEnvelopeV4` files from a directory and replays them //! in sequence using `newPayload` followed by `forkchoiceUpdated`. +//! +//! Supports configurable waiting behavior: +//! - **`--wait-time`**: Fixed sleep interval between blocks. +//! - **`--wait-for-persistence`**: Waits for every Nth block to be persisted using the +//! `reth_subscribePersistedBlock` subscription, where N matches the engine's persistence +//! threshold. This ensures the benchmark doesn't outpace persistence. +//! +//! Both options can be used together or independently. 
use crate::{ authenticated_transport::AuthenticatedTransportConnect, - bench::output::GasRampPayloadFile, + bench::{ + output::{ + write_benchmark_results, CombinedResult, GasRampPayloadFile, NewPayloadResult, + TotalGasOutput, TotalGasRow, + }, + persistence_waiter::{ + engine_url_to_ws_url, setup_persistence_subscription, PersistenceWaiter, + PERSISTENCE_CHECKPOINT_TIMEOUT, + }, + }, valid_payload::{call_forkchoice_updated, call_new_payload}, }; use alloy_primitives::B256; @@ -14,11 +31,16 @@ use alloy_rpc_client::ClientBuilder; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV4, ForkchoiceState, JwtSecret}; use clap::Parser; use eyre::Context; -use reqwest::Url; +use humantime::parse_duration; use reth_cli_runner::CliContext; +use reth_engine_primitives::config::DEFAULT_PERSISTENCE_THRESHOLD; use reth_node_api::EngineApiMessageVersion; -use std::path::PathBuf; +use std::{ + path::PathBuf, + time::{Duration, Instant}, +}; use tracing::{debug, info}; +use url::Url; /// `reth bench replay-payloads` command /// @@ -51,6 +73,42 @@ pub struct Command { /// These are replayed before the main payloads to warm up the gas limit. #[arg(long, value_name = "GAS_RAMP_DIR")] gas_ramp_dir: Option, + + /// Optional output directory for benchmark results (CSV files). + #[arg(long, value_name = "OUTPUT")] + output: Option, + + /// How long to wait after a forkchoice update before sending the next payload. + #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, verbatim_doc_comment)] + wait_time: Option, + + /// Wait for blocks to be persisted before sending the next batch. + /// + /// When enabled, waits for every Nth block to be persisted using the + /// `reth_subscribePersistedBlock` subscription. This ensures the benchmark + /// doesn't outpace persistence. + /// + /// The subscription uses the regular RPC websocket endpoint (no JWT required). 
+ #[arg(long, default_value = "false", verbatim_doc_comment)] + wait_for_persistence: bool, + + /// Engine persistence threshold used for deciding when to wait for persistence. + /// + /// The benchmark waits after every `(threshold + 1)` blocks. By default this + /// matches the engine's `DEFAULT_PERSISTENCE_THRESHOLD` (2), so waits occur + /// at blocks 3, 6, 9, etc. + #[arg( + long = "persistence-threshold", + value_name = "PERSISTENCE_THRESHOLD", + default_value_t = DEFAULT_PERSISTENCE_THRESHOLD, + verbatim_doc_comment + )] + persistence_threshold: u64, + + /// Optional `WebSocket` RPC URL for persistence subscription. + /// If not provided, derives from engine RPC URL by changing scheme to ws and port to 8546. + #[arg(long, value_name = "WS_RPC_URL", verbatim_doc_comment)] + ws_rpc_url: Option, } /// A loaded payload ready for execution. @@ -78,6 +136,33 @@ impl Command { pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { info!(payload_dir = %self.payload_dir.display(), "Replaying payloads"); + // Log mode configuration + if let Some(duration) = self.wait_time { + info!("Using wait-time mode with {}ms delay between blocks", duration.as_millis()); + } + if self.wait_for_persistence { + info!( + "Persistence waiting enabled (waits after every {} blocks to match engine gap > {} behavior)", + self.persistence_threshold + 1, + self.persistence_threshold + ); + } + + // Set up waiter based on configured options (duration takes precedence) + let mut waiter = match (self.wait_time, self.wait_for_persistence) { + (Some(duration), _) => Some(PersistenceWaiter::with_duration(duration)), + (None, true) => { + let ws_url = self.derive_ws_rpc_url()?; + let sub = setup_persistence_subscription(ws_url).await?; + Some(PersistenceWaiter::with_subscription( + sub, + self.persistence_threshold, + PERSISTENCE_CHECKPOINT_TIMEOUT, + )) + } + (None, false) => None, + }; + // Set up authenticated engine provider let jwt = 
std::fs::read_to_string(&self.jwt_secret).wrap_err("Failed to read JWT secret file")?; @@ -144,6 +229,11 @@ impl Command { call_forkchoice_updated(&auth_provider, payload.version, fcu_state, None).await?; info!(gas_ramp_payload = i + 1, "Gas ramp payload executed successfully"); + + if let Some(w) = &mut waiter { + w.on_block(payload.block_number).await?; + } + parent_hash = payload.file.block_hash; } @@ -151,22 +241,112 @@ impl Command { info!(count = gas_ramp_payloads.len(), "All gas ramp payloads replayed"); } + let mut results = Vec::new(); + let total_benchmark_duration = Instant::now(); + for (i, payload) in payloads.iter().enumerate() { - info!( + let envelope = &payload.envelope; + let block_hash = payload.block_hash; + let execution_payload = &envelope.envelope_inner.execution_payload; + let inner_payload = &execution_payload.payload_inner.payload_inner; + + let gas_used = inner_payload.gas_used; + let gas_limit = inner_payload.gas_limit; + let block_number = inner_payload.block_number; + let transaction_count = + execution_payload.payload_inner.payload_inner.transactions.len() as u64; + + debug!( payload = i + 1, total = payloads.len(), index = payload.index, - block_hash = %payload.block_hash, + block_hash = %block_hash, "Executing payload (newPayload + FCU)" ); - self.execute_payload_v4(&auth_provider, &payload.envelope, parent_hash).await?; + let start = Instant::now(); + + debug!( + method = "engine_newPayloadV4", + block_hash = %block_hash, + "Sending newPayload" + ); + + let status = auth_provider + .new_payload_v4( + execution_payload.clone(), + vec![], + B256::ZERO, + envelope.execution_requests.to_vec(), + ) + .await?; + + let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; + + if !status.is_valid() { + return Err(eyre::eyre!("Payload rejected: {:?}", status)); + } + + let fcu_state = ForkchoiceState { + head_block_hash: block_hash, + safe_block_hash: parent_hash, + finalized_block_hash: parent_hash, + }; + + 
debug!(method = "engine_forkchoiceUpdatedV3", ?fcu_state, "Sending forkchoiceUpdated"); + + let fcu_result = auth_provider.fork_choice_updated_v3(fcu_state, None).await?; + + let total_latency = start.elapsed(); + let fcu_latency = total_latency - new_payload_result.latency; + + let combined_result = CombinedResult { + block_number, + gas_limit, + transaction_count, + new_payload_result, + fcu_latency, + total_latency, + }; - info!(payload = i + 1, "Payload executed successfully"); - parent_hash = payload.block_hash; + let current_duration = total_benchmark_duration.elapsed(); + info!(%combined_result); + + if let Some(w) = &mut waiter { + w.on_block(block_number).await?; + } + + let gas_row = + TotalGasRow { block_number, transaction_count, gas_used, time: current_duration }; + results.push((gas_row, combined_result)); + + debug!(?status, ?fcu_result, "Payload executed successfully"); + parent_hash = block_hash; } - info!(count = payloads.len(), "All payloads replayed successfully"); + // Drop waiter - we don't need to wait for final blocks to persist + // since the benchmark goal is measuring Ggas/s of newPayload/FCU, not persistence. 
+ drop(waiter); + + let (gas_output_results, combined_results): (Vec, Vec) = + results.into_iter().unzip(); + + if let Some(ref path) = self.output { + write_benchmark_results(path, &gas_output_results, &combined_results)?; + } + + let gas_output = + TotalGasOutput::with_combined_results(gas_output_results, &combined_results)?; + info!( + total_gas_used = gas_output.total_gas_used, + total_duration = ?gas_output.total_duration, + execution_duration = ?gas_output.execution_duration, + blocks_processed = gas_output.blocks_processed, + wall_clock_ggas_per_second = format_args!("{:.4}", gas_output.total_gigagas_per_second()), + execution_ggas_per_second = format_args!("{:.4}", gas_output.execution_gigagas_per_second()), + "Benchmark complete" + ); + Ok(()) } @@ -285,48 +465,32 @@ impl Command { Ok(payloads) } - async fn execute_payload_v4( - &self, - provider: &RootProvider, - envelope: &ExecutionPayloadEnvelopeV4, - parent_hash: B256, - ) -> eyre::Result<()> { - let block_hash = - envelope.envelope_inner.execution_payload.payload_inner.payload_inner.block_hash; - - debug!( - method = "engine_newPayloadV4", - block_hash = %block_hash, - "Sending newPayload" - ); - - let status = provider - .new_payload_v4( - envelope.envelope_inner.execution_payload.clone(), - vec![], - B256::ZERO, - envelope.execution_requests.to_vec(), - ) - .await?; - - info!(?status, "newPayloadV4 response"); - - if !status.is_valid() { - return Err(eyre::eyre!("Payload rejected: {:?}", status)); + /// Returns the websocket RPC URL used for the persistence subscription. + /// + /// Preference: + /// - If `--ws-rpc-url` is provided, use it directly. + /// - Otherwise, derive a WS RPC URL from `--engine-rpc-url`. + /// + /// The persistence subscription endpoint (`reth_subscribePersistedBlock`) is exposed on + /// the regular RPC server (WS port, usually 8546), not on the engine API port (usually 8551). 
+ /// Since we only have the engine URL by default, we convert the scheme + /// (http→ws, https→wss) and force the port to 8546. + fn derive_ws_rpc_url(&self) -> eyre::Result { + if let Some(ref ws_url) = self.ws_rpc_url { + let parsed: Url = ws_url + .parse() + .wrap_err_with(|| format!("Failed to parse WebSocket RPC URL: {ws_url}"))?; + info!(target: "reth-bench", ws_url = %parsed, "Using provided WebSocket RPC URL"); + Ok(parsed) + } else { + let derived = engine_url_to_ws_url(&self.engine_rpc_url)?; + debug!( + target: "reth-bench", + engine_url = %self.engine_rpc_url, + %derived, + "Derived WebSocket RPC URL from engine RPC URL" + ); + Ok(derived) } - - let fcu_state = ForkchoiceState { - head_block_hash: block_hash, - safe_block_hash: parent_hash, - finalized_block_hash: parent_hash, - }; - - debug!(method = "engine_forkchoiceUpdatedV3", ?fcu_state, "Sending forkchoiceUpdated"); - - let fcu_result = provider.fork_choice_updated_v3(fcu_state, None).await?; - - info!(?fcu_result, "forkchoiceUpdatedV3 response"); - - Ok(()) } } From f5ca71d2fb14d7473824311ef0831a8b9d9ada12 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 28 Jan 2026 20:49:15 +0100 Subject: [PATCH 262/267] chore(deps): cargo update (#21538) Co-authored-by: Amp --- Cargo.lock | 108 ++++++++---------- Cargo.toml | 10 +- examples/custom-beacon-withdrawals/Cargo.toml | 1 - .../custom-beacon-withdrawals/src/main.rs | 3 +- 4 files changed, 56 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f523fa4b816..16ebc60e887 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -186,9 +186,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "369f5707b958927176265e8a58627fc6195e5dfa5c55689396e68b241b3a72e6" +checksum = "14ff5ee5f27aa305bda825c735f686ad71bb65508158f059f513895abe69b8c3" dependencies = [ "alloy-json-abi", 
"alloy-primitives", @@ -340,9 +340,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84e3cf01219c966f95a460c95f1d4c30e12f6c18150c21a30b768af2a2a29142" +checksum = "8708475665cc00e081c085886e68eada2f64cfa08fc668213a9231655093d4de" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -437,9 +437,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6a0fb18dd5fb43ec5f0f6a20be1ce0287c79825827de5744afaa6c957737c33" +checksum = "3b88cf92ed20685979ed1d8472422f0c6c2d010cec77caf63aaa7669cc1a7bc2" dependencies = [ "alloy-rlp", "arbitrary", @@ -464,7 +464,6 @@ dependencies = [ "rustc-hash", "serde", "sha3", - "tiny-keccak", ] [[package]] @@ -794,9 +793,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09eb18ce0df92b4277291bbaa0ed70545d78b02948df756bbd3d6214bf39a218" +checksum = "f5fa1ca7e617c634d2bd9fa71f9ec8e47c07106e248b9fcbd3eaddc13cabd625" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -808,9 +807,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95d9fa2daf21f59aa546d549943f10b5cce1ae59986774019fbedae834ffe01b" +checksum = "27c00c0c3a75150a9dc7c8c679ca21853a137888b4e1c5569f92d7e2b15b5102" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -819,16 +818,16 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", + "sha3", "syn 2.0.114", "syn-solidity", - "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9396007fe69c26ee118a19f4dee1f5d1d6be186ea75b3881adf16d87f8444686" +checksum = "297db260eb4d67c105f68d6ba11b8874eec681caec5505eab8fbebee97f790bc" dependencies = [ "const-hex", "dunce", @@ -842,9 +841,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af67a0b0dcebe14244fc92002cd8d96ecbf65db4639d479f5fcd5805755a4c27" +checksum = "94b91b13181d3bcd23680fd29d7bc861d1f33fbe90fdd0af67162434aeba902d" dependencies = [ "serde", "winnow", @@ -852,9 +851,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09aeea64f09a7483bdcd4193634c7e5cf9fd7775ee767585270cd8ce2d69dc95" +checksum = "fc442cc2a75207b708d481314098a0f8b6f7b58e3148dd8d8cc7407b0d6f9385" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -2209,9 +2208,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +checksum = "3e34525d5bbbd55da2bb745d34b36121baac88d07619a9a09cfcf4a6c0832785" dependencies = [ "clap_builder", "clap_derive", @@ -2219,9 +2218,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +checksum = "59a20016a20a3da95bef50ec7238dbd09baeef4311dcdd38ec15aba69812fb61" dependencies = [ "anstream", "anstyle", @@ -2231,9 +2230,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.49" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +checksum = 
"a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck", "proc-macro2", @@ -3590,7 +3589,6 @@ version = "0.0.0" dependencies = [ "alloy-eips", "alloy-evm", - "alloy-sol-macro", "alloy-sol-types", "eyre", "reth-ethereum", @@ -4819,9 +4817,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.64" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -5489,9 +5487,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +checksum = "b646a74e746cd25045aa0fd42f4f7f78aa6d119380182c7e63a5593c4ab8df6f" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -5979,9 +5977,9 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.12" +version = "0.12.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3dec6bd31b08944e08b58fd99373893a6c17054d6f3ea5006cc894f4f4eee2a" +checksum = "b4ac832c50ced444ef6be0767a008b02c106a909ba79d1d830501e94b96f6b7e" dependencies = [ "crossbeam-channel", "crossbeam-epoch", @@ -6095,9 +6093,12 @@ dependencies = [ [[package]] name = "notify-types" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d" +checksum = "42b8cfee0e339a0337359f3c88165702ac6e600dc01c0cc9579a92d62b08477a" +dependencies = [ + "bitflags 2.10.0", +] [[package]] name = "ntapi" @@ -12201,9 +12202,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.4" +version = "0.1.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +checksum = "b31139435f327c93c6038ed350ae4588e2c70a13d50599509fee6349967ba35a" dependencies = [ "cc", "cfg-if", @@ -12315,9 +12316,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "sketches-ddsketch" @@ -12512,9 +12513,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f92d01b5de07eaf324f7fca61cc6bd3d82bbc1de5b6c963e6fe79e86f36580d" +checksum = "2379beea9476b89d0237078be761cf8e012d92d5ae4ae0c9a329f974838870fc" dependencies = [ "paste", "proc-macro2", @@ -12822,15 +12823,6 @@ dependencies = [ "time-core", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "tinystr" version = "0.8.2" @@ -13022,9 +13014,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +checksum = "a286e33f82f8a1ee2df63f4fa35c0becf4a85a0cb03091a15fd7bf0b402dc94a" dependencies = [ "async-trait", "base64 0.22.1", @@ -13048,9 +13040,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +checksum = "d6c55a2d6a14174563de34409c9f92ff981d006f56da9c6ecd40d9d4a31500b0" dependencies = [ "bytes", "prost 0.14.3", @@ -14437,18 +14429,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.33" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" +checksum = "fdea86ddd5568519879b8187e1cf04e24fce28f7fe046ceecbce472ff19a2572" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.33" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" +checksum = "0c15e1b46eff7c6c91195752e0eeed8ef040e391cdece7c25376957d5f15df22" dependencies = [ "proc-macro2", "quote", @@ -14532,9 +14524,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" [[package]] name = "zstd" diff --git a/Cargo.toml b/Cargo.toml index 9d774429bb7..19afd6917a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -484,15 +484,15 @@ op-revm = { version = "15.0.0", default-features = false } revm-inspectors = "0.34.1" # eth +alloy-dyn-abi = "1.5.4" +alloy-primitives = { version = "1.5.4", default-features = false, features = ["map-foldhash"] } +alloy-sol-types = { version = "1.5.4", default-features = false } + alloy-chains = { version = "0.2.5", default-features = false } -alloy-dyn-abi = "1.5.2" alloy-eip2124 = { version = "0.2.0", default-features = false } alloy-eip7928 = { version = "0.3.0", default-features = false } alloy-evm = { version = "0.27.0", default-features = false } -alloy-primitives = { version = "1.5.0", 
default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.5.0" -alloy-sol-types = { version = "1.5.0", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } alloy-hardforks = "0.4.5" @@ -733,7 +733,7 @@ snap = "1.1.1" socket2 = { version = "0.5", default-features = false } sysinfo = { version = "0.33", default-features = false } tracing-journald = "0.3" -tracing-logfmt = "0.3.3" +tracing-logfmt = "=0.3.5" tracing-samply = "0.1" tracing-subscriber = { version = "0.3", default-features = false } tracing-tracy = "0.11" diff --git a/examples/custom-beacon-withdrawals/Cargo.toml b/examples/custom-beacon-withdrawals/Cargo.toml index c36a5ee915a..bc484fa4542 100644 --- a/examples/custom-beacon-withdrawals/Cargo.toml +++ b/examples/custom-beacon-withdrawals/Cargo.toml @@ -9,7 +9,6 @@ license.workspace = true reth-ethereum = { workspace = true, features = ["node", "node-api", "evm", "cli"] } alloy-evm.workspace = true -alloy-sol-macro.workspace = true alloy-sol-types.workspace = true alloy-eips.workspace = true diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 4ba63885fa4..d6b67572167 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -11,8 +11,7 @@ use alloy_evm::{ revm::context::Block as _, EthEvm, EthEvmFactory, }; -use alloy_sol_macro::sol; -use alloy_sol_types::SolCall; +use alloy_sol_types::{sol, SolCall}; use reth_ethereum::{ chainspec::ChainSpec, cli::interface::Cli, From 2d9cf4c989f73f3c94de5b7336206731d9eadd3b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 28 Jan 2026 21:48:59 +0000 Subject: [PATCH 263/267] chore: fix unused warns in sparse trie (#21546) --- crates/trie/sparse/src/state.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) 
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 1b032b8dda5..654f1e4ddd3 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -984,9 +984,15 @@ where /// Will always return false in `no_std` builds. const fn is_prune_parallelism_enabled(num_tries: usize) -> bool { #[cfg(not(feature = "std"))] - return false; + { + let _ = num_tries; + return false; + } - num_tries >= Self::PARALLEL_PRUNE_THRESHOLD + #[cfg(feature = "std")] + { + num_tries >= Self::PARALLEL_PRUNE_THRESHOLD + } } /// Prunes the account trie and selected storage tries to reduce memory usage. From 8d37f76d23491a78f361d38649a70e01a9b9942c Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 28 Jan 2026 23:14:37 +0100 Subject: [PATCH 264/267] chore: move scripts from .github/assets to .github/scripts (#21539) Co-authored-by: Amp --- .github/{assets => scripts}/check_rv32imac.sh | 0 .github/{assets => scripts}/check_wasm.sh | 0 .github/{assets => scripts}/hive/Dockerfile | 0 .github/{assets => scripts}/hive/build_simulators.sh | 2 +- .../{assets => scripts}/hive/expected_failures.yaml | 0 .github/{assets => scripts}/hive/ignored_tests.yaml | 0 .github/{assets => scripts}/hive/load_images.sh | 0 .github/{assets => scripts}/hive/no_sim_build.diff | 0 .github/{assets => scripts}/hive/parse.py | 0 .github/{assets => scripts}/hive/run_simulator.sh | 0 .github/{assets => scripts}/install_geth.sh | 0 .github/{assets => scripts}/label_pr.js | 0 .github/workflows/hive.yml | 10 +++++----- .github/workflows/integration.yml | 2 +- .github/workflows/label-pr.yml | 2 +- .github/workflows/lint.yml | 4 ++-- .github/workflows/prepare-reth.yml | 2 +- 17 files changed, 11 insertions(+), 11 deletions(-) rename .github/{assets => scripts}/check_rv32imac.sh (100%) rename .github/{assets => scripts}/check_wasm.sh (100%) rename .github/{assets => scripts}/hive/Dockerfile (100%) rename .github/{assets => 
scripts}/hive/build_simulators.sh (97%) rename .github/{assets => scripts}/hive/expected_failures.yaml (100%) rename .github/{assets => scripts}/hive/ignored_tests.yaml (100%) rename .github/{assets => scripts}/hive/load_images.sh (100%) rename .github/{assets => scripts}/hive/no_sim_build.diff (100%) rename .github/{assets => scripts}/hive/parse.py (100%) rename .github/{assets => scripts}/hive/run_simulator.sh (100%) rename .github/{assets => scripts}/install_geth.sh (100%) rename .github/{assets => scripts}/label_pr.js (100%) diff --git a/.github/assets/check_rv32imac.sh b/.github/scripts/check_rv32imac.sh similarity index 100% rename from .github/assets/check_rv32imac.sh rename to .github/scripts/check_rv32imac.sh diff --git a/.github/assets/check_wasm.sh b/.github/scripts/check_wasm.sh similarity index 100% rename from .github/assets/check_wasm.sh rename to .github/scripts/check_wasm.sh diff --git a/.github/assets/hive/Dockerfile b/.github/scripts/hive/Dockerfile similarity index 100% rename from .github/assets/hive/Dockerfile rename to .github/scripts/hive/Dockerfile diff --git a/.github/assets/hive/build_simulators.sh b/.github/scripts/hive/build_simulators.sh similarity index 97% rename from .github/assets/hive/build_simulators.sh rename to .github/scripts/hive/build_simulators.sh index d65e609e700..81fed98e876 100755 --- a/.github/assets/hive/build_simulators.sh +++ b/.github/scripts/hive/build_simulators.sh @@ -38,6 +38,6 @@ for pid in "${saving_pids[@]}"; do done # Make sure we don't rebuild images on the CI jobs -git apply ../.github/assets/hive/no_sim_build.diff +git apply ../.github/scripts/hive/no_sim_build.diff go build . 
mv ./hive ../hive_assets/ diff --git a/.github/assets/hive/expected_failures.yaml b/.github/scripts/hive/expected_failures.yaml similarity index 100% rename from .github/assets/hive/expected_failures.yaml rename to .github/scripts/hive/expected_failures.yaml diff --git a/.github/assets/hive/ignored_tests.yaml b/.github/scripts/hive/ignored_tests.yaml similarity index 100% rename from .github/assets/hive/ignored_tests.yaml rename to .github/scripts/hive/ignored_tests.yaml diff --git a/.github/assets/hive/load_images.sh b/.github/scripts/hive/load_images.sh similarity index 100% rename from .github/assets/hive/load_images.sh rename to .github/scripts/hive/load_images.sh diff --git a/.github/assets/hive/no_sim_build.diff b/.github/scripts/hive/no_sim_build.diff similarity index 100% rename from .github/assets/hive/no_sim_build.diff rename to .github/scripts/hive/no_sim_build.diff diff --git a/.github/assets/hive/parse.py b/.github/scripts/hive/parse.py similarity index 100% rename from .github/assets/hive/parse.py rename to .github/scripts/hive/parse.py diff --git a/.github/assets/hive/run_simulator.sh b/.github/scripts/hive/run_simulator.sh similarity index 100% rename from .github/assets/hive/run_simulator.sh rename to .github/scripts/hive/run_simulator.sh diff --git a/.github/assets/install_geth.sh b/.github/scripts/install_geth.sh similarity index 100% rename from .github/assets/install_geth.sh rename to .github/scripts/install_geth.sh diff --git a/.github/assets/label_pr.js b/.github/scripts/label_pr.js similarity index 100% rename from .github/assets/label_pr.js rename to .github/scripts/label_pr.js diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index e94042d6b9b..16492194d93 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -58,11 +58,11 @@ jobs: uses: actions/cache@v5 with: path: ./hive_assets - key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/assets/hive/build_simulators.sh') }} + 
key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/scripts/hive/build_simulators.sh') }} - name: Build hive assets if: steps.cache-hive.outputs.cache-hit != 'true' - run: .github/assets/hive/build_simulators.sh + run: .github/scripts/hive/build_simulators.sh - name: Load cached Docker images if: steps.cache-hive.outputs.cache-hit == 'true' @@ -213,7 +213,7 @@ jobs: path: /tmp - name: Load Docker images - run: .github/assets/hive/load_images.sh + run: .github/scripts/hive/load_images.sh - name: Move hive binary run: | @@ -241,11 +241,11 @@ jobs: FILTER="/" fi echo "filter: $FILTER" - .github/assets/hive/run_simulator.sh "${{ matrix.scenario.sim }}" "$FILTER" + .github/scripts/hive/run_simulator.sh "${{ matrix.scenario.sim }}" "$FILTER" - name: Parse hive output run: | - find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml --ignored .github/assets/hive/ignored_tests.yaml + find hivetests/workspace/logs -type f -name "*.json" ! 
-name "hive.json" | xargs -I {} python .github/scripts/hive/parse.py {} --exclusion .github/scripts/hive/expected_failures.yaml --ignored .github/scripts/hive/ignored_tests.yaml - name: Print simulator output if: ${{ failure() }} diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 46f5670c72f..facd449f5a0 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -36,7 +36,7 @@ jobs: - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - name: Install Geth - run: .github/assets/install_geth.sh + run: .github/scripts/install_geth.sh - uses: taiki-e/install-action@nextest - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml index 7211f383a81..616e53295a4 100644 --- a/.github/workflows/label-pr.yml +++ b/.github/workflows/label-pr.yml @@ -19,5 +19,5 @@ jobs: uses: actions/github-script@v8 with: script: | - const label_pr = require('./.github/assets/label_pr.js') + const label_pr = require('./.github/scripts/label_pr.js') await label_pr({github, context}) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index bc2ab5bdded..80d2af3fce7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -76,7 +76,7 @@ jobs: - name: Run Wasm checks run: | sudo apt update && sudo apt install gcc-multilib - .github/assets/check_wasm.sh + .github/scripts/check_wasm.sh riscv: runs-on: depot-ubuntu-latest @@ -94,7 +94,7 @@ jobs: cache-on-failure: true - uses: dcarbone/install-jq-action@v3 - name: Run RISC-V checks - run: .github/assets/check_rv32imac.sh + run: .github/scripts/check_rv32imac.sh crate-checks: name: crate-checks (${{ matrix.partition }}/${{ matrix.total_partitions }}) diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-reth.yml index e738c72303b..ee0c2de81f1 100644 --- a/.github/workflows/prepare-reth.yml +++ 
b/.github/workflows/prepare-reth.yml @@ -43,7 +43,7 @@ jobs: uses: docker/build-push-action@v6 with: context: . - file: .github/assets/hive/Dockerfile + file: .github/scripts/hive/Dockerfile tags: ${{ inputs.image_tag }} outputs: type=docker,dest=./artifacts/reth_image.tar build-args: | From 1a98605ce667cdb3c198c0fc86bf12fb3f71bdf2 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 28 Jan 2026 14:41:42 -0800 Subject: [PATCH 265/267] chore(net): downgrade fork id mismatch log to trace (#21554) Co-authored-by: Amp --- crates/net/network/src/swarm.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 229d149a2f9..a1c41174001 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -20,7 +20,7 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use tracing::{debug, trace}; +use tracing::trace; #[cfg_attr(doc, aquamarine::aquamarine)] /// Contains the connectivity related state of the network. 
@@ -259,7 +259,7 @@ impl Swarm { if self.sessions.is_valid_fork_id(fork_id) { self.state_mut().peers_mut().set_discovered_fork_id(peer_id, fork_id); } else { - debug!(target: "net", ?peer_id, remote_fork_id=?fork_id, our_fork_id=?self.sessions.fork_id(), "fork id mismatch, removing peer"); + trace!(target: "net", ?peer_id, remote_fork_id=?fork_id, our_fork_id=?self.sessions.fork_id(), "fork id mismatch, removing peer"); self.state_mut().peers_mut().remove_peer(peer_id); } } From 2352158b3dc60cf8f7ce8522984fd57669d38b98 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jan 2026 01:35:19 +0100 Subject: [PATCH 266/267] fix(reth-bench): return error instead of panic on invalid payload (#21557) Co-authored-by: Amp --- bin/reth-bench/src/valid_payload.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index 3680211fdc0..2f4e89503d8 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -260,7 +260,9 @@ pub(crate) async fn call_new_payload>( while !status.is_valid() { if status.is_invalid() { error!(?status, ?params, "Invalid {method}",); - panic!("Invalid {method}: {status:?}"); + return Err(alloy_json_rpc::RpcError::LocalUsageError(Box::new(std::io::Error::other( + format!("Invalid {method}: {status:?}"), + )))) } if status.is_syncing() { return Err(alloy_json_rpc::RpcError::UnsupportedFeature( From 8eedf6abbab70040e18dc0fcc9c997f33774f8f3 Mon Sep 17 00:00:00 2001 From: Rez Date: Thu, 29 Jan 2026 14:28:18 +1100 Subject: [PATCH 267/267] avoid extra clone --- crates/optimism/flashblocks/src/cache.rs | 52 +++++++++++++++++------- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/crates/optimism/flashblocks/src/cache.rs b/crates/optimism/flashblocks/src/cache.rs index 99b2a317f02..5513d4e3cb0 100644 --- a/crates/optimism/flashblocks/src/cache.rs +++ b/crates/optimism/flashblocks/src/cache.rs @@ -145,21 +145,45 @@ impl 
SequenceManager

{ ) -> Option> { // Try to find a buildable sequence: (base, last_fb, transactions, cached_state, // source_name) - let (base, last_flashblock, transactions, cached_state, source_name) = + let (base, last_index, last_hash, last_state_root_zero, transactions, cached_state, source_name) = // Priority 1: Try current pending sequence - if let Some(base) = self.pending.payload_base().cloned().filter(|b| b.parent_hash() == local_tip_hash) { + if let Some(base) = self + .pending + .payload_base() + .cloned() + .filter(|b| b.parent_hash() == local_tip_hash) + { let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash(), r)); - let last_fb = self.pending.last_flashblock()?.clone(); + let last_fb = self.pending.last_flashblock()?; let transactions = self.pending_transactions.clone(); - (base, last_fb, transactions, cached_state, "pending") + ( + base, + last_fb.index(), + last_fb.diff().block_hash(), + last_fb.diff().state_root().is_zero(), + transactions, + cached_state, + "pending", + ) } // Priority 2: Try cached sequence with exact parent match - else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash() == local_tip_hash) { + else if let Some((cached, txs)) = self + .completed_cache + .iter() + .find(|(c, _)| c.payload_base().parent_hash() == local_tip_hash) + { let base = cached.payload_base().clone(); - let last_fb = cached.last().clone(); + let last_fb = cached.last(); let transactions = txs.clone(); - let cached_state = None; - (base, last_fb, transactions, cached_state, "cached") + ( + base, + last_fb.index(), + last_fb.diff().block_hash(), + last_fb.diff().state_root().is_zero(), + transactions, + None, + "cached", + ) } else { return None; }; @@ -195,17 +219,17 @@ impl SequenceManager

{ let block_time_ms = (base.timestamp() - local_tip_timestamp) * 1000; let expected_final_flashblock = block_time_ms / FLASHBLOCK_BLOCK_TIME; let compute_state_root = self.compute_state_root && - last_flashblock.diff().state_root().is_zero() && - last_flashblock.index() >= expected_final_flashblock.saturating_sub(1); + last_state_root_zero && + last_index >= expected_final_flashblock.saturating_sub(1); trace!( target: "flashblocks", block_number = base.block_number(), source = source_name, - flashblock_index = last_flashblock.index(), + flashblock_index = last_index, expected_final_flashblock, compute_state_root_enabled = self.compute_state_root, - state_root_is_zero = last_flashblock.diff().state_root().is_zero(), + state_root_is_zero = last_state_root_zero, will_compute_state_root = compute_state_root, "Building from flashblock sequence" ); @@ -214,8 +238,8 @@ impl SequenceManager

{ base, transactions, cached_state, - last_flashblock_index: last_flashblock.index(), - last_flashblock_hash: last_flashblock.diff().block_hash(), + last_flashblock_index: last_index, + last_flashblock_hash: last_hash, compute_state_root, }) }